From 975f66f2eebe9dadba04f275774d4ab83f74cf25 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sat, 13 Apr 2024 14:04:41 +0200
Subject: Adding upstream version 7.7.0+dfsg.

Signed-off-by: Daniel Baumann
---
 ansible_collections/amazon/aws/.coveragerc | 7 +
 ansible_collections/amazon/aws/.github/BOTMETA.yml | 110 +
 .../aws/.github/ISSUE_TEMPLATE/bug_report.yml | 161 +
 .../aws/.github/ISSUE_TEMPLATE/ci_report.yml | 76 +
 .../amazon/aws/.github/ISSUE_TEMPLATE/config.yml | 27 +
 .../ISSUE_TEMPLATE/documentation_report.yml | 130 +
 .../aws/.github/ISSUE_TEMPLATE/feature_request.yml | 74 +
 .../amazon/aws/.github/patchback.yml | 4 +
 .../amazon/aws/.github/settings.yml | 5 +
 .../amazon/aws/.github/workflows/docs-pr.yml | 73 +
 .../amazon/aws/.github/workflows/docs-push.yml | 39 +
 ansible_collections/amazon/aws/.gitignore | 390 +
 ansible_collections/amazon/aws/CHANGELOG.rst | 1013 +++
 ansible_collections/amazon/aws/CONTRIBUTING.md | 81 +
 ansible_collections/amazon/aws/COPYING | 675 ++
 ansible_collections/amazon/aws/FILES.json | 9028 ++++++++++++++++++++
 ansible_collections/amazon/aws/MANIFEST.json | 32 +
 ansible_collections/amazon/aws/PSF-license.txt | 48 +
 ansible_collections/amazon/aws/README.md | 131 +
 ansible_collections/amazon/aws/bindep.txt | 4 +
 .../amazon/aws/changelogs/changelog.yaml | 1734 ++++
 .../amazon/aws/changelogs/config.yaml | 29 +
 .../amazon/aws/changelogs/fragments/.keep | 0
 .../amazon/aws/docs/docsite/extra-docs.yml | 14 +
 .../amazon/aws/docs/docsite/links.yml | 41 +
 .../amazon/aws/docs/docsite/rst/CHANGELOG.rst | 1013 +++
 .../amazon/aws/docs/docsite/rst/aws_ec2_guide.rst | 590 ++
 .../amazon/aws/docs/docsite/rst/dev_guidelines.rst | 1050 +++
 .../amazon/aws/docs/docsite/rst/guide_aws.rst | 302 +
 ansible_collections/amazon/aws/meta/runtime.yml | 128 +
 .../amazon/aws/plugins/action/s3_object.py | 75 +
 .../aws/plugins/callback/aws_resource_actions.py | 71 +
 .../amazon/aws/plugins/doc_fragments/aws.py | 143 +
 .../aws/plugins/doc_fragments/aws_credentials.py | 45 +
 .../amazon/aws/plugins/doc_fragments/aws_region.py | 21 +
 .../amazon/aws/plugins/doc_fragments/boto3.py | 19 +
 .../amazon/aws/plugins/doc_fragments/ec2.py | 30 +
 .../amazon/aws/plugins/doc_fragments/tags.py | 62 +
 .../amazon/aws/plugins/inventory/aws_ec2.py | 926 ++
 .../amazon/aws/plugins/inventory/aws_rds.py | 403 +
 .../aws/plugins/lookup/aws_account_attribute.py | 136 +
 .../amazon/aws/plugins/lookup/aws_secret.py | 295 +
 .../aws/plugins/lookup/aws_service_ip_ranges.py | 90 +
 .../amazon/aws/plugins/lookup/aws_ssm.py | 286 +
 .../amazon/aws/plugins/module_utils/_version.py | 344 +
 .../amazon/aws/plugins/module_utils/acm.py | 222 +
 .../amazon/aws/plugins/module_utils/arn.py | 69 +
 .../amazon/aws/plugins/module_utils/batch.py | 58 +
 .../amazon/aws/plugins/module_utils/botocore.py | 394 +
 .../amazon/aws/plugins/module_utils/cloud.py | 213 +
 .../aws/plugins/module_utils/cloudfront_facts.py | 229 +
 .../amazon/aws/plugins/module_utils/core.py | 77 +
 .../aws/plugins/module_utils/direct_connect.py | 89 +
 .../amazon/aws/plugins/module_utils/ec2.py | 310 +
 .../amazon/aws/plugins/module_utils/elb_utils.py | 109 +
 .../amazon/aws/plugins/module_utils/elbv2.py | 1092 +++
 .../amazon/aws/plugins/module_utils/iam.py | 75 +
 .../amazon/aws/plugins/module_utils/modules.py | 447 +
 .../amazon/aws/plugins/module_utils/policy.py | 179 +
 .../amazon/aws/plugins/module_utils/rds.py | 390 +
 .../amazon/aws/plugins/module_utils/retries.py | 78 +
 .../amazon/aws/plugins/module_utils/route53.py | 64 +
 .../amazon/aws/plugins/module_utils/s3.py | 102 +
 .../amazon/aws/plugins/module_utils/tagging.py | 181 +
 .../amazon/aws/plugins/module_utils/tower.py | 83 +
 .../aws/plugins/module_utils/transformation.py | 140 +
 .../amazon/aws/plugins/module_utils/urls.py | 238 +
 .../amazon/aws/plugins/module_utils/version.py | 18 +
 .../amazon/aws/plugins/module_utils/waf.py | 224 +
 .../amazon/aws/plugins/module_utils/waiters.py | 1265 +++
 .../aws/plugins/modules/autoscaling_group.py | 1966 +++++
 .../aws/plugins/modules/autoscaling_group_info.py | 473 +
 .../amazon/aws/plugins/modules/aws_az_info.py | 186 +
 .../amazon/aws/plugins/modules/aws_caller_info.py | 108 +
 .../amazon/aws/plugins/modules/cloudformation.py | 794 ++
 .../aws/plugins/modules/cloudformation_info.py | 461 +
 .../amazon/aws/plugins/modules/cloudtrail.py | 642 ++
 .../amazon/aws/plugins/modules/cloudtrail_info.py | 238 +
 .../aws/plugins/modules/cloudwatch_metric_alarm.py | 499 ++
 .../modules/cloudwatch_metric_alarm_info.py | 323 +
 .../aws/plugins/modules/cloudwatchevent_rule.py | 517 ++
 .../plugins/modules/cloudwatchlogs_log_group.py | 351 +
 .../modules/cloudwatchlogs_log_group_info.py | 139 +
 .../cloudwatchlogs_log_group_metric_filter.py | 218 +
 .../amazon/aws/plugins/modules/ec2_ami.py | 820 ++
 .../amazon/aws/plugins/modules/ec2_ami_info.py | 287 +
 .../amazon/aws/plugins/modules/ec2_eip.py | 666 ++
 .../amazon/aws/plugins/modules/ec2_eip_info.py | 147 +
 .../amazon/aws/plugins/modules/ec2_eni.py | 876 ++
 .../amazon/aws/plugins/modules/ec2_eni_info.py | 299 +
 .../amazon/aws/plugins/modules/ec2_instance.py | 2170 +++++
 .../aws/plugins/modules/ec2_instance_info.py | 588 ++
 .../amazon/aws/plugins/modules/ec2_key.py | 397 +
 .../aws/plugins/modules/ec2_metadata_facts.py | 629 ++
 .../aws/plugins/modules/ec2_security_group.py | 1483 ++++
 .../aws/plugins/modules/ec2_security_group_info.py | 305 +
 .../amazon/aws/plugins/modules/ec2_snapshot.py | 421 +
 .../aws/plugins/modules/ec2_snapshot_info.py | 295 +
 .../aws/plugins/modules/ec2_spot_instance.py | 650 +
 .../aws/plugins/modules/ec2_spot_instance_info.py | 301 +
 .../amazon/aws/plugins/modules/ec2_tag.py | 167 +
 .../amazon/aws/plugins/modules/ec2_tag_info.py | 73 +
 .../amazon/aws/plugins/modules/ec2_vol.py | 862 ++
 .../amazon/aws/plugins/modules/ec2_vol_info.py | 213 +
 .../aws/plugins/modules/ec2_vpc_dhcp_option.py | 537 ++
 .../plugins/modules/ec2_vpc_dhcp_option_info.py | 216 +
 .../amazon/aws/plugins/modules/ec2_vpc_endpoint.py | 482 ++
 .../aws/plugins/modules/ec2_vpc_endpoint_info.py | 298 +
 .../modules/ec2_vpc_endpoint_service_info.py | 180 +
 .../amazon/aws/plugins/modules/ec2_vpc_igw.py | 266 +
 .../amazon/aws/plugins/modules/ec2_vpc_igw_info.py | 177 +
 .../aws/plugins/modules/ec2_vpc_nat_gateway.py | 967 +++
 .../plugins/modules/ec2_vpc_nat_gateway_info.py | 217 +
 .../amazon/aws/plugins/modules/ec2_vpc_net.py | 720 ++
 .../amazon/aws/plugins/modules/ec2_vpc_net_info.py | 271 +
 .../aws/plugins/modules/ec2_vpc_route_table.py | 843 ++
 .../plugins/modules/ec2_vpc_route_table_info.py | 279 +
 .../amazon/aws/plugins/modules/ec2_vpc_subnet.py | 574 ++
 .../aws/plugins/modules/ec2_vpc_subnet_info.py | 225 +
 .../aws/plugins/modules/elb_application_lb.py | 835 ++
 .../aws/plugins/modules/elb_application_lb_info.py | 343 +
 .../amazon/aws/plugins/modules/elb_classic_lb.py | 2147 +++++
 .../amazon/aws/plugins/modules/iam_policy.py | 351 +
 .../amazon/aws/plugins/modules/iam_policy_info.py | 209 +
 .../amazon/aws/plugins/modules/iam_user.py | 581 ++
 .../amazon/aws/plugins/modules/iam_user_info.py | 199 +
 .../amazon/aws/plugins/modules/kms_key.py | 1079 +++
 .../amazon/aws/plugins/modules/kms_key_info.py | 531 ++
 .../amazon/aws/plugins/modules/lambda.py | 911 ++
 .../amazon/aws/plugins/modules/lambda_alias.py | 331 +
 .../amazon/aws/plugins/modules/lambda_event.py | 457 +
 .../amazon/aws/plugins/modules/lambda_execute.py | 285 +
 .../amazon/aws/plugins/modules/lambda_info.py | 545 ++
 .../amazon/aws/plugins/modules/lambda_layer.py | 368 +
 .../aws/plugins/modules/lambda_layer_info.py | 221 +
 .../amazon/aws/plugins/modules/lambda_policy.py | 426 +
 .../amazon/aws/plugins/modules/rds_cluster.py | 1143 +++
 .../amazon/aws/plugins/modules/rds_cluster_info.py | 309 +
 .../aws/plugins/modules/rds_cluster_snapshot.py | 374 +
 .../amazon/aws/plugins/modules/rds_instance.py | 1481 ++++
 .../aws/plugins/modules/rds_instance_info.py | 424 +
 .../aws/plugins/modules/rds_instance_snapshot.py | 386 +
 .../amazon/aws/plugins/modules/rds_option_group.py | 667 ++
 .../aws/plugins/modules/rds_option_group_info.py | 327 +
 .../amazon/aws/plugins/modules/rds_param_group.py | 341 +
 .../aws/plugins/modules/rds_snapshot_info.py | 389 +
 .../amazon/aws/plugins/modules/rds_subnet_group.py | 374 +
 .../amazon/aws/plugins/modules/route53.py | 797 ++
 .../aws/plugins/modules/route53_health_check.py | 672 ++
 .../amazon/aws/plugins/modules/route53_info.py | 874 ++
 .../amazon/aws/plugins/modules/route53_zone.py | 556 ++
 .../amazon/aws/plugins/modules/s3_bucket.py | 1247 +++
 .../amazon/aws/plugins/modules/s3_object.py | 1287 +++
 .../amazon/aws/plugins/modules/s3_object_info.py | 818 ++
 ansible_collections/amazon/aws/requirements.txt | 6 +
 .../amazon/aws/test-requirements.txt | 17 +
 ansible_collections/amazon/aws/tests/.gitignore | 3 +
 ansible_collections/amazon/aws/tests/config.yml | 2 +
 .../amazon/aws/tests/integration/constraints.txt | 7 +
 .../amazon/aws/tests/integration/inventory | 2 +
 .../amazon/aws/tests/integration/requirements.txt | 11 +
 .../amazon/aws/tests/integration/requirements.yml | 4 +
 .../integration/targets/autoscaling_group/aliases | 7 +
 .../targets/autoscaling_group/inventory | 8 +
 .../integration/targets/autoscaling_group/main.yml | 35 +
 .../targets/autoscaling_group/meta/main.yml | 2 +
 .../roles/ec2_asg/defaults/main.yml | 2 +
 .../roles/ec2_asg/tasks/create_update_delete.yml | 593 ++
 .../roles/ec2_asg/tasks/env_cleanup.yml | 116 +
 .../roles/ec2_asg/tasks/env_setup.yml | 51 +
 .../roles/ec2_asg/tasks/instance_detach.yml | 256 +
 .../autoscaling_group/roles/ec2_asg/tasks/main.yml | 40 +
 .../roles/ec2_asg/tasks/tag_operations.yml | 339 +
 .../integration/targets/autoscaling_group/runme.sh | 12 +
 .../tests/integration/targets/aws_az_info/aliases | 1 +
 .../tests/integration/targets/aws_az_info/main.yml | 5 +
 .../integration/targets/aws_az_info/meta/main.yml | 1 +
 .../integration/targets/aws_az_info/tasks/main.yml | 193 +
 .../integration/targets/aws_caller_info/aliases | 1 +
 .../targets/aws_caller_info/meta/main.yml | 1 +
 .../targets/aws_caller_info/tasks/main.yaml | 18 +
 .../targets/callback_aws_resource_actions/aliases | 1 +
 .../callback_aws_resource_actions/inventory | 6 +
 .../targets/callback_aws_resource_actions/main.yml | 35 +
 .../callback_aws_resource_actions/meta/main.yml | 1 +
 .../targets/callback_aws_resource_actions/runme.sh | 19 +
 .../integration/targets/cloudformation/aliases | 2 +
 .../targets/cloudformation/defaults/main.yml | 8 +
 .../targets/cloudformation/files/cf_template.json | 37 +
 .../cloudformation/files/update_policy.json | 10 +
 .../targets/cloudformation/meta/main.yml | 2 +
 .../targets/cloudformation/tasks/main.yml | 491 ++
 .../tests/integration/targets/cloudtrail/aliases | 2 +
 .../targets/cloudtrail/defaults/main.yml | 8 +
 .../tests/integration/targets/cloudtrail/main.yml | 6 +
 .../integration/targets/cloudtrail/meta/main.yml | 1 +
 .../tests/integration/targets/cloudtrail/runme.sh | 27 +
 .../integration/targets/cloudtrail/tasks/main.yml | 1595 ++++
 .../targets/cloudtrail/tasks/tagging.yml | 252 +
 .../templates/cloudtrail-no-kms-assume-policy.j2 | 11 +
 .../templates/cloudtrail-no-kms-policy.j2 | 11 +
 .../templates/cloudwatch-assume-policy.j2 | 13 +
 .../cloudtrail/templates/cloudwatch-policy.j2 | 17 +
 .../targets/cloudtrail/templates/kms-policy.j2 | 34 +
 .../targets/cloudtrail/templates/s3-policy.j2 | 34 +
 .../targets/cloudtrail/templates/sns-policy.j2 | 34 +
 .../targets/cloudwatch_metric_alarm/aliases | 1 +
 .../cloudwatch_metric_alarm/defaults/main.yml | 4 +
 .../targets/cloudwatch_metric_alarm/meta/main.yml | 2 +
 .../cloudwatch_metric_alarm/tasks/env_cleanup.yml | 94 +
 .../cloudwatch_metric_alarm/tasks/env_setup.yml | 62 +
 .../targets/cloudwatch_metric_alarm/tasks/main.yml | 518 ++
 .../targets/cloudwatchevent_rule/aliases | 1 +
 .../targets/cloudwatchevent_rule/defaults/main.yml | 9 +
 .../targets/cloudwatchevent_rule/tasks/main.yml | 96 +
 .../integration/targets/cloudwatchlogs/aliases | 5 +
 .../targets/cloudwatchlogs/defaults/main.yml | 2 +
 .../targets/cloudwatchlogs/meta/main.yml | 1 +
 .../cloudwatchlogs/tasks/cloudwatchlogs_tests.yml | 151 +
 .../cloudwatchlogs/tasks/create-delete-tags.yml | 444 +
 .../targets/cloudwatchlogs/tasks/main.yml | 16 +
 .../aws/tests/integration/targets/ec2_ami/aliases | 5 +
 .../integration/targets/ec2_ami/defaults/main.yml | 11 +
 .../integration/targets/ec2_ami/meta/main.yml | 5 +
 .../integration/targets/ec2_ami/tasks/main.yml | 786 ++
 .../integration/targets/ec2_ami/vars/main.yml | 20 +
 .../aws/tests/integration/targets/ec2_eip/aliases | 5 +
 .../integration/targets/ec2_eip/defaults/main.yml | 5 +
 .../integration/targets/ec2_eip/meta/main.yml | 2 +
 .../integration/targets/ec2_eip/tasks/main.yml | 1442 ++++
 .../aws/tests/integration/targets/ec2_eni/aliases | 3 +
 .../integration/targets/ec2_eni/defaults/main.yml | 16 +
 .../integration/targets/ec2_eni/meta/main.yml | 2 +
 .../integration/targets/ec2_eni/tasks/main.yaml | 159 +
 .../targets/ec2_eni/tasks/test_attachment.yaml | 278 +
 .../tasks/test_create_attached_multiple.yml | 121 +
 .../targets/ec2_eni/tasks/test_deletion.yaml | 118 +
 .../ec2_eni/tasks/test_eni_basic_creation.yaml | 263 +
 .../ec2_eni/tasks/test_ipaddress_assign.yaml | 325 +
 .../test_modifying_delete_on_termination.yaml | 214 +
 .../tasks/test_modifying_source_dest_check.yaml | 98 +
 .../targets/ec2_eni/tasks/test_modifying_tags.yaml | 251 +
 .../targets/ec2_instance_block_devices/aliases | 6 +
 .../ec2_instance_block_devices/defaults/main.yml | 4 +
 .../ec2_instance_block_devices/meta/main.yml | 6 +
 .../ec2_instance_block_devices/tasks/main.yml | 110 +
 .../targets/ec2_instance_checkmode_tests/aliases | 6 +
 .../ec2_instance_checkmode_tests/defaults/main.yml | 4 +
 .../ec2_instance_checkmode_tests/meta/main.yml | 6 +
 .../ec2_instance_checkmode_tests/tasks/main.yml | 208 +
 .../targets/ec2_instance_cpu_options/aliases | 6 +
 .../ec2_instance_cpu_options/defaults/main.yml | 4 +
 .../targets/ec2_instance_cpu_options/meta/main.yml | 6 +
 .../ec2_instance_cpu_options/tasks/main.yml | 85 +
 .../targets/ec2_instance_default_vpc_tests/aliases | 6 +
 .../defaults/main.yml | 4 +
 .../ec2_instance_default_vpc_tests/meta/main.yml | 6 +
 .../ec2_instance_default_vpc_tests/tasks/main.yml | 63 +
 .../targets/ec2_instance_ebs_optimized/aliases | 6 +
 .../ec2_instance_ebs_optimized/defaults/main.yml | 4 +
 .../ec2_instance_ebs_optimized/meta/main.yml | 6 +
 .../ec2_instance_ebs_optimized/tasks/main.yml | 31 +
 .../ec2_instance_external_resource_attach/aliases | 6 +
 .../defaults/main.yml | 4 +
 .../meta/main.yml | 6 +
 .../tasks/main.yml | 161 +
 .../ec2_instance_hibernation_options/aliases | 6 +
 .../defaults/main.yml | 4 +
 .../ec2_instance_hibernation_options/meta/main.yml | 9 +
 .../tasks/main.yml | 145 +
 .../targets/ec2_instance_iam_instance_role/aliases | 6 +
 .../defaults/main.yml | 7 +
 .../files/assume-role-policy.json | 13 +
 .../ec2_instance_iam_instance_role/meta/main.yml | 6 +
 .../ec2_instance_iam_instance_role/tasks/main.yml | 131 +
 .../targets/ec2_instance_instance_minimal/aliases | 6 +
 .../defaults/main.yml | 4 +
 .../ec2_instance_instance_minimal/meta/main.yml | 6 +
 .../ec2_instance_instance_minimal/tasks/main.yml | 699 ++
 .../targets/ec2_instance_instance_multiple/aliases | 6 +
 .../defaults/main.yml | 4 +
 .../ec2_instance_instance_multiple/meta/main.yml | 6 +
 .../ec2_instance_instance_multiple/tasks/main.yml | 443 +
 .../targets/ec2_instance_instance_no_wait/aliases | 6 +
 .../defaults/main.yml | 4 +
 .../ec2_instance_instance_no_wait/meta/main.yml | 6 +
 .../ec2_instance_instance_no_wait/tasks/main.yml | 58 +
 .../targets/ec2_instance_metadata_options/aliases | 6 +
 .../defaults/main.yml | 4 +
 .../ec2_instance_metadata_options/meta/main.yml | 9 +
 .../ec2_instance_metadata_options/tasks/main.yml | 98 +
 .../targets/ec2_instance_security_group/aliases | 6 +
 .../ec2_instance_security_group/defaults/main.yml | 4 +
 .../ec2_instance_security_group/meta/main.yml | 6 +
 .../ec2_instance_security_group/tasks/main.yml | 87 +
 .../ec2_instance_state_config_updates/aliases | 6 +
 .../defaults/main.yml | 4 +
 .../meta/main.yml | 6 +
 .../tasks/main.yml | 133 +
 .../ec2_instance_tags_and_vpc_settings/aliases | 6 +
 .../defaults/main.yml | 4 +
 .../meta/main.yml | 6 +
 .../tasks/main.yml | 179 +
 .../ec2_instance_termination_protection/aliases | 6 +
 .../defaults/main.yml | 4 +
 .../meta/main.yml | 6 +
 .../tasks/main.yml | 250 +
 .../targets/ec2_instance_uptime/aliases | 6 +
 .../targets/ec2_instance_uptime/defaults/main.yml | 4 +
 .../targets/ec2_instance_uptime/meta/main.yml | 6 +
 .../targets/ec2_instance_uptime/tasks/main.yml | 63 +
 .../aws/tests/integration/targets/ec2_key/aliases | 5 +
 .../integration/targets/ec2_key/defaults/main.yml | 3 +
 .../integration/targets/ec2_key/meta/main.yml | 5 +
 .../integration/targets/ec2_key/tasks/main.yml | 461 +
 .../integration/targets/ec2_metadata_facts/aliases | 5 +
 .../targets/ec2_metadata_facts/meta/main.yml | 7 +
 .../targets/ec2_metadata_facts/playbooks/setup.yml | 182 +
 .../ec2_metadata_facts/playbooks/teardown.yml | 84 +
 .../ec2_metadata_facts/playbooks/test_metadata.yml | 18 +
 .../targets/ec2_metadata_facts/runme.sh | 22 +
 .../ec2_metadata_facts/templates/inventory.j2 | 34 +
 .../integration/targets/ec2_security_group/aliases | 6 +
 .../targets/ec2_security_group/defaults/main.yml | 7 +
 .../targets/ec2_security_group/meta/main.yml | 1 +
 .../ec2_security_group/tasks/data_validation.yml | 33 +
 .../targets/ec2_security_group/tasks/diff_mode.yml | 167 +
 .../ec2_security_group/tasks/egress_tests.yml | 177 +
 .../ec2_security_group/tasks/group_info.yml | 96 +
 .../ec2_security_group/tasks/icmp_verbs.yml | 221 +
 .../tasks/ipv6_default_tests.yml | 90 +
 .../targets/ec2_security_group/tasks/main.yml | 1368 +++
 .../ec2_security_group/tasks/multi_account.yml | 124 +
 .../tasks/multi_nested_target.yml | 213 +
 .../ec2_security_group/tasks/numeric_protos.yml | 60 +
 .../ec2_security_group/tasks/rule_group_create.yml | 127 +
 .../tests/integration/targets/ec2_snapshot/aliases | 10 +
 .../targets/ec2_snapshot/defaults/main.yml | 2 +
 .../integration/targets/ec2_snapshot/meta/main.yml | 2 +
 .../targets/ec2_snapshot/tasks/main.yml | 400 +
 .../integration/targets/ec2_spot_instance/aliases | 2 +
 .../targets/ec2_spot_instance/defaults/main.yml | 14 +
 .../targets/ec2_spot_instance/meta/main.yml | 2 +
 .../targets/ec2_spot_instance/tasks/main.yaml | 315 +
 .../tasks/terminate_associated_instances.yml | 109 +
 .../aws/tests/integration/targets/ec2_tag/aliases | 2 +
 .../integration/targets/ec2_tag/defaults/main.yml | 2 +
 .../integration/targets/ec2_tag/meta/main.yml | 1 +
 .../integration/targets/ec2_tag/tasks/main.yml | 136 +
 .../integration/targets/ec2_tag/vars/main.yml | 2 +
 .../aws/tests/integration/targets/ec2_vol/aliases | 4 +
 .../integration/targets/ec2_vol/defaults/main.yml | 8 +
 .../integration/targets/ec2_vol/meta/main.yml | 2 +
 .../integration/targets/ec2_vol/tasks/main.yml | 1002 +++
 .../targets/ec2_vpc_dhcp_option/aliases | 1 +
 .../targets/ec2_vpc_dhcp_option/defaults/main.yml | 5 +
 .../targets/ec2_vpc_dhcp_option/meta/main.yml | 1 +
 .../targets/ec2_vpc_dhcp_option/tasks/main.yml | 948 ++
 .../integration/targets/ec2_vpc_endpoint/aliases | 3 +
 .../targets/ec2_vpc_endpoint/defaults/main.yml | 7 +
 .../targets/ec2_vpc_endpoint/meta/main.yml | 1 +
 .../targets/ec2_vpc_endpoint/tasks/main.yml | 862 ++
 .../targets/ec2_vpc_endpoint_service_info/aliases | 2 +
 .../defaults/main.yml | 3 +
 .../ec2_vpc_endpoint_service_info/meta/main.yml | 1 +
 .../ec2_vpc_endpoint_service_info/tasks/main.yml | 135 +
 .../tests/integration/targets/ec2_vpc_igw/aliases | 3 +
 .../targets/ec2_vpc_igw/defaults/main.yml | 3 +
 .../integration/targets/ec2_vpc_igw/meta/main.yml | 1 +
 .../integration/targets/ec2_vpc_igw/tasks/main.yml | 550 ++
 .../targets/ec2_vpc_nat_gateway/aliases | 5 +
 .../targets/ec2_vpc_nat_gateway/defaults/main.yml | 4 +
 .../targets/ec2_vpc_nat_gateway/meta/main.yml | 1 +
 .../targets/ec2_vpc_nat_gateway/tasks/main.yml | 978 +++
 .../tests/integration/targets/ec2_vpc_net/aliases | 2 +
 .../targets/ec2_vpc_net/defaults/main.yml | 8 +
 .../integration/targets/ec2_vpc_net/meta/main.yml | 1 +
 .../integration/targets/ec2_vpc_net/tasks/main.yml | 1560 ++++
 .../targets/ec2_vpc_route_table/aliases | 5 +
 .../targets/ec2_vpc_route_table/defaults/main.yml | 4 +
 .../targets/ec2_vpc_route_table/meta/main.yml | 2 +
 .../targets/ec2_vpc_route_table/tasks/main.yml | 1499 ++++
 .../integration/targets/ec2_vpc_subnet/aliases | 2 +
 .../targets/ec2_vpc_subnet/defaults/main.yml | 9 +
 .../targets/ec2_vpc_subnet/meta/main.yml | 2 +
 .../targets/ec2_vpc_subnet/tasks/main.yml | 683 ++
 .../integration/targets/elb_application_lb/aliases | 3 +
 .../targets/elb_application_lb/defaults/main.yml | 28 +
 .../targets/elb_application_lb/tasks/main.yml | 1558 ++++
 .../elb_application_lb/templates/policy.json | 13 +
 .../integration/targets/elb_classic_lb/aliases | 4 +
 .../targets/elb_classic_lb/defaults/main.yml | 170 +
 .../targets/elb_classic_lb/meta/main.yml | 3 +
 .../elb_classic_lb/tasks/basic_internal.yml | 292 +
 .../targets/elb_classic_lb/tasks/basic_public.yml | 273 +
 .../elb_classic_lb/tasks/cleanup_instances.yml | 9 +
 .../targets/elb_classic_lb/tasks/cleanup_s3.yml | 32 +
 .../targets/elb_classic_lb/tasks/cleanup_vpc.yml | 29 +
 .../elb_classic_lb/tasks/complex_changes.yml | 330 +
 .../elb_classic_lb/tasks/describe_region.yml | 10 +
 .../elb_classic_lb/tasks/https_listeners.yml | 132 +
 .../targets/elb_classic_lb/tasks/main.yml | 58 +
 .../elb_classic_lb/tasks/missing_params.yml | 203 +
 .../targets/elb_classic_lb/tasks/schema_change.yml | 189 +
 .../elb_classic_lb/tasks/setup_instances.yml | 25 +
 .../targets/elb_classic_lb/tasks/setup_s3.yml | 26 +
 .../targets/elb_classic_lb/tasks/setup_vpc.yml | 103 +
 .../elb_classic_lb/tasks/simple_changes.yml | 79 +
 .../elb_classic_lb/tasks/simple_cross_az.yml | 100 +
 .../tasks/simple_draining_timeout.yml | 148 +
 .../elb_classic_lb/tasks/simple_healthcheck.yml | 116 +
 .../elb_classic_lb/tasks/simple_idle_timeout.yml | 50 +
 .../elb_classic_lb/tasks/simple_instances.yml | 415 +
 .../elb_classic_lb/tasks/simple_listeners.yml | 196 +
 .../elb_classic_lb/tasks/simple_logging.yml | 587 ++
 .../elb_classic_lb/tasks/simple_proxy_policy.yml | 141 +
 .../elb_classic_lb/tasks/simple_securitygroups.yml | 106 +
 .../elb_classic_lb/tasks/simple_stickiness.yml | 390 +
 .../targets/elb_classic_lb/tasks/simple_tags.yml | 141 +
 .../targets/elb_classic_lb/templates/s3_policy.j2 | 15 +
 .../targets/elb_classic_lb/vars/main.yml | 2 +
 .../tests/integration/targets/iam_policy/aliases | 8 +
 .../targets/iam_policy/defaults/main.yml | 5 +
 .../targets/iam_policy/files/no_access.json | 10 +
 .../iam_policy/files/no_access_with_id.json | 11 +
 .../iam_policy/files/no_access_with_second_id.json | 11 +
 .../targets/iam_policy/files/no_trust.json | 10 +
 .../integration/targets/iam_policy/meta/main.yml | 1 +
 .../integration/targets/iam_policy/tasks/main.yml | 70 +
 .../targets/iam_policy/tasks/object.yml | 1169 +++
 .../aws/tests/integration/targets/iam_user/aliases | 9 +
 .../integration/targets/iam_user/defaults/main.yml | 10 +
 .../integration/targets/iam_user/meta/main.yml | 1 +
 .../integration/targets/iam_user/tasks/main.yml | 798 ++
 .../integration/targets/inventory_aws_ec2/aliases | 3 +
 .../targets/inventory_aws_ec2/meta/main.yml | 1 +
 .../playbooks/create_environment_script.yml | 9 +
 .../playbooks/create_inventory_config.yml | 11 +
 .../playbooks/empty_inventory_config.yml | 9 +
 .../inventory_aws_ec2/playbooks/populate_cache.yml | 55 +
 .../targets/inventory_aws_ec2/playbooks/setup.yml | 52 +
 .../inventory_aws_ec2/playbooks/tear_down.yml | 31 +
 .../test_invalid_aws_ec2_inventory_config.yml | 9 +
 .../playbooks/test_inventory_cache.yml | 18 +
 .../playbooks/test_populating_inventory.yml | 78 +
 ...est_populating_inventory_with_concatenation.yml | 56 +
 .../test_populating_inventory_with_constructed.yml | 69 +
 ...ulating_inventory_with_hostnames_using_tags.yml | 62 +
 ...inventory_with_hostnames_using_tags_classic.yml | 62 +
 ...ating_inventory_with_hostvars_prefix_suffix.yml | 65 +
 ...g_inventory_with_include_or_exclude_filters.yml | 103 +
 ...st_populating_inventory_with_literal_string.yml | 56 +
 ...ting_inventory_with_use_contrib_script_keys.yml | 57 +
 .../playbooks/test_refresh_inventory.yml | 61 +
 .../integration/targets/inventory_aws_ec2/runme.sh | 67 +
 .../inventory_aws_ec2/templates/inventory.yml.j2 | 14 +
 .../templates/inventory_with_cache.yml.j2 | 14 +
 .../templates/inventory_with_concatenation.yml.j2 | 15 +
 .../templates/inventory_with_constructed.yml.j2 | 22 +
 .../inventory_with_hostnames_using_tags.yml.j2 | 21 +
 ...entory_with_hostnames_using_tags_classic.yml.j2 | 21 +
 .../inventory_with_hostvars_prefix_suffix.yml.j2 | 19 +
 ...nventory_with_include_or_exclude_filters.yml.j2 | 23 +
 .../templates/inventory_with_literal_string.yml.j2 | 15 +
 .../templates/inventory_with_template.yml.j2 | 14 +
 .../inventory_with_use_contrib_script_keys.yml.j2 | 15 +
 .../targets/inventory_aws_ec2/test.aws_ec2.yml | 0
 .../integration/targets/inventory_aws_rds/aliases | 2 +
 .../targets/inventory_aws_rds/meta/main.yml | 1 +
 .../playbooks/create_inventory_config.yml | 11 +
 .../playbooks/empty_inventory_config.yml | 9 +
 .../inventory_aws_rds/playbooks/populate_cache.yml | 57 +
 .../test_invalid_aws_rds_inventory_config.yml | 9 +
 .../playbooks/test_inventory_cache.yml | 18 +
 .../test_inventory_with_hostvars_prefix_suffix.yml | 63 +
 .../playbooks/test_populating_inventory.yml | 77 +
 .../test_populating_inventory_with_constructed.yml | 65 +
 .../playbooks/test_refresh_inventory.yml | 67 +
 .../integration/targets/inventory_aws_rds/runme.sh | 47 +
 .../inventory_aws_rds/templates/inventory.j2 | 10 +
 .../templates/inventory_with_cache.j2 | 13 +
 .../templates/inventory_with_constructed.j2 | 17 +
 .../inventory_with_hostvars_prefix_suffix.j2 | 16 +
 .../targets/inventory_aws_rds/test.aws_rds.yml | 0
 .../aws/tests/integration/targets/kms_key/aliases | 11 +
 .../tests/integration/targets/kms_key/inventory | 12 +
 .../aws/tests/integration/targets/kms_key/main.yml | 9 +
 .../integration/targets/kms_key/meta/main.yml | 1 +
 .../kms_key/roles/aws_kms/defaults/main.yml | 2 +
 .../targets/kms_key/roles/aws_kms/tasks/main.yml | 11 +
 .../kms_key/roles/aws_kms/tasks/test_grants.yml | 350 +
 .../kms_key/roles/aws_kms/tasks/test_modify.yml | 279 +
 .../roles/aws_kms/tasks/test_multi_region.yml | 100 +
 .../kms_key/roles/aws_kms/tasks/test_states.yml | 522 ++
 .../kms_key/roles/aws_kms/tasks/test_tagging.yml | 187 +
 .../templates/console-policy-no-key-rotation.j2 | 81 +
 .../roles/aws_kms/templates/console-policy.j2 | 72 +
 .../aws/tests/integration/targets/kms_key/runme.sh | 12 +
 .../aws/tests/integration/targets/lambda/aliases | 4 +
 .../integration/targets/lambda/defaults/main.yml | 13 +
 .../targets/lambda/files/mini_lambda.py | 48 +
 .../targets/lambda/files/minimal_trust_policy.json | 12 +
 .../tests/integration/targets/lambda/meta/main.yml | 5 +
 .../integration/targets/lambda/tasks/main.yml | 788 ++
 .../integration/targets/lambda/tasks/tagging.yml | 246 +
 .../tests/integration/targets/lambda_alias/aliases | 1 +
 .../targets/lambda_alias/defaults/main.yml | 6 +
 .../targets/lambda_alias/files/mini_lambda.py | 48 +
 .../lambda_alias/files/minimal_trust_policy.json | 12 +
 .../integration/targets/lambda_alias/meta/main.yml | 1 +
 .../targets/lambda_alias/tasks/main.yml | 622 ++
 .../tests/integration/targets/lambda_event/aliases | 1 +
 .../targets/lambda_event/defaults/main.yml | 10 +
 .../targets/lambda_event/files/mini_lambda.py | 48 +
 .../lambda_event/files/minimal_trust_policy.json | 12 +
 .../integration/targets/lambda_event/meta/main.yml | 5 +
 .../targets/lambda_event/tasks/main.yml | 117 +
 .../targets/lambda_event/tasks/setup.yml | 83 +
 .../targets/lambda_event/tasks/teardown.yml | 33 +
 .../tests/integration/targets/lambda_layer/aliases | 2 +
 .../targets/lambda_layer/defaults/main.yml | 10 +
 .../targets/lambda_layer/tasks/main.yml | 248 +
 .../integration/targets/lambda_policy/aliases | 1 +
 .../targets/lambda_policy/defaults/main.yml | 6 +
 .../lambda_policy/files/mini_http_lambda.py | 40 +
 .../lambda_policy/files/minimal_trust_policy.json | 12 +
 .../targets/lambda_policy/meta/main.yml | 1 +
 .../targets/lambda_policy/tasks/main.yml | 144 +
 .../templates/endpoint-test-swagger-api.yml.j2 | 39 +
 .../targets/legacy_missing_tests/README.md | 5 +
 .../targets/legacy_missing_tests/aliases | 5 +
 .../targets/legacy_missing_tests/meta/main.yml | 1 +
 .../targets/lookup_aws_account_attribute/aliases | 1 +
 .../lookup_aws_account_attribute/meta/main.yml | 1 +
 .../lookup_aws_account_attribute/tasks/main.yaml | 130 +
 .../integration/targets/lookup_aws_secret/aliases | 1 +
 .../targets/lookup_aws_secret/meta/main.yml | 1 +
 .../targets/lookup_aws_secret/tasks/main.yaml | 120 +
 .../targets/lookup_aws_service_ip_ranges/aliases | 1 +
 .../lookup_aws_service_ip_ranges/meta/main.yml | 1 +
 .../lookup_aws_service_ip_ranges/tasks/main.yaml | 148 +
 .../integration/targets/lookup_aws_ssm/aliases | 1 +
 .../targets/lookup_aws_ssm/defaults/main.yml | 2 +
 .../targets/lookup_aws_ssm/meta/main.yml | 1 +
 .../targets/lookup_aws_ssm/tasks/main.yml | 276 +
 .../targets/module_utils_botocore_recorder/aliases | 1 +
 .../module_utils_botocore_recorder/main.yml | 12 +
 .../module_utils_botocore_recorder/record.sh | 23 +
 .../recording.tar.gz | Bin 0 -> 967 bytes
 .../module_utils_botocore_recorder/runme.sh | 15 +
 .../integration/targets/module_utils_core/aliases | 4 +
 .../targets/module_utils_core/inventory | 6 +
 .../integration/targets/module_utils_core/main.yml | 8 +
 .../targets/module_utils_core/meta/main.yml | 1 +
 .../ansibleawsmodule.client/files/amazonroot.pem | 20 +
 .../ansibleawsmodule.client/files/isrg-x1.pem | 31 +
 .../library/example_module.py | 46 +
 .../roles/ansibleawsmodule.client/meta/main.yml | 3 +
 .../ansibleawsmodule.client/tasks/ca_bundle.yml | 202 +
 .../ansibleawsmodule.client/tasks/credentials.yml | 281 +
 .../ansibleawsmodule.client/tasks/endpoints.yml | 123 +
 .../roles/ansibleawsmodule.client/tasks/main.yml | 12 +
 .../ansibleawsmodule.client/tasks/profiles.yml | 74 +
 .../integration/targets/module_utils_core/runme.sh | 16 +
 .../targets/module_utils_core/setup.yml | 40 +
 .../module_utils_core/templates/boto_config.j2 | 5 +
 .../templates/session_credentials.yml.j2 | 3 +
 .../targets/module_utils_waiter/aliases | 1 +
 .../targets/module_utils_waiter/inventory | 6 +
 .../targets/module_utils_waiter/main.yml | 7 +
 .../targets/module_utils_waiter/meta/main.yml | 1 +
 .../roles/get_waiter/library/example_module.py | 39 +
 .../roles/get_waiter/meta/main.yml | 3 +
 .../roles/get_waiter/tasks/main.yml | 36 +
 .../targets/module_utils_waiter/runme.sh | 8 +
 .../tests/integration/targets/rds_cluster/aliases | 5 +
 .../integration/targets/rds_cluster/inventory | 23 +
 .../tests/integration/targets/rds_cluster/main.yml | 10 +
 .../integration/targets/rds_cluster/meta/main.yml | 1 +
 .../roles/rds_cluster/defaults/main.yml | 36 +
 .../rds_cluster/roles/rds_cluster/meta/main.yml | 1 +
 .../rds_cluster/roles/rds_cluster/tasks/main.yml | 10 +
 .../roles/rds_cluster/tasks/test_create.yml | 123 +
 .../roles/rds_cluster/tasks/test_create_sgs.yml | 208 +
 .../roles/rds_cluster/tasks/test_modify.yml | 270 +
 .../roles/rds_cluster/tasks/test_promote.yml | 187 +
 .../roles/rds_cluster/tasks/test_restore.yml | 185 +
 .../roles/rds_cluster/tasks/test_tag.yml | 290 +
 .../rds_cluster/roles/rds_cluster/vars/main.yml | 1 +
 .../tests/integration/targets/rds_cluster/runme.sh | 12 +
 .../targets/rds_cluster_multi_az/aliases | 7 +
 .../targets/rds_cluster_multi_az/defaults/main.yml | 7 +
 .../targets/rds_cluster_multi_az/meta/main.yml | 5 +
 .../targets/rds_cluster_multi_az/tasks/main.yml | 79 +
 .../targets/rds_cluster_snapshot/aliases | 5 +
 .../targets/rds_cluster_snapshot/defaults/main.yml | 13 +
 .../targets/rds_cluster_snapshot/tasks/main.yml | 480 ++
 .../targets/rds_cluster_snapshot/vars/main.yml | 1 +
 .../targets/rds_instance_aurora/aliases | 3 +
 .../targets/rds_instance_aurora/defaults/main.yml | 9 +
 .../targets/rds_instance_aurora/tasks/main.yml | 122 +
 .../targets/rds_instance_complex/aliases | 3 +
 .../targets/rds_instance_complex/defaults/main.yml | 16 +
 .../files/enhanced_monitoring_assume_policy.json | 13 +
 .../files/s3_integration_policy.json | 16 +
 .../files/s3_integration_trust_policy.json | 13 +
 .../targets/rds_instance_complex/tasks/main.yml | 205 +
 .../targets/rds_instance_modify/aliases | 3 +
 .../targets/rds_instance_modify/defaults/main.yml | 9 +
 .../targets/rds_instance_modify/tasks/main.yml | 206 +
 .../targets/rds_instance_processor/aliases | 3 +
 .../rds_instance_processor/defaults/main.yml | 12 +
 .../targets/rds_instance_processor/tasks/main.yml | 141 +
 .../targets/rds_instance_replica/aliases | 3 +
 .../targets/rds_instance_replica/defaults/main.yml | 6 +
 .../targets/rds_instance_replica/tasks/main.yml | 234 +
 .../targets/rds_instance_restore/aliases | 3 +
 .../targets/rds_instance_restore/defaults/main.yml | 5 +
 .../targets/rds_instance_restore/tasks/main.yml | 131 +
 .../targets/rds_instance_sgroups/aliases | 3 +
 .../targets/rds_instance_sgroups/defaults/main.yml | 5 +
 .../targets/rds_instance_sgroups/tasks/main.yml | 332 +
 .../targets/rds_instance_snapshot/aliases | 5 +
 .../rds_instance_snapshot/defaults/main.yml | 14 +
 .../targets/rds_instance_snapshot/tasks/main.yml | 505 ++
 .../targets/rds_instance_snapshot/vars/main.yml | 1 +
 .../targets/rds_instance_snapshot_mgmt/aliases | 4 +
 .../rds_instance_snapshot_mgmt/defaults/main.yml | 9 +
 .../rds_instance_snapshot_mgmt/tasks/main.yml | 224 +
 .../targets/rds_instance_states/aliases | 4 +
 .../targets/rds_instance_states/defaults/main.yml | 5 +
 .../targets/rds_instance_states/tasks/main.yml | 320 +
 .../targets/rds_instance_tagging/aliases | 3 +
 .../targets/rds_instance_tagging/defaults/main.yml | 7 +
 .../targets/rds_instance_tagging/tasks/main.yml | 202 +
 .../tasks/test_tagging_gp3.yml | 190 +
 .../targets/rds_instance_upgrade/aliases | 3 +
 .../targets/rds_instance_upgrade/defaults/main.yml | 10 +
 .../targets/rds_instance_upgrade/tasks/main.yml | 128 +
 .../integration/targets/rds_option_group/aliases | 3 +
 .../targets/rds_option_group/defaults/main.yml | 17 +
 .../targets/rds_option_group/meta/main.yml | 1 +
 .../targets/rds_option_group/tasks/main.yml | 948 ++
 .../integration/targets/rds_param_group/aliases | 1 +
 .../targets/rds_param_group/defaults/main.yml | 29 +
 .../targets/rds_param_group/meta/main.yml | 1 +
 .../targets/rds_param_group/tasks/main.yml | 517 ++
 .../integration/targets/rds_subnet_group/aliases | 1 +
 .../targets/rds_subnet_group/defaults/main.yml | 9 +
 .../targets/rds_subnet_group/meta/main.yml | 1 +
 .../targets/rds_subnet_group/tasks/main.yml | 112 +
 .../targets/rds_subnet_group/tasks/params.yml | 29 +
 .../targets/rds_subnet_group/tasks/tests.yml | 675 +
 .../aws/tests/integration/targets/route53/aliases | 4 +
 .../integration/targets/route53/defaults/main.yml | 2 +
 .../integration/targets/route53/meta/main.yml | 1 +
 .../integration/targets/route53/tasks/main.yml | 1126 +++
 .../integration/targets/route53/vars/main.yml | 0
 .../targets/route53_health_check/aliases | 1 +
 .../targets/route53_health_check/defaults/main.yml | 36 +
 .../targets/route53_health_check/meta/main.yml | 2 +
 .../tasks/create_multiple_health_checks.yml | 134 +
 .../targets/route53_health_check/tasks/main.yml | 1822 ++++
 .../tasks/update_delete_by_id.yml | 303 +
 .../tests/integration/targets/route53_zone/aliases | 1 +
 .../integration/targets/route53_zone/meta/main.yml | 1 +
 .../targets/route53_zone/tasks/main.yml | 617 ++
 .../tests/integration/targets/s3_bucket/aliases | 1 +
 .../tests/integration/targets/s3_bucket/inventory | 17 +
 .../tests/integration/targets/s3_bucket/main.yml | 12 +
 .../integration/targets/s3_bucket/meta/main.yml | 2 +
 .../s3_bucket/roles/s3_bucket/defaults/main.yml | 2 +
 .../s3_bucket/roles/s3_bucket/meta/main.yml | 1 +
 .../s3_bucket/roles/s3_bucket/tasks/acl.yml | 68 +
 .../s3_bucket/roles/s3_bucket/tasks/complex.yml | 150 +
 .../s3_bucket/roles/s3_bucket/tasks/dotted.yml | 55 +
 .../s3_bucket/tasks/encryption_bucket_key.yml | 103 +
 .../roles/s3_bucket/tasks/encryption_kms.yml | 92 +
 .../roles/s3_bucket/tasks/encryption_sse.yml | 93 +
 .../s3_bucket/roles/s3_bucket/tasks/main.yml | 20 +
 .../s3_bucket/roles/s3_bucket/tasks/missing.yml | 28 +
 .../roles/s3_bucket/tasks/object_lock.yml | 131 +
 .../roles/s3_bucket/tasks/ownership_controls.yml | 143 +
 .../roles/s3_bucket/tasks/public_access.yml | 115 +
 .../s3_bucket/roles/s3_bucket/tasks/simple.yml | 67 +
 .../s3_bucket/roles/s3_bucket/tasks/tags.yml | 257 +
 .../roles/s3_bucket/templates/policy-updated.json | 12 +
 .../roles/s3_bucket/templates/policy.json | 12 +
 .../tests/integration/targets/s3_bucket/runme.sh | 12 +
 .../tests/integration/targets/s3_object/aliases | 3 +
 .../targets/s3_object/defaults/main.yml | 5 +
 .../integration/targets/s3_object/files/hello.txt | 1 +
 .../integration/targets/s3_object/files/test.png | Bin 0 -> 99 bytes
 .../integration/targets/s3_object/meta/main.yml | 6 +
 .../targets/s3_object/tasks/copy_object.yml | 135 +
 .../tasks/copy_object_acl_disabled_bucket.yml | 111 +
 .../targets/s3_object/tasks/delete_bucket.yml | 25 +
 .../integration/targets/s3_object/tasks/main.yml | 1092 +++
 .../targets/s3_object/templates/policy.json.j2 | 21 +
 .../s3_object/templates/put-template.txt.j2 | 2 +
 .../targets/setup_botocore_pip/defaults/main.yml | 2 +
 .../targets/setup_botocore_pip/handlers/main.yml | 2 +
 .../targets/setup_botocore_pip/meta/main.yml | 1 +
 .../targets/setup_botocore_pip/tasks/cleanup.yml | 5 +
 .../targets/setup_botocore_pip/tasks/main.yml | 43 +
 .../targets/setup_ec2_facts/defaults/main.yml | 4 +
 .../targets/setup_ec2_facts/meta/main.yml | 1 +
 .../targets/setup_ec2_facts/tasks/main.yml | 53 +
 .../setup_ec2_instance_env/defaults/main.yml | 24 +
 .../setup_ec2_instance_env/handlers/main.yml | 2 +
 .../targets/setup_ec2_instance_env/meta/main.yml | 1 +
 .../setup_ec2_instance_env/tasks/cleanup.yml | 118 +
 .../targets/setup_ec2_instance_env/tasks/main.yml | 88 +
 .../targets/setup_remote_tmp_dir/handlers/main.yml | 5 +
 .../targets/setup_remote_tmp_dir/meta/main.yml | 1 +
 .../setup_remote_tmp_dir/tasks/default-cleanup.yml | 5 +
 .../targets/setup_remote_tmp_dir/tasks/default.yml | 12 +
 .../targets/setup_remote_tmp_dir/tasks/main.yml | 10 +
 .../setup_remote_tmp_dir/tasks/windows-cleanup.yml | 4 +
 .../targets/setup_remote_tmp_dir/tasks/windows.yml | 10 +
 .../targets/setup_sshkey/files/ec2-fingerprint.py | 34 +
 .../integration/targets/setup_sshkey/meta/main.yml | 1 +
 .../targets/setup_sshkey/tasks/main.yml | 71 +
 .../amazon/aws/tests/sanity/ignore-2.10.txt | 1 +
 .../amazon/aws/tests/sanity/ignore-2.11.txt | 1 +
 .../amazon/aws/tests/sanity/ignore-2.12.txt | 1 +
 .../amazon/aws/tests/sanity/ignore-2.13.txt | 1 +
 .../amazon/aws/tests/sanity/ignore-2.14.txt | 1 +
 .../amazon/aws/tests/sanity/ignore-2.15.txt | 1 +
 .../amazon/aws/tests/sanity/ignore-2.9.txt | 7 +
 .../amazon/aws/tests/unit/compat/__init__.py | 0
 .../amazon/aws/tests/unit/compat/builtins.py | 33 +
 .../amazon/aws/tests/unit/compat/mock.py | 122 +
 .../amazon/aws/tests/unit/compat/unittest.py | 38 +
 .../amazon/aws/tests/unit/constraints.txt | 7 +
 .../amazon/aws/tests/unit/mock/loader.py | 116 +
 .../amazon/aws/tests/unit/mock/path.py | 8 +
 .../amazon/aws/tests/unit/mock/procenv.py | 90 +
 .../amazon/aws/tests/unit/mock/vault_helper.py | 39 +
 .../amazon/aws/tests/unit/mock/yaml_helper.py | 124 +
 .../unit/module_utils/arn/test_is_outpost_arn.py | 27 +
 .../unit/module_utils/arn/test_parse_aws_arn.py | 95 +
 .../botocore/test_is_boto3_error_code.py | 214 +
 .../botocore/test_is_boto3_error_message.py | 145 +
 .../botocore/test_normalize_boto3_result.py | 59 +
 .../module_utils/cloud/test_backoff_iterator.py | 45 +
 .../unit/module_utils/cloud/test_cloud_retry.py | 236 +
 .../cloud/test_decorator_generation.py | 156 +
 .../unit/module_utils/cloud/test_retries_found.py | 34 +
 .../unit/module_utils/cloud/test_retry_func.py | 129 +
 .../amazon/aws/tests/unit/module_utils/conftest.py | 81 +
 .../tests/unit/module_utils/elbv2/test_prune.py | 188 +
 .../ansible_aws_module/test_fail_json_aws.py | 330 +
 .../ansible_aws_module/test_minimal_versions.py | 191 +
 .../ansible_aws_module/test_require_at_least.py | 220 +
 .../module_utils/policy/test_compare_policies.py | 339 +
 .../unit/module_utils/retries/test_awsretry.py | 96 +
 .../aws/tests/unit/module_utils/test_elbv2.py | 214 +
 .../amazon/aws/tests/unit/module_utils/test_iam.py | 300 +
 .../amazon/aws/tests/unit/module_utils/test_rds.py | 805 ++
 .../amazon/aws/tests/unit/module_utils/test_s3.py | 86 +
 .../aws/tests/unit/module_utils/test_tagging.py | 203 +
 .../aws/tests/unit/module_utils/test_tower.py | 40 +
 .../test_ansible_dict_to_boto3_filter_list.py | 73 +
 .../transformation/test_map_complex_type.py | 100 +
 .../transformation/test_scrub_none_parameters.py | 88 +
 .../tests/unit/plugins/inventory/test_aws_ec2.py | 514 ++
 .../aws/tests/unit/plugins/modules/conftest.py | 31 +
 .../ec2_instance/test_build_run_instance_spec.py | 126 +
 .../ec2_instance/test_determine_iam_role.py | 102 +
 .../unit/plugins/modules/fixtures/certs/a.pem | 31 +
 .../unit/plugins/modules/fixtures/certs/b.pem | 47 +
 .../plugins/modules/fixtures/certs/chain-1.0.cert | 121 +
 .../plugins/modules/fixtures/certs/chain-1.1.cert | 69 +
 .../plugins/modules/fixtures/certs/chain-1.2.cert | 113 +
 .../plugins/modules/fixtures/certs/chain-1.3.cert | 124 +
 .../plugins/modules/fixtures/certs/chain-1.4.cert | 86 +
 .../plugins/modules/fixtures/certs/chain-4.cert | 121 +
 .../modules/fixtures/certs/simple-chain-a.cert | 18 +
 .../modules/fixtures/certs/simple-chain-b.cert | 18 +
 .../tests/unit/plugins/modules/fixtures/thezip.zip | Bin 0 -> 162 bytes
 .../plugins/modules/placebo_recordings/.gitkeep | 0
 .../cloudformation.CreateStack_1.json | 17 +
 .../cloudformation.DeleteStack_1.json | 16 +
 .../cloudformation.DescribeStackEvents_1.json | 38 +
 .../cloudformation.DescribeStackEvents_2.json | 80 +
 .../cloudformation.DescribeStackEvents_3.json | 80 +
 .../cloudformation.DescribeStackEvents_4.json | 80 +
 .../cloudformation.DescribeStackEvents_5.json | 80 +
 .../cloudformation.DescribeStackEvents_6.json | 100 +
 .../cloudformation.DescribeStackEvents_7.json | 119 +
 .../cloudformation.DescribeStacks_1.json | 40 +
 .../cloudformation.DescribeStacks_2.json | 39 +
 .../cloudformation.DescribeStacks_3.json | 39 +
 .../cloudformation.DescribeStacks_4.json | 39 +
 .../cloudformation.DescribeStacks_5.json | 39 +
 .../cloudformation.DescribeStacks_6.json | 39 +
 .../cloudformation.DescribeStacks_7.json | 45 +
 .../cloudformation.CreateStack_1.json | 17 +
 .../cloudformation.DeleteStack_1.json | 16 +
 .../cloudformation.DescribeStackEvents_1.json | 39 +
 .../cloudformation.DescribeStackEvents_2.json | 83 +
 .../cloudformation.DescribeStackEvents_3.json | 83 +
 .../cloudformation.DescribeStackEvents_4.json | 83 +
 .../cloudformation.DescribeStackEvents_5.json | 83 +
 .../cloudformation.DescribeStackEvents_6.json | 104 +
 .../cloudformation.DescribeStackEvents_7.json | 124 +
 .../cloudformation.DescribeStacks_1.json | 40 +
 .../cloudformation.DescribeStacks_2.json | 39 +
 .../cloudformation.DescribeStacks_3.json | 39 +
 .../cloudformation.DescribeStacks_4.json | 39 +
 .../cloudformation.DescribeStacks_5.json | 39 +
 .../cloudformation.DescribeStacks_6.json | 39 +
 .../cloudformation.DescribeStacks_7.json | 45 +
 .../cloudformation.DescribeStackEvents_1.json | 22 +
 .../cloudformation.DescribeStackEvents_2.json | 22 +
 .../cloudformation.DescribeStacks_1.json | 22 +
 .../cloudformation.DescribeStacks_1.json | 22 +
 .../cloudformation.CreateStack_1.json | 22 +
 .../cloudformation.CreateStack_1.json | 17 +
 .../cloudformation.DescribeStackEvents_1.json | 38 +
 .../cloudformation.DescribeStackEvents_2.json | 101 +
 .../cloudformation.DescribeStackEvents_3.json | 121 +
 .../cloudformation.DescribeStackEvents_4.json | 180 +
 .../cloudformation.DescribeStackEvents_5.json | 180 +
 .../cloudformation.DescribeStacks_1.json | 42 +
 .../cloudformation.DescribeStacks_2.json | 41 +
 .../cloudformation.DescribeStacks_3.json | 52 +
 .../cloudformation.DescribeStacks_4.json | 51 +
 .../cloudformation.DescribeStacks_5.json | 50 +
 .../cloudformation.CreateStack_1.json | 17 +
 .../cloudformation.DeleteStack_1.json | 16 +
 .../cloudformation.DescribeStackEvents_1.json | 38 +
 .../cloudformation.DescribeStackEvents_2.json | 121 +
 .../cloudformation.DescribeStacks_1.json | 42 +
 .../cloudformation.DescribeStacks_2.json | 42 +
 .../cloudformation.CreateStack_1.json | 17 +
 .../cloudformation.DeleteStack_1.json | 16 +
 .../cloudformation.DescribeStackEvents_1.json | 38 +
 .../cloudformation.DescribeStackEvents_2.json | 121 +
 .../cloudformation.DescribeStackEvents_3.json | 180 +
 .../cloudformation.DescribeStacks_1.json | 42 +
 .../cloudformation.DescribeStacks_2.json | 52 +
 .../cloudformation.DescribeStacks_3.json | 51 +
 .../unit/plugins/modules/test_cloudformation.py | 227 +
 .../aws/tests/unit/plugins/modules/test_ec2_ami.py | 44 +
 .../aws/tests/unit/plugins/modules/test_ec2_key.py | 654 ++
 .../plugins/modules/test_ec2_security_group.py | 83 +
 .../plugins/modules/test_ec2_vpc_dhcp_option.py | 71 +
 .../aws/tests/unit/plugins/modules/test_kms_key.py | 82 +
 .../unit/plugins/modules/test_lambda_layer.py | 493 ++
 .../unit/plugins/modules/test_lambda_layer_info.py | 358 +
 .../tests/unit/plugins/modules/test_s3_object.py | 29 +
 .../amazon/aws/tests/unit/plugins/modules/utils.py | 50 +
 .../amazon/aws/tests/unit/requirements.txt | 5 +
 .../tests/unit/utils/amazon_placebo_fixtures.py | 213 +
 ansible_collections/amazon/aws/tox.ini | 27 +
 857 files changed, 142367 insertions(+)
 create mode 100644 ansible_collections/amazon/aws/.coveragerc
 create mode 100644 ansible_collections/amazon/aws/.github/BOTMETA.yml
 create mode 100644 ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/bug_report.yml
 create mode 100644 ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/ci_report.yml
 create mode 100644 ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/config.yml
 create mode 100644 ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/documentation_report.yml
 create mode 100644 ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/feature_request.yml
 create mode 100644 ansible_collections/amazon/aws/.github/patchback.yml
 create mode 100644 ansible_collections/amazon/aws/.github/settings.yml
 create mode 100644 ansible_collections/amazon/aws/.github/workflows/docs-pr.yml
 create mode 100644 ansible_collections/amazon/aws/.github/workflows/docs-push.yml
 create mode 100644 ansible_collections/amazon/aws/.gitignore
 create mode 100644 ansible_collections/amazon/aws/CHANGELOG.rst
 create mode 100644 ansible_collections/amazon/aws/CONTRIBUTING.md
 create mode 100644 ansible_collections/amazon/aws/COPYING
 create mode 100644 ansible_collections/amazon/aws/FILES.json
 create mode 100644 ansible_collections/amazon/aws/MANIFEST.json
 create mode 100644 ansible_collections/amazon/aws/PSF-license.txt
 create mode 100644 ansible_collections/amazon/aws/README.md
 create mode 100644 ansible_collections/amazon/aws/bindep.txt
 create mode 100644 ansible_collections/amazon/aws/changelogs/changelog.yaml
 create mode 100644 ansible_collections/amazon/aws/changelogs/config.yaml
 create mode 100644 ansible_collections/amazon/aws/changelogs/fragments/.keep
 create mode 100644 ansible_collections/amazon/aws/docs/docsite/extra-docs.yml
 create mode 100644 ansible_collections/amazon/aws/docs/docsite/links.yml
 create mode 100644 ansible_collections/amazon/aws/docs/docsite/rst/CHANGELOG.rst
 create mode 100644 ansible_collections/amazon/aws/docs/docsite/rst/aws_ec2_guide.rst
 create mode 100644 ansible_collections/amazon/aws/docs/docsite/rst/dev_guidelines.rst
 create mode 100644 ansible_collections/amazon/aws/docs/docsite/rst/guide_aws.rst
 create mode 100644 ansible_collections/amazon/aws/meta/runtime.yml
 create mode 100644 ansible_collections/amazon/aws/plugins/action/s3_object.py
 create mode 100644 ansible_collections/amazon/aws/plugins/callback/aws_resource_actions.py
 create mode 100644 ansible_collections/amazon/aws/plugins/doc_fragments/aws.py
 create mode 100644 ansible_collections/amazon/aws/plugins/doc_fragments/aws_credentials.py
 create mode 100644 ansible_collections/amazon/aws/plugins/doc_fragments/aws_region.py
 create mode 100644 ansible_collections/amazon/aws/plugins/doc_fragments/boto3.py
 create mode 100644 ansible_collections/amazon/aws/plugins/doc_fragments/ec2.py
 create mode 100644 ansible_collections/amazon/aws/plugins/doc_fragments/tags.py
 create mode 100644 ansible_collections/amazon/aws/plugins/inventory/aws_ec2.py
 create mode 100644 ansible_collections/amazon/aws/plugins/inventory/aws_rds.py
 create mode 100644 ansible_collections/amazon/aws/plugins/lookup/aws_account_attribute.py
 create mode 100644 ansible_collections/amazon/aws/plugins/lookup/aws_secret.py
 create mode 100644 ansible_collections/amazon/aws/plugins/lookup/aws_service_ip_ranges.py
 create mode 100644 ansible_collections/amazon/aws/plugins/lookup/aws_ssm.py
 create mode 100644 ansible_collections/amazon/aws/plugins/module_utils/_version.py
 create mode 100644 ansible_collections/amazon/aws/plugins/module_utils/acm.py
 create mode 100644 ansible_collections/amazon/aws/plugins/module_utils/arn.py
 create mode 100644 ansible_collections/amazon/aws/plugins/module_utils/batch.py
 create mode 100644 ansible_collections/amazon/aws/plugins/module_utils/botocore.py
 create mode 100644 ansible_collections/amazon/aws/plugins/module_utils/cloud.py
 create mode 100644 ansible_collections/amazon/aws/plugins/module_utils/cloudfront_facts.py
 create mode 100644 ansible_collections/amazon/aws/plugins/module_utils/core.py
 create mode 100644 ansible_collections/amazon/aws/plugins/module_utils/direct_connect.py
 create mode 100644 ansible_collections/amazon/aws/plugins/module_utils/ec2.py
 create mode 100644 ansible_collections/amazon/aws/plugins/module_utils/elb_utils.py
 create mode 100644 ansible_collections/amazon/aws/plugins/module_utils/elbv2.py
 create mode 100644 ansible_collections/amazon/aws/plugins/module_utils/iam.py
 create mode 100644 ansible_collections/amazon/aws/plugins/module_utils/modules.py
 create mode 100644 ansible_collections/amazon/aws/plugins/module_utils/policy.py
 create mode 100644 ansible_collections/amazon/aws/plugins/module_utils/rds.py
 create mode 100644 ansible_collections/amazon/aws/plugins/module_utils/retries.py
 create mode 100644 ansible_collections/amazon/aws/plugins/module_utils/route53.py
 create mode 100644 ansible_collections/amazon/aws/plugins/module_utils/s3.py
 create mode 100644 ansible_collections/amazon/aws/plugins/module_utils/tagging.py
 create mode 100644 ansible_collections/amazon/aws/plugins/module_utils/tower.py
 create mode 100644 ansible_collections/amazon/aws/plugins/module_utils/transformation.py
 create mode 100644 ansible_collections/amazon/aws/plugins/module_utils/urls.py
 create mode 100644 ansible_collections/amazon/aws/plugins/module_utils/version.py
 create mode 100644 ansible_collections/amazon/aws/plugins/module_utils/waf.py
 create mode 100644 ansible_collections/amazon/aws/plugins/module_utils/waiters.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/autoscaling_group.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/autoscaling_group_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/aws_az_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/aws_caller_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/cloudformation.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/cloudformation_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/cloudtrail.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/cloudtrail_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/cloudwatch_metric_alarm.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/cloudwatch_metric_alarm_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/cloudwatchevent_rule.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group_metric_filter.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_ami.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_ami_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_eip.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_eip_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_eni.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_eni_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_instance.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_instance_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_key.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_metadata_facts.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_security_group.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_security_group_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_snapshot.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_snapshot_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_spot_instance.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_spot_instance_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_tag.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_tag_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_vol.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_vol_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint_service_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_vpc_igw.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_vpc_igw_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_vpc_nat_gateway.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_vpc_nat_gateway_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/elb_application_lb.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/elb_application_lb_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/elb_classic_lb.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/iam_policy.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/iam_policy_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/iam_user.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/iam_user_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/kms_key.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/kms_key_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/lambda.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/lambda_alias.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/lambda_event.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/lambda_execute.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/lambda_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/lambda_layer.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/lambda_layer_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/lambda_policy.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/rds_cluster.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/rds_cluster_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/rds_cluster_snapshot.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/rds_instance.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/rds_instance_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/rds_instance_snapshot.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/rds_option_group.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/rds_option_group_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/rds_param_group.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/rds_snapshot_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/rds_subnet_group.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/route53.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/route53_health_check.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/route53_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/route53_zone.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/s3_bucket.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/s3_object.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/s3_object_info.py
 create mode 100644 ansible_collections/amazon/aws/requirements.txt
 create mode 100644 ansible_collections/amazon/aws/test-requirements.txt
 create mode 100644 ansible_collections/amazon/aws/tests/.gitignore
 create mode 100644 ansible_collections/amazon/aws/tests/config.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/constraints.txt
 create mode 100644 ansible_collections/amazon/aws/tests/integration/inventory
 create mode 100644 ansible_collections/amazon/aws/tests/integration/requirements.txt
 create mode 100644 ansible_collections/amazon/aws/tests/integration/requirements.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/aliases
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/inventory
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/meta/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/defaults/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/create_update_delete.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/env_cleanup.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/env_setup.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/instance_detach.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/tag_operations.yml
 create mode 100755 ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/runme.sh
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/aliases
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/meta/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/tasks/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/aliases
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/meta/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/tasks/main.yaml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/aliases
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/inventory
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/meta/main.yml
 create mode 100755 ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/runme.sh
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudformation/aliases
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudformation/defaults/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudformation/files/cf_template.json
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudformation/files/update_policy.json
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudformation/meta/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudformation/tasks/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/aliases
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/defaults/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/meta/main.yml
 create mode 100755 ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/runme.sh
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/tasks/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/tasks/tagging.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/cloudtrail-no-kms-assume-policy.j2
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/cloudtrail-no-kms-policy.j2
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/cloudwatch-assume-policy.j2
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/cloudwatch-policy.j2
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/kms-policy.j2
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/s3-policy.j2
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/sns-policy.j2
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/aliases
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/defaults/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/meta/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/env_cleanup.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/env_setup.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/aliases
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/defaults/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/tasks/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/aliases
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/defaults/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/meta/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/cloudwatchlogs_tests.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/create-delete-tags.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/aliases
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/defaults/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/meta/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/tasks/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/vars/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/aliases
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/defaults/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/meta/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/tasks/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/aliases
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/defaults/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/meta/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/main.yaml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_attachment.yaml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_create_attached_multiple.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_deletion.yaml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_eni_basic_creation.yaml
 create mode 100644
ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_ipaddress_assign.yaml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_delete_on_termination.yaml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_source_dest_check.yaml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_tags.yaml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/aliases create mode 100644 
ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/files/assume-role-policy.json create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/aliases create mode 100644 
ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_key/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_key/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_key/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_key/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/setup.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/teardown.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/test_metadata.yml create mode 100755 ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/runme.sh create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/templates/inventory.j2 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/data_validation.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/diff_mode.yml create mode 
100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/egress_tests.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/group_info.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/icmp_verbs.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/ipv6_default_tests.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/multi_account.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/multi_nested_target.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/numeric_protos.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/rule_group_create.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/tasks/main.yaml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/tasks/terminate_associated_instances.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/vars/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/aliases create mode 100644 
ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/templates/policy.json create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/defaults/main.yml create 
mode 100644 ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/basic_internal.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/basic_public.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_instances.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_s3.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_vpc.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/complex_changes.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/describe_region.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/https_listeners.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/missing_params.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/schema_change.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_instances.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_s3.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_vpc.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_changes.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_cross_az.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_draining_timeout.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_healthcheck.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_idle_timeout.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_instances.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_listeners.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_logging.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_proxy_policy.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_securitygroups.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_stickiness.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_tags.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/templates/s3_policy.j2 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/vars/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_policy/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_policy/defaults/main.yml create 
mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_policy/files/no_access.json create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_policy/files/no_access_with_id.json create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_policy/files/no_access_with_second_id.json create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_policy/files/no_trust.json create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_policy/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_policy/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_policy/tasks/object.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_user/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_user/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_user/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/create_environment_script.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/create_inventory_config.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/empty_inventory_config.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/populate_cache.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/setup.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/tear_down.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_inventory_cache.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_concatenation.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostnames_using_tags.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostnames_using_tags_classic.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostvars_prefix_suffix.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_include_or_exclude_filters.yml create mode 100644 
ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_literal_string.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_use_contrib_script_keys.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_refresh_inventory.yml create mode 100755 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/runme.sh create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory.yml.j2 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_cache.yml.j2 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_concatenation.yml.j2 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_constructed.yml.j2 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostnames_using_tags.yml.j2 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostnames_using_tags_classic.yml.j2 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostvars_prefix_suffix.yml.j2 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_include_or_exclude_filters.yml.j2 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_literal_string.yml.j2 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_template.yml.j2 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_use_contrib_script_keys.yml.j2 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/test.aws_ec2.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/create_inventory_config.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/empty_inventory_config.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/populate_cache.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_invalid_aws_rds_inventory_config.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_cache.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_with_hostvars_prefix_suffix.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory_with_constructed.yml create mode 100644 
ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_refresh_inventory.yml create mode 100755 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/runme.sh create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory.j2 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_cache.j2 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_constructed.j2 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_hostvars_prefix_suffix.j2 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/test.aws_rds.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/kms_key/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/kms_key/inventory create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/kms_key/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/kms_key/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_grants.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_modify.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_multi_region.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_states.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_tagging.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/templates/console-policy-no-key-rotation.j2 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/templates/console-policy.j2 create mode 100755 ansible_collections/amazon/aws/tests/integration/targets/kms_key/runme.sh create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lambda/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lambda/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lambda/files/mini_lambda.py create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lambda/files/minimal_trust_policy.json create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lambda/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lambda/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lambda/tasks/tagging.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/files/mini_lambda.py create mode 100644 
ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/files/minimal_trust_policy.json create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lambda_event/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lambda_event/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lambda_event/files/mini_lambda.py create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lambda_event/files/minimal_trust_policy.json create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lambda_event/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/setup.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/teardown.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lambda_layer/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lambda_layer/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lambda_layer/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/files/mini_http_lambda.py create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/files/minimal_trust_policy.json create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/templates/endpoint-test-swagger-api.yml.j2 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/legacy_missing_tests/README.md create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/legacy_missing_tests/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/legacy_missing_tests/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_account_attribute/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_account_attribute/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_account_attribute/tasks/main.yaml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/tasks/main.yaml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_service_ip_ranges/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_service_ip_ranges/meta/main.yml create mode 100644 
ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_service_ip_ranges/tasks/main.yaml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/main.yml create mode 100755 ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/record.sh create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/recording.tar.gz create mode 100755 ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/runme.sh create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/inventory create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/files/amazonroot.pem create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/files/isrg-x1.pem create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/library/example_module.py create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/ca_bundle.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/credentials.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/endpoints.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/profiles.yml create mode 100755 ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/runme.sh create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/setup.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/templates/boto_config.j2 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/templates/session_credentials.yml.j2 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/inventory create mode 100644 
ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/library/example_module.py create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/tasks/main.yml create mode 100755 ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/runme.sh create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/inventory create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_create.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_create_sgs.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_modify.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_promote.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_restore.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_tag.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/vars/main.yml create mode 100755 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/runme.sh create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/vars/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_aurora/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_aurora/defaults/main.yml create 
mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_aurora/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/files/enhanced_monitoring_assume_policy.json create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/files/s3_integration_policy.json create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/files/s3_integration_trust_policy.json create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_processor/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_processor/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_processor/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_replica/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_replica/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_replica/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_restore/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_restore/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_restore/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_sgroups/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_sgroups/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_sgroups/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/vars/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot_mgmt/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot_mgmt/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot_mgmt/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_states/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_states/defaults/main.yml create mode 100644 
ansible_collections/amazon/aws/tests/integration/targets/rds_instance_states/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/tasks/test_tagging_gp3.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_upgrade/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_upgrade/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_upgrade/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/params.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/tests.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/route53/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/route53/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/route53/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/route53/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/route53/vars/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/create_multiple_health_checks.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/main.yml create mode 100644 
ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/update_delete_by_id.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/route53_zone/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/route53_zone/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/route53_zone/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/inventory create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/acl.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/complex.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/dotted.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_bucket_key.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_kms.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_sse.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/missing.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/object_lock.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/ownership_controls.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/public_access.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/simple.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/tags.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/templates/policy-updated.json create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/templates/policy.json create mode 100755 ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/runme.sh create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_object/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_object/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_object/files/hello.txt create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_object/files/test.png create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_object/meta/main.yml create mode 100644 
ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_object.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_object_acl_disabled_bucket.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/delete_bucket.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_object/templates/policy.json.j2 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_object/templates/put-template.txt.j2 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/handlers/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/tasks/cleanup.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/handlers/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/tasks/cleanup.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows-cleanup.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/files/ec2-fingerprint.py create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/sanity/ignore-2.10.txt create mode 100644 ansible_collections/amazon/aws/tests/sanity/ignore-2.11.txt create mode 100644 ansible_collections/amazon/aws/tests/sanity/ignore-2.12.txt create mode 100644 
ansible_collections/amazon/aws/tests/sanity/ignore-2.13.txt create mode 100644 ansible_collections/amazon/aws/tests/sanity/ignore-2.14.txt create mode 100644 ansible_collections/amazon/aws/tests/sanity/ignore-2.15.txt create mode 100644 ansible_collections/amazon/aws/tests/sanity/ignore-2.9.txt create mode 100644 ansible_collections/amazon/aws/tests/unit/compat/__init__.py create mode 100644 ansible_collections/amazon/aws/tests/unit/compat/builtins.py create mode 100644 ansible_collections/amazon/aws/tests/unit/compat/mock.py create mode 100644 ansible_collections/amazon/aws/tests/unit/compat/unittest.py create mode 100644 ansible_collections/amazon/aws/tests/unit/constraints.txt create mode 100644 ansible_collections/amazon/aws/tests/unit/mock/loader.py create mode 100644 ansible_collections/amazon/aws/tests/unit/mock/path.py create mode 100644 ansible_collections/amazon/aws/tests/unit/mock/procenv.py create mode 100644 ansible_collections/amazon/aws/tests/unit/mock/vault_helper.py create mode 100644 ansible_collections/amazon/aws/tests/unit/mock/yaml_helper.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/arn/test_is_outpost_arn.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/arn/test_parse_aws_arn.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_is_boto3_error_code.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_is_boto3_error_message.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_normalize_boto3_result.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_backoff_iterator.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_cloud_retry.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_decorator_generation.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_retries_found.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_retry_func.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/conftest.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/elbv2/test_prune.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/test_fail_json_aws.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/test_minimal_versions.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/test_require_at_least.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/policy/test_compare_policies.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/retries/test_awsretry.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/test_elbv2.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/test_iam.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/test_rds.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/test_s3.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/test_tagging.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/test_tower.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/transformation/test_ansible_dict_to_boto3_filter_list.py create 
mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/transformation/test_map_complex_type.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/transformation/test_scrub_none_parameters.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/inventory/test_aws_ec2.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/conftest.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_instance/test_build_run_instance_spec.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_instance/test_determine_iam_role.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/a.pem create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/b.pem create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.0.cert create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.1.cert create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.2.cert create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.3.cert create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.4.cert create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-4.cert create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/simple-chain-a.cert create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/simple-chain-b.cert create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/thezip.zip create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/.gitkeep create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.CreateStack_1.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DeleteStack_1.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_1.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_2.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_3.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_4.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_5.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_6.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_7.json create mode 100644 
ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_1.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_2.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_3.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_4.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_5.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_6.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_7.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.CreateStack_1.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DeleteStack_1.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_1.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_2.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_3.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_4.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_5.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_6.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_7.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_1.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_2.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_3.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_4.json create mode 100644 
ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_5.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_6.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_7.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/delete_nonexistent_stack/cloudformation.DescribeStackEvents_1.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/delete_nonexistent_stack/cloudformation.DescribeStackEvents_2.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/delete_nonexistent_stack/cloudformation.DescribeStacks_1.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/get_nonexistent_stack/cloudformation.DescribeStacks_1.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/invalid_template_json/cloudformation.CreateStack_1.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.CreateStack_1.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_1.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_2.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_3.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_4.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_5.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_1.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_2.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_3.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_4.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_5.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.CreateStack_1.json create mode 100644 
ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DeleteStack_1.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStackEvents_1.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStackEvents_2.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStacks_1.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStacks_2.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.CreateStack_1.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DeleteStack_1.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStackEvents_1.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStackEvents_2.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStackEvents_3.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStacks_1.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStacks_2.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStacks_3.json create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/test_cloudformation.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_ami.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_key.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_security_group.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_vpc_dhcp_option.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/test_kms_key.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/test_lambda_layer.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/test_lambda_layer_info.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/test_s3_object.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/utils.py create mode 100644 ansible_collections/amazon/aws/tests/unit/requirements.txt create mode 100644 ansible_collections/amazon/aws/tests/unit/utils/amazon_placebo_fixtures.py create mode 100644 ansible_collections/amazon/aws/tox.ini (limited to 'ansible_collections/amazon/aws') diff --git 
a/ansible_collections/amazon/aws/.coveragerc b/ansible_collections/amazon/aws/.coveragerc new file mode 100644 index 000000000..ff6415d7a --- /dev/null +++ b/ansible_collections/amazon/aws/.coveragerc @@ -0,0 +1,7 @@ +[report] +exclude_lines = + # Have to re-enable the standard pragma + pragma: no cover + + # Don't complain if tests don't hit defensive assertion code: + raise NotImplementedError diff --git a/ansible_collections/amazon/aws/.github/BOTMETA.yml b/ansible_collections/amazon/aws/.github/BOTMETA.yml new file mode 100644 index 000000000..1efbcc4c4 --- /dev/null +++ b/ansible_collections/amazon/aws/.github/BOTMETA.yml @@ -0,0 +1,110 @@ +automerge: false +files: + maintainers: $team_aws + docs/: + labels: docs + $action/: + labels: action + $action/aws_s3.py: + $callback/: + labels: callback + $doc_fragments/: + labels: doc_fragments + $doc_fragments/aws.py: + $doc_fragments/aws_credentials.py: + $doc_fragments/aws_region.py: + $doc_fragments/ec2.py: + $inventory/: + labels: inventory + $inventory/aws_ec2.py: + labels: inventory + $inventory/aws_rds.py: + labels: inventory + $lookup/: + labels: lookup + $module_utils/: + labels: module_utils + $module_utils/ec2.py: + labels: ec2 + $modules/: + authors: wimnat + maintainers: $team_aws + ignore: erydo nadirollo seiffert tedder wimnat + labels: modules + $modules/_aws_az_facts.py: + authors: Sodki + $modules/_aws_caller_facts.py: + authors: orthanc sdubrul + $modules/_cloudformation_facts.py: + authors: jmenga waffie1 + $modules/_ec2_ami_facts.py: + authors: prasadkatti + $modules/_ec2_group_facts.py: + authors: Sodki + $modules/_ec2_vpc_dhcp_option_facts.py: + authors: naslanidis + $modules/aws_az_info.py: + authors: Sodki + $modules/aws_caller_info.py: + authors: orthanc sdubrul + $modules/aws_s3.py: + authors: lwade s-hertel + $modules/cloudformation.py: + authors: jsmartin + $modules/cloudformation_info.py: + authors: jmenga waffie1 + $modules/ec2.py: + authors: lwade skvidal tgerla + labels: ec2 + ignore: erydo nadirollo seiffert skvidal + $modules/ec2_ami.py: + authors: Constantin07 gunzy83 scicoin-project wilvk + $modules/ec2_ami_info.py: + authors: prasadkatti + $modules/ec2_elb_lb.py: + authors: jsdalton + $modules/ec2_group.py: + authors: adq + $modules/ec2_group_info.py: + authors: Sodki + $modules/ec2_key.py: + authors: prasadkatti zbal + $modules/ec2_metadata_facts.py: + authors: roadmapper silviud + $modules/ec2_snapshot.py: + authors: willthames + $modules/ec2_tag.py: + authors: flowerysong lwade + $modules/ec2_tag_info.py: + authors: tremble + maintainers: jillr s-hertel + $modules/ec2_vol.py: + authors: lwade + $modules/ec2_vpc_dhcp_option.py: + authors: joelthompson + $modules/ec2_vpc_dhcp_option_info.py: + authors: naslanidis + $modules/ec2_vpc_net.py: + authors: defionscode s-hertel + ignore: defionscode joshsouza ryansb simplesteph + $modules/ec2_vpc_net_info.py: + maintainers: whiter + $modules/ec2_vpc_subnet.py: + authors: brandond erydo + scripts/inventory/: + labels: inventory + tests/: + labels: tests + tests/integration/: + labels: integrations + tests/units/: + labels: units +macros: + action: plugins/action + callback: plugins/callback + doc_fragments: plugins/doc_fragments + inventory: plugins/inventory + lookup: plugins/lookup + module_utils: plugins/module_utils + modules: plugins/modules + team_aws: jillr s-hertel tremble diff --git a/ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/bug_report.yml b/ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 
000000000..c818de3e0 --- /dev/null +++ b/ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,161 @@ +--- +name: Bug report +description: Create a report to help us improve + +body: +- type: markdown + attributes: + value: | + ⚠ + Verify first that your issue is not [already reported on GitHub][issue search]. + Where possible also test if the latest release and main branch are affected too. + *Complete **all** sections as described, this form is processed automatically.* + + [issue search]: https://github.com/ansible-collections/amazon.aws/search?q=is%3Aissue&type=issues + +- type: textarea + attributes: + label: Summary + description: | + Explain the problem briefly below. + placeholder: >- + When I try to do X with the collection from the main branch on GitHub, Y + breaks in way Z under environment E. Here are all the details I know + about this problem... + validations: + required: true + +- type: dropdown + attributes: + label: Issue Type + # FIXME: Once GitHub allows defining the default choice, update this + options: + - Bug Report + validations: + required: true + +- type: textarea + attributes: + # For smaller collections we could use a multi-select and hardcode the list + # May generate this list via GitHub action and walking files under https://github.com/ansible-collections/community.general/tree/main/plugins + # Select from list, filter as you type (`mysql` would only show the 3 mysql components) + # OR freeform - doesn't seem to be supported in adaptivecards + label: Component Name + description: >- + Write the short name of the module or plugin below, + *use your best guess if unsure*. + placeholder: ec2_instance, ec2_security_group + validations: + required: true + +- type: textarea + attributes: + label: Ansible Version + description: >- + Paste verbatim output from `ansible --version` between + triple backticks. + value: | + ```console (paste below) + $ ansible --version + + ``` + validations: + required: true + +- type: textarea + attributes: + label: Collection Versions + description: >- + Paste verbatim output from `ansible-galaxy collection list` between + triple backticks. + value: | + ```console (paste below) + $ ansible-galaxy collection list + ``` + validations: + required: true + +- type: textarea + attributes: + label: AWS SDK versions + description: >- + The AWS modules depend heavily on the Amazon AWS SDKs which are regularly updated. + Paste verbatim output from `pip show boto boto3 botocore` between triple backticks. + value: | + ```console (paste below) + $ pip show boto boto3 botocore + ``` + validations: + required: true + +- type: textarea + attributes: + label: Configuration + description: >- + If this issue has an example piece of YAML that can help to reproduce this problem, please provide it. + This can be a piece of YAML from, e.g., a playbook, task file, or configuration. + + Paste verbatim output from `ansible-config dump --only-changed` between triple backticks. + value: | + ```console (paste below) + $ ansible-config dump --only-changed + + ``` + validations: + required: false + +- type: textarea + attributes: + label: OS / Environment + description: >- + Provide all relevant information below, e.g. target OS versions, + network device firmware, etc. + placeholder: RHEL 8, CentOS Stream etc. + validations: + required: false + +- type: textarea + attributes: + label: Steps to Reproduce + description: | + Describe exactly how to reproduce the problem, using a minimal test case.
It would *really* help us understand your problem if you could also paste any playbooks, configs and commands you used. + + **HINT:** You can paste https://gist.github.com links for larger files. + value: | + + ```yaml (paste below) + + ``` + validations: + required: true + +- type: textarea + attributes: + label: Expected Results + description: >- + Describe what you expected to happen when running the steps above. + placeholder: >- + I expected X to happen because I assumed Y, + but found that it did not. + validations: + required: true + +- type: textarea + attributes: + label: Actual Results + description: | + Describe what actually happened. If possible run with extra verbosity (`-vvvv`). + + Paste verbatim command output between triple backticks. + value: | + ```console (paste below) + + ``` + +- type: checkboxes + attributes: + label: Code of Conduct + description: | + Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first. + options: + - label: I agree to follow the Ansible Code of Conduct + required: true +... diff --git a/ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/ci_report.yml b/ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/ci_report.yml new file mode 100644 index 000000000..aceb2ec89 --- /dev/null +++ b/ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/ci_report.yml @@ -0,0 +1,76 @@ +--- +name: CI Bug Report +description: Create a report to help us improve our CI + +body: +- type: markdown + attributes: + value: | + ⚠ + Verify first that your issue is not [already reported on GitHub][issue search]. + *Complete **all** sections as described, this form is processed automatically.* + + [issue search]: https://github.com/ansible-collections/amazon.aws/search?q=is%3Aissue&type=issues + +- type: textarea + attributes: + label: Summary + description: | + Describe the new issue briefly below. + placeholder: >- + I opened a Pull Request and CI failed to run. I believe this is due to a problem with the CI rather than my code. + validations: + required: true + +- type: dropdown + attributes: + label: Issue Type + # FIXME: Once GitHub allows defining the default choice, update this + options: + - CI Bug Report + validations: + required: true + +- type: textarea + attributes: + label: CI Jobs + description: >- + Please provide a link to the failed CI tests. + placeholder: https://dashboard.zuul.ansible.com/t/ansible/buildset/be956faa49d84e43bc860d0cd3dc8503 + validations: + required: false + +- type: textarea + attributes: + label: Pull Request + description: >- + Please provide a link to the Pull Request where the tests are failing. + placeholder: https://github.com/ansible-collections/amazon.aws/runs/3040421733 + validations: + required: false + +- type: textarea + attributes: + label: Additional Information + description: | + Please provide as much information as possible to help us understand the issue being reported. + Where possible, please include the specific errors that you're seeing. + + **HINT:** You can paste https://gist.github.com links for larger files. + value: | + + ```yaml (paste below) + + ``` + validations: + required: false + +- type: checkboxes + attributes: + label: Code of Conduct + description: | + Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
+ options: + - label: I agree to follow the Ansible Code of Conduct + required: true +... diff --git a/ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/config.yml b/ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 000000000..f90bd1ad8 --- /dev/null +++ b/ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,27 @@ +--- +# Ref: https://help.github.com/en/github/building-a-strong-community/configuring-issue-templates-for-your-repository#configuring-the-template-chooser +blank_issues_enabled: false # default: true +contact_links: +- name: Security bug report + url: https://docs.ansible.com/ansible-core/devel/community/reporting_bugs_and_features.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections + about: | + Please learn how to report security vulnerabilities here. + + For all security related bugs, email security@ansible.com + instead of using this issue tracker and you will receive + a prompt response. + + For more information, see + https://docs.ansible.com/ansible/latest/community/reporting_bugs_and_features.html +- name: Ansible Code of Conduct + url: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections + about: Be nice to other members of the community. +- name: Talk to the community + url: https://docs.ansible.com/ansible/latest/community/communication.html?utm_medium=github&utm_source=issue_template_chooser#mailing-list-information + about: Please ask and answer usage questions here +- name: Working groups + url: https://github.com/ansible/community/wiki + about: Interested in improving a specific area? Become a part of a working group! +- name: For Enterprise + url: https://www.ansible.com/products/engine?utm_medium=github&utm_source=issue_template_chooser_ansible_collections + about: Red Hat offers support for the Ansible Automation Platform diff --git a/ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/documentation_report.yml b/ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/documentation_report.yml new file mode 100644 index 000000000..b88a81614 --- /dev/null +++ b/ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/documentation_report.yml @@ -0,0 +1,130 @@ +--- +name: Documentation Report +description: Ask us about docs +# NOTE: issue body is enabled to allow screenshots + +body: +- type: markdown + attributes: + value: | + ⚠ + Verify first that your issue is not [already reported on GitHub][issue search]. + Where possible also test if the latest release and main branch are affected too. + *Complete **all** sections as described, this form is processed automatically.* + + [issue search]: https://github.com/ansible-collections/amazon.aws/search?q=is%3Aissue&type=issues + +- type: textarea + attributes: + label: Summary + description: | + Explain the problem briefly below, and add suggestions for wording or structure. + + **HINT:** Did you know the documentation has an `Edit on GitHub` link on every page? + placeholder: >- + I was reading the Collection documentation of version X and I'm having + problems understanding Y. It would be very helpful if that got + rephrased as Z.
+ validations: + required: true + +- type: dropdown + attributes: + label: Issue Type + # FIXME: Once GitHub allows defining the default choice, update this + options: + - Documentation Report + validations: + required: true + +- type: textarea + attributes: + # For smaller collections we could use a multi-select and hardcode the list + # May generate this list via GitHub action and walking files under https://github.com/ansible-collections/community.general/tree/main/plugins + # Select from list, filter as you type (`mysql` would only show the 3 mysql components) + # OR freeform - doesn't seem to be supported in adaptivecards + label: Component Name + description: >- + Write the short name of the rst file, module, plugin or task below, + *use your best guess if unsure*. + placeholder: ec2_instance, ec2_security_group + validations: + required: true + +- type: textarea + attributes: + label: Ansible Version + description: >- + Paste verbatim output from `ansible --version` between + triple backticks. + value: | + ```console (paste below) + $ ansible --version + + ``` + validations: + required: false + +- type: textarea + attributes: + label: Collection Versions + description: >- + Paste verbatim output from `ansible-galaxy collection list` between + triple backticks. + value: | + ```console (paste below) + $ ansible-galaxy collection list + ``` + validations: + required: false + +- type: textarea + attributes: + label: Configuration + description: >- + If this issue has an example piece of YAML that can help to reproduce this problem, please provide it. + This can be a piece of YAML from, e.g., a playbook, task file, or configuration. + + Paste verbatim output from `ansible-config dump --only-changed` between triple backticks. + value: | + ```console (paste below) + $ ansible-config dump --only-changed + + ``` + validations: + required: false + +- type: textarea + attributes: + label: OS / Environment + description: >- + Provide all relevant information below, e.g. OS version, + browser, etc. + placeholder: RHEL 8, Firefox etc. + validations: + required: false + +- type: textarea + attributes: + label: Additional Information + description: | + Describe how this improves the documentation, e.g. before/after situation or screenshots. + + **Tip:** It's not possible to upload screenshots via this field directly, but you can use the last textarea in this form to attach them. + + **HINT:** You can paste https://gist.github.com links for larger files. + placeholder: >- + When the improvement is applied, it makes it more straightforward + to understand X. + validations: + required: false + +- type: checkboxes + attributes: + label: Code of Conduct + description: | + Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first. + options: + - label: I agree to follow the Ansible Code of Conduct + required: true +... diff --git a/ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/feature_request.yml b/ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/feature_request.yml new file mode 100644 index 000000000..4178d067e --- /dev/null +++ b/ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/feature_request.yml @@ -0,0 +1,74 @@ +--- +name: Feature request +description: Suggest an idea for this project + +body: +- type: markdown + attributes: + value: | + ⚠ + Verify first that your issue is not [already reported on GitHub][issue search].
+ Where possible also test if the latest release and main branch are affected too. + *Complete **all** sections as described, this form is processed automatically.* + + [issue search]: https://github.com/ansible-collections/amazon.aws/search?q=is%3Aissue&type=issues + +- type: textarea + attributes: + label: Summary + description: | + Describe the new feature/improvement briefly below. + placeholder: >- + I am trying to do X with the collection from the main branch on GitHub and + I think that implementing a feature Y would be very helpful for me and + every other user of amazon.aws because of Z. + validations: + required: true + +- type: dropdown + attributes: + label: Issue Type + # FIXME: Once GitHub allows defining the default choice, update this + options: + - Feature Idea + validations: + required: true + +- type: textarea + attributes: + # For smaller collections we could use a multi-select and hardcode the list + # May generate this list via GitHub action and walking files under https://github.com/ansible-collections/community.general/tree/main/plugins + # Select from list, filter as you type (`mysql` would only show the 3 mysql components) + # OR freeform - doesn't seem to be supported in adaptivecards + label: Component Name + description: >- + Write the short name of the module or plugin below, + *use your best guess if unsure*. + placeholder: ec2_instance, ec2_security_group + validations: + required: true + +- type: textarea + attributes: + label: Additional Information + description: | + Describe how the feature would be used, why it is needed and what it would solve. + + **HINT:** You can paste https://gist.github.com links for larger files. + value: | + + ```yaml (paste below) + + ``` + validations: + required: false + +- type: checkboxes + attributes: + label: Code of Conduct + description: | + Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first. + options: + - label: I agree to follow the Ansible Code of Conduct + required: true +... 
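All four issue templates above follow GitHub's issue-forms schema: a `body` list of typed elements (`markdown`, `textarea`, `dropdown`, `checkboxes`), each carrying `attributes` and optional `validations`. A minimal sketch of that structure is below; the label and description text are illustrative, not taken from the templates.

```yaml
---
name: Example report
description: Minimal skeleton of the issue-form schema used above
body:
- type: textarea            # free-text box shown to the reporter
  attributes:
    label: Summary          # heading rendered above the field
    description: Explain the problem briefly.
  validations:
    required: true          # GitHub blocks submission until this field is filled in
```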
diff --git a/ansible_collections/amazon/aws/.github/patchback.yml b/ansible_collections/amazon/aws/.github/patchback.yml new file mode 100644 index 000000000..113fc5294 --- /dev/null +++ b/ansible_collections/amazon/aws/.github/patchback.yml @@ -0,0 +1,4 @@ +--- +backport_branch_prefix: patchback/backports/ +backport_label_prefix: backport- +target_branch_prefix: stable- diff --git a/ansible_collections/amazon/aws/.github/settings.yml b/ansible_collections/amazon/aws/.github/settings.yml new file mode 100644 index 000000000..b27b575f0 --- /dev/null +++ b/ansible_collections/amazon/aws/.github/settings.yml @@ -0,0 +1,5 @@ +# DO NOT MODIFY + +# Settings: https://probot.github.io/apps/settings/ +# Pull settings from https://github.com/ansible-collections/.github/blob/master/.github/settings.yml +_extends: ".github" diff --git a/ansible_collections/amazon/aws/.github/workflows/docs-pr.yml b/ansible_collections/amazon/aws/.github/workflows/docs-pr.yml new file mode 100644 index 000000000..10cb50de7 --- /dev/null +++ b/ansible_collections/amazon/aws/.github/workflows/docs-pr.yml @@ -0,0 +1,73 @@ +name: Collection Docs +concurrency: + group: docs-${{ github.head_ref }} + cancel-in-progress: true +on: + pull_request_target: + types: [opened, synchronize, reopened, closed] + +env: + GHP_BASE_URL: https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }} + +jobs: + validate-docs: + permissions: + contents: read + name: Validate Ansible Docs + uses: ansible-community/github-docs-build/.github/workflows/_shared-docs-build-pr.yml@main + with: + init-lenient: false + init-fail-on-error: true + intersphinx-links: | + community_aws:https://ansible-collections.github.io/community.aws/branch/main/ + ansible_devel:https://docs.ansible.com/ansible-core/devel/ + + + build-docs: + permissions: + contents: read + name: Build Ansible Docs + uses: ansible-community/github-docs-build/.github/workflows/_shared-docs-build-pr.yml@main + with: + init-lenient: true + init-fail-on-error: false + intersphinx-links: | + community_aws:https://ansible-collections.github.io/community.aws/branch/main/ + ansible_devel:https://docs.ansible.com/ansible-core/devel/ + + comment: + permissions: + pull-requests: write + runs-on: ubuntu-latest + needs: [build-docs] + name: PR comments + steps: + - name: PR comment + uses: ansible-community/github-docs-build/actions/ansible-docs-build-comment@main + with: + body-includes: '## Docs Build' + reactions: heart + action: ${{ needs.build-docs.outputs.changed != 'true' && 'remove' || '' }} + on-closed-action: remove + on-merged-body: | + ## Docs Build 📝 + + Thank you for your contribution! ✨ + + This PR has been merged and your docs changes will be incorporated when they are next published.
+ body: | + ## Docs Build 📝 + + Thank you for your contribution! ✨ + + The docsite for **this PR** is available for download as an artifact from this run: + ${{ needs.build-docs.outputs.artifact-url }} + + You can compare to the docs for the `main` branch here: + ${{ env.GHP_BASE_URL }}/branch/main + + File changes: + + ${{ needs.build-docs.outputs.diff-files-rendered }} + + ${{ needs.build-docs.outputs.diff-rendered }} diff --git a/ansible_collections/amazon/aws/.github/workflows/docs-push.yml b/ansible_collections/amazon/aws/.github/workflows/docs-push.yml new file mode 100644 index 000000000..0acd93200 --- /dev/null +++ b/ansible_collections/amazon/aws/.github/workflows/docs-push.yml @@ -0,0 +1,39 @@ +name: Collection Docs +concurrency: + group: docs-push-${{ github.sha }} + cancel-in-progress: true +on: + push: + branches: + - main + - stable-* + tags: + - '*' + schedule: + - cron: '0 12 * * *' + +jobs: + build-docs: + permissions: + contents: read + name: Build Ansible Docs + uses: ansible-community/github-docs-build/.github/workflows/_shared-docs-build-push.yml@main + with: + init-lenient: false + init-fail-on-error: true + intersphinx-links: | + community_aws:https://ansible-collections.github.io/community.aws/branch/main/ + ansible_devel:https://docs.ansible.com/ansible-core/devel/ + + publish-docs-gh-pages: + # used to prevent running on forks + if: github.repository == 'ansible-collections/amazon.aws' + permissions: + contents: write + needs: [build-docs] + name: Publish Ansible Docs + uses: ansible-community/github-docs-build/.github/workflows/_shared-docs-build-publish-gh-pages.yml@main + with: + artifact-name: ${{ needs.build-docs.outputs.artifact-name }} + secrets: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/ansible_collections/amazon/aws/.gitignore b/ansible_collections/amazon/aws/.gitignore new file mode 100644 index 000000000..6058f0fa3 --- /dev/null +++ b/ansible_collections/amazon/aws/.gitignore @@ -0,0 +1,390 @@ + +# Created by https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv +# Edit at https://www.gitignore.io/?templates=git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv + +### dotenv ### +.env + +### Emacs ### +# -*- mode: gitignore; -*- +*~ +\#*\# +/.emacs.desktop +/.emacs.desktop.lock +*.elc +auto-save-list +tramp +.\#* + +# Org-mode +.org-id-locations +*_archive + +# flymake-mode +*_flymake.* + +# eshell files +/eshell/history +/eshell/lastdir + +# elpa packages +/elpa/ + +# reftex files +*.rel + +# AUCTeX auto folder +/auto/ + +# cask packages +.cask/ +dist/ + +# Flycheck +flycheck_*.el + +# server auth directory +/server/ + +# projectiles files +.projectile + +# directory configuration +.dir-locals.el + +# network security +/network-security.data + + +### Git ### +# Created by git for backups. To disable backups in Git: +# $ git config --global mergetool.keepBackup false +*.orig + +# Created by git when using merge tools for conflicts +*.BACKUP.* +*.BASE.* +*.LOCAL.* +*.REMOTE.* +*_BACKUP_*.txt +*_BASE_*.txt +*_LOCAL_*.txt +*_REMOTE_*.txt + +#!! ERROR: jupyternotebook is undefined.
Use list command to see defined gitignore types !!# + +### Linux ### + +# temporary files which can be created if a process still has a handle open of a deleted file +.fuse_hidden* + +# KDE directory preferences +.directory + +# Linux trash folder which might appear on any partition or disk +.Trash-* + +# .nfs files are created when an open file is removed but is still being accessed +.nfs* + +### PyCharm+all ### +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/**/usage.statistics.xml +.idea/**/dictionaries +.idea/**/shelf + +# Generated files +.idea/**/contentModel.xml + +# Sensitive or high-churn files +.idea/**/dataSources/ +.idea/**/dataSources.ids +.idea/**/dataSources.local.xml +.idea/**/sqlDataSources.xml +.idea/**/dynamic.xml +.idea/**/uiDesigner.xml +.idea/**/dbnavigator.xml + +# Gradle +.idea/**/gradle.xml +.idea/**/libraries + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. +# .idea/modules.xml +# .idea/*.iml +# .idea/modules +# *.iml +# *.ipr + +# CMake +cmake-build-*/ + +# Mongo Explorer plugin +.idea/**/mongoSettings.xml + +# File-based project format +*.iws + +# IntelliJ +out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Cursive Clojure plugin +.idea/replstate.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties + +# Editor-based Rest Client +.idea/httpRequests + +# Android studio 3.1+ serialized cache file +.idea/caches/build_file_checksums.ser + +### PyCharm+all Patch ### +# Ignores the whole .idea folder and all .iml files +# See https://github.com/joeblau/gitignore.io/issues/186 and https://github.com/joeblau/gitignore.io/issues/360 + +.idea/ + +# Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023 + +*.iml +modules.xml +.idea/misc.xml +*.ipr + +# Sonarlint plugin +.idea/sonarlint + +### pydev ### +.pydevproject + +### Python ### +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
+# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# Mr Developer +.mr.developer.cfg +.project + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +### Vim ### +# Swap +[._]*.s[a-v][a-z] +[._]*.sw[a-p] +[._]s[a-rt-v][a-z] +[._]ss[a-gi-z] +[._]sw[a-p] + +# Session +Session.vim +Sessionx.vim + +# Temporary +.netrwhist +# Auto-generated tag files +tags +# Persistent undo +[._]*.un~ + +### WebStorm ### +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff + +# Generated files + +# Sensitive or high-churn files + +# Gradle + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. +# .idea/modules.xml +# .idea/*.iml +# .idea/modules +# *.iml +# *.ipr + +# CMake + +# Mongo Explorer plugin + +# File-based project format + +# IntelliJ + +# mpeltonen/sbt-idea plugin + +# JIRA plugin + +# Cursive Clojure plugin + +# Crashlytics plugin (for Android Studio and IntelliJ) + +# Editor-based Rest Client + +# Android studio 3.1+ serialized cache file + +### WebStorm Patch ### +# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721 + +# *.iml +# modules.xml +# .idea/misc.xml +# *.ipr + +# Sonarlint plugin +.idea/**/sonarlint/ + +# SonarQube Plugin +.idea/**/sonarIssues.xml + +# Markdown Navigator plugin +.idea/**/markdown-navigator.xml +.idea/**/markdown-navigator/ + +### Windows ### +# Windows thumbnail cache files +Thumbs.db +Thumbs.db:encryptable +ehthumbs.db +ehthumbs_vista.db + +# Dump file +*.stackdump + +# Folder config file +[Dd]esktop.ini + +# Recycle Bin used on file shares +$RECYCLE.BIN/ + +# Windows Installer files +*.cab +*.msi +*.msix +*.msm +*.msp + +# Windows shortcuts +*.lnk + +# Antsibull-changelog +changelogs/.plugin-cache.yaml + +# End of https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv diff --git a/ansible_collections/amazon/aws/CHANGELOG.rst b/ansible_collections/amazon/aws/CHANGELOG.rst new file mode 100644 index 000000000..6e07527c1 --- /dev/null +++ b/ansible_collections/amazon/aws/CHANGELOG.rst @@ -0,0 +1,1013 @@ +======================== +amazon.aws Release Notes +======================== + +.. contents:: Topics + + +v5.5.1 +====== + +Release Summary +--------------- + +This release brings a few bugfixes. + + +Bugfixes +-------- + +- autoscaling_group - fix ValidationError when describing an autoscaling group that has more than 20 target groups attached to it by breaking the request into chunks (https://github.com/ansible-collections/amazon.aws/pull/1593). +- autoscaling_group_info - fix ValidationError when describing an autoscaling group that has more than 20 target groups attached to it by breaking the request into chunks (https://github.com/ansible-collections/amazon.aws/pull/1593).
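The two autoscaling fixes above only change how the modules batch their internal API calls; playbook usage is unchanged. A minimal invocation sketch, assuming a hypothetical group name:

```yaml
- name: Describe an auto scaling group with many target groups attached
  amazon.aws.autoscaling_group_info:
    name: my-asg            # hypothetical group name
  register: asg_info
```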
+- aws_account_attribute - raise correct ``AnsibleLookupError`` rather than ``AnsibleError`` (https://github.com/ansible-collections/amazon.aws/issues/1528). +- aws_secret - raise correct ``AnsibleLookupError`` rather than ``AnsibleError`` (https://github.com/ansible-collections/amazon.aws/issues/1528). +- aws_service_ip_ranges - raise correct ``AnsibleLookupError`` rather than ``AnsibleError`` (https://github.com/ansible-collections/amazon.aws/issues/1528). +- aws_ssm - raise correct ``AnsibleLookupError`` rather than ``AnsibleError`` (https://github.com/ansible-collections/amazon.aws/issues/1528). +- ec2_instance - fix check_mode issue when adding network interfaces (https://github.com/ansible-collections/amazon.aws/issues/1403). +- elb_application_lb - fix missing attributes on creation of ALB. The ``create_or_update_alb()`` function was including ALB-specific attributes when updating an existing ALB but not when creating a new ALB (https://github.com/ansible-collections/amazon.aws/issues/1510). + +v5.5.0 +====== + +Release Summary +--------------- + +This release contains a number of bugfixes, new features and new modules. This is the last planned minor release prior to the release of version 6.0.0. + + +Minor Changes +------------- + +- Add connectivity_type to ec2_vpc_nat_gateway module (https://github.com/ansible-collections/amazon.aws/pull/1267). +- cloudwatch - Add metrics and extended_statistic keys to cloudwatch module (https://github.com/ansible-collections/amazon.aws/pull/1133). +- ec2_ami - add support for BootMode, TpmSupport, UefiData params (https://github.com/ansible-collections/amazon.aws/pull/1037). +- ec2_metadata_facts - added support for querying instance tags in metadata (https://github.com/ansible-collections/amazon.aws/pull/1186). +- kms_key - Add multi_region option to create_key (https://github.com/ansible-collections/amazon.aws/pull/1290). +- lambda - add support for function layers when creating or updating a lambda function (https://github.com/ansible-collections/amazon.aws/pull/1118). +- lambda_event - Added support for setting FunctionResponseTypes when creating lambda event source mappings (https://github.com/ansible-collections/amazon.aws/pull/1209). +- module_utils/elbv2 - removed compatibility code for ``botocore < 1.10.30`` (https://github.com/ansible-collections/amazon.aws/pull/1477). +- rds_cluster - New ``engine_mode`` parameter (https://github.com/ansible-collections/amazon.aws/pull/941). +- rds_cluster - add new options (e.g., ``db_cluster_instance_class``, ``allocated_storage``, ``storage_type``, ``iops``) (https://github.com/ansible-collections/amazon.aws/pull/1191). +- rds_cluster - update list of supported engines with ``mysql`` and ``postgres`` (https://github.com/ansible-collections/amazon.aws/pull/1191). +- s3_bucket - ensure ``public_access`` is configured before updating policies (https://github.com/ansible-collections/amazon.aws/pull/1511). + +Bugfixes +-------- + +- cloudwatch_metric_alarm - Don't consider ``StateTransitionedTimestamp`` in change detection. (https://github.com/ansible-collections/amazon.aws/pull/1440). +- ec2_instance - Pick up ``app_callback -> set_password`` rather than ``app_callback -> set_passwd`` (https://github.com/ansible-collections/amazon.aws/issues/1449). +- lambda_info - Do not convert environment variables to snake_case when querying lambda config. (https://github.com/ansible-collections/amazon.aws/pull/1457).
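For the ``lambda_info`` fix above, a usage sketch; the function name is hypothetical, and ``query: config`` is assumed to restrict the lookup to the function configuration whose environment variables the fix now returns unmodified:

```yaml
- name: Fetch a function's configuration, including its environment variables
  amazon.aws.lambda_info:
    name: my-function       # hypothetical function name
    query: config
  register: fn_info
```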
+- rds_instance - fix type of ``promotion_tier`` as passed to the APIs (https://github.com/ansible-collections/amazon.aws/pull/1475). + +New Modules +----------- + +- lambda_layer - Creates an AWS Lambda layer or deletes an AWS Lambda layer version +- lambda_layer_info - List lambda layers or lambda layer versions + +v5.4.0 +====== + +Release Summary +--------------- + +This minor release brings bugfixes and minor new features. + +Minor Changes +------------- + +- ec2_spot_instance - add parameter ``terminate_instances`` to support terminating instances associated with spot requests (https://github.com/ansible-collections/amazon.aws/pull/1402). +- route53_health_check - added support for enabling Latency graphs (MeasureLatency) during creation of a Route53 Health Check (https://github.com/ansible-collections/amazon.aws/pull/1201). + +Bugfixes +-------- + +- ec2_metadata_facts - fix ``AttributeError`` when running the ec2_metadata_facts module on Python 2 managed nodes (https://github.com/ansible-collections/amazon.aws/issues/1358). +- ec2_vol - handle ec2_vol.tags when the associated instance already exists (https://github.com/ansible-collections/amazon.aws/pull/1071). +- rds_instance - Fixed ``TypeError`` when tagging an RDS DB with storage type ``gp3`` (https://github.com/ansible-collections/amazon.aws/pull/1437). +- route53_info - Add new return key ``health_check_observations`` for health check operations (https://github.com/ansible-collections/amazon.aws/pull/1419). +- route53_info - Fixed ``KeyError`` when getting status or failure_reason of a health check (https://github.com/ansible-collections/amazon.aws/pull/1419). + +v5.3.0 +====== + +Release Summary +--------------- + +This release brings some minor changes, bugfixes, and deprecated features. + +Minor Changes +------------- + +- ec2_instance - more consistently return ``instances`` information (https://github.com/ansible-collections/amazon.aws/pull/964). +- ec2_instance - remove unused import (https://github.com/ansible-collections/amazon.aws/pull/1350). +- ec2_key - Add unit-tests coverage (https://github.com/ansible-collections/amazon.aws/pull/1288). +- ec2_vpc_nat_gateway - ensure allocation_id is defined before potential access (https://github.com/ansible-collections/amazon.aws/pull/1350). +- route53_zone - added support for associating multiple VPCs to route53 hosted zones (https://github.com/ansible-collections/amazon.aws/pull/1300). +- s3_bucket - add option to support creation of buckets with object lock enabled (https://github.com/ansible-collections/amazon.aws/pull/1372). + +Deprecated Features +------------------- + +- support for passing both profile and security tokens through a mix of environment variables and parameters has been deprecated and support will be removed in release 6.0.0. After release 6.0.0 it will only be possible to pass either a profile or security tokens, regardless of mechanism used to pass them. To explicitly block a parameter coming from an environment variable pass an empty string as the parameter value. Support for passing profile and security tokens together was originally deprecated in release 1.2.0, however only partially implemented in release 5.0.0 (https://github.com/ansible-collections/amazon.aws/pull/1355). + +Bugfixes +-------- + +- cloudtrail - support disabling encryption using ``kms_key_id`` (https://github.com/ansible-collections/amazon.aws/pull/1384).
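A sketch of the ``cloudtrail`` fix above, assuming (per the linked PR) that passing an empty ``kms_key_id`` removes SSE-KMS encryption from an existing trail; the trail and bucket names are hypothetical:

```yaml
- name: Disable SSE-KMS log encryption on an existing trail
  amazon.aws.cloudtrail:
    state: present
    name: my-trail                 # hypothetical trail name
    s3_bucket_name: my-log-bucket  # hypothetical bucket receiving the logs
    kms_key_id: ""                 # empty string assumed to clear the KMS key, per the fix above
```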
+- ec2_key - fix issue when trying to update existing key pair with the same key material (https://github.com/ansible-collections/amazon.aws/pull/1383). +- module_utils/elbv2 - fix change detection by adding default values for ``Scope`` and ``SessionTimeout`` parameters in ``authenticate-oidc`` rules (https://github.com/ansible-collections/amazon.aws/pull/1270). +- module_utils/elbv2 - respect ``UseExistingClientSecret`` parameter in ``authenticate-oidc`` rules (https://github.com/ansible-collections/amazon.aws/pull/1270). +- revert breaking change introduced in 5.2.0 when passing credentials through a mix of environment variables and parameters (https://github.com/ansible-collections/amazon.aws/issues/1353). +- s3_bucket - empty bucket policy was throwing a ``JSONDecodeError`` - deal with it gracefully instead (https://github.com/ansible-collections/amazon.aws/pull/1368) + +v5.2.0 +====== + +Release Summary +--------------- + +A minor release containing bugfixes for the ``ec2_eni_info`` module and the ``aws_rds`` inventory plugin, as well as improvements to the ``rds_instance`` module. + + +Minor Changes +------------- + +- amazon.aws collection - refactoring of code to use argument specification ``fallback`` when falling back to environment variables for security credentials and AWS connection details (https://github.com/ansible-collections/amazon.aws/pull/1174). +- rds_instance - Split up the integration test-suite into a series of smaller tests (https://github.com/ansible-collections/amazon.aws/pull/1185). +- rds_instance - add support for gp3 storage type (https://github.com/ansible-collections/amazon.aws/pull/1266). + +Bugfixes +-------- + +- aws_rds - fixes a bug in the RDS inventory plugin where the config file was ignored (https://github.com/ansible-collections/amazon.aws/issues/1304). +- lambda - fix flaky integration test which assumes there are no other lambdas in the account (https://github.com/ansible-collections/amazon.aws/issues/1277) + +v5.1.0 +====== + +Release Summary +--------------- + +This release brings some minor changes, bugfixes, security fixes and deprecated features. + +Minor Changes +------------- + +- amazon.aws collection - The ``aws_access_key`` parameter has been renamed to ``access_key``, ``access_key`` was previously an alias for this parameter and ``aws_access_key`` remains as an alias. This change should have no observable effect for users outside the module/plugin documentation. (https://github.com/ansible-collections/amazon.aws/pull/1172). +- amazon.aws collection - The ``aws_secret_key`` parameter has been renamed to ``secret_key``, ``secret_key`` was previously an alias for this parameter and ``aws_secret_key`` remains as an alias. This change should have no observable effect for users outside the module/plugin documentation. (https://github.com/ansible-collections/amazon.aws/pull/1172). +- amazon.aws collection - The ``security_token`` parameter has been renamed to ``session_token``, ``session_token`` was previously an alias for this parameter and ``security_token`` remains as an alias. This change should have no observable effect for users outside the module/plugin documentation. (https://github.com/ansible-collections/amazon.aws/pull/1172). +- aws_account_attribute lookup plugin - use ``missing_required_lib`` for more consistent error message when boto3/botocore is not available (https://github.com/ansible-collections/amazon.aws/pull/1152). +- aws_ec2 inventory - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181).
+- aws_ec2 inventory plugin - use ``missing_required_lib`` for more consistent error message when boto3/botocore is not available (https://github.com/ansible-collections/amazon.aws/pull/1152). +- aws_rds inventory plugin - use ``missing_required_lib`` for more consistent error message when boto3/botocore is not available (https://github.com/ansible-collections/amazon.aws/pull/1152). +- aws_secret lookup plugin - use ``missing_required_lib`` for more consistent error message when boto3/botocore is not available (https://github.com/ansible-collections/amazon.aws/pull/1152). +- aws_ssm lookup plugin - use ``missing_required_lib`` for more consistent error message when boto3/botocore is not available (https://github.com/ansible-collections/amazon.aws/pull/1152). +- ec2_instance - minor fix for launching an instance in a specified AZ when ``vpc_subnet_id`` is not provided (https://github.com/ansible-collections/amazon.aws/pull/1150). +- ec2_instance - refactor ``tower_callback`` code to handle parameter validation as part of the argument specification (https://github.com/ansible-collections/amazon.aws/pull/1199). +- ec2_instance - the ``instance_role`` parameter has been renamed to ``iam_instance_profile`` to better reflect what it is, ``instance_role`` remains as an alias (https://github.com/ansible-collections/amazon.aws/pull/1151). +- ec2_instance - the ``tower_callback`` parameter has been renamed to ``aap_callback``, ``tower_callback`` remains as an alias. This change should have no observable effect for users outside the module documentation (https://github.com/ansible-collections/amazon.aws/pull/1199). +- s3_object_info - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). + +Deprecated Features +------------------- + +- amazon.aws collection - Support for the ``EC2_ACCESS_KEY`` environment variable has been deprecated and will be removed in a release after 2024-12-01. Please use the ``access_key`` parameter or ``AWS_ACCESS_KEY_ID`` environment variable instead (https://github.com/ansible-collections/amazon.aws/pull/1172). +- amazon.aws collection - Support for the ``EC2_REGION`` environment variable has been deprecated and will be removed in a release after 2024-12-01. Please use the ``region`` parameter or ``AWS_REGION`` environment variable instead (https://github.com/ansible-collections/amazon.aws/pull/1172). +- amazon.aws collection - Support for the ``EC2_SECRET_KEY`` environment variable has been deprecated and will be removed in a release after 2024-12-01. Please use the ``secret_key`` parameter or ``AWS_SECRET_ACCESS_KEY`` environment variable instead (https://github.com/ansible-collections/amazon.aws/pull/1172). +- amazon.aws collection - Support for the ``EC2_SECURITY_TOKEN`` environment variable has been deprecated and will be removed in a release after 2024-12-01. Please use the ``session_token`` parameter or ``AWS_SESSION_TOKEN`` environment variable instead (https://github.com/ansible-collections/amazon.aws/pull/1172). +- amazon.aws collection - Support for the ``EC2_URL`` and ``S3_URL`` environment variables has been deprecated and will be removed in a release after 2024-12-01. Please use the ``endpoint_url`` parameter or ``AWS_ENDPOINT_URL`` environment variable instead (https://github.com/ansible-collections/amazon.aws/pull/1172). +- amazon.aws collection - The ``access_token`` alias for the ``session_token`` parameter has been deprecated and will be removed in a release after 2024-12-01.
Please use the ``session_token`` name instead (https://github.com/ansible-collections/amazon.aws/pull/1172). +- amazon.aws collection - The ``aws_security_token`` alias for the ``session_token`` parameter has been deprecated and will be removed in a release after 2024-12-01. Please use the ``session_token`` name instead (https://github.com/ansible-collections/amazon.aws/pull/1172). +- amazon.aws collection - The ``ec2_access_key`` alias for the ``access_key`` parameter has been deprecated and will be removed in a release after 2024-12-01. Please use the ``access_key`` name instead (https://github.com/ansible-collections/amazon.aws/pull/1172). +- amazon.aws collection - The ``ec2_region`` alias for the ``region`` parameter has been deprecated and will be removed in a release after 2024-12-01. Please use the ``region`` name instead (https://github.com/ansible-collections/amazon.aws/pull/1172). +- amazon.aws collection - The ``ec2_secret_key`` alias for the ``secret_key`` parameter has been deprecated and will be removed in a release after 2024-12-01. Please use the ``secret_key`` name instead (https://github.com/ansible-collections/amazon.aws/pull/1172). +- amazon.aws collection - The ``security_token`` alias for the ``session_token`` parameter has been deprecated and will be removed in a release after 2024-12-01. Please use the ``session_token`` name instead (https://github.com/ansible-collections/amazon.aws/pull/1172). +- ec2_security_group - support for passing nested lists to ``cidr_ip`` and ``cidr_ipv6`` has been deprecated. Nested lists can be passed through the ``flatten`` filter instead, for example ``cidr_ip: '{{ my_cidrs | flatten }}'`` (see the playbook sketch at the end of the v5.0.1 entries below) (https://github.com/ansible-collections/amazon.aws/pull/1213). +- module_utils.url - ``ansible_collections.amazon.aws.module_utils.urls`` is believed to be unused and has been deprecated and will be removed in release 7.0.0. + +Security Fixes +-------------- + +- ec2_instance - fixes leak of password into logs when using ``tower_callback.windows=True`` and ``tower_callback.set_password`` (https://github.com/ansible-collections/amazon.aws/pull/1199). + +Bugfixes +-------- + +- ec2_instance - fixes ``Invalid type for parameter TagSpecifications, value None`` error when tags aren't specified (https://github.com/ansible-collections/amazon.aws/issues/1148). +- module_utils.transformations - ensure that ``map_complex_type`` still returns transformed items if items exist that are not in the ``type_map`` (https://github.com/ansible-collections/amazon.aws/pull/1163). + +v5.0.2 +====== + +Bugfixes +-------- + +- ec2_metadata_facts - fixed ``AttributeError`` (https://github.com/ansible-collections/amazon.aws/issues/1134). + +v5.0.1 +====== + +Bugfixes +-------- + +- ec2_vpc_net_info - fix ``KeyError`` (https://github.com/ansible-collections/amazon.aws/pull/1109). +- ec2_vpc_net_info - remove hardcoded ``ClassicLinkEnabled`` parameter when request for ``ClassicLinkDnsSupported`` failed (https://github.com/ansible-collections/amazon.aws/pull/1109). +- s3_object - be more defensive when checking the results of ``s3.get_bucket_ownership_controls`` (https://github.com/ansible-collections/amazon.aws/issues/1115).
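+
+As a footnote to the ``ec2_security_group`` deprecation noted under v5.1.0 above, a minimal playbook sketch of the suggested ``flatten`` migration; ``my_cidrs`` is a hypothetical variable holding a nested list of CIDR strings::
+
+    - name: Allow HTTPS from a flattened list of CIDRs
+      amazon.aws.ec2_security_group:
+        name: example-sg  # hypothetical group name
+        description: Example security group
+        rules:
+          - proto: tcp
+            ports:
+              - 443
+            cidr_ip: "{{ my_cidrs | flatten }}"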
+ +v5.0.0 +====== + +Release Summary +--------------- + +In this release we promoted many community modules to Red Hat supported status. Those modules have been moved from the community.aws collection to the amazon.aws collection. This release also brings some new features, bugfixes, breaking changes and deprecated features. The amazon.aws collection has dropped support for ``botocore<1.21.0`` and ``boto3<1.18.0``. Support for ``ansible-core<2.11`` has also been dropped. + +Major Changes +------------- + +- autoscaling_group - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.autoscaling_group``. +- autoscaling_group_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.autoscaling_group_info``. +- cloudtrail - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.cloudtrail``. +- cloudwatch_metric_alarm - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.cloudwatch_metric_alarm``. +- cloudwatchevent_rule - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.cloudwatchevent_rule``. +- cloudwatchlogs_log_group - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.cloudwatchlogs_log_group``. +- cloudwatchlogs_log_group_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.cloudwatchlogs_log_group_info``. +- cloudwatchlogs_log_group_metric_filter - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.cloudwatchlogs_log_group_metric_filter``. +- ec2_eip - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.ec2_eip``. +- ec2_eip_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.ec2_eip_info``. +- elb_application_lb - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.elb_application_lb``. +- elb_application_lb_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.elb_application_lb_info``. +- execute_lambda - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.execute_lambda``. +- iam_policy - The module has been migrated from the ``community.aws`` collection.
Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_policy``. +- iam_policy_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_policy_info``. +- iam_user - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_user``. +- iam_user_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_user_info``. +- kms_key - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.kms_key``. +- kms_key_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.kms_key_info``. +- lambda - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.lambda``. +- lambda_alias - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.lambda_alias``. +- lambda_event - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.lambda_event``. +- lambda_execute - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.lambda_execute``. +- lambda_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.lambda_info``. +- lambda_policy - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.lambda_policy``. +- rds_cluster - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.rds_cluster``. +- rds_cluster_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.rds_cluster_info``. +- rds_cluster_snapshot - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.rds_cluster_snapshot``. +- rds_instance - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.rds_instance``. +- rds_instance_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.rds_instance_info``. 
+- rds_instance_snapshot - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.rds_instance_snapshot``. +- rds_option_group - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.rds_option_group``. +- rds_option_group_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.rds_option_group_info``. +- rds_param_group - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.rds_param_group``. +- rds_snapshot_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.rds_snapshot_info``. +- rds_subnet_group - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.rds_subnet_group``. +- route53 - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.route53``. +- route53_health_check - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.route53_health_check``. +- route53_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.route53_info``. +- route53_zone - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.route53_zone``. + +Minor Changes +------------- + +- Ability to record and replay the API interaction of a module for testing purposes. Showcase the feature with an example (https://github.com/ansible-collections/amazon.aws/pull/998). +- Remove the empty ``__init__.py`` files from the distribution; they are no longer required (https://github.com/ansible-collections/amazon.aws/pull/1018). +- amazon.aws modules - the ``ec2_url`` parameter has been renamed to ``endpoint_url`` for consistency, ``ec2_url`` remains as an alias (https://github.com/ansible-collections/amazon.aws/pull/992). +- aws_caller_info - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/968). +- aws_ec2 - introduce the ``allow_duplicated_hosts`` configuration key (https://github.com/ansible-collections/amazon.aws/pull/1026). +- cloudformation - avoid catching ``Exception``, catch more specific errors instead (https://github.com/ansible-collections/amazon.aws/pull/968). +- cloudwatch_metric_alarm_info - Added a new module that describes CloudWatch metric alarms (https://github.com/ansible-collections/amazon.aws/pull/988). +- ec2_group - The ``ec2_group`` module has been renamed to ``ec2_security_group``, ``ec2_group`` remains as an alias (https://github.com/ansible-collections/amazon.aws/pull/897).
+- ec2_group_info - The ``ec2_group_info`` module has been renamed to ``ec2_security_group_info``, ``ec2_group_info`` remains as an alias (https://github.com/ansible-collections/amazon.aws/pull/897). +- ec2_instance - Add hibernation_options and volumes->ebs->encrypted keys to support stopping and hibernating instances (https://github.com/ansible-collections/amazon.aws/pull/972). +- ec2_instance - expanded the use of the automatic retries to ``InsufficientInstanceCapacity`` (https://github.com/ansible-collections/amazon.aws/issues/1038). +- ec2_metadata_facts - avoid catching ``Exception``, catch more specific errors instead (https://github.com/ansible-collections/amazon.aws/pull/968). +- ec2_security_group - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/968). +- ec2_vpc_endpoint - avoid catching ``Exception``, catch more specific errors instead (https://github.com/ansible-collections/amazon.aws/pull/968). +- ec2_vpc_nat_gateway - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/968). +- ec2_vpc_net_info - handle classic link check for shared VPCs by throwing a warning instead of an error (https://github.com/ansible-collections/amazon.aws/pull/984). +- module_utils/acm - Move to jittered backoff (https://github.com/ansible-collections/amazon.aws/pull/946). +- module_utils/elbv2 - ensures that ``ip_address_type`` is set on creation rather than re-setting it after creation (https://github.com/ansible-collections/amazon.aws/pull/940). +- module_utils/elbv2 - uses new waiters with retries for temporary failures (https://github.com/ansible-collections/amazon.aws/pull/940). +- module_utils/waf - Move to jittered backoff (https://github.com/ansible-collections/amazon.aws/pull/946). +- module_utils/waiters - Add waiters to manage eks_nodegroup module (https://github.com/ansible-collections/community.aws/pull/1415). +- s3_bucket - ``rgw`` was added as an alias for the ``ceph`` parameter for consistency with the ``s3_object`` module (https://github.com/ansible-collections/amazon.aws/pull/994). +- s3_bucket - the ``s3_url`` parameter was merged into the ``endpoint_url`` parameter, ``s3_url`` remains as an alias (https://github.com/ansible-collections/amazon.aws/pull/994). +- s3_object - added the ``sig_v4`` parameter, enabling the user to opt in to signature version 4 for download/get operations. (https://github.com/ansible-collections/amazon.aws/pull/1014) +- s3_object - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/968). +- s3_object - the ``rgw`` parameter was renamed to ``ceph`` for consistency with the ``s3_bucket`` module, ``rgw`` remains as an alias (https://github.com/ansible-collections/amazon.aws/pull/994). +- s3_object - the ``s3_url`` parameter was merged into the ``endpoint_url`` parameter, ``s3_url`` remains as an alias (https://github.com/ansible-collections/amazon.aws/pull/994). +- s3_object - updated module to add support for handling file upload to a bucket with ACL disabled (https://github.com/ansible-collections/amazon.aws/pull/921). +- s3_object_info - Added a new module that describes S3 Objects (https://github.com/ansible-collections/amazon.aws/pull/977). + +Breaking Changes / Porting Guide +-------------------------------- + +- amazon.aws collection - Support for ansible-core < 2.11 has been dropped (https://github.com/ansible-collections/amazon.aws/pull/1087). +- amazon.aws collection - The amazon.aws collection has dropped support for ``botocore<1.21.0`` and ``boto3<1.18.0``.
Most modules will continue to work with older versions of the AWS SDK, however compatibility with older versions of the SDK is not guaranteed and will not be tested. When using older versions of the SDK a warning will be emitted by Ansible (https://github.com/ansible-collections/amazon.aws/pull/934). +- doc_fragments - remove minimum collection requirements from doc_fragments/aws.py and allow pulling those from doc_fragments/aws_boto3.py instead (https://github.com/ansible-collections/amazon.aws/pull/985). +- ec2_ami - the default value for ``purge_tags`` has been changed from ``False`` to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/916). +- ec2_ami - the parameter aliases ``DeviceName``, ``VirtualName`` and ``NoDevice`` were previously deprecated and have been removed, please use ``device_name``, ``virtual_name`` and ``no_device`` instead (https://github.com/ansible-collections/amazon.aws/pull/913). +- ec2_eni_info - the mutual exclusivity of the ``eni_id`` and ``filters`` parameters is now enforced, previously ``filters`` would be ignored if ``eni_id`` was set (https://github.com/ansible-collections/amazon.aws/pull/954). +- ec2_instance - the default value for ``purge_tags`` has been changed from ``False`` to ``True``; playbooks that relied on the old behaviour should now set ``purge_tags: false`` explicitly (see the playbook sketch after the deprecated features below) (https://github.com/ansible-collections/amazon.aws/pull/916). +- ec2_key - the default value for ``purge_tags`` has been changed from ``False`` to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/916). +- ec2_vol - the default value for ``purge_tags`` has been changed from ``False`` to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/916). +- ec2_vpc_dhcp_option_info - the parameter aliases ``DhcpOptionIds`` and ``DryRun`` were previously deprecated and have been removed, please use ``dhcp_options_ids`` and ``dry_run`` instead (https://github.com/ansible-collections/amazon.aws/pull/913). +- ec2_vpc_endpoint - the default value for ``purge_tags`` has been changed from ``False`` to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/916). +- ec2_vpc_net - the default value for ``purge_tags`` has been changed from ``False`` to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/916). +- ec2_vpc_route_table - the default value for ``purge_tags`` has been changed from ``False`` to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/916). +- s3_bucket - the previously deprecated alias ``S3_URL`` for the ``s3_url`` parameter has been removed. Playbooks should be updated to use ``s3_url`` (https://github.com/ansible-collections/amazon.aws/pull/908). +- s3_object - the previously deprecated alias ``S3_URL`` for the ``s3_url`` parameter has been removed. Playbooks should be updated to use ``s3_url`` (https://github.com/ansible-collections/amazon.aws/pull/908). + +Deprecated Features +------------------- + +- amazon.aws collection - due to the AWS SDKs announcing the end of support for Python less than 3.7 (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/) support for Python less than 3.7 by this collection has been deprecated and will be removed in a release after 2023-05-31 (https://github.com/ansible-collections/amazon.aws/pull/935). +- inventory/aws_ec2 - the ``include_extra_api_calls`` option is now deprecated, its value is silently ignored (https://github.com/ansible-collections/amazon.aws/pull/1097).
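+
+A minimal playbook sketch of pinning the pre-5.0.0 ``purge_tags`` behaviour described in the breaking changes above (the instance ID is a placeholder)::
+
+    - name: Add a tag without purging existing unmanaged tags
+      amazon.aws.ec2_instance:
+        instance_ids:
+          - i-0123456789abcdef0  # placeholder instance ID
+        purge_tags: false  # explicit opt-out of the new default
+        tags:
+          Environment: production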
+ +Bugfixes +-------- + +- aws_ec2 - address a regression introduced in 4.1.0 (https://github.com/ansible-collections/amazon.aws/pull/862) that caused the presence of duplicated hosts in the inventory. +- cloudtrail - Fix ``KeyError`` caused by using ``TagList`` instead of ``TagsList`` (https://github.com/ansible-collections/amazon.aws/issues/1088). +- ec2_instance - Only show the deprecation warning for the default value of ``instance_type`` when ``count`` or ``exact_count`` are specified (https://github.com/ansible-collections/amazon.aws/issues/980). +- ec2_metadata_facts - fix ``'NoneType' object is not callable`` exception when using Ansible 2.13+ (https://github.com/ansible-collections/amazon.aws/issues/942). +- module_utils/botocore - fix ``object has no attribute 'fail'`` error in error handling (https://github.com/ansible-collections/amazon.aws/pull/1045). +- module_utils/elbv2 - fixes ``KeyError`` when using ``UseExistingClientSecret`` rather than ``ClientSecret`` (https://github.com/ansible-collections/amazon.aws/pull/940). +- module_utils/elbv2 - improvements to idempotency when comparing listeners (https://github.com/ansible-collections/community.aws/issues/604). +- s3_object - also use ``ignore_nonexistent_bucket`` when listing a bucket (https://github.com/ansible-collections/amazon.aws/issues/966). + +New Modules +----------- + +- cloudtrail_info - Gather information about trails in AWS CloudTrail. +- cloudwatch_metric_alarm_info - Gather information about the alarms for the specified metric +- s3_object_info - Gather information about objects in S3 + +v4.3.0 +====== + +Release Summary +--------------- + +The amazon.aws 4.3.0 release includes a number of minor bug fixes and improvements. +Following the release of amazon.aws 5.0.0, backports to the 4.x series will be limited to +security issues and bugfixes. + + +Minor Changes +------------- + +- ec2_instance - expanded the use of the automatic retries to ``InsufficientInstanceCapacity`` (https://github.com/ansible-collections/amazon.aws/issues/1038). + +Bugfixes +-------- + +- ec2_metadata_facts - fix ``'NoneType' object is not callable`` exception when using Ansible 2.13+ (https://github.com/ansible-collections/amazon.aws/issues/942). +- module_utils/cloud - Fix ``ValueError: ansible_collections.amazon.aws.plugins.module_utils.core.__spec__ is None`` error on Ansible 2.9 (https://github.com/ansible-collections/amazon.aws/issues/1083). + +v4.2.0 +====== + +Minor Changes +------------- + +- ec2_security_group - set type as ``list`` for rules->group_name as it can accept both ``str`` and ``list`` (https://github.com/ansible-collections/amazon.aws/pull/971). +- various modules - linting fixups (https://github.com/ansible-collections/amazon.aws/pull/953). + +Deprecated Features +------------------- + +- module_utils.cloud - removal of ``CloudRetry.backoff`` has been delayed until release 6.0.0. It is recommended to update custom modules to use ``jittered_backoff`` or ``exponential_backoff`` instead (https://github.com/ansible-collections/amazon.aws/pull/951). + +v4.1.0 +====== + +Minor Changes +------------- + +- ec2_instance - expanded the use of the automatic retries on temporary failures (https://github.com/ansible-collections/amazon.aws/issues/927). +- s3_bucket - updated module to enable support for setting S3 Bucket Keys for SSE-KMS (see the playbook sketch below) (https://github.com/ansible-collections/amazon.aws/pull/882).
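+
+A minimal playbook sketch of the SSE-KMS S3 Bucket Key support added above; the ``bucket_key_enabled`` parameter name is an assumption based on the linked pull request, and the KMS key ARN is a placeholder::
+
+    - name: Create a bucket whose SSE-KMS encryption uses an S3 Bucket Key
+      amazon.aws.s3_bucket:
+        name: example-bucket
+        encryption: aws:kms
+        encryption_key_id: arn:aws:kms:us-east-1:123456789012:key/placeholder  # placeholder ARN
+        bucket_key_enabled: true  # assumed parameter name; check the module docs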
+ +Deprecated Features +------------------- + +- amazon.aws collection - due to the AWS SDKs announcing the end of support for Python less than 3.7 (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/) support for Python less than 3.7 by this collection has been deprecated and will be removed in a release after 2023-05-31 (https://github.com/ansible-collections/amazon.aws/pull/935). + +Bugfixes +-------- + +- aws_ec2 - ensure the correct number of hosts are returned when tags as hostnames are used (https://github.com/ansible-collections/amazon.aws/pull/862). +- elb_application_lb - fix ``KeyError`` when balancing across two Target Groups (https://github.com/ansible-collections/community.aws/issues/1089). +- elb_classic_lb - fix ``'NoneType' object has no attribute`` bug when creating a new ELB in check mode with a health check (https://github.com/ansible-collections/amazon.aws/pull/915). +- elb_classic_lb - fix ``'NoneType' object has no attribute`` bug when creating a new ELB using security group names (https://github.com/ansible-collections/amazon.aws/issues/914). + +v4.0.0 +====== + +Major Changes +------------- + +- amazon.aws collection - The amazon.aws collection has dropped support for ``botocore<1.20.0`` and ``boto3<1.17.0``. Most modules will continue to work with older versions of the AWS SDK, however compatibility with older versions of the SDK is not guaranteed and will not be tested. When using older versions of the SDK a warning will be emitted by Ansible (https://github.com/ansible-collections/amazon.aws/pull/574). + +Minor Changes +------------- + +- aws_s3 - Add ``validate_bucket_name`` option, to control bucket name validation (https://github.com/ansible-collections/amazon.aws/pull/615). +- aws_s3 - The ``aws_s3`` module has been renamed to ``s3_object`` (https://github.com/ansible-collections/amazon.aws/pull/869). +- aws_s3 - ``resource_tags`` has been added as an alias for the ``tags`` parameter (https://github.com/ansible-collections/amazon.aws/pull/845). +- ec2_eni - Change parameter ``device_index`` data type to string when passing to ``describe_network_interfaces`` API call (https://github.com/ansible-collections/amazon.aws/pull/877). +- ec2_eni - ``resource_tags`` has been added as an alias for the ``tags`` parameter (https://github.com/ansible-collections/amazon.aws/pull/845). +- ec2_group - add ``egress_rules`` as an alias for ``rules_egress`` (https://github.com/ansible-collections/amazon.aws/pull/878). +- ec2_group - add ``purge_egress_rules`` as an alias for ``purge_rules_egress`` (https://github.com/ansible-collections/amazon.aws/pull/878). +- ec2_instance - Add missing ``metadata_options`` parameters (https://github.com/ansible-collections/amazon.aws/pull/715). +- ec2_key - ``resource_tags`` has been added as an alias for the ``tags`` parameter (https://github.com/ansible-collections/amazon.aws/pull/845). +- ec2_vpc_net - add support for managing VPCs by ID (https://github.com/ansible-collections/amazon.aws/pull/848). +- ec2_vpc_subnet - add support for OutpostArn param (https://github.com/ansible-collections/amazon.aws/pull/598). +- elb_classic_lb - ``resource_tags`` has been added as an alias for the ``tags`` parameter (https://github.com/ansible-collections/amazon.aws/pull/845). +- s3_bucket - Add ``validate_bucket_name`` option, to control bucket name validation (https://github.com/ansible-collections/amazon.aws/pull/615).
+- s3_bucket - ``resource_tags`` has been added as an alias for the ``tags`` parameter (https://github.com/ansible-collections/amazon.aws/pull/845). + +Breaking Changes / Porting Guide +-------------------------------- + +- Tags beginning with ``aws:`` will not be removed when purging tags, these tags are reserved by Amazon and may not be updated or deleted (https://github.com/ansible-collections/amazon.aws/issues/817). +- amazon.aws collection - the ``profile`` parameter is now mutually exclusive with the ``aws_access_key``, ``aws_secret_key`` and ``security_token`` parameters (https://github.com/ansible-collections/amazon.aws/pull/834). +- aws_az_info - the module alias ``aws_az_facts`` was deprecated in Ansible 2.9 and has now been removed (https://github.com/ansible-collections/amazon.aws/pull/832). +- aws_s3 - the default value for ``overwrite`` has been changed to ``different`` instead of ``always`` so that the module is idempotent by default (https://github.com/ansible-collections/amazon.aws/issues/811). +- aws_ssm - ``on_denied`` and ``on_missing`` now both default to ``error``, for consistency with both aws_secret and the base Lookup class (https://github.com/ansible-collections/amazon.aws/issues/617). +- ec2 - The ``ec2`` module has been removed in release 4.0.0 and replaced by the ``ec2_instance`` module (https://github.com/ansible-collections/amazon.aws/pull/630). +- ec2_vpc_igw_info - The default value for ``convert_tags`` has been changed to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/835). +- elb_classic_lb - the ``ec2_elb`` fact has been removed (https://github.com/ansible-collections/amazon.aws/pull/827). +- module_utils - Support for the original AWS SDK aka ``boto`` has been removed, including all relevant helper functions. All modules should now use the ``boto3``/``botocore`` AWS SDK (https://github.com/ansible-collections/amazon.aws/pull/630) + +Deprecated Features +------------------- + +- aws_s3 - The ``S3_URL`` alias for the s3_url option has been deprecated and will be removed in release 5.0.0 (https://github.com/ansible-collections/community.aws/pull/795). +- ec2_ami - The ``DeviceName`` alias for the device_name option has been deprecated and will be removed in release 5.0.0 (https://github.com/ansible-collections/community.aws/pull/795). +- ec2_ami - The ``NoDevice`` alias for the no_device option has been deprecated and will be removed in release 5.0.0 (https://github.com/ansible-collections/community.aws/pull/795). +- ec2_ami - The ``VirtualName`` alias for the virtual_name option has been deprecated and will be removed in release 5.0.0 (https://github.com/ansible-collections/community.aws/pull/795). +- ec2_ami - the current default value of ``False`` for ``purge_tags`` has been deprecated and will be updated in release 5.0.0 to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/846). +- ec2_instance - The default value for ``instance_type`` has been deprecated, in a future release you must set an ``instance_type`` or a ``launch_template`` (https://github.com/ansible-collections/amazon.aws/pull/587). +- ec2_instance - the current default value of ``False`` for ``purge_tags`` has been deprecated and will be updated in release 5.0.0 to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/849). +- ec2_key - the current default value of ``False`` for ``purge_tags`` has been deprecated and will be updated in release 5.0.0 to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/846).
+- ec2_vol - the current default value of ``False`` for ``purge_tags`` has been deprecated and will be updated in release 5.0.0 to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/846). +- ec2_vpc_dhcp_option_info - The ``DhcpOptionIds`` alias for the dhcp_option_ids option has been deprecated and will be removed in release 5.0.0 (https://github.com/ansible-collections/community.aws/pull/795). +- ec2_vpc_dhcp_option_info - The ``DryRun`` alias for the dry_run option has been deprecated and will be removed in release 5.0.0 (https://github.com/ansible-collections/community.aws/pull/795). +- ec2_vpc_endpoint - the current default value of ``False`` for ``purge_tags`` has been deprecated and will be updated in release 5.0.0 to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/846). +- ec2_vpc_net - the current default value of ``False`` for ``purge_tags`` has been deprecated and will be updated in release 5.0.0 to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/848). +- ec2_vpc_route_table - the current default value of ``False`` for ``purge_tags`` has been deprecated and will be updated in release 5.0.0 to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/846). +- s3_bucket - The ``S3_URL`` alias for the s3_url option has been deprecated and will be removed in release 5.0.0 (https://github.com/ansible-collections/community.aws/pull/795). +- s3_object - Support for creation and deletion of S3 buckets has been deprecated. Please use the ``amazon.aws.s3_bucket`` module to create and delete buckets (https://github.com/ansible-collections/amazon.aws/pull/869). + +Removed Features (previously deprecated) +---------------------------------------- + +- cloudformation - the ``template_format`` option has been removed. It has been ignored by the module since Ansible 2.3 (https://github.com/ansible-collections/amazon.aws/pull/833). +- ec2_key - the ``wait_timeout`` option had no effect, was deprecated in release 1.0.0, and has now been removed (https://github.com/ansible-collections/amazon.aws/pull/830). +- ec2_key - the ``wait`` option had no effect, was deprecated in release 1.0.0, and has now been removed (https://github.com/ansible-collections/amazon.aws/pull/830). +- ec2_tag - the previously deprecated state ``list`` has been removed. To list tags on an EC2 resource the ``ec2_tag_info`` module can be used (https://github.com/ansible-collections/amazon.aws/pull/829). +- ec2_vol - the previously deprecated state ``list`` has been removed. To list volumes the ``ec2_vol_info`` module can be used (https://github.com/ansible-collections/amazon.aws/pull/828). +- module_utils.batch - the class ``ansible_collections.amazon.aws.plugins.module_utils.batch.AWSConnection`` has been removed. Please use ``AnsibleAWSModule.client()`` instead (https://github.com/ansible-collections/amazon.aws/pull/831). + +Bugfixes +-------- + +- ec2_group - fix uncaught exception when running with ``--diff`` and ``--check`` to create a new security group (https://github.com/ansible-collections/amazon.aws/issues/440). +- ec2_instance - Add a condition to handle the default ``instance_type`` value, to fix broken instance creation with a launch template (https://github.com/ansible-collections/amazon.aws/pull/587). +- ec2_instance - raise an error when missing permission to stop instance when ``state`` is set to ``rebooted`` (https://github.com/ansible-collections/amazon.aws/pull/671).
+- ec2_vpc_igw - use ``gateway_id`` rather than filters to paginate if possible, to fix a ``'NoneType' object is not subscriptable`` error (https://github.com/ansible-collections/amazon.aws/pull/766). +- ec2_vpc_net - fix a bug where CIDR configuration would be updated in check mode (https://github.com/ansible/ansible/issues/62678). +- ec2_vpc_net - fix a bug where the module would get stuck if DNS options were updated in check mode (https://github.com/ansible/ansible/issues/62677). +- elb_classic_lb - modify the return value of the ``_format_listeners`` method to resolve a failure creating HTTPS listeners (https://github.com/ansible-collections/amazon.aws/pull/860). + +v3.5.0 +====== + +Release Summary +--------------- + +Following the release of amazon.aws 5.0.0, 3.5.0 is a bugfix release and the final planned release for the 3.x series. + + +Minor Changes +------------- + +- ec2_security_group - set type as ``list`` for rules->group_name as it can accept both ``str`` and ``list`` (https://github.com/ansible-collections/amazon.aws/pull/971). + +Bugfixes +-------- + +- ec2_metadata_facts - fix ``'NoneType' object is not callable`` exception when using Ansible 2.13+ (https://github.com/ansible-collections/amazon.aws/issues/942). + +v3.4.0 +====== + +Minor Changes +------------- + +- ec2_instance - expanded the use of the automatic retries on temporary failures (https://github.com/ansible-collections/amazon.aws/issues/927). + +Bugfixes +-------- + +- elb_application_lb - fix ``KeyError`` when balancing across two Target Groups (https://github.com/ansible-collections/community.aws/issues/1089). +- elb_classic_lb - fix ``'NoneType' object has no attribute`` bug when creating a new ELB in check mode with a health check (https://github.com/ansible-collections/amazon.aws/pull/915). +- elb_classic_lb - fix ``'NoneType' object has no attribute`` bug when creating a new ELB using security group names (https://github.com/ansible-collections/amazon.aws/issues/914). + +v3.3.1 +====== + +v3.3.0 +====== + +Minor Changes +------------- + +- aws_ec2 inventory - Allow for literal strings in hostname that don't match filter parameters in ec2 describe-instances (https://github.com/ansible-collections/amazon.aws/pull/826). +- aws_ssm - Add support for ``endpoint`` parameter (https://github.com/ansible-collections/amazon.aws/pull/837). +- module.utils.rds - add retry_codes to get_rds_method_attribute return data to use in call_method and add unit tests (https://github.com/ansible-collections/amazon.aws/pull/776). +- module.utils.rds - refactor to utilize get_rds_method_attribute return data (https://github.com/ansible-collections/amazon.aws/pull/776). +- module_utils - add new aliases ``aws_session_token`` and ``session_token`` to the ``security_token`` parameter to be more in-line with the boto SDK (https://github.com/ansible-collections/amazon.aws/pull/631). +- module_utils.rds - Add support and unit tests for addition/removal of IAM roles to/from a db instance in module_utils.rds with waiters (https://github.com/ansible-collections/amazon.aws/pull/714). + +Bugfixes +-------- + +- Include ``PSF-license.txt`` file for ``plugins/module_utils/_version.py``. +- aws_account_attribute lookup plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701). +- aws_ec2 inventory plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701).
+- aws_rds inventory plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701). +- aws_resource_actions callback plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701). +- aws_secret lookup plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701). +- aws_service_ip_ranges lookup plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701). +- aws_ssm - Fix environment variables for client configuration (e.g., AWS_PROFILE, AWS_ACCESS_KEY_ID) (https://github.com/ansible-collections/amazon.aws/pull/837). +- aws_ssm lookup plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701). +- ec2_instance - fix module failure on Python 3.8 caused by dict keys being modified during iteration (https://github.com/ansible-collections/amazon.aws/issues/709). +- module.utils.rds - Add waiter for promoting read replica to fix idempotency issue (https://github.com/ansible-collections/amazon.aws/pull/714). +- module.utils.rds - Catch InvalidDBSecurityGroupStateFault when modifying a db instance (https://github.com/ansible-collections/amazon.aws/pull/776). +- module.utils.s3 - Update ``validate_bucket_name`` minimum length to 3 (https://github.com/ansible-collections/amazon.aws/pull/802). + +v3.2.0 +====== + +Minor Changes +------------- + +- aws_secret - add pagination for ``bypath`` functionality (https://github.com/ansible-collections/amazon.aws/pull/591). +- ec2_instance - Fix scope of deprecation warning to not show warning when ``state`` is ``absent`` (https://github.com/ansible-collections/amazon.aws/pull/719). +- ec2_vpc_route_table - support associating internet gateways (https://github.com/ansible-collections/amazon.aws/pull/690). +- module_utils.elbv2 - Add support for ALB-specific attributes and a ``compare_elb_attributes`` method to support check_mode in module_utils.elbv2 (https://github.com/ansible-collections/amazon.aws/pull/696). +- s3_bucket - Add support for enforced bucket owner object ownership (https://github.com/ansible-collections/amazon.aws/pull/694). + +Bugfixes +-------- + +- aws_ec2 inventory - use the iam_role_arn configuration parameter to assume the role before trying to call DescribeRegions if the regions configuration is not set and the AWS credentials provided lack the privilege to perform the DescribeRegions action (see the inventory sketch below). (https://github.com/ansible-collections/amazon.aws/issues/566). +- ec2_vol - changing a volume from a type that does not support IOPS (like ``standard``) to a type that does (like ``gp3``) fails (https://github.com/ansible-collections/amazon.aws/issues/626). +- ec2_vpc_igw - fix ``'NoneType' object is not subscriptable`` error (https://github.com/ansible-collections/amazon.aws/pull/691). +- ec2_vpc_igw - use paginator for describe internet gateways and add retry to fix ``'NoneType' object is not subscriptable`` error (https://github.com/ansible-collections/amazon.aws/pull/695). +- ec2_vpc_net - In check mode, ensure the module does not change the configuration. Handle case when Amazon-provided ipv6 block is enabled, then disabled, then enabled again. Do not disable IPv6 CIDR association (using Amazon pool) if ``ipv6_cidr`` property is not present in the task. If the VPC already exists and the ``ipv6_cidr`` property is not present, retain the current configuration (https://github.com/ansible-collections/amazon.aws/pull/631).
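+
+A minimal ``aws_ec2`` inventory sketch exercising the ``iam_role_arn`` fix above (the file name and role ARN are placeholders)::
+
+    # demo.aws_ec2.yml -- hypothetical inventory file name
+    plugin: amazon.aws.aws_ec2
+    # behaviour per the fix: with no ``regions`` listed, the plugin assumes
+    # this role before calling DescribeRegions to discover the region list
+    iam_role_arn: arn:aws:iam::123456789012:role/inventory-read-role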
+ +v3.1.1 +====== + +Minor Changes +------------- + +- bump the release version of the amazon.aws collection from 3.1.0 to 3.1.1 because of a bug that occurred while uploading to Galaxy. + +v3.1.0 +====== + +Minor Changes +------------- + +- add new parameters ``hostvars_prefix`` and ``hostvars_suffix`` for inventory plugins aws_ec2 and aws_rds (https://github.com/ansible-collections/amazon.aws/issues/535). +- aws_s3 - Add ``validate_bucket_name`` option, to control bucket name validation (https://github.com/ansible-collections/amazon.aws/pull/615). +- aws_s3 - add a ``latest`` choice to the ``overwrite`` parameter to get the latest object on S3 (https://github.com/ansible-collections/amazon.aws/pull/595). +- ec2_vol - add support for OutpostArn param (https://github.com/ansible-collections/amazon.aws/pull/597). +- ec2_vol - tag volume on creation (https://github.com/ansible-collections/amazon.aws/pull/603). +- ec2_vpc_route_table - add support for IPv6 in creating route tables (https://github.com/ansible-collections/amazon.aws/pull/601). +- s3_bucket - Add ``validate_bucket_name`` option, to control bucket name validation (https://github.com/ansible-collections/amazon.aws/pull/615). + +Deprecated Features +------------------- + +- ec2_instance - The default value for ``instance_type`` has been deprecated, in a future release you must set an ``instance_type`` or a ``launch_template`` (https://github.com/ansible-collections/amazon.aws/pull/587). + +Bugfixes +-------- + +- Various modules and plugins - use vendored version of ``distutils.version`` instead of the deprecated Python standard library ``distutils`` (https://github.com/ansible-collections/amazon.aws/pull/599). +- aws_acm - No longer raise a ``ResourceNotFound`` exception while retrieving ACM certificates. +- aws_s3 - fix exception raised when using module to copy from source to destination and key is missing from source (https://github.com/ansible-collections/amazon.aws/issues/602). +- ec2_instance - Add a condition to handle the default ``instance_type`` value, to fix broken instance creation with a launch template (https://github.com/ansible-collections/amazon.aws/pull/587). +- ec2_key - add support for ED25519 key type (https://github.com/ansible-collections/amazon.aws/issues/572). +- ec2_vol - Sets the ``Iops`` value in ``req_obj`` even if the ``iops`` value has not changed, to allow modifying volume types that require passing an iops value to boto. (https://github.com/ansible-collections/amazon.aws/pull/606) +- elb_classic_lb - handle security_group_ids when providing security_group_names and fix broken tasks in integration test (https://github.com/ansible-collections/amazon.aws/pull/592). +- s3_bucket - Enable the management of bucket-level ACLs (https://github.com/ansible-collections/amazon.aws/issues/573). + +v3.0.0 +====== + +Major Changes +------------- + +- amazon.aws collection - The amazon.aws collection has dropped support for ``botocore<1.19.0`` and ``boto3<1.16.0``. Most modules will continue to work with older versions of the AWS SDK, however compatibility with older versions of the SDK is not guaranteed and will not be tested. When using older versions of the SDK a warning will be emitted by Ansible (https://github.com/ansible-collections/amazon.aws/pull/574). + +Minor Changes +------------- + +- ec2_instance - add count parameter support (https://github.com/ansible-collections/amazon.aws/pull/539). + +Breaking Changes / Porting Guide +-------------------------------- + +- aws_caller_facts - Remove deprecated ``aws_caller_facts`` alias.
Please use ``aws_caller_info`` instead. +- cloudformation_facts - Remove deprecated ``cloudformation_facts`` alias. Please use ``cloudformation_info`` instead. +- ec2_ami_facts - Remove deprecated ``ec2_ami_facts`` alias. Please use ``ec2_ami_info`` instead. +- ec2_eni_facts - Remove deprecated ``ec2_eni_facts`` alias. Please use ``ec2_eni_info`` instead. +- ec2_group_facts - Remove deprecated ``ec2_group_facts`` alias. Please use ``ec2_group_info`` instead. +- ec2_instance_facts - Remove deprecated ``ec2_instance_facts`` alias. Please use ``ec2_instance_info`` instead. +- ec2_snapshot_facts - Remove deprecated ``ec2_snapshot_facts`` alias. Please use ``ec2_snapshot_info`` instead. +- ec2_vol_facts - Remove deprecated ``ec2_vol_facts`` alias. Please use ``ec2_vol_info`` instead. +- ec2_vpc_dhcp_option_facts - Remove deprecated ``ec2_vpc_dhcp_option_facts`` alias. Please use ``ec2_vpc_dhcp_option_info`` instead. +- ec2_vpc_endpoint_facts - Remove deprecated ``ec2_vpc_endpoint_facts`` alias. Please use ``ec2_vpc_endpoint_info`` instead. +- ec2_vpc_igw_facts - Remove deprecated ``ec2_vpc_igw_facts`` alias. Please use ``ec2_vpc_igw_info`` instead. +- ec2_vpc_nat_gateway_facts - Remove deprecated ``ec2_vpc_nat_gateway_facts`` alias. Please use ``ec2_vpc_nat_gateway_info`` instead. +- ec2_vpc_net_facts - Remove deprecated ``ec2_vpc_net_facts`` alias. Please use ``ec2_vpc_net_info`` instead. +- ec2_vpc_route_table_facts - Remove deprecated ``ec2_vpc_route_table_facts`` alias. Please use ``ec2_vpc_route_table_info`` instead. +- ec2_vpc_subnet_facts - Remove deprecated ``ec2_vpc_subnet_facts`` alias. Please use ``ec2_vpc_subnet_info`` instead. + +Deprecated Features +------------------- + +- module_utils - support for the original AWS SDK ``boto`` has been deprecated in favour of the ``boto3``/``botocore`` SDK. All ``boto`` based modules have either been deprecated or migrated to ``botocore``, and the remaining support code in module_utils will be removed in release 4.0.0 of the amazon.aws collection. Any modules outside of the amazon.aws and community.aws collections based on the ``boto`` library will need to be migrated to the ``boto3``/``botocore`` libraries (https://github.com/ansible-collections/amazon.aws/pull/575). + +v2.2.0 +====== + +Minor Changes +------------- + +- ec2_instance - add count parameter support (https://github.com/ansible-collections/amazon.aws/pull/539). + +Bugfixes +-------- + +- aws_ec2 inventory - use the iam_role_arn configuration parameter to assume the role before trying to call DescribeRegions if the regions configuration is not set and AWS credentials provided without enough privilege to perform the DescribeRegions action. (https://github.com/ansible-collections/amazon.aws/issues/566). +- ec2_vol - Sets the Iops value in req_obj even if the iops value has not changed, to allow modifying volume types that require passing an iops value to boto. (https://github.com/ansible-collections/amazon.aws/pull/606) +- ec2_vol - changing a volume from a type that does not support IOPS (like ``standard``) to a type that does (like ``gp3``) fails (https://github.com/ansible-collections/amazon.aws/issues/626). +- ec2_vpc_igw - fix 'NoneType' object is not subscriptable error (https://github.com/ansible-collections/amazon.aws/pull/691). +- ec2_vpc_igw - use paginator for describe internet gateways and add retry to fix NoneType object is not subscriptable error (https://github.com/ansible-collections/amazon.aws/pull/695). 
+- elb_classic_lb - handle security_group_ids when providing security_group_names and fix broken tasks in integration test (https://github.com/ansible-collections/amazon.aws/pull/592). + +v2.1.0 +====== + +Minor Changes +------------- + +- aws_service_ip_ranges - add new option ``ipv6_prefixes`` to get only IPv6 addresses and prefixes for Amazon services (https://github.com/ansible-collections/amazon.aws/pull/430) +- cloudformation - fix detection when there are no changes. Sometimes when there are no changes, the change set will have a status of ``FAILED`` with the StatusReason ``No updates are to be performed`` (https://github.com/ansible-collections/amazon.aws/pull/507). +- ec2_ami - add check_mode support (https://github.com/ansible-collections/amazon.aws/pull/516). +- ec2_ami - use module_util helper for tagging AMIs (https://github.com/ansible-collections/amazon.aws/pull/520). +- ec2_ami - when creating an AMI from an instance pass the tagging options at creation time (https://github.com/ansible-collections/amazon.aws/pull/551). +- ec2_elb_lb - module renamed to ``elb_classic_lb`` (https://github.com/ansible-collections/amazon.aws/pull/377). +- ec2_eni - add check mode support (https://github.com/ansible-collections/amazon.aws/pull/534). +- ec2_eni - use module_util helper for tagging ENIs (https://github.com/ansible-collections/amazon.aws/pull/522). +- ec2_instance - use module_util helpers for tagging (https://github.com/ansible-collections/amazon.aws/pull/527). +- ec2_key - add support for tagging key pairs (https://github.com/ansible-collections/amazon.aws/pull/548). +- ec2_snapshot - add check_mode support (https://github.com/ansible-collections/amazon.aws/pull/512). +- ec2_vol - add check_mode support (https://github.com/ansible-collections/amazon.aws/pull/509). +- ec2_vpc_dhcp_option - use module_util helpers for tagging (https://github.com/ansible-collections/amazon.aws/pull/531). +- ec2_vpc_endpoint - added ``vpc_endpoint_security_groups`` parameter to support defining the security group attached to an interface endpoint (https://github.com/ansible-collections/amazon.aws/pull/544). +- ec2_vpc_endpoint - added ``vpc_endpoint_subnets`` parameter to support defining the subnet attached to an interface or gateway endpoint (https://github.com/ansible-collections/amazon.aws/pull/544). +- ec2_vpc_endpoint - use module_util helper for tagging (https://github.com/ansible-collections/amazon.aws/pull/525). +- ec2_vpc_endpoint - use module_util helpers for tagging (https://github.com/ansible-collections/amazon.aws/pull/531). +- ec2_vpc_igw - use module_util helper for tagging (https://github.com/ansible-collections/amazon.aws/pull/523). +- ec2_vpc_igw - use module_util helpers for tagging (https://github.com/ansible-collections/amazon.aws/pull/531). +- ec2_vpc_nat_gateway - use module_util helper for tagging (https://github.com/ansible-collections/amazon.aws/pull/524). +- ec2_vpc_nat_gateway - use module_util helpers for tagging (https://github.com/ansible-collections/amazon.aws/pull/531). +- elb_classic_lb - added retries on common AWS temporary API failures (https://github.com/ansible-collections/amazon.aws/pull/377). +- elb_classic_lb - added support for check_mode (https://github.com/ansible-collections/amazon.aws/pull/377). +- elb_classic_lb - added support for wait during creation (https://github.com/ansible-collections/amazon.aws/pull/377). +- elb_classic_lb - added support for wait during instance addition and removal (https://github.com/ansible-collections/amazon.aws/pull/377).
+- elb_classic_lb - migrated to boto3 SDK (https://github.com/ansible-collections/amazon.aws/pull/377).
+- elb_classic_lb - various error messages changed due to refactor (https://github.com/ansible-collections/amazon.aws/pull/377).
+- module_utils.ec2 - moved generic tagging helpers into module_utils.tagging (https://github.com/ansible-collections/amazon.aws/pull/527).
+- module_utils.tagging - add new helper to generate TagSpecification lists (https://github.com/ansible-collections/amazon.aws/pull/527).
+
+Deprecated Features
+-------------------
+
+- elb_classic_lb - setting of the ``ec2_elb`` fact has been deprecated and will be removed in release 4.0.0 of the collection. The module now returns ``elb`` which can be accessed using the register keyword (https://github.com/ansible-collections/amazon.aws/pull/552).
+
+Bugfixes
+--------
+
+- AWS action group - added missing ``ec2_instance_facts`` entry (https://github.com/ansible-collections/amazon.aws/issues/557)
+- ec2_ami - fix problem when creating an AMI from an instance with ephemeral volumes (https://github.com/ansible-collections/amazon.aws/issues/511).
+- ec2_instance - ensure that ec2_instance falls back to the tag(Name) parameter when no filter and no name parameter is passed (https://github.com/ansible-collections/amazon.aws/issues/526).
+- s3_bucket - update error handling to better support DigitalOcean Spaces (https://github.com/ansible-collections/amazon.aws/issues/508).
+
+v2.0.0
+======
+
+Major Changes
+-------------
+
+- amazon.aws collection - Due to the AWS SDKs announcing the end of support for Python less than 3.6 (https://boto3.amazonaws.com/v1/documentation/api/1.17.64/guide/migrationpy3.html) this collection now requires Python 3.6+ (https://github.com/ansible-collections/amazon.aws/pull/298).
+- amazon.aws collection - The amazon.aws collection has dropped support for ``botocore<1.18.0`` and ``boto3<1.15.0``. Most modules will continue to work with older versions of the AWS SDK, however compatibility with older versions of the SDK is not guaranteed and will not be tested. When using older versions of the SDK a warning will be emitted by Ansible (https://github.com/ansible-collections/amazon.aws/pull/502).
+- ec2_instance - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.ec2_instance``.
+- ec2_instance_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.ec2_instance_info``.
+- ec2_vpc_endpoint - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.ec2_vpc_endpoint``.
+- ec2_vpc_endpoint_facts - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.ec2_vpc_endpoint_info``.
+- ec2_vpc_endpoint_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.ec2_vpc_endpoint_info``.
+- ec2_vpc_endpoint_service_info - The module has been migrated from the ``community.aws`` collection.
Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.ec2_vpc_endpoint_service_info``.
+- ec2_vpc_igw - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.ec2_vpc_igw``.
+- ec2_vpc_igw_facts - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.ec2_vpc_igw_info``.
+- ec2_vpc_igw_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.ec2_vpc_igw_info``.
+- ec2_vpc_nat_gateway - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.ec2_vpc_nat_gateway``.
+- ec2_vpc_nat_gateway_facts - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.ec2_vpc_nat_gateway_info``.
+- ec2_vpc_nat_gateway_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.ec2_vpc_nat_gateway_info``.
+- ec2_vpc_route_table - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.ec2_vpc_route_table``.
+- ec2_vpc_route_table_facts - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.ec2_vpc_route_table_info``.
+- ec2_vpc_route_table_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.ec2_vpc_route_table_info``.
+
+Minor Changes
+-------------
+
+- aws_ec2 - use a generator rather than list comprehension (https://github.com/ansible-collections/amazon.aws/pull/465).
+- aws_s3 - Tests for compatibility with older versions of the AWS SDKs have been removed (https://github.com/ansible-collections/amazon.aws/pull/442).
+- aws_s3 - add ``tags`` and ``purge_tags`` features for an S3 object (https://github.com/ansible-collections/amazon.aws/pull/335)
+- aws_s3 - new mode to copy an existing object to another bucket (https://github.com/ansible-collections/amazon.aws/pull/359).
+- aws_secret - added support for gracefully handling deleted secrets (https://github.com/ansible-collections/amazon.aws/pull/455).
+- aws_ssm - add "on_missing" and "on_denied" options (https://github.com/ansible-collections/amazon.aws/pull/370).
+- cloudformation - Tests for compatibility with older versions of the AWS SDKs have been removed (https://github.com/ansible-collections/amazon.aws/pull/442).
+- ec2_ami - ensure tags are propagated to the snapshot(s) when creating an AMI (https://github.com/ansible-collections/amazon.aws/pull/437).
+- ec2_eni - fix idempotency when ``security_groups`` attribute is specified (https://github.com/ansible-collections/amazon.aws/pull/337).
+- ec2_eni - timeout increased when waiting for ENIs to finish detaching (https://github.com/ansible-collections/amazon.aws/pull/501).
+- ec2_group - Tests for compatibility with older versions of the AWS SDKs have been removed (https://github.com/ansible-collections/amazon.aws/pull/442).
+- ec2_group - use a generator rather than list comprehension (https://github.com/ansible-collections/amazon.aws/pull/465).
+- ec2_group - use system ipaddress module, available with Python >= 3.3, instead of vendored copy (https://github.com/ansible-collections/amazon.aws/pull/461).
+- ec2_instance - Tests for compatibility with older versions of the AWS SDKs have been removed (https://github.com/ansible-collections/amazon.aws/pull/442).
+- ec2_instance - add ``throughput`` parameter for gp3 volume types (https://github.com/ansible-collections/amazon.aws/pull/433).
+- ec2_instance - add support for controlling metadata options (https://github.com/ansible-collections/amazon.aws/pull/414).
+- ec2_instance - remove unnecessary raise when exiting with a failure (https://github.com/ansible-collections/amazon.aws/pull/460).
+- ec2_instance_info - Tests for compatibility with older versions of the AWS SDKs have been removed (https://github.com/ansible-collections/amazon.aws/pull/442).
+- ec2_snapshot - migrated to use the boto3 python library (https://github.com/ansible-collections/amazon.aws/pull/356).
+- ec2_spot_instance_info - Added a new module that describes the specified Spot Instance requests (https://github.com/ansible-collections/amazon.aws/pull/487).
+- ec2_vol - add parameter ``multi_attach`` to support Multi-Attach on volume creation/update (https://github.com/ansible-collections/amazon.aws/pull/362).
+- ec2_vol - relax the boto3/botocore requirements and only require botocore 1.19.27 for modifying the ``throughput`` parameter (https://github.com/ansible-collections/amazon.aws/pull/346).
+- ec2_vpc_dhcp_option - Now also returns a boto3-style resource description in the ``dhcp_options`` result key. This includes any tags for the ``dhcp_options_id`` and has the same format as the current return value of ``ec2_vpc_dhcp_option_info``. (https://github.com/ansible-collections/amazon.aws/pull/252)
+- ec2_vpc_dhcp_option_info - Now also returns a user-friendly ``dhcp_config`` key that matches the historical ``new_config`` key from ec2_vpc_dhcp_option, and alleviates the need to use ``items2dict(key_name='key', value_name='values')`` when parsing the output of the module. (https://github.com/ansible-collections/amazon.aws/pull/252)
+- ec2_vpc_subnet - Tests for compatibility with older versions of the AWS SDKs have been removed (https://github.com/ansible-collections/amazon.aws/pull/442).
+- integration tests - remove dependency with collection ``community.general`` (https://github.com/ansible-collections/amazon.aws/pull/361).
+- module_utils/waiter - add RDS cluster ``cluster_available`` waiter (https://github.com/ansible-collections/amazon.aws/pull/464).
+- module_utils/waiter - add RDS cluster ``cluster_deleted`` waiter (https://github.com/ansible-collections/amazon.aws/pull/464).
+- module_utils/waiter - add Route53 ``resource_record_sets_changed`` waiter (https://github.com/ansible-collections/amazon.aws/pull/350).
+- s3_bucket - Tests for compatibility with older versions of the AWS SDKs have been removed (https://github.com/ansible-collections/amazon.aws/pull/442).
+- s3_bucket - add new option ``object_ownership`` to configure object ownership (https://github.com/ansible-collections/amazon.aws/pull/311)
+- s3_bucket - updated to use HeadBucket instead of ListBucket when testing for bucket existence (https://github.com/ansible-collections/amazon.aws/pull/357).
+
+Breaking Changes / Porting Guide
+--------------------------------
+
+- ec2_instance - instance wait for state behaviour has changed. If plays require the old behavior of waiting for the instance monitoring status to become ``OK`` when launching a new instance, the action will need to specify ``state: started`` (https://github.com/ansible-collections/amazon.aws/pull/481).
+- ec2_snapshot - support for waiting indefinitely has been dropped, new default is 10 minutes (https://github.com/ansible-collections/amazon.aws/pull/356).
+- ec2_vol_info - return ``attachment_set`` is now a list of attachments with Multi-Attach support on disk. (https://github.com/ansible-collections/amazon.aws/pull/362).
+- ec2_vpc_dhcp_option - The module has been refactored to use boto3. Keys and value types returned by the module are now consistent, which is a change from the previous behaviour. A ``purge_tags`` option has been added, which defaults to ``True``. (https://github.com/ansible-collections/amazon.aws/pull/252)
+- ec2_vpc_dhcp_option_info - Now preserves case for tag keys in return value. (https://github.com/ansible-collections/amazon.aws/pull/252)
+- module_utils.core - The boto3 switch has been removed from the region parameter (https://github.com/ansible-collections/amazon.aws/pull/287).
+- module_utils/compat - vendored copy of ipaddress removed (https://github.com/ansible-collections/amazon.aws/pull/461).
+- module_utils/core - updated the ``scrub_none_parameters`` function so that ``descend_into_lists`` is set to ``True`` by default (https://github.com/ansible-collections/amazon.aws/pull/297).
+
+Deprecated Features
+-------------------
+
+- ec2 - the boto based ``ec2`` module has been deprecated in favour of the boto3 based ``ec2_instance`` module. The ``ec2`` module will be removed in release 4.0.0 (https://github.com/ansible-collections/amazon.aws/pull/424). A minimal replacement sketch follows this list.
+- ec2_vpc_dhcp_option - The ``new_config`` return key has been deprecated and will be removed in a future release. It will be replaced by ``dhcp_config``. Both values are returned in the interim. (https://github.com/ansible-collections/amazon.aws/pull/252)
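+
+For playbooks still using the deprecated ``ec2`` module, a minimal replacement task using ``ec2_instance`` might look like the following sketch (the AMI ID and instance type are illustrative placeholders, not values from this changelog):
+
+.. code-block:: yaml
+
+    - amazon.aws.ec2_instance:
+        name: example-instance
+        image_id: ami-0123456789abcdef0  # hypothetical AMI ID
+        instance_type: t3.micro
+        state: started  # retains the old behaviour of waiting for the OK monitoring status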
+
+Bugfixes
+--------
+
+- aws_s3 - Fix upload permission when an S3 bucket ACL policy requires a particular canned ACL (https://github.com/ansible-collections/amazon.aws/pull/318)
+- ec2_ami - Fix ami issue when creating an ami with no_device parameter (https://github.com/ansible-collections/amazon.aws/pull/386)
+- ec2_instance - ``ec2_instance`` was waiting on EC2 instance monitoring status to be ``OK`` when launching a new instance. This could cause a play to wait multiple minutes for AWS's monitoring to complete status checks (https://github.com/ansible-collections/amazon.aws/pull/481).
+- ec2_snapshot - Fix snapshot issue when capturing a snapshot of a volume without tags (https://github.com/ansible-collections/amazon.aws/pull/383)
+- ec2_vol - Fixes ``changed`` status when ``modify_volume`` is used, but no new disk is being attached. The module incorrectly reported that no change had occurred even when disks had been modified (iops, throughput, type, etc.). (https://github.com/ansible-collections/amazon.aws/issues/482).
+- ec2_vol - fix iops setting and enforce iops/throughput parameters usage (https://github.com/ansible-collections/amazon.aws/pull/334)
+- inventory - ``include_filters`` won't be ignored anymore if ``filters`` is not set (https://github.com/ansible-collections/amazon.aws/issues/457).
+- s3_bucket - Fix error handling when attempting to set a feature that is not implemented (https://github.com/ansible-collections/amazon.aws/pull/391).
+- s3_bucket - Gracefully handle ``NotImplemented`` exceptions when fetching encryption settings (https://github.com/ansible-collections/amazon.aws/issues/390).
+
+New Modules
+-----------
+
+- ec2_spot_instance - request, stop, reboot or cancel spot instance
+- ec2_spot_instance_info - Gather information about ec2 spot instance requests
+
+v1.5.0
+======
+
+Minor Changes
+-------------
+
+- AWS inventory plugins - use shared HAS_BOTO3 helper rather than copying code (https://github.com/ansible-collections/amazon.aws/pull/288).
+- AWS lookup plugins - use shared HAS_BOTO3 helper rather than copying code (https://github.com/ansible-collections/amazon.aws/pull/288).
+- aws_account_attribute - add retries on common AWS failures (https://github.com/ansible-collections/amazon.aws/pull/295).
+- aws_ec2 inventory - expose a new configuration key ``use_contrib_script_compatible_ec2_tag_keys`` to reproduce a behavior of the old ``ec2.py`` inventory script. With this option enabled, each tag is exposed using a ``ec2_tag_TAGNAME`` key (https://github.com/ansible-collections/amazon.aws/pull/331).
+- aws_ec2 inventory - expose two new keys called ``include_filters`` and ``exclude_filters`` to give the user the ability to compose an inventory with multiple queries (https://github.com/ansible-collections/amazon.aws/pull/328).
+- aws_ec2 inventory plugin - Added support for using Jinja2 templates in the authentication fields (https://github.com/ansible-collections/amazon.aws/pull/57).
+- cloudformation - added support for StackPolicyDuringUpdateBody (https://github.com/ansible-collections/amazon.aws/pull/155).
+- ec2_metadata_facts - add support for IMDSv2 (https://github.com/ansible-collections/amazon.aws/pull/43).
+- ec2_snapshot_info - add the ``max_results`` and ``next_token_id`` options (https://github.com/ansible-collections/amazon.aws/pull/321).
+- ec2_tag - use common code for tagging resources (https://github.com/ansible-collections/amazon.aws/pull/309).
+- ec2_tag_info - use common code for tagging resources (https://github.com/ansible-collections/amazon.aws/pull/309).
+- ec2_vol - add the ``purge_tags`` option (https://github.com/ansible-collections/amazon.aws/pull/242).
+- ec2_vol - use common code for tagging resources (https://github.com/ansible-collections/amazon.aws/pull/309).
+- ec2_vpc_net - use a custom waiter which can handle API rate limiting (https://github.com/ansible-collections/amazon.aws/pull/270).
+- ec2_vpc_subnet - use AWSRetry decorator to more consistently handle API rate limiting (https://github.com/ansible-collections/amazon.aws/pull/270).
+- ec2_vpc_subnet - use common code for tagging resources (https://github.com/ansible-collections/amazon.aws/pull/309).
+- module_utils.cloudfront_facts - linting cleanup (https://github.com/ansible-collections/amazon.aws/pull/291).
+- module_utils.ec2 - linting cleanup (https://github.com/ansible-collections/amazon.aws/pull/291).
+- module_utils/core - add a helper function ``normalize_boto3_result`` (https://github.com/ansible-collections/amazon.aws/pull/271).
+- module_utils/core - add parameter ``descend_into_lists`` to ``scrub_none_parameters`` helper function (https://github.com/ansible-collections/amazon.aws/pull/262).
+- module_utils/ec2 - added additional helper functions for tagging EC2 resources (https://github.com/ansible-collections/amazon.aws/pull/309).
+- sanity tests - add ignore.txt for 2.12 (https://github.com/ansible-collections/amazon.aws/pull/315).
+
+Bugfixes
+--------
+
+- ec2_vol - create or update now preserves the existing tags, including Name (https://github.com/ansible-collections/amazon.aws/issues/229)
+- ec2_vol - fix exception when platform information isn't available (https://github.com/ansible-collections/amazon.aws/issues/305).
+
+v1.4.1
+======
+
+Minor Changes
+-------------
+
+- module_utils - the ipaddress module utility has been vendored into this collection. This eliminates the collection dependency on ansible.netcommon (which had removed the library in its 2.0 release). The ipaddress library is provided for internal use in this collection only. (https://github.com/ansible-collections/amazon.aws/issues/273)
+
+v1.4.0
+======
+
+Minor Changes
+-------------
+
+- aws_ec2 - Add hostname options concatenation
+- aws_ec2 inventory plugin - avoid a superfluous import of ``ansible.utils.display.Display`` (https://github.com/ansible-collections/amazon.aws/pull/226).
+- aws_ec2 module - Replace inverse aws instance-state-name filters !terminated, !shutting-down in favor of positive filters pending, running, stopping, stopped. Issue 235. (https://github.com/ansible-collections/amazon.aws/pull/237)
+- aws_secret - add ``bypath`` functionality (https://github.com/ansible-collections/amazon.aws/pull/192).
+- ec2_key - add AWSRetry decorator to automatically retry on common temporary failures (https://github.com/ansible-collections/amazon.aws/pull/213).
+- ec2_vol - Add support for gp3 volumes and support for modifying existing volumes (https://github.com/ansible-collections/amazon.aws/issues/55). See the sketch after this list.
+- module_utils/elbv2 - add logic to compare_rules to suit Values list nested within dicts unique to each field type. Fixes issue (https://github.com/ansible-collections/amazon.aws/issues/187)
+- various AWS plugins and module_utils - Cleanup unused imports (https://github.com/ansible-collections/amazon.aws/pull/217).
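+
+For example, the new gp3 support means a volume can be created or modified as ``gp3``. A minimal sketch (the instance ID, device name, and size are illustrative placeholders):
+
+.. code-block:: yaml
+
+    - amazon.aws.ec2_vol:
+        instance: i-0123456789abcdef0  # hypothetical instance ID
+        device_name: /dev/xvdf
+        volume_size: 50
+        volume_type: gp3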
+ +Bugfixes +-------- + +- ec2_vol - a creation or update now returns a structure with an up to date list of tags (https://github.com/ansible-collections/amazon.aws/pull/241). + +v1.3.0 +====== + +Minor Changes +------------- + +- aws_caller_info - add AWSRetry decorator to automatically retry on common temporary failures (https://github.com/ansible-collections/amazon.aws/pull/208) +- aws_s3 - Add support for uploading templated content (https://github.com/ansible-collections/amazon.aws/pull/20). +- aws_secret - add "on_missing" and "on_denied" option (https://github.com/ansible-collections/amazon.aws/pull/122). +- ec2_ami - Add retries for ratelimiting related errors (https://github.com/ansible-collections/amazon.aws/pull/195). +- ec2_ami - fixed and streamlined ``max_attempts`` logic when waiting for AMI creation to finish (https://github.com/ansible-collections/amazon.aws/pull/194). +- ec2_ami - increased default ``wait_timeout`` to 1200 seconds (https://github.com/ansible-collections/amazon.aws/pull/194). +- ec2_ami_info - Add retries for ratelimiting related errors (https://github.com/ansible-collections/amazon.aws/pull/195). +- ec2_eni - Improve reliability of the module by adding waiters and performing lookups by ENI ID rather than repeated searches (https://github.com/ansible-collections/amazon.aws/pull/180). +- ec2_eni_info - Improve reliability of the module by adding waiters and performing lookups by ENI ID rather than repeated searches (https://github.com/ansible-collections/amazon.aws/pull/180). +- ec2_group - add AWSRetry decorator to automatically retry on common temporary failures (https://github.com/ansible-collections/amazon.aws/pull/207) +- ec2_group_info - add AWSRetry decorator to automatically retry on common temporary failures (https://github.com/ansible-collections/amazon.aws/pull/207) +- ec2_snapshot_info - add AWSRetry decorator to automatically retry on common temporary failures (https://github.com/ansible-collections/amazon.aws/pull/208) +- ec2_vol - Add automatic retries on AWS rate limit errors (https://github.com/ansible-collections/amazon.aws/pull/199). +- ec2_vol - ported ec2_vol to use boto3 (https://github.com/ansible-collections/amazon.aws/pull/53). +- ec2_vpc_dhcp_option_info - add AWSRetry decorator to automatically retry on common temporary failures (https://github.com/ansible-collections/amazon.aws/pull/208) +- module_utils/core - add helper function ``scrub_none_parameters`` to remove params set to ``None`` (https://github.com/ansible-collections/community.aws/issues/251). +- module_utils/waiters - Add retries to our waiters for the same failure codes that we retry with AWSRetry (https://github.com/ansible-collections/amazon.aws/pull/185) +- s3_bucket - Add support for managing the ``public_access`` settings (https://github.com/ansible-collections/amazon.aws/pull/171). + +Bugfixes +-------- + +- ec2 - Code fix so module can create ec2 instances with ``ec2_volume_iops`` option (https://github.com/ansible-collections/amazon.aws/pull/177). +- ec2 - ignore terminated instances and instances that are shutting down when starting and stopping (https://github.com/ansible-collections/amazon.aws/issues/146). +- ec2_group - Fixes error handling during tagging failures (https://github.com/ansible-collections/amazon.aws/issues/210). 
+- ec2_group_info - Code fix so module works with Python 3.8 (make dict immutable in loop) (https://github.com/ansible-collections/amazon.aws/pull/181)
+
+v1.2.1
+======
+
+Minor Changes
+-------------
+
+- ec2_eni - Add support for tagging.
+- ec2_eni - Port ec2_eni module to boto3 and add an integration test suite.
+- ec2_eni_info - Add retries on transient AWS failures.
+- ec2_eni_info - Add support for providing an ENI ID.
+
+v1.2.0
+======
+
+Minor Changes
+-------------
+
+- ec2 module_utils - Update ``ec2_connect`` (boto2) behaviour so that ``ec2_url`` overrides ``region``.
+- module_utils.core - Support passing arbitrary extra keys to fail_json_aws, matching capabilities of fail_json.
+
+Deprecated Features
+-------------------
+
+- All AWS Modules - ``aws_access_key``, ``aws_secret_key`` and ``security_token`` will be made mutually exclusive with ``profile`` after 2022-06-01.
+
+Bugfixes
+--------
+
+- ec2 module_utils - Ensure boto3 verify parameter isn't overridden by setting a profile (https://github.com/ansible-collections/amazon.aws/issues/129)
+- s3_bucket - Ceph compatibility: treat error code NoSuchTagSetError used by Ceph synonymously to NoSuchTagSet used by AWS
+
+v1.1.0
+======
+
+Major Changes
+-------------
+
+- ec2 module_utils - The ``AWSRetry`` decorator no longer catches ``NotFound`` exceptions by default. ``NotFound`` exceptions need to be explicitly added using ``catch_extra_error_codes``. Some AWS modules may see an increase in transient failures due to AWS's eventual consistency model.
+
+Minor Changes
+-------------
+
+- Add ``aws_security_token``, ``aws_endpoint_url`` and ``endpoint_url`` aliases to improve AWS module parameter naming consistency.
+- Add support for ``aws_ca_bundle`` to boto3 based AWS modules
+- Add support for configuring boto3 profiles using ``AWS_PROFILE`` and ``AWS_DEFAULT_PROFILE``
+- Added check_mode support to aws_az_info
+- Added check_mode support to ec2_eni_info
+- Added check_mode support to ec2_snapshot_info
+- ansible_dict_to_boto3_filter_list - convert integers and bools to strings before using them in filters.
+- aws_direct_connect_virtual_interface - add direct_connect_gateway_id parameter. This field is only applicable in private VIF cases (public=False) and is mutually exclusive to virtual_gateway_id.
+- cloudformation - Return change_set_id in the cloudformation output if a change set was created.
+- ec2 - deprecate allowing both group and group_id - currently we ignore group_id if both are passed.
+- ec2_ami_info - allow integer and bool values for filtering images (https://github.com/ansible/ansible/issues/43570).
+- ec2_asg - Add support for Max Instance Lifetime
+- ec2_asg - Add the ability to use mixed_instance_policy in launch template driven autoscaling groups
+- ec2_asg - Migrated to AnsibleAWSModule
+- ec2_placement_group - make ``name`` a required field.
+- ec2_vol_info - Code cleanup and use of the AWSRetry decorator to improve stability
+- ec2_vpc_net - Enable IPv6 CIDR assignment
+
+Breaking Changes / Porting Guide
+--------------------------------
+
+- aws_s3 - can now delete versioned buckets even when they are not empty - set mode to delete to delete a versioned bucket and everything in it.
+
+Deprecated Features
+-------------------
+
+- cloudformation - The ``template_format`` option had no effect since Ansible 2.3 and will be removed after 2022-06-01
+- cloudformation - the ``template_format`` option has been deprecated and will be removed in a later release.
It has been ignored by the module since Ansible 2.3.
+- data_pipeline - The ``version`` option had no effect and will be removed after 2022-06-01
+- ec2 - in a later release, the ``group`` and ``group_id`` options will become mutually exclusive. Currently ``group_id`` is ignored if you pass both.
+- ec2_ami - The ``no_device`` alias ``NoDevice`` has been deprecated and will be removed after 2022-06-01
+- ec2_ami - The ``virtual_name`` alias ``VirtualName`` has been deprecated and will be removed after 2022-06-01
+- ec2_eip - The ``wait_timeout`` option had no effect and will be removed after 2022-06-01
+- ec2_key - The ``wait_timeout`` option had no effect and will be removed after 2022-06-01
+- ec2_key - The ``wait`` option had no effect and will be removed after 2022-06-01
+- ec2_key - the ``wait_timeout`` option has been deprecated and will be removed in a later release. It has had no effect since Ansible 2.5.
+- ec2_key - the ``wait`` option has been deprecated and will be removed in a later release. It has had no effect since Ansible 2.5.
+- ec2_lc - The ``associate_public_ip_address`` option had no effect and will be removed after 2022-06-01
+- ec2_tag - deprecate the ``list`` option in favor of ec2_tag_info
+- ec2_tag - support for ``list`` as a state has been deprecated and will be removed in a later release. The ``ec2_tag_info`` can be used to fetch the tags on an EC2 resource.
+
+Bugfixes
+--------
+
+- aws_ec2 - fix idempotency when managing tags
+- aws_ec2 - fix idempotency when metrics are enabled
+- aws_s3 - Delete objects and delete markers so versioned buckets can be removed.
+- aws_s3 - Try to wait for the bucket to exist before setting the access control list.
+- cloudformation_info - Fix a KeyError returning information about the stack(s).
+- ec2_asg - Ensure "wait" is honored during replace operations
+- ec2_launch_template - Update output to include latest_version and default_version, matching the documentation
+- ec2_transit_gateway - Use AWSRetry before ClientError is handled when describing transit gateways
+- ec2_transit_gateway - fixed issue where auto_attach set to yes was not being honored (https://github.com/ansible/ansible/issues/61907)
+- ec2_vol - fix filtering bug
+- s3_bucket - Accept XNotImplemented response to support NetApp StorageGRID.
diff --git a/ansible_collections/amazon/aws/CONTRIBUTING.md b/ansible_collections/amazon/aws/CONTRIBUTING.md
new file mode 100644
index 000000000..2a61b0a11
--- /dev/null
+++ b/ansible_collections/amazon/aws/CONTRIBUTING.md
@@ -0,0 +1,81 @@
+# Contributing
+
+## Getting Started
+
+General information about setting up your Python environment, testing modules,
+Ansible coding styles, and more can be found in the [Ansible Community Guide](
+https://docs.ansible.com/ansible/latest/community/index.html).
+
+Information about AWS SDK library usage, module utils, testing, and more can be
+found in the [AWS Guidelines](https://docs.ansible.com/ansible/devel/dev_guide/platforms/aws_guidelines.html)
+documentation.
+
+## AWS Collections
+
+There are two related collections containing AWS content (modules and plugins).
+
+### amazon.aws
+This collection contains the `module_utils` (shared libraries) used by both collections.
+Content in this collection is included downstream in Red Hat Ansible Automation Platform.
+
+Code standards, test coverage, and other supportability criteria may be higher in this collection.
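+
+A minimal sketch of a `requirements.yml` for installing both collections with
+`ansible-galaxy collection install -r requirements.yml` (version pins omitted; note that
+`community.aws` already declares `amazon.aws` as a dependency):
+
+```yaml
+collections:
+  - name: amazon.aws
+  - name: community.aws
+```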
+
+The `amazon.aws` collection is an [Ansible-maintained collection](https://docs.ansible.com/ansible/devel/community/contributing_maintained_collections.html).
+
+### community.aws
+This collection contains modules and plugins contributed and maintained by the Ansible AWS
+community. The `community.aws` collection is tested and generally assured to work in
+conjunction with `amazon.aws`.
+
+New modules and plugins developed by the community should be proposed to `community.aws`.
+Content in this collection that is stable and meets other acceptance criteria has the potential
+to be promoted and migrated into `amazon.aws`.
+
+## Submitting Issues
+All software has bugs, and the `amazon.aws` collection is no exception. When you find a bug,
+you can help tremendously by [telling us about it](https://github.com/ansible-collections/amazon.aws/issues/new/choose).
+
+If you should discover that the bug you're trying to file already exists in an issue,
+you can help by verifying the behavior of the reported bug with a comment in that
+issue, or by reporting any additional information.
+
+## Pull Requests
+
+All modules MUST have integration tests for new features.
+Bug fixes for modules that currently have integration tests SHOULD have tests added.
+New modules should be submitted to the [community.aws](https://github.com/ansible-collections/community.aws) collection
+and MUST have integration tests.
+
+Expected test criteria:
+* Resource creation under check mode
+* Resource creation
+* Resource creation again (idempotency) under check mode
+* Resource creation again (idempotency)
+* Resource modification under check mode
+* Resource modification
+* Resource modification again (idempotency) under check mode
+* Resource modification again (idempotency)
+* Resource deletion under check mode
+* Resource deletion
+* Resource deletion (of a non-existent resource) under check mode
+* Resource deletion (of a non-existent resource)
+
+Where modules have multiple parameters we recommend running through the 4-step modification cycle for each parameter the module accepts, as well as a modification cycle where most, if not all, parameters are modified at the same time.
+
+For general information on running the integration tests see the
+[Integration Tests page of the Module Development Guide](https://docs.ansible.com/ansible/devel/dev_guide/testing_integration.html#testing-integration),
+especially the section on configuration for cloud tests. For questions about writing tests the Ansible AWS community can
+be found on Libera.Chat IRC as detailed below.
+
+
+### Code of Conduct
+The `amazon.aws` collection follows the Ansible project's
+[Code of Conduct](https://docs.ansible.com/ansible/devel/community/code_of_conduct.html).
+Please read and familiarize yourself with this document.
+
+### IRC
+Our IRC channels may require you to register your nickname. If you receive an error when you connect, see
+[Libera.Chat's Nickname Registration guide](https://libera.chat/guides/registration) for instructions.
+
+The `#ansible-aws` channel on [irc.libera.chat](https://libera.chat/) is the main and official place to discuss use and development
+of the `amazon.aws` collection.
diff --git a/ansible_collections/amazon/aws/COPYING b/ansible_collections/amazon/aws/COPYING
new file mode 100644
index 000000000..10926e87f
--- /dev/null
+++ b/ansible_collections/amazon/aws/COPYING
@@ -0,0 +1,675 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. 
+ + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. 
You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. 
+ + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. 
+ + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. 
+ + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". 
+ + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. + diff --git a/ansible_collections/amazon/aws/FILES.json b/ansible_collections/amazon/aws/FILES.json new file mode 100644 index 000000000..3c8fd8b7b --- /dev/null +++ b/ansible_collections/amazon/aws/FILES.json @@ -0,0 +1,9028 @@ +{ + "files": [ + { + "name": ".", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/ISSUE_TEMPLATE", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/ISSUE_TEMPLATE/bug_report.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eb7804f39d220f7aa9841b068e873ca751373cbe0a361c68c887c492aee9052d", + "format": 1 + }, + { + "name": ".github/ISSUE_TEMPLATE/ci_report.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9a0d3d78e4f98fd54f3e11c603d039cd4b42619bf4b077ae13ee8ec9bb51240b", + "format": 1 + }, + { + "name": ".github/ISSUE_TEMPLATE/config.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2e5f08c57601d637ec507daec616f993993d16f51892ca62214932b4fad0dcd9", + "format": 1 + }, + { + "name": ".github/ISSUE_TEMPLATE/documentation_report.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "931b2c7f9865f5e3f9ae992daea9d2957290bd2ec63ab60f9825886091a0847e", + "format": 1 + }, + { + "name": ".github/ISSUE_TEMPLATE/feature_request.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ee94dc240c8dffe2a54a9a2ae56c1db91912b71f25445c92cb6f0fee3b484cac", + "format": 1 + }, + { + "name": ".github/workflows", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/workflows/docs-pr.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ebba7e47f9309166d3bed1d8046a7795b384617c4860243374a2cba326677d51", + "format": 1 + }, + { + "name": ".github/workflows/docs-push.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6f02c7bdf8ee60a75d6e451e72092f7e4e68481809aa4bc6be3d44ffbf84af23", + "format": 1 + }, + { + "name": ".github/BOTMETA.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "315b4d80327704d571204b7d4c71fa87148ed3b661beedd20eae9d5cdcf1bd2b", + "format": 1 + }, + { + "name": ".github/patchback.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ed69f87ea46171cb574fb77dc74fdbd7a269d4cad8d5ba6494d64d99842ef8e4", + "format": 1 + }, + { + "name": ".github/settings.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cb31353134cff7d91b546a03cc6fec7caaf0dba62079ea66776e2994461e6c7b", + "format": 1 + }, + { + "name": "changelogs", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/fragments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, 
+ { + "name": "changelogs/fragments/.keep", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "changelogs/changelog.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1315dd0e1ccb4ce5ad344677e8fa17c8abc49ac32514c1b15cb8abfbff838f03", + "format": 1 + }, + { + "name": "changelogs/config.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "117e01090677560eea2b4ee9ccc612ba9402f84cd964a8746d8608e4ba123004", + "format": 1 + }, + { + "name": "docs", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "docs/docsite", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "docs/docsite/rst", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "docs/docsite/rst/CHANGELOG.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b9f325505698f93d86a3d23f8139738d665583291230be8acc51ac88982f7801", + "format": 1 + }, + { + "name": "docs/docsite/rst/aws_ec2_guide.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "368b342ecbc661c9a76c4c9062c65b5fb0203d2a9601263a4b4376014c61841c", + "format": 1 + }, + { + "name": "docs/docsite/rst/dev_guidelines.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "06f3601f3679d4416cf63431569fc81947cccb9df59c0f7430da7b8d0b6a4bb5", + "format": 1 + }, + { + "name": "docs/docsite/rst/guide_aws.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "850b0424a753ca94f67100f4471c3f038a0437bae323e753c0c848dd515690bf", + "format": 1 + }, + { + "name": "docs/docsite/extra-docs.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c70b5a133033af33c690f1af359ae00cb2d5fd7a6bce704824e4788313917e65", + "format": 1 + }, + { + "name": "docs/docsite/links.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4eb9fb3cb720f1f1baba1d8b570017ffae0ccd41fc246d71c624a65d0f8abbf1", + "format": 1 + }, + { + "name": "meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "meta/runtime.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6bab8fd6dda14494c4d4914ac45ee3878573ee7979e2c349dcfc347c8972b6cb", + "format": 1 + }, + { + "name": "plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/action", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/action/s3_object.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "12524a7116a7100afcddf181178182e8cffeb8d94d1ffd0d7c5872eea96d16f9", + "format": 1 + }, + { + "name": "plugins/callback", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/callback/aws_resource_actions.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "74133a3e3acfd3d373bd1290096f416ee7c30dc96dfc12c03ff2beb2a42eb02c", + "format": 1 + }, + { + "name": "plugins/doc_fragments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/doc_fragments/aws.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5caf40df1026e68f17a9f9618e7a52a3da812be19d6d45f195210ff62e466f6b", + "format": 1 + }, + { + "name": "plugins/doc_fragments/aws_credentials.py", + 
"ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5bf58fccfb29994200623e8e2122544477c3e649b1527fd6fb683e3e90b3de15", + "format": 1 + }, + { + "name": "plugins/doc_fragments/aws_region.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "074b3f366d8214f956b0aff167e9940e08ab7fc2f697815eff50021069a8b708", + "format": 1 + }, + { + "name": "plugins/doc_fragments/boto3.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2723089db42987a8c16f6c9b82feb237ab0bca16b5b60ebc9799ad536d3ef2a6", + "format": 1 + }, + { + "name": "plugins/doc_fragments/ec2.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "491b912fd5be6d4664cfb736716fb26b41a364fb6fd031b6aa388c91367af47e", + "format": 1 + }, + { + "name": "plugins/doc_fragments/tags.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "72bac6a89836056e2e3befd8b04181cf7caabb3a8114d57529d8a64d127724f9", + "format": 1 + }, + { + "name": "plugins/inventory", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/inventory/aws_ec2.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8112d334c0be0daf68d9f28bb771c3ebc887aa27f2b966f2a261a0ed8ee44cc2", + "format": 1 + }, + { + "name": "plugins/inventory/aws_rds.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "03037bae47ea66706758a2a12695e89dcf09caf303da6304ab3c232888f4eb94", + "format": 1 + }, + { + "name": "plugins/lookup", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/lookup/aws_account_attribute.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0c6f24a362c120d856571082550097b9f9db926d4ea071ee63841fe65dbe1fd2", + "format": 1 + }, + { + "name": "plugins/lookup/aws_secret.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7b3d6b83ba994746bec9c726202aa9756a06fc72c1292aacedc4116f678e090a", + "format": 1 + }, + { + "name": "plugins/lookup/aws_service_ip_ranges.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6dc5da9e049c3129816edc730592e784b55d87b9bf0db9cf1f0ebbc021d75d36", + "format": 1 + }, + { + "name": "plugins/lookup/aws_ssm.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "311ae0243db0867a16bf9c23665c3dd6c3e538c17bcbccf2f45f9a793dc830f3", + "format": 1 + }, + { + "name": "plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/_version.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "da42772669215aa2e1592bfcba0b4cef17d06cdbcdcfeb0ae05e431252fc5a16", + "format": 1 + }, + { + "name": "plugins/module_utils/acm.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "033b53c2b5616e21890d0baf92f3bb3db45e28afa018f9d460fc0eea0cf0b0cc", + "format": 1 + }, + { + "name": "plugins/module_utils/arn.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "da140c24ff09a8eb85eefcdcc628c5a0e3639153f8aaecac961e6e072949a212", + "format": 1 + }, + { + "name": "plugins/module_utils/batch.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "33962f31026bf905b5b9b523cbc4c22207fa2ae7a7edafecbc6ea4a0c48bb56e", + "format": 1 + }, + { + "name": "plugins/module_utils/botocore.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4ac59276efb8ad191ef728f34bdbb979399d69722ca28a1a6c84e1930740cc54", + "format": 1 + }, + { + "name": 
"plugins/module_utils/cloud.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9c8f90d9efdab3bc95656d8ba88f3812bffbf872b23ec5743ed5055814c0f5ce", + "format": 1 + }, + { + "name": "plugins/module_utils/cloudfront_facts.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "47e14059db376c1223650186124b3b01597dc6bf716ec91f309bd8232857719b", + "format": 1 + }, + { + "name": "plugins/module_utils/core.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c270942bb7e2a821b6a30d5435ca1e058d179d73e0340e3e5a08a020d71e942c", + "format": 1 + }, + { + "name": "plugins/module_utils/direct_connect.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "57e6f1bbf32388e3864419baa48bc57d509f56dccbb8bbec0787bcdc4c54dcb6", + "format": 1 + }, + { + "name": "plugins/module_utils/ec2.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3134d55fe9a55745b20882067f4b433f8ae5a9cbc4b42da99259191f0a52498c", + "format": 1 + }, + { + "name": "plugins/module_utils/elb_utils.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fdb692e5d99229f7bbbf7b7a8db6069c83a149d441124f013fad973b51fa036f", + "format": 1 + }, + { + "name": "plugins/module_utils/elbv2.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dc797fe6bac5e1695eee911750ff54df567a71721809e2346976a3ccd9f70ebe", + "format": 1 + }, + { + "name": "plugins/module_utils/iam.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2032d3de62e482fd70d986bfada6a7ae5d56fc5f8f57232706bde20499d33116", + "format": 1 + }, + { + "name": "plugins/module_utils/modules.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9f97a35e3e0036560c78f703c3af92ed25449589436b06308338763848ca9278", + "format": 1 + }, + { + "name": "plugins/module_utils/policy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d852b84ebf8f39480978e7a7634244fa71edec243f61900e2ae9f2706fa64197", + "format": 1 + }, + { + "name": "plugins/module_utils/rds.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "624ce4df370086b65c6524c846b80cede8721ee9e635edf233c20eec6bf18547", + "format": 1 + }, + { + "name": "plugins/module_utils/retries.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4932d03621a8b3760cfd7a7017b8f708c176ef925caa741aa876db7a2385f40d", + "format": 1 + }, + { + "name": "plugins/module_utils/route53.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "29eab39fe5ee51733ff6ab64d6b32efb8af8ba9aedcf55032fdc6d3fe1a78724", + "format": 1 + }, + { + "name": "plugins/module_utils/s3.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "aa281814786efd9c30ca8b2e558fe1ac2da8667c3b9c8cc0907de4e86b9c3af7", + "format": 1 + }, + { + "name": "plugins/module_utils/tagging.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d5267aa463184b5316f351d7b352af2e00f8aaa7781304962d644410a1931416", + "format": 1 + }, + { + "name": "plugins/module_utils/tower.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b20695b30e80c25fe2d7f5c4bb4117d7e26b639207c4f345acaa12d5d8b66113", + "format": 1 + }, + { + "name": "plugins/module_utils/transformation.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3903e5bc7a50f7dab8b7bb56f69b67c8a8ebacfaad508cd6557a7b641d9f25e2", + "format": 1 + }, + { + "name": "plugins/module_utils/urls.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"b226a2190419eb792af107c3be5c41e199c2205aff97b1b6b02dad8d2f64e41b", + "format": 1 + }, + { + "name": "plugins/module_utils/version.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "96135d89c53fe969093bb84e0bd806e3a51d7c5a508ba8eeee415533c18133fc", + "format": 1 + }, + { + "name": "plugins/module_utils/waf.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "da16448e2039f043e098c5545902f4103210118b2dab3f27bd0f89f6a6b1fbc7", + "format": 1 + }, + { + "name": "plugins/module_utils/waiters.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6d1a9968505324c82cd0c31b87580a705a882b3bdc4a6f664a279808d8d2dc3b", + "format": 1 + }, + { + "name": "plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/modules/autoscaling_group.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "98d907585c5b4c447933ca2e4d84aa9012da5498656728043f41787e2b45d4fe", + "format": 1 + }, + { + "name": "plugins/modules/autoscaling_group_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c73c2c4737263b70788f0926f1839714713dbaf0f52a612bcc81f417fbd9b6f0", + "format": 1 + }, + { + "name": "plugins/modules/aws_az_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "474985f13cd40b26d7e28221c27308afc1d706cb5b5631fb0321c7993c7687d3", + "format": 1 + }, + { + "name": "plugins/modules/aws_caller_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "48617b5696d99e66e1985723015604018c97f86b45bc4b58837eda3b541d6e9f", + "format": 1 + }, + { + "name": "plugins/modules/cloudformation.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a27b633b3bdacfc66983192ad9ceb98cf43877f621e62fc6b25d1ccfcf09c40f", + "format": 1 + }, + { + "name": "plugins/modules/cloudformation_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "16035b88849abb0d2d8ac4192c9314eb5990408f5a046d2a3be8b77d825408a1", + "format": 1 + }, + { + "name": "plugins/modules/cloudtrail.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d54a73bb6aaf47f3e185586a8ff9625159fe07573f96be8d165a72570819b6d5", + "format": 1 + }, + { + "name": "plugins/modules/cloudtrail_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7e178332bac5616f83a9599f034f940275304865c5fc73e43865cc092d1e64e2", + "format": 1 + }, + { + "name": "plugins/modules/cloudwatch_metric_alarm.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b56994ed90bab914ad0b776e69b6cd8dd1a64d72f0b2290a0deb86d3688ec6e4", + "format": 1 + }, + { + "name": "plugins/modules/cloudwatch_metric_alarm_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8eeacf0de7a256294de87e54c0269b55c8a7621353cd5b024512c8d1f82f8920", + "format": 1 + }, + { + "name": "plugins/modules/cloudwatchevent_rule.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c9c3410cedab596df47c9aef488a80ea02181b0c4021f784c28ea4d847693f7b", + "format": 1 + }, + { + "name": "plugins/modules/cloudwatchlogs_log_group.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2d89f0e2e5fbae65c871b7673f60a35a72528c7692d0a48dee8b3de9d39ed07a", + "format": 1 + }, + { + "name": "plugins/modules/cloudwatchlogs_log_group_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "14b6c83c28b1db54b02dd1651cf692bae8b0826686137f3ee9557d4662e53a61", + "format": 1 + }, + { + "name": 
"plugins/modules/cloudwatchlogs_log_group_metric_filter.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7a1250e5b67d737a252cdee6044ec6b12674ea6a40f910389b32a49032b316dd", + "format": 1 + }, + { + "name": "plugins/modules/ec2_ami.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a860e54bee3e75558a7d90f1022068cc9f727af92b1c85ca529dc28829fa7455", + "format": 1 + }, + { + "name": "plugins/modules/ec2_ami_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1182f8de1ddc475a8d3812f7b3013cb241a2ac179cf66f9d12702e0691a230d1", + "format": 1 + }, + { + "name": "plugins/modules/ec2_eip.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "36c2ae46646fb73208a193eee51dce50c28bf9e0ea1a4d453122483bffbd5e5c", + "format": 1 + }, + { + "name": "plugins/modules/ec2_eip_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a89fa148a094c02fd20ba50d7aab757b9656ce80bf927e4e47583771985b2830", + "format": 1 + }, + { + "name": "plugins/modules/ec2_eni.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "055610c01f15ca2d17765341343da61e0ac30e1b649bfc2a5c2d17aa757d6450", + "format": 1 + }, + { + "name": "plugins/modules/ec2_eni_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "02dabe31ea2b3a7ba5e74d89ecb4ca239bdd3237df68fbd65f3d4dff8a3fd158", + "format": 1 + }, + { + "name": "plugins/modules/ec2_instance.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "aba9d1940d669ede122487c30cdf0244567466c6c6f9e4abcd45d2fce195688f", + "format": 1 + }, + { + "name": "plugins/modules/ec2_instance_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bc42dd60e5cf5102833494df573b402ccc95eb136a6315da390c5f7849cd3b5f", + "format": 1 + }, + { + "name": "plugins/modules/ec2_key.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9ffb187f8590776f65dd4bbbf4afa5385efa544d8bf8c1f522038d5f139c45f2", + "format": 1 + }, + { + "name": "plugins/modules/ec2_metadata_facts.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8332475dba616115c4ee58028306b1b0dc27629b5ed088b8bc15b1e824d94623", + "format": 1 + }, + { + "name": "plugins/modules/ec2_security_group.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "91db63eaaac6ee45f518d2faf151c9c9d415f77e8a06e668724ffc3c6e1cbaa7", + "format": 1 + }, + { + "name": "plugins/modules/ec2_security_group_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e674173c0b92d9301dc5c987aaf825539fa653c06a399363d37393b301656624", + "format": 1 + }, + { + "name": "plugins/modules/ec2_snapshot.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "509f86268664cea64e3f74665caf18788e9e5bac93d72eb47794b511b506187d", + "format": 1 + }, + { + "name": "plugins/modules/ec2_snapshot_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "730d5699c6237d5686fbc2f1017ff767498d265ebef33768352c7de5304075cb", + "format": 1 + }, + { + "name": "plugins/modules/ec2_spot_instance.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "77899e970ba9936c99c007972d1d9596f7aac67fce4c449d761dc49b6c7beefd", + "format": 1 + }, + { + "name": "plugins/modules/ec2_spot_instance_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "585824a60c62b71f96d67c62216f248ff2847185e3dde6b05de5acf8549292d0", + "format": 1 + }, + { + "name": "plugins/modules/ec2_tag.py", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "3c956590d6985eed3dde7eef2b43de44969dc39c4a1e243df0bd33899dcfe701", + "format": 1 + }, + { + "name": "plugins/modules/ec2_tag_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d544e9d07ad827d519e4a859e92c854327742dcd5c32426deaccb5e9b1ce71ec", + "format": 1 + }, + { + "name": "plugins/modules/ec2_vol.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c8e438e7cec060b9c43b8242b4de3a10cfc65ac4b7067135f53f9531afb7ef33", + "format": 1 + }, + { + "name": "plugins/modules/ec2_vol_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2e545eb4ad0ac1f2b32e83ffb48f7ddfd4ff242f8330b5ff2583756b5b43137b", + "format": 1 + }, + { + "name": "plugins/modules/ec2_vpc_dhcp_option.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "06a709720c058c7e3592f204fe159648679bc8a337c410a11f0851099d435b92", + "format": 1 + }, + { + "name": "plugins/modules/ec2_vpc_dhcp_option_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "10ed53674be771c22becc5b276babc22dc259c4b4ba720b91339220093e16b4b", + "format": 1 + }, + { + "name": "plugins/modules/ec2_vpc_endpoint.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3fb8036691b814e58e4c629514cf36086a54d9deb51aa425870b20dc224e3476", + "format": 1 + }, + { + "name": "plugins/modules/ec2_vpc_endpoint_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "531760afb97c5477b4835da889fd2322f08904c2a99a32a2f3f7b4aebd03b931", + "format": 1 + }, + { + "name": "plugins/modules/ec2_vpc_endpoint_service_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4c771d0115e5c8f164b51406c07894fa1659883d08f70410c619d9bfd93d48dc", + "format": 1 + }, + { + "name": "plugins/modules/ec2_vpc_igw.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ac30fa925f7397f1fc18c408ca5c6572917661b53db02cdd62e7e43b09857899", + "format": 1 + }, + { + "name": "plugins/modules/ec2_vpc_igw_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "11df5f24037224ea4557f64a04004f56198ea017be48a343bf24309c0a60ba1e", + "format": 1 + }, + { + "name": "plugins/modules/ec2_vpc_nat_gateway.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "32eb7cd5a5250295c6dad240f045c6455721b15cd199dc75c2f07c2bf4ceb60a", + "format": 1 + }, + { + "name": "plugins/modules/ec2_vpc_nat_gateway_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "464012a0c5b8c9f3b9f60cc1c67808a01620afc90ef4109aaf673b79e07eed0d", + "format": 1 + }, + { + "name": "plugins/modules/ec2_vpc_net.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "17984ab867246fac678c77713ad082f0e8a27818d87f13050025dc183b4952fa", + "format": 1 + }, + { + "name": "plugins/modules/ec2_vpc_net_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dc1536c858b535c9a7feccca9c2246ecd78bfafae06fa7749fb4210a25f91d3e", + "format": 1 + }, + { + "name": "plugins/modules/ec2_vpc_route_table.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9c5112f687074dc23dae80f5bdfefff4b01b0fa1007e6b9811930ec0695196a0", + "format": 1 + }, + { + "name": "plugins/modules/ec2_vpc_route_table_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bfc18886baf77e9520ac14069c99bf4d4eef07814683978f8ebddefb91583c3f", + "format": 1 + }, + { + "name": "plugins/modules/ec2_vpc_subnet.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"b9c1fb9415ae90be1987425ad8b23519b55bc390e8ce6917b0d9ad84ffef7546", + "format": 1 + }, + { + "name": "plugins/modules/ec2_vpc_subnet_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "43d5f6d3d7f5631d3cea9b9c5c6c8724d11564302ae4c2ad0dd781c4c9fce129", + "format": 1 + }, + { + "name": "plugins/modules/elb_application_lb.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ca26a548ddc69c4158557bb923e6e32eff67a71c26475029aeaa2e9b668b2311", + "format": 1 + }, + { + "name": "plugins/modules/elb_application_lb_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cade0c3b1e320fc230106d865388e2b8a59dcce1df247ffa42b8dba4e014898c", + "format": 1 + }, + { + "name": "plugins/modules/elb_classic_lb.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "96656cb9910b53ab557b0ea950458c0a79519c377ddd4cce24792160a73d2ca3", + "format": 1 + }, + { + "name": "plugins/modules/iam_policy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a6180b31398e962168cdecef08f87235f7b47756ff5272879865d6c7e3de83da", + "format": 1 + }, + { + "name": "plugins/modules/iam_policy_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1659b362f862bc69b6daf7af5db67308bd532b1cb9fcb2c7eff16bfa9fe727e1", + "format": 1 + }, + { + "name": "plugins/modules/iam_user.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9b4d31f0ef1d31d1053e6d61d87257fc988fb30d9ee3e78cb11696d24d2a4e4a", + "format": 1 + }, + { + "name": "plugins/modules/iam_user_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ea652282a8008020c85f78891e09b9f6a108019a1b8623c3b8f98c434b49e416", + "format": 1 + }, + { + "name": "plugins/modules/kms_key.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "84855cd18ba19a73cebabd665b3f37b699d202a22b748cbdf0eafd9b3e914d55", + "format": 1 + }, + { + "name": "plugins/modules/kms_key_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "02034a550d9a9b9af6bd1f517a0560da915204f8bb4f4845a3fa478c9bd5636c", + "format": 1 + }, + { + "name": "plugins/modules/lambda.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e0f6a58dfaa970e4a5419a53230084ee6f3004ce86f5a7f4e129ead71da4eb80", + "format": 1 + }, + { + "name": "plugins/modules/lambda_alias.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "64251c3d3dcfea6b52647823abca5a10f3f61450f65157acb99c331ff46b1e87", + "format": 1 + }, + { + "name": "plugins/modules/lambda_event.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fceb5e0a1592a410f12aa100eef5149ddb4344a15afc46eaef34fc6a0dd48cd2", + "format": 1 + }, + { + "name": "plugins/modules/lambda_execute.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fe7cc0094395e1515305fc8fb85554036c8528f8213c3b9210580dd14056d819", + "format": 1 + }, + { + "name": "plugins/modules/lambda_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fdaf27c3cec32b3fff649ec050fa871f06b883bbb4503c63bbb9c45e59de94a5", + "format": 1 + }, + { + "name": "plugins/modules/lambda_layer.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ff5c446fed8694ca1e49ede728f4798b377f37fd0c3765b8992a322ac99fafad", + "format": 1 + }, + { + "name": "plugins/modules/lambda_layer_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1589588861d7614575831144edd858f29b3495967e6891d86a526e3a16cdc1ee", + "format": 1 + }, + { + "name": 
"plugins/modules/lambda_policy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0018bf928b3b508230fb83e82aaf924c66665b988d6ff9f55bee59aacff067ef", + "format": 1 + }, + { + "name": "plugins/modules/rds_cluster.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c7d0e88057fb50311c5414fa27ebdcac13921d5625906e92ecdee869591b8fe3", + "format": 1 + }, + { + "name": "plugins/modules/rds_cluster_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ede1485240030f32a4e42c223cb894557fde54febbbb2e602e5e6946a037015d", + "format": 1 + }, + { + "name": "plugins/modules/rds_cluster_snapshot.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "347c43b4dd85bd435fff3576f55224752d9f16e32519d12e448b2f0841a8dce2", + "format": 1 + }, + { + "name": "plugins/modules/rds_instance.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "29e7b8cc5b27d48e02a156340bd4967317ee15fca82290a547ceab7a5d700528", + "format": 1 + }, + { + "name": "plugins/modules/rds_instance_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d336a63fd28ce594fdbc82d25006654fbd7477bd7e4d3e57ccc5cbf2dbc52d41", + "format": 1 + }, + { + "name": "plugins/modules/rds_instance_snapshot.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "494bb66507149d458c505f3fbd07a145e7b4d0e71ba243de431788a2ecd8e8e2", + "format": 1 + }, + { + "name": "plugins/modules/rds_option_group.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1d9e7cb79d4042eb2d30e6844cd8193124ba1628fca11a8420dd069d5109ba7c", + "format": 1 + }, + { + "name": "plugins/modules/rds_option_group_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9e81e9ec9a8ca6f28006dd3ed733587446cacbb051528f4841f47763c1ab7afa", + "format": 1 + }, + { + "name": "plugins/modules/rds_param_group.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a2b01e7b856056d4145c03a5be15108f7112bbff0c98c2c02b6ea277a6088064", + "format": 1 + }, + { + "name": "plugins/modules/rds_snapshot_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b13cdc0a6a9c63aa7ea43c0a57ad5752b3b610ca05bfe4b68b7588fec025f42f", + "format": 1 + }, + { + "name": "plugins/modules/rds_subnet_group.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "021867822f89d58d261af22bf975d411baf87fab5f36b5251fcf1e7c8003ecb6", + "format": 1 + }, + { + "name": "plugins/modules/route53.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "97a26474c421a683069fc437c44f5a0dfa0c2ea946d3b493193963b962dfaabb", + "format": 1 + }, + { + "name": "plugins/modules/route53_health_check.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "aaee5ff346c9ae4e7f2ae0a450a73fc206fe97814f6efc477a34f09e2c541b21", + "format": 1 + }, + { + "name": "plugins/modules/route53_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f5801f4ee3cc33e466123bf47544402132097dc2d7ad11603fc7e90d73ea6890", + "format": 1 + }, + { + "name": "plugins/modules/route53_zone.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0719721c51e60247e466ecb4c82fa542bf85cedc0bed9a324b063de278a73e9b", + "format": 1 + }, + { + "name": "plugins/modules/s3_bucket.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "64b4f4f1f6c0ab6edb2ca015fed29032ef6e0b5d9bd0b427d9225a39bc769770", + "format": 1 + }, + { + "name": "plugins/modules/s3_object.py", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "d03e6dee844119dbe1dcc13ada18111927e4f77345696992b95019efc26c2e8a", + "format": 1 + }, + { + "name": "plugins/modules/s3_object_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "22eb6916260424398c231ced8b91ef11ae0d88da72b158ea15c8e27059d9ed83", + "format": 1 + }, + { + "name": "tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/autoscaling_group", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/autoscaling_group/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/autoscaling_group/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2c6abcfc99d7925638df412ccfe9cd7a92321ec631d0642e0e62831143718c3b", + "format": 1 + }, + { + "name": "tests/integration/targets/autoscaling_group/roles", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/autoscaling_group/roles/ec2_asg", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/autoscaling_group/roles/ec2_asg/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/autoscaling_group/roles/ec2_asg/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e9764ecb19aea8de1a2ca3f3a6359a2350bef9d521d0d8dc13f827d99835043b", + "format": 1 + }, + { + "name": "tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/create_update_delete.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1170144ae278bd8f5c48e2805b80299f25424623609969af58776f40dac7ac8e", + "format": 1 + }, + { + "name": "tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/env_cleanup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bbae7cf19421186e23270481a6524e65283281d436aa8563780083381f4b9116", + "format": 1 + }, + { + "name": "tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/env_setup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3c61c231bfa57c47ac62f553e78e6da66a655ac06db90c6842ee65c85b824e26", + "format": 1 + }, + { + "name": "tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/instance_detach.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "19115795e0c208a41e7b7de6a13658a29da2aff91174896c09137e674f076ee3", + "format": 1 + }, + { + "name": "tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e1a64bea9720ef26383b0e3cf881e22a5afee65511bca82241db9fcd16af54f6", + "format": 1 + }, + { + "name": "tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/tag_operations.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"4182a7ea8253c6353fdfbc03521be8c01e1f2e10eee76d847beb99b63b4bc983", + "format": 1 + }, + { + "name": "tests/integration/targets/autoscaling_group/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "74a2d04d78bd1d788a9e7e2bd600ca114774af6e5841bd84fdf9f3d062993ea9", + "format": 1 + }, + { + "name": "tests/integration/targets/autoscaling_group/inventory", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "085c1a0efd610ebd39ed2197186299db0eefa28720665e0deebda055861dfc7b", + "format": 1 + }, + { + "name": "tests/integration/targets/autoscaling_group/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "65fb456e53bbfc5e714bbf6f87ed8f80c9ee3c10f8be6e63362face9e4520a04", + "format": 1 + }, + { + "name": "tests/integration/targets/autoscaling_group/runme.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d2e53b13c18d9f57b9ac05cf209ab9ea0db765e0b8c4e0698e26747cef903d23", + "format": 1 + }, + { + "name": "tests/integration/targets/aws_az_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/aws_az_info/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/aws_az_info/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/aws_az_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/aws_az_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4553d6453cd93e7745083c40410127744ba59a7934c07e39913ef6b9c7a5ae2a", + "format": 1 + }, + { + "name": "tests/integration/targets/aws_az_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "format": 1 + }, + { + "name": "tests/integration/targets/aws_az_info/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "47d7f0170663266b9c80b357a113128c721f64f7782736c399471404ef6170be", + "format": 1 + }, + { + "name": "tests/integration/targets/aws_caller_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/aws_caller_info/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/aws_caller_info/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/aws_caller_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/aws_caller_info/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ee3b4355d2876a8648831474ce0b430c22c21035551ba77c0a125f4e2866a0e8", + "format": 1 + }, + { + "name": "tests/integration/targets/aws_caller_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "format": 1 + }, + { + "name": "tests/integration/targets/callback_aws_resource_actions", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + 
{ + "name": "tests/integration/targets/callback_aws_resource_actions/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/callback_aws_resource_actions/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/callback_aws_resource_actions/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "format": 1 + }, + { + "name": "tests/integration/targets/callback_aws_resource_actions/inventory", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4514e38376fcaaeb52cb4841f3aeeb15370a01099c19e4f2ed6a5f287a49b89a", + "format": 1 + }, + { + "name": "tests/integration/targets/callback_aws_resource_actions/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "69adafe3d0fda0d28e1a1f90961cb46fda5d33824a13ac30bcc4501d5a20f0ce", + "format": 1 + }, + { + "name": "tests/integration/targets/callback_aws_resource_actions/runme.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b0e3eec1879e80beb50b7605a73d7a7b2508c37cde442d60317630a9f3320ead", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudformation", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/cloudformation/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/cloudformation/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "343a3227698a485b984745e791f5e44ff8797a3b60fcd54d0a4641bb0369b012", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudformation/files", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/cloudformation/files/cf_template.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5f612313fe9e8c40c55eba290f6af3b814a3702cf728a6c5630e24f0e8787fa8", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudformation/files/update_policy.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bcb41e725f7fae8be4356633beb391dd1870e344d626b105a3e2f14f3b3e5e96", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudformation/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/cloudformation/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e1d851188d9e6d7d833aabae61c46f0f9421f9138c6b348905598866242259c8", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudformation/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/cloudformation/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0032da0d3260e186a7cfce8b8a19b73bc8e1aa2d7d187fb36c75eb746682a9d9", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudformation/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "28ee2ca3290c5220d7576cad86a78a42efb7a97df52a20521a36d520192c6e9c", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudtrail", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": 
null, + "format": 1 + }, + { + "name": "tests/integration/targets/cloudtrail/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/cloudtrail/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1115fba7e640503f5fb8fdb12795a6cb189ef2afaab6bcd265fac67da7297304", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudtrail/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/cloudtrail/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudtrail/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/cloudtrail/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a9ccccd516393a3ab43f9b632c05dbb2011d01ad1226c99fca1bed2e76f01570", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudtrail/tasks/tagging.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cb8c8d4e022c12939ac15ddca43cd4429256c7b568862278561e7d23c2b3d1dd", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudtrail/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/cloudtrail/templates/cloudtrail-no-kms-assume-policy.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6f7ca29f60f15eca1653df8d3c33d246ac59bd43a2004ac05af7fcda0f77ffd1", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudtrail/templates/cloudtrail-no-kms-policy.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "81fbd2d5a8dfee0c43c8d8e7052b088a596534aab3061a2fe3afb38fd35f5717", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudtrail/templates/cloudwatch-assume-policy.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "69ec1545cfc0a9907104ef64bd882fdc77f3626f544fbf476446d107b98d9b7e", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudtrail/templates/cloudwatch-policy.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5d994d97e28073f1fec4bab7fc4ae79df30ab43a3a160b64b7ae97ba2680e9be", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudtrail/templates/kms-policy.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e4cd4972ae039790c3bec28bdbe28edfdf4de3d175e3934c7a358f5cae4d4363", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudtrail/templates/s3-policy.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4b092e3e56c11d148ac4c80ce60ec0c68546ad3193635191f7baf21ddd0863ab", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudtrail/templates/sns-policy.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5a4aefba7da584789a411e88e38831f1f792d9d554672b33071aeb5f1fbc8996", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudtrail/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "895d45473cdeb7ab4d5982453c53a7a33628aa6b69bb2597c74c31c6ba25c780", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudtrail/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"b86a883b3949a30880b1b3f002c6cef73b71b0022b5188e46564fb918b1cc060", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudtrail/runme.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4d5487def9a0810a49fcae2470e47ae0365191a204a915a59a2daf86e1e84f3c", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudwatch_metric_alarm", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/cloudwatch_metric_alarm/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/cloudwatch_metric_alarm/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7817a280ae2722bfc5bf36afc82ab422977abac14ddbdfbd283aa7abf24e8398", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudwatch_metric_alarm/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/cloudwatch_metric_alarm/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2c6abcfc99d7925638df412ccfe9cd7a92321ec631d0642e0e62831143718c3b", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudwatch_metric_alarm/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/cloudwatch_metric_alarm/tasks/env_cleanup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "326e3cd41b9fd788eecaa006c4b45d249b9c18bafd5c3d49162a622302c1bd93", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudwatch_metric_alarm/tasks/env_setup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "75505237f36804bc29489af313588c45b6751aaf6791ac700dfa1ce92180c2eb", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudwatch_metric_alarm/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f28d564aa3804e8b012006606b7b0772480117d83d57f6e7a0c8b6550a9b504f", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudwatch_metric_alarm/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudwatchevent_rule", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/cloudwatchevent_rule/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/cloudwatchevent_rule/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "24eb0cee508b8646f67136dbadc4f380cfed37a47746f6f32a4395ee1e4b8408", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudwatchevent_rule/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/cloudwatchevent_rule/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "73c208b8fd45c0ea9df82d23e70be0eb967ade5dcb223406fff1fd12fe495bd8", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudwatchevent_rule/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudwatchlogs", + 
"ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/cloudwatchlogs/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/cloudwatchlogs/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "31801ec2271333a53a75c3af3b17563295d0976ca7617799ee632db30d11204e", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudwatchlogs/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/cloudwatchlogs/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudwatchlogs/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/cloudwatchlogs/tasks/cloudwatchlogs_tests.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6c4c54641eea829b8d4603c692626e665d0aacd146714c832dfb742b75db4579", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudwatchlogs/tasks/create-delete-tags.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e054e7a303e58e95aa63c919c74723115be3e2e61d3dfb671054b46b8b8d1466", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudwatchlogs/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "07f48337c984d33c6e6e0e89844dd8e1fd2c209116b6a940aef5d8602f284105", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudwatchlogs/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "35a14b6301b7771da2217cadfee86774083a39f7b72862a428fd6d11e817b8b5", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_ami", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_ami/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_ami/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fda077db8f4b5063b06b862d71449c2d0dc861c927c5d5a6c048f491dc2924b6", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_ami/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_ami/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef260835a842068b9673b71c5b045834d70881d1934207592d8ceaf344069c12", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_ami/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_ami/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d41dc340902dcda6081cee1caebdd69d786d877cbe3a0c965c12a90ee2c8fe05", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_ami/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_ami/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8ac9125dea1e9dfcac93d6142fe3deb7f2d84c6f25c9c5ed72718073ad304fe9", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_ami/aliases", + "ftype": 
"file", + "chksum_type": "sha256", + "chksum_sha256": "1931c614be41a33f3a57f0706aec1983e7787f891321385ea14097856cc6fa69", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_eip", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_eip/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_eip/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4b9a707456ce677c6005dbefa175d7e2e94feabab0d70b3f34a00d335c8a68fd", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_eip/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_eip/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2c6abcfc99d7925638df412ccfe9cd7a92321ec631d0642e0e62831143718c3b", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_eip/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_eip/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "44d0fd8cb3179fc0263ef5fd6d8b1070d2f35ed5a6637937d9ca01a03ded384c", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_eip/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9d6f53830fe1da397588556732c784f125aed97fba0ef0662934844f90cc1fe7", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_eni", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_eni/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_eni/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f03fac61ee3fcda5b1602f1ffee6f24159080797c7c50b725b5ba1fc3d888ca1", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_eni/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_eni/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e1d851188d9e6d7d833aabae61c46f0f9421f9138c6b348905598866242259c8", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_eni/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_eni/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b40f0f33f3b9c96c4210802b3b388883e67245765a1f4e653307e30961368835", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_eni/tasks/test_attachment.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fc4e545021465b0f55e002bc6558f76a56d7069e5d434d7168238de2600d5db9", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_eni/tasks/test_create_attached_multiple.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8cd4df898083e671f7c778877f1b7ecef955828ab6a165e704cb871c1e5f4cc2", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_eni/tasks/test_deletion.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "81b2131235b4b108521ecc267a90aaf2b9e8ec03a04bd97b667d27e7673b4aed", + "format": 1 + }, + { + "name": 
"tests/integration/targets/ec2_eni/tasks/test_eni_basic_creation.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4a61ced1afa0fd43c872a05770f55e29e5f5945ed7a2e07fc086d7c6ef7b58bf", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_eni/tasks/test_ipaddress_assign.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d30bd3ab2a60e469d096a3c3dbfaa7a14309efe20674bf31db2b1c84eea4ca5c", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_eni/tasks/test_modifying_delete_on_termination.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5254eceba1d8492a0667fddf8576099ce3ce3a2bdfea899938cdadac61bf0fe9", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_eni/tasks/test_modifying_source_dest_check.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fe62b6c02b10a2cc9afd20df974e512cd4aa28eee45803f143caffa3834cebaf", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_eni/tasks/test_modifying_tags.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e1cf49f0f4a7aa392e797a16f9ccd76469e4a34450a761db0dda611d78eed447", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_eni/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9159c859ae9e7385c9e0765a72d38715c84dc1dd3323fef80625ad769a2b430f", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_block_devices", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_block_devices/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_block_devices/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "558e1212dd38fde7c60f15dfa88293cfcfecb8f373d08e57cfb9b8f9585a28c8", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_block_devices/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_block_devices/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2b18aa55ec2e995c0a9da468d4ff43bb552d905f215c9c04d067c63efb5bf6ef", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_block_devices/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_block_devices/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "697017fd694c6ec0d62e7f5fba6787a6f441d5ca3f5a54ac50414965317113d5", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_block_devices/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "19d0f5d65ab0bfbf40516fc2524f9a44311aa831fe298135c17f471905e0209e", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_checkmode_tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_checkmode_tests/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_checkmode_tests/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb5cf91af2a024a617591b954a5967b3f68866a4891aa99050de88c49d2fab8c", + "format": 1 + }, + { 
+ "name": "tests/integration/targets/ec2_instance_checkmode_tests/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_checkmode_tests/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bef049fc4dea0c69eef24ff12eaabf3669cf2bffed85980bd5da50bedb4692c1", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_checkmode_tests/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_checkmode_tests/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ff2ca950ac1634229a0a3d46a73ab3c2f2d238471b72ffc7b85dcd12e88bdbce", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_checkmode_tests/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "19d0f5d65ab0bfbf40516fc2524f9a44311aa831fe298135c17f471905e0209e", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_cpu_options", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_cpu_options/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_cpu_options/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c6dda2f4319c75ec4ee725a20b1a52e71b2f98d73cfb01f72ef91eb1d1c9aba7", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_cpu_options/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_cpu_options/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a9fc69f0dc513250777eb2d696cf3d3686c821b16e52c39457d6a1426ab57b5d", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_cpu_options/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_cpu_options/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "911121cd93efc4ba3715f4596f9e5c773449cd3ec6149a5ba220440ba4312383", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_cpu_options/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "744c278e479283bbc2db69dbce00c9a77244f29e2985a2b66cb4eadfe01bc667", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_default_vpc_tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_default_vpc_tests/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_default_vpc_tests/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "45481a6fb83c53cc2b1e916c8aebbe6fb626dc9751c4f05aec3170877efcac66", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_default_vpc_tests/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_default_vpc_tests/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c2696e3813c9fde54bc42250cfd5662464818c23d7c3e0a7d679000dc95d8221", + 
"format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_default_vpc_tests/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_default_vpc_tests/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6eb0c0580983662aeb13943b633636add2fb3dbec3fe48720b68e3f5093f074e", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_default_vpc_tests/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "19d0f5d65ab0bfbf40516fc2524f9a44311aa831fe298135c17f471905e0209e", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_ebs_optimized", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_ebs_optimized/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_ebs_optimized/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "771f1f8c7c3652564f98df0d730338fad201603aad967f779c5b93d350bfa384", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_ebs_optimized/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_ebs_optimized/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ea456897b1f0b0e9edc35e65d722ce84351d603d9264ab4645da398cce6b0dd9", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_ebs_optimized/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_ebs_optimized/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6b9e72f06911e99593fe42a5d42e79e4e2a329eaaf9f05eddb1a9299b5b419d7", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_ebs_optimized/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "744c278e479283bbc2db69dbce00c9a77244f29e2985a2b66cb4eadfe01bc667", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_external_resource_attach", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_external_resource_attach/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_external_resource_attach/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "09152f817fffef74ba3adeb27d8e0f3a8ce7047eb4c577707cab11d7b8f7f5b6", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_external_resource_attach/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_external_resource_attach/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "40b0c86e91cc53759c3d81fba0125d8ddb4f017d435ce4ecbecbaf2561c3b86e", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_external_resource_attach/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_external_resource_attach/tasks/main.yml", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "36efea26d0ba7356e46a79f32af04e187bf3251960304704af266704fc19d3c5", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_external_resource_attach/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "744c278e479283bbc2db69dbce00c9a77244f29e2985a2b66cb4eadfe01bc667", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_hibernation_options", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_hibernation_options/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_hibernation_options/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0df1609270173d770eefe53a36d12bd52164bc74efb14a720cf9f7d03647e1c0", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_hibernation_options/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_hibernation_options/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef49cc8298e739f5c05a08f70f4b2ffc6aed8750fdf40044a7adb17d27cecec0", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_hibernation_options/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_hibernation_options/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7dec3c21e723ab78a812002a5ac29374bbc420184de05bcefad68f6b271fe0ea", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_hibernation_options/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "744c278e479283bbc2db69dbce00c9a77244f29e2985a2b66cb4eadfe01bc667", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_iam_instance_role", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_iam_instance_role/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_iam_instance_role/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3efb603d0ee3ebcae0ec0703fdf2f5c4351d2e45e67ace978d6cef83bbb5f904", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_iam_instance_role/files", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_iam_instance_role/files/assume-role-policy.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f1950c6acf71cbeef3bbb546a07e9c19f65e15cf71ec24d06af26532c9dfab68", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_iam_instance_role/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_iam_instance_role/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "09a247582f4208768b90322a02a69ec35fe27b9a5dd7f5aecf86db7f0c624138", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_iam_instance_role/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + 
"name": "tests/integration/targets/ec2_instance_iam_instance_role/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b2bdaf6201f486c2142cf6069f27dbcddc48f2b6fdac9e6b6bc12446728d844b", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_iam_instance_role/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "19d0f5d65ab0bfbf40516fc2524f9a44311aa831fe298135c17f471905e0209e", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_instance_minimal", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_instance_minimal/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_instance_minimal/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b0d7bdc41f0c29c35484d7b221767884d59fa3567813542c36c0c2fdc904b276", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_instance_minimal/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_instance_minimal/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f48aac6d730150600a2e4713d98601d4b154910bf43fd102b91ca0be1187af57", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_instance_minimal/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_instance_minimal/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e2d6ac07b5e51f5950652e32804f3c4fb4cbf005dee45af17c70e1afe77c4262", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_instance_minimal/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "744c278e479283bbc2db69dbce00c9a77244f29e2985a2b66cb4eadfe01bc667", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_instance_multiple", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_instance_multiple/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_instance_multiple/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e041a77ae817b7cfc2a861d3fc132a684066c6d6be624e40033752e2c9fd4581", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_instance_multiple/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_instance_multiple/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2699c57a8fb27f0f951809dc5810cd4f0564df0427b1bf05829ceab02a1cc2ad", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_instance_multiple/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_instance_multiple/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b3811d3b4eb985dceacbc06dd7e9a041a05a231ab39069dfe42dea90bbeab981", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_instance_multiple/aliases", + "ftype": 
"file", + "chksum_type": "sha256", + "chksum_sha256": "53e6939960ca4fc4d6ee256245600be295bf92853e87cd9be792672babc17fa3", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_instance_no_wait", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_instance_no_wait/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_instance_no_wait/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5126dd2f11f8f34d0ede998d43607bf17de052dfa2aabd2b1a81ba8b6c925e10", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_instance_no_wait/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_instance_no_wait/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "aeafd7fe740c4583cd27a52cc8037be15cd0e6c005d33c0de4b23abd6d47177f", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_instance_no_wait/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_instance_no_wait/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f3857bec0e39afe9de7f20c7163e9b79c877eb3f9ed5aa47d0b4e928313cfd44", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_instance_no_wait/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "744c278e479283bbc2db69dbce00c9a77244f29e2985a2b66cb4eadfe01bc667", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_metadata_options", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_metadata_options/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_metadata_options/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3f424ac5e001e4d541cb9d7d98bbeb6fb71fe2b49dc412b3359eb2b698b3326a", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_metadata_options/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_metadata_options/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "586ffe2a20ffb753d4e7dc6ccf5bb1ce67c814342c23a7d9def6c583ab2818fc", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_metadata_options/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_metadata_options/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "280fd9edf9ebb68f63ec9bb65bb63e5a16f08e005aa4ee570daabe77fb25eed1", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_metadata_options/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "744c278e479283bbc2db69dbce00c9a77244f29e2985a2b66cb4eadfe01bc667", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_security_group", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/ec2_instance_security_group/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_security_group/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9195538411ba28af20b1c3ce260eb1206988fe62e8310316a31e6f830a6f8faa", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_security_group/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_security_group/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5a9b52acddd9d11f70b1afc019bc2284a2adba718322b36fbd913b59dc5c29f4", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_security_group/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_security_group/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "523d97570d5e268df483f7908b78d6e02ed788b2c36992d8626d2090b41b747f", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_security_group/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "744c278e479283bbc2db69dbce00c9a77244f29e2985a2b66cb4eadfe01bc667", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_state_config_updates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_state_config_updates/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_state_config_updates/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b12a1c04e3fef6db7211abe9afca81288af476ca2a4319eccbf2dba151b4f4a9", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_state_config_updates/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_state_config_updates/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "139d957630a9cb1a2d1321361e1ace0fada1d628477a9e1bcef168a9a5acb72d", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_state_config_updates/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_state_config_updates/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c30cc0c4d13ffd359fe19edd733f4df223a9939363330baada40e9a4923c9653", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_state_config_updates/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "744c278e479283bbc2db69dbce00c9a77244f29e2985a2b66cb4eadfe01bc667", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_tags_and_vpc_settings", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_tags_and_vpc_settings/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_tags_and_vpc_settings/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "bbb6c8571b46fcee6a2b5a98a65bad929f49d8d24dd35e2c29270b925261f55e", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_tags_and_vpc_settings/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_tags_and_vpc_settings/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be39dd1cacce48583d6280e03a118e46cfc908a2163936e54c8470bf6f5fc4a5", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_tags_and_vpc_settings/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_tags_and_vpc_settings/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "72734a33b60c61678718564705086400e53f68a8058fcb6f88dd359bce42c30a", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_tags_and_vpc_settings/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "744c278e479283bbc2db69dbce00c9a77244f29e2985a2b66cb4eadfe01bc667", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_termination_protection", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_termination_protection/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_termination_protection/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ff3f089572898d888685b33f07f1f9f768af93527934d9f5cc078d739b58d34", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_termination_protection/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_termination_protection/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e4a79cde2016a01dff9fb8383c017303a484d2e5ec6b9f74370ae68a28a9fd67", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_termination_protection/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_termination_protection/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ab0ea3bfa27bf85e7b636c77b70fd26233b5fce0099083108705bbf43486ecba", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_termination_protection/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "744c278e479283bbc2db69dbce00c9a77244f29e2985a2b66cb4eadfe01bc667", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_uptime", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_uptime/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_uptime/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3642cf059f9bc498228597aac751f90b71101c7692bd81d084a773dba273b9ae", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_uptime/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/ec2_instance_uptime/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "67b05955b48ab707ee06a421f2c44173f877b2ce293262dcb66ceb3e0e7c4b2c", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_uptime/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_uptime/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cc27504c33cba9ebe7f2ace688ba542142199335eed42a9cc71e84f1adf093cf", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_uptime/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "744c278e479283bbc2db69dbce00c9a77244f29e2985a2b66cb4eadfe01bc667", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_key", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_key/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_key/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "26aad832330421916caec9fe34ebc8d1bfa90d867b66ad745f4c12ebe84cc3c3", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_key/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_key/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3cf583b4d998f96b6aee51f9ab086732cf4c9872edca8058897bd76248b70608", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_key/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_key/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "53afa4fa84b2573ab18b905ad612a91961f6e78cce3684403a0c4110676456d1", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_key/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cf4fb8f0e9df1f65d20fb104f78d7eb3f5a36caaaefb05c0b3e1411e06fb6211", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_metadata_facts", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_metadata_facts/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_metadata_facts/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c294f0a3c12f8ae47d00ce83410f66a51308b6611261bc8a97f228402e904505", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_metadata_facts/playbooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_metadata_facts/playbooks/setup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "96ecd8a0109229981ae394291e77a7b2bffab6f1ee40fd6da3cc31d70e364ea6", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_metadata_facts/playbooks/teardown.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a5525db05ad8c0aed4b16873c4d429f4783107a5fbc050d8f17bbd0baa59f705", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_metadata_facts/playbooks/test_metadata.yml", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "3eecc08e4bfd50d4acb88b1700a8ffcc6747cca51e886205efa2f1e11beae0ad", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_metadata_facts/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_metadata_facts/templates/inventory.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7c6d0291d7fb8c6b51f18ed6809582ffcad49c57a2978b1aaf461e994c1c2256", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_metadata_facts/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f07c68a27cc071e171721b6ca9f0a746ca96e6c0c8ef34168a6b7576f6dbf7e2", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_metadata_facts/runme.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bc4362a0e08261f353f20a25bdff675183addfdca62c700c6d04315efb908f47", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_security_group", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_security_group/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_security_group/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0f708cce7788b24124e9ac7b36c00ebefe26cc05ce69404f5a6538b09a928e0a", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_security_group/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_security_group/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_security_group/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_security_group/tasks/data_validation.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "abdc617375c38e979faec977c117e0222b562dd57790967cd70285eae414a564", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_security_group/tasks/diff_mode.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a2e7c53854f63ff9d694e53c71d918577b9db2813e898844c1e218fb717be1f9", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_security_group/tasks/egress_tests.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "45866ac187b9b2d08e62c7192534c1fb4324d1074c7ce0e99f23af7a4542725b", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_security_group/tasks/group_info.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fe9c254d9db27cb08dd78f8a915affa46b8c29bd3910c8bf36fc6a6887f94dda", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_security_group/tasks/icmp_verbs.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7d9869fa6694eb6314d89e642e4cfbd202235e5fa2cd8ff2eb6c1dff248f4fdd", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_security_group/tasks/ipv6_default_tests.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f5e31c187ae076f3fc2f56b32526515b419319301459030a2dfccb9ed48c5887", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_security_group/tasks/main.yml", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "98202e16abe09e43229136945781b8c9571a0ccfbe7499d13b8f29f462b684fb", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_security_group/tasks/multi_account.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c5249cb541d660e400607344b991860732c733b0db1b02a471b9e1a531446a49", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_security_group/tasks/multi_nested_target.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c0e3bf023c0515b10dc60136e6764b152d38f2235df06d4c566d7140c8ebd47a", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_security_group/tasks/numeric_protos.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "255ae824e4a300df540242151e8cc8035b06646af0761009dcd4b68dfd807579", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_security_group/tasks/rule_group_create.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d03bd8726223c5716f4379701481af67c51f4c46683438d01ab294503e58d65c", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_security_group/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "12f9d6cf5aa25e27922fd1e3301b30f1972f54371dcc3a5c58249ae29d1ddf48", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_snapshot", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_snapshot/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_snapshot/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "873903f9abb784a3e395685d19806c065347dad6f1ace7bc67638e3e842692e9", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_snapshot/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_snapshot/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e1d851188d9e6d7d833aabae61c46f0f9421f9138c6b348905598866242259c8", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_snapshot/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_snapshot/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "260739ae7af733e61ceb38c6678f3ad2c61954bf4f0ef8f046e2a260c48e2d28", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_snapshot/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0d795dbf72b8c1338bbdc7e386715c5f9f53eda9a5d43f61915e58c1d3847237", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_spot_instance", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_spot_instance/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_spot_instance/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e63089a34e6352d80cece0c5551d5a43c560295facbb549e9277c2c3e113afa2", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_spot_instance/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_spot_instance/meta/main.yml", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "1c33088718db08689d0331e05f8f62ffba98125ee70cc597b822a2d8abdc2513", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_spot_instance/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_spot_instance/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "21d2fd3e53ec0805cb499189651268863673b49a76b163fb0827c423bf1935b7", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_spot_instance/tasks/terminate_associated_instances.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5ce802f50c6f5a4ce6657b67e6aa93dc04c224a9dc7be9853dacabadf354d94a", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_spot_instance/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "50cbafbb10bd16de32679f4ccf37c9ba04750c01efaa766e3bb711beae548fd7", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_tag", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_tag/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_tag/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b756aced2d19afadd3589244b1937cc90f8a96f709d5ea966f6a55a96bc4d3a3", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_tag/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_tag/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_tag/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_tag/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6436de2db81e7174e0acf9632a9d8cce617986c855675ab4265fe11954aac1d1", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_tag/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_tag/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79db6a6656e23e90127a8759ccb5371abb6b58652f871c5e12c72d9387bec871", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_tag/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c68801d5d9a4189a5e8f2bcc2b939f9d995786d81dcda63ab340812b8bfdfd26", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vol", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vol/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vol/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6406c0bbbe832898fc958d854f7ced5ce2764f9a27212deee526e20c884b4256", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vol/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vol/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"e1d851188d9e6d7d833aabae61c46f0f9421f9138c6b348905598866242259c8", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vol/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vol/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a54823391b6a18fc0d67f2f6167173c57146dfe6c3f7dbf3c058748f14c4da5e", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vol/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8accea56774dc7a0f00c7edbe541f32bb70e225fe337109168b2f5c0ceb39973", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_dhcp_option", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_dhcp_option/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_dhcp_option/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a1a63e4e346ae31af24867279086058701f3bdb09586918e6451fc4766459488", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_dhcp_option/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_dhcp_option/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_dhcp_option/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_dhcp_option/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "26cc856de01f82f19ab3e52dbed151f02055b8fbb2f186eb3c15d0218e5df571", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_dhcp_option/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_endpoint", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_endpoint/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_endpoint/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "55bde63e4f9fd46da09e93ba507f4f32495ea895bee4d441bc50500a81071c12", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_endpoint/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_endpoint/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_endpoint/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_endpoint/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f2af99e1e5f8bd175311f939526a42db00ccf44d159413e1efd313fa8f64e963", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_endpoint/aliases", + "ftype": "file", 
+ "chksum_type": "sha256", + "chksum_sha256": "3d93249274841baf16f40cd81a2d5d45998657b730dc1d403c58b63c70db320c", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_endpoint_service_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_endpoint_service_info/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_endpoint_service_info/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f124b988f1cf4242dfee3dd179059596c9074a8da01c9a45215d01b0d31b09ad", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_endpoint_service_info/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_endpoint_service_info/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_endpoint_service_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_endpoint_service_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4d1f5c0c649eb9d5e890f11221eab12970ab1b861cfd3602d761789066027df8", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_endpoint_service_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "461593e4cb3cfe358d76f487c60090ca33644c2eb8a3ed51243932f74c86ed31", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_igw", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_igw/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_igw/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ba41c73b84da2a29f97375701091b2606096e9a07d3c3c0514a73f5e79c0fed2", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_igw/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_igw/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_igw/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_igw/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b606d37f005b3f104cc7f797528096a6eba723864a38b6a70e4fb59bc4675a1b", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_igw/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "99b1514cbe706973df0b2b91dea44eb9222a080d9bffe5768656c3bdbe42c056", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_nat_gateway", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_nat_gateway/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/ec2_vpc_nat_gateway/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "93704cdd612081cd2ca9e64a6bbfc0b8d1be1926b1df0408d98af1b05cff988b", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_nat_gateway/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_nat_gateway/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_nat_gateway/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_nat_gateway/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e43ad8f4fc0453954f25e70da2ba8565b3d2fe89e04429c8c3c223add01b8b58", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_nat_gateway/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d61b105a69bae047980e85d2bed257b272069c3b0fac90135b95a639d435a9f3", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_net", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_net/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_net/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "77e4d3e9cb6695db10b0a64b81ff7eb2207dd4c1501b7c768c737e466f7427e0", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_net/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_net/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_net/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_net/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "40102958c415d84ae3f6cc4e1288d5361da4d85e51f6b210367f636a7fc235d0", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_net/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a0207940db8ca0d920265404a52b42af659833e58f0ec731f774cd1ddc23f45b", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_route_table", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_route_table/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_route_table/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e7ad1e1518204ec8211cbe2aa48b8b7d86a04b6eec712a9a3914538ba241621b", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_route_table/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_route_table/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"2c6abcfc99d7925638df412ccfe9cd7a92321ec631d0642e0e62831143718c3b", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_route_table/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_route_table/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9da469a0e6a3528c8c9e1b9923c0a2f52c7ef4cbbfdf2eff68ee26fd13a76b7a", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_route_table/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5635521e21b810f2f9774080f73d7490b43e2325db3ddc2d3e8280b184b59dbe", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_subnet", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_subnet/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_subnet/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c1e9227cad9cc7c427615ec8e92e428d6f7a84c5620f70cfc8f12f8995306be0", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_subnet/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_subnet/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1c33088718db08689d0331e05f8f62ffba98125ee70cc597b822a2d8abdc2513", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_subnet/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_subnet/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "de531c6e79b36bf46dbef863e0d2220538cc4b623f72a308c534673a02a7c87f", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_subnet/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0cab1bb4ce6a89a690d07d5b692bd0ddb5ef2430b036bd10566995661e454496", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_application_lb", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/elb_application_lb/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/elb_application_lb/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "00ec7389828db771b1112827429a8bc35946be8b7042b2ebfc47b04e39f99f85", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_application_lb/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/elb_application_lb/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f779a65103c66e8b932834751833f0abf036d864ee1c062daa16430210fb465a", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_application_lb/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/elb_application_lb/templates/policy.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9e23861ed04c68829794a9697ffc1814412b02fd28da421621139dc2b76b017c", + "format": 1 + }, + { + "name": 
"tests/integration/targets/elb_application_lb/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2fe07ce7419b7816c616c4e55a8f5723b6c5f17b20a68105022db2d872564be3", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2430941389290ad8ff9f9e774b283cc06b00ec075700a3f5c3227f318ad1d509", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cf34521be7e4bc6bc4b8519a57206430c67214c501e7e335c71424a7079ced07", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/basic_internal.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bc61ae4f3c58a152986296e41eb968be76b9a6d3ba1c6d2b167d420a2ab49f88", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/basic_public.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "21037708db901115f2e6cde37fac4f71998074c76af61e3bdf5f747914754236", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/cleanup_instances.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "02b1c64c3cd27653e179cab4f84f5a7641b06aaf3dcaf8bc85c14b522a9016fb", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/cleanup_s3.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7fc910cb3baf887ed67babc96b70a0d546b8c9db6d041566bc03748da6bcbad4", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/cleanup_vpc.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4bcc5651ada4b1cba51d8969c56edce1eeac0d0349aa51fe89e4fc63add70cc0", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/complex_changes.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d024d5a5ac80552c3e42bc43c4b25e8007c19c1ece9f94e7c394a262db13d930", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/describe_region.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b225677a511f60f3a5588079fefafa7b503f26eb4f8e09d400462ae33a28400a", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/https_listeners.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cc89cb728b3fdc28a8c3f8f02dcfdfe878e2e31601386c1d9ffd67df5c06629d", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "14238fa77e41cbce02055ae03dcc39b19e7fe0b6b9971d9e11ea29e6954263d1", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/missing_params.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"e7a36b4d4849e75bb9d813d27ead50dea429b804e625bd86b4fcaa4b1c4d0bb9", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/schema_change.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ce8d10c0731df0d8dba2b49787cada5d84b3262686a3e282a4b90946ed1e0814", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/setup_instances.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1e42aea202f0f2b0dc8d90ea6edcc2b2fe9150b3d132be86afa17cd160cb8e82", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/setup_s3.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9cfabccd72d651f5239920f5b33a54de4a7a815ec712af1a90d60ba75d1f4894", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/setup_vpc.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2250983fedb6d723cbc06a5e5cd5be17d9e03ea9043e10756e9960038107d73a", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/simple_changes.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "07e3b6e530fd110822e2c17eef6615088d337fee9696b5221ec8f6b35b0f4699", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/simple_cross_az.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e8b93f83e77ab10e1582f3a43b75f65d58a538bc9b77d6b6c4ca843f05518bb2", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/simple_draining_timeout.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3746d258fb19751d61a7895aa25c59e7be07fc4dc1b85ee581697e083ddd8b0f", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/simple_healthcheck.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eafbc65e9ea7a3966e778640a1c309d418a93a5cfb2ec765adbca06b25cdc301", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/simple_idle_timeout.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e42a9726ce505e5b8992c431e530de69055d6bed8a731e60fc6b3776935729ef", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/simple_instances.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f5a1fcfe03f3c1ba57f8142e0573a8ba8f2dcf2c2c36b23fd6fc098f49e9b889", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/simple_listeners.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "daba3ca272e15b5a5c25b9433892dc3705a63048a1c5f298be6fd87e85303495", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/simple_logging.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3430cacaba58d46881257a2937543d212391d6ab5224a9664ed1b91381bcf42b", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/simple_proxy_policy.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d1bf8b8792a4f1a2ba42339a78a77308dcbb9446d97921cc61e2a2daddac2906", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/simple_securitygroups.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "da7a7902701108067ceee62a4144f280a52d636866df4ce75477fb846a371b2c", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/simple_stickiness.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"59f0345b108aa516d87e25c13f8cefc5367f2d2b6eff55f09854556435343db8", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/simple_tags.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "08f3462710bda1a06157a465c041528c800ee5e77060774e288185489428b2f0", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/templates/s3_policy.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "de059f94471359d3123d3cdf2b2cdf0ed239d44cbabcf093314cb6991b266116", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a37372c5df29a85df3d7759887f11d5caceba506dfd51e32059f86f8fa879c8b", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "16a2c2f6008f6c2e62fc1a566539679ea95ffa546fe82071a6be5f0d8a0d0f33", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_policy", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_policy/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_policy/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d755ebbc3a0fee576cdd1c4c8f5a729bd5699a7c44db85e8c71a5505d65fd4ad", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_policy/files", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_policy/files/no_access.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a8c0e5d34416275601b83555c466f1cb5047ab4fdab5564440dd012b878cca6b", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_policy/files/no_access_with_id.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4d230ad90176b0220c0214ff379452636e5300b22b41fd4bdb4444ceb6a52277", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_policy/files/no_access_with_second_id.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f0fcd78f060bfa68ad97f43ab859d8a3164dbb2cdf9179c840eea4530e1caa90", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_policy/files/no_trust.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ea1765d324f799b2681560118c047890f7eb44769584f4836c40c36712d835c", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_policy/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_policy/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_policy/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_policy/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"3906b5b1ac793dc9142d44394595fe5dce86911d233d770e7de846bc6f593ccf", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_policy/tasks/object.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ff92b23dff61545afef1f1e10ab656502760e3bd68ca9b3c9390e2285c8bdc3c", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_policy/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9c73654d230ec4ba4133feb04f30e762cbb533e7334f7d1cc660842e39cee8ad", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_user", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_user/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_user/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "542ea8bf8b179d109690b3c6fff741cf835e568085bd4e546f5472662ed032a7", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_user/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_user/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_user/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_user/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bf6cfc2489ef6639b194c92d2e862279be349e3fb835e6a2a754fe3fa2d7379f", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_user/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "82578d1c532c27a90942dbe22985a0b4228da71c7710c9516368df712226c627", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/create_environment_script.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7fab6eae1dd6c175c638a8d241f65106938c898687eb864e8c2dd1c5b7761ed2", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/create_inventory_config.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "605a7f63f212908dfca5b8f52a01def2c2cb06500c4c4bc33f7356d6b4eb35d9", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/empty_inventory_config.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "44a9f1885f675a872bebe0a1af0c40551688c8ccc1aeb700e74926a8edf69278", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/populate_cache.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"498cfcf7efc5761cba09e57a85481daa5f4624efba1e16d0ebb41b7bca5ee0ac", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/setup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "65f7d9de4718364d9febb90752aa521f07115c1f2e0bf5ffbd7098a3a664efef", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/tear_down.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fb7b069cb3653ca58ed6793a4e85a414ea6e9843fba4547a2768367fc4fbe7c3", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4a57efe1ec08416ea90c3a80c03d0d3240a928933d5f46251acf97c9375b0a0e", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_inventory_cache.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f343af936f7105a81f703b55b5ed86bd3aab8b35ca6dc0672c5e5cca8dda3c16", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b106059917e67c3143a9f6d8142f7e5495bb9a81593a645c1497123bc556f534", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_concatenation.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c462b0dbec58aa61c0b73aaed918ef2a7d68b2ec9faa18d1d522f78057411283", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "99c76c175488e05045ae6030db411dfdbca54607d087756d5906d723eaccb9a5", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostnames_using_tags.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "129f9f8966cd68c9a9052956f968d24f576524c46b2c6aacdda2b3d166703adf", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostnames_using_tags_classic.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0e5843ad1d15bd2c8d031c7041c78691ad63890a3461d199397c5b8786663573", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostvars_prefix_suffix.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dab55f2fe7b49fad7d719b494d4d17672b171d484755e66830b96c7cd0e61d83", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_include_or_exclude_filters.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dee03378a2649c212a5c9b2c27407bdb928944740ff3a1e917a106e45c29aef0", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_literal_string.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "12afc04fbc175f7b343f35f2e6899a8225afa5af0cab5705d7708aaecbcff792", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_use_contrib_script_keys.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e355fd30d06e1fe489a771f376736308eb0f573227b746fd668d0b9b9017e113", + "format": 1 + }, + 
{ + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_refresh_inventory.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "06f7fada2b3d684329de1bce7e46970733ae4614ffc7878fa406cf45bdc46cda", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/templates/inventory.yml.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7b9771837ad83a89cc76088bf8aa09be6f6d5e8c980f3ed4d72fb41fcb192af6", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/templates/inventory_with_cache.yml.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "212ab399c1d1f49130ad7755b99b619def84d42129f1a7d4e66ba24fcbd76c10", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/templates/inventory_with_concatenation.yml.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6777b9eaea5da24615ec813fcab4f75cfd6fb02870eff6021fad80ca104f505b", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/templates/inventory_with_constructed.yml.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "43bad11c0867b7e50eba2a7319c390c4014e8f14817bf4e7ceb415e2dddc0f32", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostnames_using_tags.yml.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3fd1a1237388ff3b4514333f334271152cf33a6daf1da2b6e3bf74f01c4db03c", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostnames_using_tags_classic.yml.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e263f7ccc1b26ff35e0553e57f86618eb20055339d87bedf5acabc0760d8c84f", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostvars_prefix_suffix.yml.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "99d5225654f9db2ee82fef42c4fb22d2baca13f2bfde099915aed3ffdfbdffeb", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/templates/inventory_with_include_or_exclude_filters.yml.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "921fb290a6b74b12988cce58a07ca052396ebd9d8313e7affca103a32b27b022", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/templates/inventory_with_literal_string.yml.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0b4886b818bbb37aa279a2dfb4c7035beecff05a31d18c1ee1676059795a9012", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/templates/inventory_with_template.yml.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "afa5f9d7fc119409ecb2e6b5f45409ed738750034d7d96fc34580d64dd84b811", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/templates/inventory_with_use_contrib_script_keys.yml.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2cfe0d4af5b96e2dba042d80cb5de7dd62eb3eff3d1203486aadf76a9119c881", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b0ba872ea056642fd052ba20c4f3cccc7e0397ccb828974bb2f267b4d9aa38f0", + "format": 1 + }, + { + "name": 
"tests/integration/targets/inventory_aws_ec2/runme.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c46b125a194647d691b85692510eadb2af8f73307b0b52619206c18d00378d88", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/test.aws_ec2.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/playbooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/playbooks/create_inventory_config.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1424ca34811cf10a2176b56269860dcc9e82cdfc3e7bc91db10658aceb8f11e0", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/playbooks/empty_inventory_config.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "31b80c73e9e0abea01d5836da0de13fa1bf5a391313b4543ad8bdd2adfd415cf", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/playbooks/populate_cache.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9bfd878b0292519892805d79d87a218146bd930bdf61e667ec0d26357f21c208", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/playbooks/test_invalid_aws_rds_inventory_config.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b22eb19a90f4ac43ea966bd586df79c6ada6ef3e6a6e46df2f5b65cf82e4f00a", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_cache.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79c8d37631bfbc5a896140e0c9ca74f4144f51d5a161da353fab4026ac797d8c", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_with_hostvars_prefix_suffix.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "db52490888a480faff33f65f5740b9f1bd0c3f8cb99ac8daa137e2403f93ff9c", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79a8713d5f26b63425c2a5f6f9f481286ca8ed081604d5c7e0fd725197b758b7", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory_with_constructed.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d12bd329752b9a2c0bacb9c7ce2ab839f112eaa35390e8fb0b15efa1ec5ba36a", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/playbooks/test_refresh_inventory.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "da0e9005ecd179d83b2bbca91b68842bbf03233e5be7e228f431743cb6131f21", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, 
+ "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/templates/inventory.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "046bbce61938b67a8f51d9e99de64b82a588659550436b858d10975ddaf716ce", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/templates/inventory_with_cache.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "54ede14b2ec95c3c6606905775d3885120039da90433e409ad5002ad78c65d5b", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/templates/inventory_with_constructed.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c1d723e784e6b7d66b15519e612c6758a132fd8cd814fa68959929fc9f577294", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/templates/inventory_with_hostvars_prefix_suffix.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c29be72ba78e8fddb826c1f1d21e8fa4cd300fb1a5211958071a19879a02a971", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b6b7573399ec5210a67f93fa47cb62827da6839b4ce43490bbfa70d51e731259", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/runme.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af97d9a033a74b083532cd1aab57952ea9df03e2ad6debf6ed3bb3456d41dd85", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/test.aws_rds.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/kms_key", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/kms_key/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/kms_key/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/kms_key/roles", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/kms_key/roles/aws_kms", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/kms_key/roles/aws_kms/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/kms_key/roles/aws_kms/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c82f8095db0c9a559a1d632721729a088c9179fd82cc8aa79669f666877eac84", + "format": 1 + }, + { + "name": "tests/integration/targets/kms_key/roles/aws_kms/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/kms_key/roles/aws_kms/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2a5ff0edca25fd6a728a353693a7569292e1e52a2ce140a92f725c4412d93ffa", + "format": 1 + }, + { + "name": "tests/integration/targets/kms_key/roles/aws_kms/tasks/test_grants.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "272a8fd44d3ce3635079bbf882200239f364b2946624e732e726daf84e32843b", + "format": 1 + }, + { + "name": 
"tests/integration/targets/kms_key/roles/aws_kms/tasks/test_modify.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8a0f9a62d09598ce24f52330ff1e057675d9cba1f3bdf2ba0aea4703a2e1db9d", + "format": 1 + }, + { + "name": "tests/integration/targets/kms_key/roles/aws_kms/tasks/test_multi_region.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4f73d328daca1b0297440cb36acc7faf7d4ea27072bc61792e994716cc765103", + "format": 1 + }, + { + "name": "tests/integration/targets/kms_key/roles/aws_kms/tasks/test_states.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2e63aae4393984dc673cbebc6651ab3986247ea6b90fd932d93fa7863d548b2d", + "format": 1 + }, + { + "name": "tests/integration/targets/kms_key/roles/aws_kms/tasks/test_tagging.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b00c0061bd60bc6a9ba44f1eb207f5081823d798b1c587cf5627a2afeb068bca", + "format": 1 + }, + { + "name": "tests/integration/targets/kms_key/roles/aws_kms/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/kms_key/roles/aws_kms/templates/console-policy-no-key-rotation.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb9de34589691fb37000392fb15437a0e5ce81007ccc46cced12b15a912c1b3c", + "format": 1 + }, + { + "name": "tests/integration/targets/kms_key/roles/aws_kms/templates/console-policy.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3618715e5d129f5e18e0364f2b52bca33e1368390f8e6300ba9cb80e4366f3a", + "format": 1 + }, + { + "name": "tests/integration/targets/kms_key/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "73537c39656d72365b5b51d3a502d27393ca7da09c890905268e6dcede7c2d9f", + "format": 1 + }, + { + "name": "tests/integration/targets/kms_key/inventory", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c847e40e7eca6cb9de0f7d31d405fe2ad773971e9e2dbe131010bad195178283", + "format": 1 + }, + { + "name": "tests/integration/targets/kms_key/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6b40fee3d22d8b16bb742a41faacae1739b10df622b332ba5cf4af7fb41b137d", + "format": 1 + }, + { + "name": "tests/integration/targets/kms_key/runme.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "55b269961ef0126633256ea7c581c3703268a25ca7577394d0c31ea2a35ef19c", + "format": 1 + }, + { + "name": "tests/integration/targets/lambda", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lambda/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lambda/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c19dce9ce3d0ec9bf88620e270a98cd440487c7bfed34c4da96b831d4e7869a2", + "format": 1 + }, + { + "name": "tests/integration/targets/lambda/files", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lambda/files/mini_lambda.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "16130d3d2b7a9a49db068ff7cf7affa7879c5fadf8f35d4c80e82541c7fe2042", + "format": 1 + }, + { + "name": "tests/integration/targets/lambda/files/minimal_trust_policy.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b9cc71a38c80e687ad1946218fb689594cfb66a3deb9437944c40e670a4a8633", + "format": 1 
+ }, + { + "name": "tests/integration/targets/lambda/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lambda/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e452f2bb46fce963e479af5522e11515adc1275d0290e66db1c727c0854a668b", + "format": 1 + }, + { + "name": "tests/integration/targets/lambda/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lambda/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b6bf78402727104789fdd09e87ed25db80b12fc25ddaee751bdd395db7c38f22", + "format": 1 + }, + { + "name": "tests/integration/targets/lambda/tasks/tagging.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d73654cd0f7daf076e651f27b3c204cecf6cc43b04f7d52670f965e5725231be", + "format": 1 + }, + { + "name": "tests/integration/targets/lambda/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "acdd9f1f6fc2157f2f0e2aee45e4ec0c40eaee3888d76d6f0d096ab8b5c07e64", + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_alias", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_alias/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_alias/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "664745ab6d32fd9071c63a8211719e81b24bf19e363aa29dad7b0d1b0f988e32", + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_alias/files", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_alias/files/mini_lambda.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "16130d3d2b7a9a49db068ff7cf7affa7879c5fadf8f35d4c80e82541c7fe2042", + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_alias/files/minimal_trust_policy.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b9cc71a38c80e687ad1946218fb689594cfb66a3deb9437944c40e670a4a8633", + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_alias/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_alias/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_alias/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_alias/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "516f3312eb57a0c125adeaf838a4275b5e02d2e81bb5cffd87df7e14b86d5259", + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_alias/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_event", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_event/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/lambda_event/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2eb53a7d754cb621f75687f984e27221fa8f3e6a3308246e2f9db2ec6dd5b913", + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_event/files", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_event/files/mini_lambda.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "16130d3d2b7a9a49db068ff7cf7affa7879c5fadf8f35d4c80e82541c7fe2042", + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_event/files/minimal_trust_policy.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b9cc71a38c80e687ad1946218fb689594cfb66a3deb9437944c40e670a4a8633", + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_event/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_event/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c194021b0d45c42953cf7350a8f275023b2945c48c93c971246d002f61483bcb", + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_event/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_event/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a086cd63acebc6fb1c56f4def9ff3dca0d4c3b5c98bc8dfdf1eebf71fff65d00", + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_event/tasks/setup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cb988af25eccc07c2da418d04be1c3518051ef35d8ee9da1d15ede0945951a4a", + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_event/tasks/teardown.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd9e2a9a6725a078ebf1c04a813f62d34b3ab6d55b8b127c3c411ac9e4c86c01", + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_event/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_layer", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_layer/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_layer/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b79f0d546c3cba1b22799ee214957cd1a168218bfc8f0ff25f035ad02d48eb0c", + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_layer/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_layer/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "caea944e35affd11a58be4588f53a39fce27439e1509a0363babe6832d36ca88", + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_layer/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "afa12c97da4fecfa5d0f191025ea927554d40560423525847c2675bcbb0fa2a8", + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_policy", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_policy/defaults", + "ftype": 
"dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_policy/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2c9fcedd889550626451608ae3eebe274918937439d3ea36f1c88f68eaa589a0", + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_policy/files", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_policy/files/mini_http_lambda.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1853968e773137af7affc69e465ade98d6c13b0ca56b711b9cd2887344e34e91", + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_policy/files/minimal_trust_policy.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b9cc71a38c80e687ad1946218fb689594cfb66a3deb9437944c40e670a4a8633", + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_policy/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_policy/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_policy/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_policy/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "415cae22b86289843c727380b00e97f39035172771b59da4d9bc57971ae349bf", + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_policy/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_policy/templates/endpoint-test-swagger-api.yml.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eed3a83d47a68b8e76f44b037a164fccb6c4ba380b5206efae3207fe399d127b", + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_policy/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "format": 1 + }, + { + "name": "tests/integration/targets/legacy_missing_tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/legacy_missing_tests/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/legacy_missing_tests/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/legacy_missing_tests/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0c492cf4db5808785c66f4fb6229857d306bf3dc32fed7d078db970064542c23", + "format": 1 + }, + { + "name": "tests/integration/targets/legacy_missing_tests/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6179bf7d20f7c33c1ee6847bb04348ab09b2103c8352b6a119b60f20dfa89d3c", + "format": 1 + }, + { + "name": "tests/integration/targets/lookup_aws_account_attribute", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lookup_aws_account_attribute/meta", + "ftype": "dir", + "chksum_type": null, + 
"chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lookup_aws_account_attribute/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/lookup_aws_account_attribute/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lookup_aws_account_attribute/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6500060d4ee06642300066f277634203e32639982b32220c5d31e96d775a6cbd", + "format": 1 + }, + { + "name": "tests/integration/targets/lookup_aws_account_attribute/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "format": 1 + }, + { + "name": "tests/integration/targets/lookup_aws_secret", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lookup_aws_secret/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lookup_aws_secret/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/lookup_aws_secret/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lookup_aws_secret/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1bd9f68779d5e7462cb7e1e3bdd340191125efd3dd06205a25830c2dae0f79cc", + "format": 1 + }, + { + "name": "tests/integration/targets/lookup_aws_secret/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "format": 1 + }, + { + "name": "tests/integration/targets/lookup_aws_service_ip_ranges", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lookup_aws_service_ip_ranges/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lookup_aws_service_ip_ranges/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/lookup_aws_service_ip_ranges/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lookup_aws_service_ip_ranges/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5529a23559f8549351f81b7a7598c4ced65016725ba188e6e94cc45eb1266924", + "format": 1 + }, + { + "name": "tests/integration/targets/lookup_aws_service_ip_ranges/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "format": 1 + }, + { + "name": "tests/integration/targets/lookup_aws_ssm", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lookup_aws_ssm/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/lookup_aws_ssm/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4e11fd06db20ddb5ef0fe6fe683937e50ec4e7150af594468c4dfb4bc8f564b3", + "format": 1 + }, + { + "name": "tests/integration/targets/lookup_aws_ssm/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lookup_aws_ssm/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/lookup_aws_ssm/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lookup_aws_ssm/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4370d99e0b2ae5e437c0484d7ff585b246c9847c9821ed8c342edcc2d1b036d5", + "format": 1 + }, + { + "name": "tests/integration/targets/lookup_aws_ssm/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_botocore_recorder", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_botocore_recorder/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1e73ceb5d8bb3200f6cdf422fc8043a8889113d77ddddc2be20bf2222a7a19bf", + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_botocore_recorder/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "169eadd8fde24c168511a04b50094761058265ca92419d8e8bda99b2b3f519e9", + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_botocore_recorder/record.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7c3d771209cd6aec089477cda1bef6e4b861229ec4a33d57fdaec60678c4f99c", + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_botocore_recorder/recording.tar.gz", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "efd891389fedb438e04fa23e66c9a29cf3fd481d5c206f144bb2b920aee06ae7", + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_botocore_recorder/runme.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0d755becb6ca2683ac527c98cca54f3095f923a16fd1be57bf1ee5bafab2008f", + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_core", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_core/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_core/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_core/roles", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/files", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, 
+ { + "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/files/amazonroot.pem", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2c43952ee9e000ff2acc4e2ed0897c0a72ad5fa72c3d934e81741cbd54f05bd1", + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/files/isrg-x1.pem", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "22b557a27055b33606b6559f37703928d3e4ad79f110b407d04986e1843543d1", + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/library", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/library/example_module.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6bcaf886524922e05fae62d6b7efefd576925c7148e948fe0b43ba41f14bdb47", + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd00caf0e9d7beb2c20dd7a7c76486ab72dcbb840404099c0e8d349cdd2d193f", + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/ca_bundle.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "96f95ee62565f62141122c6ebf63bb25d472f88135703716f395ba64c8ed30d3", + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/credentials.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "90995fadd544d2ac3490121a30cd7414fdb89495231bdf16535a6b6c7d491638", + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/endpoints.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7c8d0f5147bcb991f8f393e55d775d1eb135b38e5704f53ef2944efa85fc8d8d", + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b63ff3b3058da02396d2322c56e9fe7dd6ed282a247bcc841647ee7dab6e2127", + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/profiles.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6602661786674f3269b75dab51476b0b6829a7d3c9e57338bda192a32bb2f768", + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_core/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_core/templates/boto_config.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ba7335ce0c8b8a32fc82bf7522a0f93d69190ff9895f4804985d2c08b7b3fd37", + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_core/templates/session_credentials.yml.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6104b125462eb5b6c5e5067e6c5b9041f0804c29755200fda62f0472a4a29f1e", + "format": 1 + }, + 
{ + "name": "tests/integration/targets/module_utils_core/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bcd07fe5c09a3e5e835002d1087f4a1b3aae2786926cc1d1504c6d5a20636975", + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_core/inventory", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4514e38376fcaaeb52cb4841f3aeeb15370a01099c19e4f2ed6a5f287a49b89a", + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_core/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d827deae19bd3b04df573d0dea7bda1dfe94334bc0815b392bf2b2a12dc113e9", + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_core/runme.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0d48d5adc889ec75147bf7ed1200f2cd1cde582de74e2523b9687e0204167cb5", + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_core/setup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d416d3ebcd9ea58c450a07ec98a78f42423bde3fdf2396971c8af836169e7b17", + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_waiter", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_waiter/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_waiter/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_waiter/roles", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_waiter/roles/get_waiter", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_waiter/roles/get_waiter/library", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_waiter/roles/get_waiter/library/example_module.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bc44c40027380e6a9a3a956be9f78bec67c8380287860c7db30f0f03d9e76cee", + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_waiter/roles/get_waiter/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_waiter/roles/get_waiter/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd00caf0e9d7beb2c20dd7a7c76486ab72dcbb840404099c0e8d349cdd2d193f", + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_waiter/roles/get_waiter/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_waiter/roles/get_waiter/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0ba97256d76043838f14cc1e067aeb46643d4c1d40defca3f8332fe8c2de157a", + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_waiter/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_waiter/inventory", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "4514e38376fcaaeb52cb4841f3aeeb15370a01099c19e4f2ed6a5f287a49b89a", + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_waiter/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2013d9803d3dfbf66388e1ef4228f2d74d348f524c01c3018bc7b464c0ec88b8", + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_waiter/runme.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b36bef221fbf1264fb6d387a52e5ca42d167ef7973225a30c7cd6005d6494ca4", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster/roles", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster/roles/rds_cluster", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster/roles/rds_cluster/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster/roles/rds_cluster/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "74311fd252ba30d07c77e207bea37a1c566e52f8f465ba5dad223fc680fe0c65", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster/roles/rds_cluster/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster/roles/rds_cluster/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cb3f91d54eee30e53e35b2b99905f70f169ed549fd78909d3dac2defc9ed8d3b", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster/roles/rds_cluster/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4ac8e608a6867bcc5375a1db64787c523948674084a1c3b61b68d37e003070e2", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_create.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "85166092969db79c5b6145e8ce098929ca190b189830f0c46d98288e85f4063a", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_create_sgs.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "506fc6355fbd5ecfc1ca25009ceb359ec1fabcc7135db26ecc913aafcabeb62f", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_modify.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b7e20545a3bb03c43756e5dfa2bdb5a3b7c853ea49eda56d1e2323eab2b15cfe", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_promote.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"0fb1602bbc79e598fe5d9ecaecda422c5a719e1cdd52e0b338ba747fc1a0aa88", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_restore.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b38c226fdb2b1bcb682f99162973ba048aec42a55630e41e13a803ba5292f94d", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_tag.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e79b6be0d5e954a13d7fb35ef3abaf1ba5c248f9ca6bcbd1b36489f9fb8331ef", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster/roles/rds_cluster/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster/roles/rds_cluster/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8bfe98bb85eb155136d362f7016c7170c8b81454fcaf270525214f82ce94aea6", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster/inventory", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1bfc45161167ced3786d8197e20dab02921f101a1871d94c25d89480a36eb8c7", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "301a6e4d4a8f17a435eea98c5f86e4fb4160139a8f0f002165167f3721ce7eb2", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster/runme.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0b2dd95c4737e8e2dd8eca5da901f55a087dbb5650a4eab5a41a66c581dcce43", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster_multi_az", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster_multi_az/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster_multi_az/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cc017fb99a9a99d9be7ffdb18ed34086249006df98271bbb2997fd28e65d39c3", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster_multi_az/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster_multi_az/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b2f0d274f535a224a2a3e50c15e4b0706486a32e6d18cb1807d323065b907089", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster_multi_az/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster_multi_az/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "906b29aa10c6416e8a9e1c626e6f09ff425a47aac77de4caa770be7245b716d4", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster_multi_az/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f23e235cde074cfce2be11a047a03ca7356bddf72e28a33edab44dd89b72661f", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster_snapshot", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/rds_cluster_snapshot/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster_snapshot/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8ba2b462427c3fa8d10262f2249b1e6dd514a37aaf95206bd8541bb2d2d198f8", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster_snapshot/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster_snapshot/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9a3c1389faadbafa6b3322044983a3af4867d6c33626fa3e89d072ddd7a58e31", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster_snapshot/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster_snapshot/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster_snapshot/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4d93440c6e474ab8063defa5ff0e0649a16c92a6c5de875920f07a0e03298aac", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_aurora", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_aurora/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_aurora/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "524590ab1600df2d1806f831aa527662ebe6e21c9470d790514c8f61be5d3e5e", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_aurora/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_aurora/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b1015b558bc4f41b9eed24d2dd4a6ed4343dd3303c329e6cc2c51ee61b7c992a", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_aurora/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e8746aed499a4ff9be1898f1fb0110f0eb8b8e5d8820a4152f39df099ec1799e", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_complex", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_complex/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_complex/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3b36d0b196f0ecb2623d9b99be17e31d88c4fba569aee06c0867f31091c4938e", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_complex/files", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_complex/files/enhanced_monitoring_assume_policy.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "caceff5bf3cd001617cbaf57a54982c114161db6b30f5f2fc2eaaea9a5f1df7e", + "format": 1 + }, + { + "name": 
"tests/integration/targets/rds_instance_complex/files/s3_integration_policy.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5dd2f786edfc49a726b5f3ad98826319c182d7f0022cca58e91089aaf648a7fd", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_complex/files/s3_integration_trust_policy.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "177a5727bdfa0d4300fb25c7d3b3dfe0f6b3797cefdf5e79137e3fd4206d765f", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_complex/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_complex/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bd136471d05616a40868db6bf5ea2a8ba3d0137151490fdecfe581c545a2b939", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_complex/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f05fe852c58d10a81d0b9ae9dbd5083e5bc9a87da428a61dac32969058a969a9", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_modify", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_modify/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_modify/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "826b00e874493425ef2de1cf1474fc9c3733e2d70f776da4efc82931fb5ca177", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_modify/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_modify/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dec3558cb2662ac6e4f829a850a8bf5c3c68db5e3cf5bf60591fa0dc89b32e04", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_modify/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f05fe852c58d10a81d0b9ae9dbd5083e5bc9a87da428a61dac32969058a969a9", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_processor", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_processor/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_processor/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7b01f34a02b793687f9f55838cf74bb7342b219e78246c79ffd009b3e3cefd60", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_processor/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_processor/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ca6fc9fb10b9b0963a7975ad52ce9f5af2f0c0945b077c462402b710d7af155b", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_processor/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f05fe852c58d10a81d0b9ae9dbd5083e5bc9a87da428a61dac32969058a969a9", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_replica", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + 
}, + { + "name": "tests/integration/targets/rds_instance_replica/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_replica/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f96fb71f24f9134abf68e92dddaabcbd183b656bf31898cd4e631e75a1062a5c", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_replica/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_replica/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "90a2926fecd2a13cd5abe3c95c64ef7506758941d38f124eb8a96316dd6c44ad", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_replica/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ce110431e0fb0e4da70a54aaa177a0695319d034e41ad599dc3d409de8c83fa3", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_restore", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_restore/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_restore/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "81ae2922a4ed566439f15ecc53ac5c2c7d37eeaa5052c303206bcc9f41ce6ddf", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_restore/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_restore/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0d522f8a3996b494a4ab50c8b7abc988a7d0733173be0f4be893a342d9aedebd", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_restore/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1882b290034e4bf36c20d8f56903842be49c3e5a06be7260b2ea18061e39f328", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_sgroups", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_sgroups/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_sgroups/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "81ae2922a4ed566439f15ecc53ac5c2c7d37eeaa5052c303206bcc9f41ce6ddf", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_sgroups/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_sgroups/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "827c673e8a91541815d98a105cc596f6afff225d776c560a10d883ec4a5f1496", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_sgroups/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f05fe852c58d10a81d0b9ae9dbd5083e5bc9a87da428a61dac32969058a969a9", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_snapshot", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_snapshot/defaults", + "ftype": "dir", + 
"chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_snapshot/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3ac5d264763668c46d6489c6edcbed16b48968b2c9d5809e083adbdc97d660e5", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_snapshot/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_snapshot/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b0766387416c6342bc36c29830f284895ab75295a989e25b937b3db820cbb416", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_snapshot/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_snapshot/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_snapshot/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "05aefa9e952bc1d1297394596de9bacaccc947919551a683c36c50a504d3dfbb", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_snapshot_mgmt", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_snapshot_mgmt/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_snapshot_mgmt/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "584b9ec76ffbc48e4f00f46ab30ce38a5eb6b8e4537d22c86cc7b5c5e85e65ab", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_snapshot_mgmt/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_snapshot_mgmt/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b5218fa374396085133d8975e80b90bb8b47d83f94dd6bc94e0283efbe679423", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_snapshot_mgmt/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8691747cef5bb035d965b18b7825aa53b01c7b689ab2d06c71db2c696ba70c79", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_states", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_states/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_states/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "81ae2922a4ed566439f15ecc53ac5c2c7d37eeaa5052c303206bcc9f41ce6ddf", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_states/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_states/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a7357246d8e135a9ab90b9c6af2ea0bf358defddb9a91cc05a571b5bd038ba78", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_states/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"8691747cef5bb035d965b18b7825aa53b01c7b689ab2d06c71db2c696ba70c79", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_tagging", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_tagging/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_tagging/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8e4806cc6d639be67743022d6e4a31dc068d158c0e41ec955ff1ecc810287221", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_tagging/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_tagging/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a8b294521173ab690e818ceb6b640753e6f0392e94f9fca5b1419602b2b531b2", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_tagging/tasks/test_tagging_gp3.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "52768d3f8993edc65103fcdaed2ed4d1ccc9e71d6517f6250b2587c8663bbf8d", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_tagging/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f05fe852c58d10a81d0b9ae9dbd5083e5bc9a87da428a61dac32969058a969a9", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_upgrade", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_upgrade/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_upgrade/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "474055de936f44fbd4893612a8526051b3eb2ec221eea082758acc6045e0f333", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_upgrade/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_upgrade/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "335eda47078acf773989e9d816ecea8a92386f9af1ab42fb144be533d3036384", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_upgrade/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f05fe852c58d10a81d0b9ae9dbd5083e5bc9a87da428a61dac32969058a969a9", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_option_group", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_option_group/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_option_group/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "49efe05452af19859c0dc32a6d36a63786133201c8fd44308b6b2289c77a5875", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_option_group/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_option_group/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": 
"tests/integration/targets/rds_option_group/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_option_group/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fb0246036c2c5c750bcf7b722070931cbcd015e69cfce1b1a7151102e1893a2f", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_option_group/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8652e31d97a6c515044c1c5a865c69f2fd9bc60d5a8748f2abb69d303585d63c", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_param_group", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_param_group/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_param_group/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3d6dd953f30023f15bf076e8c7eeae263a355f108c921fff3325a0878395324d", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_param_group/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_param_group/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_param_group/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_param_group/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "944ff1c18ed987a688117790cbec1db97da855954e2f9634fce071e36d39b5e2", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_param_group/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_subnet_group", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_subnet_group/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_subnet_group/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dcaa67e43240fca90cc7809ce937db5bf8b96c995c0576409d934fd80c84638c", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_subnet_group/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_subnet_group/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_subnet_group/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_subnet_group/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "57275d80fa114a57b2b41abcd031bf8c4f2f432225d367730b770c8acf08c7d4", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_subnet_group/tasks/params.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"8940422805de73da82abe5de8d6599c300e42248cd26dba9c544a76f528e5b11", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_subnet_group/tasks/tests.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c67f6fb1e73d53112e87ecb17a499d645dc2a05f12160493edcab595ca31e506", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_subnet_group/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "format": 1 + }, + { + "name": "tests/integration/targets/route53", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/route53/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/route53/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "45913a9f6259d9d690031c65ddfae8883602f7bc75aadac876493facd61de2b5", + "format": 1 + }, + { + "name": "tests/integration/targets/route53/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/route53/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/route53/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/route53/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "26c09d58bcb1c90168504533b73f50669f2a5d7dbe69b2deb0ac2a75a21cea5a", + "format": 1 + }, + { + "name": "tests/integration/targets/route53/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/route53/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/route53/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "29cace64dbb5655b90b466e4a94a9b3a0d537242839608c82586c635a7dbaab7", + "format": 1 + }, + { + "name": "tests/integration/targets/route53_health_check", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/route53_health_check/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/route53_health_check/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "73f221ffbb930d0cb87617f499e226b671443b8bbff90a2344da709316f52db3", + "format": 1 + }, + { + "name": "tests/integration/targets/route53_health_check/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/route53_health_check/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1c33088718db08689d0331e05f8f62ffba98125ee70cc597b822a2d8abdc2513", + "format": 1 + }, + { + "name": "tests/integration/targets/route53_health_check/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/route53_health_check/tasks/create_multiple_health_checks.yml", + "ftype": "file", + "chksum_type": "sha256", 
+ "chksum_sha256": "bc830698101af1948fdb537c31e012d750357d90ab32e0e0fbe7eb11697a1560", + "format": 1 + }, + { + "name": "tests/integration/targets/route53_health_check/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "142759dac1fb1a6c98cad1eac4880bddebf637c01336d618d744dbec6460b621", + "format": 1 + }, + { + "name": "tests/integration/targets/route53_health_check/tasks/update_delete_by_id.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e61440bedfea2a1553f610d4739905bcb8cba4cbd4841b3ed262f4255d072e8b", + "format": 1 + }, + { + "name": "tests/integration/targets/route53_health_check/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "format": 1 + }, + { + "name": "tests/integration/targets/route53_zone", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/route53_zone/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/route53_zone/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/route53_zone/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/route53_zone/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "44311dedb728b15bbfb7e925c957a9eb8e06f110f6561d768b2d7a16fb6bba5e", + "format": 1 + }, + { + "name": "tests/integration/targets/route53_zone/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "format": 1 + }, + { + "name": "tests/integration/targets/s3_bucket", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/s3_bucket/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/s3_bucket/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b5e9d08c9a76f78ed8fab44dddd4ac6c95c265e2c2e0f0601b5a8675f837d24e", + "format": 1 + }, + { + "name": "tests/integration/targets/s3_bucket/roles", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "17d1ee3af0799937fea09c67d39b2fa6db3011eed3a66b35a1efecfd37e2f5eb", + "format": 1 + }, + { + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": 
"tests/integration/targets/s3_bucket/roles/s3_bucket/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/acl.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "91e93279b8786b51f02b977698bd746354925da566badc39c1d7abe7f5c97f06", + "format": 1 + }, + { + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/complex.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "91ed53097a628c36279d06dc0871641c4be2ad6b00082a417bc00ac49fc8bb3e", + "format": 1 + }, + { + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/dotted.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5fbd6cf43ff040ece99a8bda5b6a19f0db00d6a6255355d9350000554b513a15", + "format": 1 + }, + { + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_bucket_key.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8cac748741f5bcb9aa5f9c7d34eadb617d30251b8891cf22b3033a7287ba7d65", + "format": 1 + }, + { + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_kms.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "38bbf6e23d7373be55e45138794cf172c8581a4e154374fd2e264a618da3029e", + "format": 1 + }, + { + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_sse.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8c5674d3a85b0f6b1bdc63c370d893ab66b50f31902f74d40cd0f8ca56aa8e74", + "format": 1 + }, + { + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eb329e1232fcd539f96bda674734113096dac7d481948b0cec7cb375866ce8db", + "format": 1 + }, + { + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/missing.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f4cb3a405fb533cb08dc3e92afa5e21aa5178d14fc16b76397002075bf399a4b", + "format": 1 + }, + { + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/object_lock.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3cc9349553130b3882531d68b718a6a0c7eef0fadbafd61c877c7ee32979d921", + "format": 1 + }, + { + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/ownership_controls.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d6bfe1c63d1653914eecaaffafcea3534ba7f49b0fb553f9acb28c33df9cfdda", + "format": 1 + }, + { + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/public_access.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6579b6d24a454acc95d6edace268c2140286d5b8f8403428d417c551aa77461b", + "format": 1 + }, + { + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/simple.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9a686815fd35ecbea7a1310198d9ff2173f73e6451737d3dcf5888d3a84ba140", + "format": 1 + }, + { + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/tags.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bc5581c40a96552645a5d3f77e55a4bb85519fa0b6cc03835bdad7df55425e82", + "format": 1 + }, + { + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/templates/policy-updated.json", + 
"ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "90814034e9ea0322b97a562c269a1fcb7b6f9e7534fb50bcbfd10d839b0dcf81", + "format": 1 + }, + { + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/templates/policy.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7b9d1d9f3c5f7bc6b8816ac3ae16f19c9784dbb01d2a080efcd5936ef25518ee", + "format": 1 + }, + { + "name": "tests/integration/targets/s3_bucket/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "format": 1 + }, + { + "name": "tests/integration/targets/s3_bucket/inventory", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9b0539f4f1b5ef699987dc4170b0b8e122428fbae01e06e0b58d2171d82193bc", + "format": 1 + }, + { + "name": "tests/integration/targets/s3_bucket/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8395f20d527042f70de0e5a24a1db4d728bac43bcde06c3ac053c885774e0e6a", + "format": 1 + }, + { + "name": "tests/integration/targets/s3_bucket/runme.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d2e53b13c18d9f57b9ac05cf209ab9ea0db765e0b8c4e0698e26747cef903d23", + "format": 1 + }, + { + "name": "tests/integration/targets/s3_object", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/s3_object/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/s3_object/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "975882873a49fcfb84e767de7134c3c36e82da151d2e2cf1d2ae234cac300599", + "format": 1 + }, + { + "name": "tests/integration/targets/s3_object/files", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/s3_object/files/hello.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c98c24b677eff44860afea6f493bbaec5bb1c4cbb209c6fc2bbb47f66ff2ad31", + "format": 1 + }, + { + "name": "tests/integration/targets/s3_object/files/test.png", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bae277f309fbffab9590300ccc1e75805c9795bbcef69edfda22c5b2327e12ba", + "format": 1 + }, + { + "name": "tests/integration/targets/s3_object/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/s3_object/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "20b877febfeb0cef408412e714e64c901d3b3a9b293332df3be2f0e0b9214f1a", + "format": 1 + }, + { + "name": "tests/integration/targets/s3_object/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/s3_object/tasks/copy_object.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0cacebf43504621eca11f84162d378245889f5780c86c799ecec24d1e41e2960", + "format": 1 + }, + { + "name": "tests/integration/targets/s3_object/tasks/copy_object_acl_disabled_bucket.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "28aad322dc9c49ac47cb93ae6a7fd48dac3190f11618b885f070ddc9ae54acd0", + "format": 1 + }, + { + "name": "tests/integration/targets/s3_object/tasks/delete_bucket.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a67015b150f14f4164b43efdafee0326b033529ca0eaf614e0a708933cb7f00a", + "format": 1 + }, + { 
+ "name": "tests/integration/targets/s3_object/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "793e2edd177993cbf7850ae0e9ef2e27f276497b2ca3e92635df67e7681649cc", + "format": 1 + }, + { + "name": "tests/integration/targets/s3_object/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/s3_object/templates/policy.json.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a09d7c1dccacb2ea440736d61005e07bb469c9f04b153c4596bce1b586e14bd4", + "format": 1 + }, + { + "name": "tests/integration/targets/s3_object/templates/put-template.txt.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d8c9f0fc47011f7279babb0a29cb8f7812e4037c757d28e258d81ab7e82ca113", + "format": 1 + }, + { + "name": "tests/integration/targets/s3_object/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "da6542f9ffbf6dfd96214cc7e7c08e8bd4662a5479a21ad1b3f79ad2b163c9ad", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_botocore_pip", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_botocore_pip/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_botocore_pip/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "982778216860d979fd936609ed62defea823593c1607059c898dc75e08d7498e", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_botocore_pip/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_botocore_pip/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b7ddacbb461ad683fce34906dc092a378c637e4cb58ad3cd7b14db4bcffa8d6f", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_botocore_pip/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_botocore_pip/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_botocore_pip/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_botocore_pip/tasks/cleanup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f43b9a2bb665a9791c75ed1168e318b4b008bb952a5332ec347fc292f8c23700", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_botocore_pip/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "43f8289616713bc54adab58aa6176e455b78ab8f78a6f77e41d9cd32c817d472", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_ec2_facts", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_ec2_facts/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_ec2_facts/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f919f9a8b41fcd9133993ce449a967c7c6b8dee6ae50c4badd7b952a5d905bc7", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_ec2_facts/meta", + 
"ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_ec2_facts/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_ec2_facts/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_ec2_facts/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7d03d30a328d5758d05cec67692df82d906021b2d9823c6a67e8c3f51cd057d1", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_ec2_instance_env", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_ec2_instance_env/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_ec2_instance_env/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "430ffe568d6b28926de8e55c4a30653f7bd47a55599fb083666ee231d18d98bc", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_ec2_instance_env/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_ec2_instance_env/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a1bbde72a691b864a705c227489f550fe81e2a825bed7b9a313fbe4d06456bd5", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_ec2_instance_env/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_ec2_instance_env/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_ec2_instance_env/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_ec2_instance_env/tasks/cleanup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b543f3eec63c49e802ffe8421ce1e38a1b1e3f7b2cbbf7151572673b62b5bd2e", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_ec2_instance_env/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "56a15e0ec3b6e00a7735f50ba42cd296a5dc767c5fdf5010694caf05b3be4d9d", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_remote_tmp_dir", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_remote_tmp_dir/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "050157a29c48915cf220b3cdcf5a032e53e359bdc4a210cd457c4836e8e32a4d", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_remote_tmp_dir/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_remote_tmp_dir/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + 
"format": 1 + }, + { + "name": "tests/integration/targets/setup_remote_tmp_dir/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e273324ab90d72180a971d99b9ab69f08689c8be2e6adb991154fc294cf1056e", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "507b75d7436a7b9364dad3be782e05f1ecea8656f91221e13abb901d80c023ca", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "766ab141899717320ba54e2bb1a6ba8cbc3cc7642d0023670154b49981ed1a91", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_remote_tmp_dir/tasks/windows-cleanup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3fd85bd6c3cf51c061eb221197d5653e5da0e101543b3c037f5066d6c73b1501", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_remote_tmp_dir/tasks/windows.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e29ee6a8db94d6de88c8458762f594f05d906f454f7c9977fd618d52b09e52f0", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_sshkey", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_sshkey/files", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_sshkey/files/ec2-fingerprint.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ff0a1e350e24b5323b3d05f802c735e283f734d860a29fdeffa8a4b9b85e87a6", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_sshkey/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_sshkey/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_sshkey/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_sshkey/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "972169dd7d4774a9f05a10e7b7a41046e4ca1c1461fb30dd828c98fec938684d", + "format": 1 + }, + { + "name": "tests/integration/constraints.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "591bc7bcc41461d47b97991b920ce187502c20e877eb412259f6797a1a7388f2", + "format": 1 + }, + { + "name": "tests/integration/inventory", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "57068021cd523f4527f6ffb55d7ceb57da12553aaec58aa4f4f276ee3f3239b9", + "format": 1 + }, + { + "name": "tests/integration/requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1a5badcc85aa3148f4dc72bce205bbd366621a7101431369102b264bf28f57b4", + "format": 1 + }, + { + "name": "tests/integration/requirements.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5b07d7a319baa2e0f895c919405376ee8ea3c10e0c780430c7128f70519d03ab", + "format": 1 + }, + { + "name": "tests/sanity", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/sanity/ignore-2.10.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a20cbabc70cf2098b78b862d252444c4699d58d9c4b7a71fe66dd3768c75c6af", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.11.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "65f52e9bd0fac32ddbc222d8affa0dd3e2edfc7e8df57eabeca8c35318c094f2", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.12.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "65f52e9bd0fac32ddbc222d8affa0dd3e2edfc7e8df57eabeca8c35318c094f2", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.13.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "65f52e9bd0fac32ddbc222d8affa0dd3e2edfc7e8df57eabeca8c35318c094f2", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.14.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "65f52e9bd0fac32ddbc222d8affa0dd3e2edfc7e8df57eabeca8c35318c094f2", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.15.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a20cbabc70cf2098b78b862d252444c4699d58d9c4b7a71fe66dd3768c75c6af", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.9.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "27f9cf4fe2eb4d2abba961293fbc0a08efd0cc8ec82418020b15cfbbb55bbcfd", + "format": 1 + }, + { + "name": "tests/unit", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/compat", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/compat/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/unit/compat/builtins.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7163336aa20ba9db9643835a38c25097c8a01d558ca40869b2b4c82af25a009c", + "format": 1 + }, + { + "name": "tests/unit/compat/mock.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0af958450cf6de3fbafe94b1111eae8ba5a8dbe1d785ffbb9df81f26e4946d99", + "format": 1 + }, + { + "name": "tests/unit/compat/unittest.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5401a046e5ce71fa19b6d905abd0f9bdf816c0c635f7bdda6730b3ef06e67096", + "format": 1 + }, + { + "name": "tests/unit/mock", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/mock/loader.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cfe3480f0eae6d3723ee62d01d00a0e9f58fcdc082ea1d8e4836157c56d4fa95", + "format": 1 + }, + { + "name": "tests/unit/mock/path.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c44806a59e879ac95330d058f5ea6177d0db856f6e8d222f2ac70e9df31e5e12", + "format": 1 + }, + { + "name": "tests/unit/mock/procenv.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3d53f1c9e04f808df10e62a3eddb460cc8251d03a2f89c0cbd907d09b5c785d9", + "format": 1 + }, + { + "name": "tests/unit/mock/vault_helper.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4535613601c419f7d20f0c21e638dabccf69b4a7fac99d5f6f9b81d1519dafd6", + "format": 1 + }, + { + "name": "tests/unit/mock/yaml_helper.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fada9f3506c951e21c60c2a0e68d3cdf3cadd71c8858b2d14a55c4b778f10983", + "format": 1 + }, + { + "name": "tests/unit/module_utils", + "ftype": 
"dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/module_utils/arn", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/module_utils/arn/test_is_outpost_arn.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "22ccd5b436880f23d8232228042506c6c7659eff0c164b3cccedaad930fd0943", + "format": 1 + }, + { + "name": "tests/unit/module_utils/arn/test_parse_aws_arn.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6192a9b029d9a1fa28275dfc835e1641536d1dcb04c57f19df59d28b8599eab4", + "format": 1 + }, + { + "name": "tests/unit/module_utils/botocore", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/module_utils/botocore/test_is_boto3_error_code.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d4dcb8abfb5528b37c98c41b37f927983a3551f2022bd1bae14d85ec61e3941e", + "format": 1 + }, + { + "name": "tests/unit/module_utils/botocore/test_is_boto3_error_message.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ae920a358c7da9fd3de67710e1f6584ad10bd07afe57e9709e035c406fc3f50f", + "format": 1 + }, + { + "name": "tests/unit/module_utils/botocore/test_normalize_boto3_result.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e0b0349bd332c9d27ebaaf0ffc57d7a81261f1977447504be91efc04dfdbc2d1", + "format": 1 + }, + { + "name": "tests/unit/module_utils/cloud", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/module_utils/cloud/test_backoff_iterator.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3352926af31d7ebd97611ebadf33db205fb438f8a331ad320d0195f729d919e9", + "format": 1 + }, + { + "name": "tests/unit/module_utils/cloud/test_cloud_retry.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d416cbe19747eaf0cfc886266dca77faee3b54de38ccced909d4bcdf68304fc0", + "format": 1 + }, + { + "name": "tests/unit/module_utils/cloud/test_decorator_generation.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e720f5a7439842830bf10f5150855e6ad4c828c03f3b3fc39c8f7f943a0a0f36", + "format": 1 + }, + { + "name": "tests/unit/module_utils/cloud/test_retries_found.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c2cc7676be00869b0ec7e59b1d6a9049eee526d56934a7b9b4e7915ab8814817", + "format": 1 + }, + { + "name": "tests/unit/module_utils/cloud/test_retry_func.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5aecb4bd65801deac7aaf903b9650b761b0710a0a21287701ffa49cac80fc0c3", + "format": 1 + }, + { + "name": "tests/unit/module_utils/elbv2", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/module_utils/elbv2/test_prune.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be06f68dfa3a2fb1bf87bca1b5e735751f3d19f72dc6e4db0b23d2626c286c63", + "format": 1 + }, + { + "name": "tests/unit/module_utils/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/module_utils/modules/ansible_aws_module", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/module_utils/modules/ansible_aws_module/test_fail_json_aws.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"7f3f9db5ac6c0fcea915097ea5df0d4bc410416d4192022c1c2bee582a558f5f", + "format": 1 + }, + { + "name": "tests/unit/module_utils/modules/ansible_aws_module/test_minimal_versions.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e56057ee64bf569727007832eec9e13548151836610a6d68455151e7b3bc82c7", + "format": 1 + }, + { + "name": "tests/unit/module_utils/modules/ansible_aws_module/test_require_at_least.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b5069924db7c7832eb2d245411119c1c01c468073aff45de2d6ccf063df201c1", + "format": 1 + }, + { + "name": "tests/unit/module_utils/policy", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/module_utils/policy/test_compare_policies.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3fcd97dce4753509b30ee45bec7e592fe0403f282ae67041582d15c53cf0b276", + "format": 1 + }, + { + "name": "tests/unit/module_utils/retries", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/module_utils/retries/test_awsretry.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0cd2fc613469281341abdecae2d76279b40cf020f67615309ec3bacb4a3e0b54", + "format": 1 + }, + { + "name": "tests/unit/module_utils/transformation", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/module_utils/transformation/test_ansible_dict_to_boto3_filter_list.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bd64deae0b645086ad9bec3e9cb5c253a9f8c7733f491735724f3d7327d33067", + "format": 1 + }, + { + "name": "tests/unit/module_utils/transformation/test_map_complex_type.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bfb8255079de8a5cf6810cc265ceb5fb9851d01434605c372bbab113912ebc18", + "format": 1 + }, + { + "name": "tests/unit/module_utils/transformation/test_scrub_none_parameters.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "743ef515ac3ddb8536a0668bfc299ae31b66ede7a4d6ef94441f19303c6b455b", + "format": 1 + }, + { + "name": "tests/unit/module_utils/conftest.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3df1489e1c1dd0f14a7a8e61049c7b7e110a26b9c45515a737a66daf20ffb6c7", + "format": 1 + }, + { + "name": "tests/unit/module_utils/test_elbv2.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af7e4cace102c632aa5d13ee94482f138ee99a65210732fc0bda6c0ba5d845ef", + "format": 1 + }, + { + "name": "tests/unit/module_utils/test_iam.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d93ce12baf08621831f8b6c0f662f864e23889523f32de1c1c35c831d797a9f3", + "format": 1 + }, + { + "name": "tests/unit/module_utils/test_rds.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ae5b5e1bb654f0d383af5e1ac2ad621cfe4e031740349e0017f2f301619d253b", + "format": 1 + }, + { + "name": "tests/unit/module_utils/test_s3.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6e52ba51bce8f40bf665a8902a31b8dc4e362761c4e82401057c857b2ebd260f", + "format": 1 + }, + { + "name": "tests/unit/module_utils/test_tagging.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d67bbd3ac2595becb3214af74580458a88243c547abc4c5549d1ac2d88a1bcca", + "format": 1 + }, + { + "name": "tests/unit/module_utils/test_tower.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"221091b25e8af1ddf25ffbf2cf2c2221b6cc2921443b1ea3f408dd931d72e19a", + "format": 1 + }, + { + "name": "tests/unit/plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/inventory", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/inventory/test_aws_ec2.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3adf8db5aea9b24490828f2c551d0f37f7dfae21a43748b45fbb029d03fb804f", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/ec2_instance", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/ec2_instance/test_build_run_instance_spec.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "57f37737577c5ed91a0c8040c25eab05a786366ee077724813777e1e40024e49", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/ec2_instance/test_determine_iam_role.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f404a585a43bdd587267977793ac6c543b1da3cf27edf074cf2f69c097805311", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/fixtures", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/fixtures/certs", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/fixtures/certs/a.pem", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef0266ee8cf74a85694bf3ce1495260913b5ca07189b0891bbfc8d4c25b374ea", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/fixtures/certs/b.pem", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2937cb7102c4d4902b09aada2731c1b0165e331dbfde9990644c4c3ee1544b21", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/fixtures/certs/chain-1.0.cert", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "428e852fcbe67bbdbb2d36fb35bef4b2fb22808587212e19f3225206ceb21c12", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/fixtures/certs/chain-1.1.cert", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0325c21e49992708528ebf66162c18e1e1eb2a0837c6d802b1cf3bde73ec06bc", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/fixtures/certs/chain-1.2.cert", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d9e3dfae7a19d402a8de1a2b65fcc49c43ff489946e8ca9e96efa48783e26546", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/fixtures/certs/chain-1.3.cert", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef1018e15bb9fad1e7a4f15aa6191e80042fc7fc08ef4bec3e115d96a9924b98", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/fixtures/certs/chain-1.4.cert", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4e15c84bcf1024f5bb0b2940844fdc4ed97ba90ef7991b513d1659b43a0e7783", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/fixtures/certs/chain-4.cert", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "37fb85917db1cd90b5881c8d3d3a9d51ae7c9b904020d0ffbf0734bcf11bb666", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/fixtures/certs/simple-chain-a.cert", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"9e4b01f50b09f45fcb7813e7d262a4e201786f0ecd76b45708abe55911b88fd2", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/fixtures/certs/simple-chain-b.cert", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9e4b01f50b09f45fcb7813e7d262a4e201786f0ecd76b45708abe55911b88fd2", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/fixtures/thezip.zip", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "02a319fb1a6d33b682f555eefb98f2a75b2a3be363e1614c373431b4f30fda7f", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.CreateStack_1.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "adf858c3c40416e8bc9273ea17d578448c6497841cd05ae48616f49d0a44d723", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DeleteStack_1.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1659b6d17d4004dbeba28d635a752c4601c08c0f99a0d8c10f18487e0a215d8e", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_1.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "790f9822331226175639d3e8c2645cfca0152f1e0fe24c82ab715e499ea070ac", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_2.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "664f1be3c9bb37661bcccba1de32aa3de0fdb08edcf8b276d726acfecc29baad", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_3.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e8967f3d6bfc91be380066e1a70070f0a33a239c9548b02c44c92ad550741cdc", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_4.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be7d4745aa48792bbb544043808428f39dd75a1dd0f75d928d2e7626d22ed762", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_5.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a016652b1a0138353843f04780fae13e60226a22c4093d200bc07c8a89d75d44", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_6.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0306607b12dcf8490f9e5fdca401eb9bb39e3f5507327f87996b804c825a50c5", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_7.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"49426404c58cc23230c46a32d193591ee51bb270486618bb5f76bf8b1cd63d86", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_1.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ffa553afc86b6600a849bdc2baa7fff8a27b94019800ffe85e7edd0ea81ad000", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_2.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b3fcc844d47ebcb9a759b328b8b513245bc2f9e6feded2b42806301d878d7bff", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_3.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0ab32bc29e7af611043dafd9d25ba246951cd826f96baafcae8d27d2432ae1d3", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_4.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "98875bdf3813bbeeb89c537778974a65f6365644de28431a35412753124848fa", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_5.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a628694b44084384a0e2dbe1800797ef96ea12de2c05c65f716f37d26a1a0006", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_6.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "938495a4a09e83ede7e4c3a282cb93b1de0dd10435e4f670c301ab4ab4bc63e6", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_7.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "18a0d86b4fe1d679715ab099b8413d22e6a47ff960c876525ef3dd79e77d18f6", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.CreateStack_1.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4b83e42429a361b2be7b2524340d20264b43f5b0e4cb44fe5bafc3670e9f9d03", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DeleteStack_1.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1659b6d17d4004dbeba28d635a752c4601c08c0f99a0d8c10f18487e0a215d8e", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_1.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bf9e2fefb8c13c2b5040c8b502f6aa799f6da6b69c1a8e48e4e870536222df8b", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_2.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f4d8ae33d4fe9f0aaa4c6c744174b1ad849d5881154fc8a5eb32fd8ee07566e0", + "format": 1 + }, + { + "name": 
"tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_3.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d75b82445e89303b2b4af1ef3161d4e315c6d02014c6df00165a9c526fc9bc56", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_4.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7edd15131c553f2bff19840a66cd2498cf98c4f93bd8164a51ab3eb81a619ba9", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_5.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e6f4a74e04f58505d1132c6981fffc1f24e79cbad86c69883677b3cb1703df5d", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_6.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6f57b0469a084bb8891bdd14610b2dba1ef3baaab8436cff1413065e276012db", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_7.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ead6f1c137dfc1628237502c6a955d3338770ff85f1715027f022e7773ed9992", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_1.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "41999395cf5a12be8eccc991675c44fe12b20433ed7cc7ca541f568b377c7a33", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_2.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e53c53e08397b8fca0f8e6a69a5254bb092b4f403f0fec0d9bff4352c3cc1192", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_3.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "05ac9850aa91e5ed4753d901e9bf0641c08c7be9148b633681cea76c94747fc8", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_4.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "148d631b9e7bf07824a845880565c98a102dd0864a40328320db40f545ee7834", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_5.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c9ab3c3b4d2e19ca6f764f2e8288dea1e52157dc1d49a319717bd65a3cc770e1", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_6.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fa726d3ba3ac078180b857893f2c5aec60526a8d60323a8cc06121a4bacdf982", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_7.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"d3bec7dd62d084a3d115ea7f05a34052625b80d56839022b9ebcee2583053412", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/delete_nonexistent_stack", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/delete_nonexistent_stack/cloudformation.DescribeStackEvents_1.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c585064211547fe7d0e560cabf12512ee49ca2bbc8622c3a615333aec1eb3dbb", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/delete_nonexistent_stack/cloudformation.DescribeStackEvents_2.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a58899e48f620454d1a1c59a261dec5f527970ae4b68f60a2e7bccef29ab5df8", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/delete_nonexistent_stack/cloudformation.DescribeStacks_1.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "15ea45877e966ada21f276359194aea32061bbb5fbf9269782518ff9c94fecc7", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/get_nonexistent_stack", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/get_nonexistent_stack/cloudformation.DescribeStacks_1.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "362d4ebe6fb6b538c0f74a6326a7697d6d129a77c3bfffedc24a5cac14b20e5a", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/invalid_template_json", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/invalid_template_json/cloudformation.CreateStack_1.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1cb3d77662d35b7703f65278ffdca78e6eb520e96fb3807d39ea3aa02086c1b7", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.CreateStack_1.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "69ca28e411219d5afe76ad868d648f072fbbd2047223aed04f51c451a901dcc7", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_1.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d18b0ee2d7aab11783d7ddca1bd7b822775a2e87286cae87a8bb37a25c8dbd22", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_2.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "450c976b530fffd60e39b84a237375f46fb82bb8d09ec77a38d5ac3b87c59e18", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_3.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cc3383f46239477355a56db754bdaa2185283e10cf6e9a7bfeb1813c4488afd3", + "format": 1 + }, + { + "name": 
"tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_4.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "18bbc5347bdc51e636840dda8020e4fe198d144d2d7bdfb5b800fbbf9521b551", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_5.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b829b3d4436f35e812bdb84da99c1d31a1c9b8be476a4b1ecab3587f7ee0f6e9", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_1.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0cfa4607aa88d41fa7229383460169b9fc76c3bba6593f82d320f71c6c866325", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_2.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8ce2c0c2869f52248ea296808b1f10cf0ee3491c18784c9270c8bd55087a8250", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_3.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f197d5a0c7cb66d160864e359e2a62c857af76c1d1b0530180ea2e35fdb20efe", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_4.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7163b875588f3e98a210fb5291149ec0f6c83213a533086ad8f37e2f9dfa012f", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_5.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3a2720e847e9e878deab88eaa919c0e14e97210f581138c6341ff97a85da1b38", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.CreateStack_1.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2864dab59c7432ad2ae594e121ee581cface7b130025cd88d0cb4aead4215168", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DeleteStack_1.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c09d7d26c96cb5b734e0198b88b00a13fc0d54d65b444278497c17c0f877fa29", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStackEvents_1.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3d1711eba6a7c18f0ed7e00a1602dcd0dde519205fe6afc42446e1f222b9fe48", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStackEvents_2.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "026ca2db13f88bfb4b469d5cd3c2ad5cf6635305fdbbab11e9d5d1d3330b26c2", + "format": 1 + }, + { + "name": 
"tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStacks_1.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b8f4dc01c750d860f317a98f598bf3acd7edfbc970054b2793013dfcad61c82f", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStacks_2.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6acc11fdfc1929b45d589d8c77c2f9fae80d48e840d0e9cf630e362d6b288d4a", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.CreateStack_1.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "985c5ada32ac440bc971b553e75cb8516c52b9e78b50e6750d4d92ab2e4a9634", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DeleteStack_1.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d1a0160fbde4f68c768aaf73182e2369a95721f2bb2e7ab5e9ee42016747dfa7", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStackEvents_1.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0e2832a3c70031ba07c44b0f8b291a04251052c22f764110bf0cd034d406bfbd", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStackEvents_2.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fe29fabc6f34c58976b23132558a2024af53e655f825cd8e5d1b2f39cc89ddcd", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStackEvents_3.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dfc55200a0f4d01d94845448b7c67f175cdf56e49df4bf9305525e7ffe543c64", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStacks_1.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "91a1e065a4854be515095aba447d6a011bb3bac6f8d5b0e3a9081f74ef873096", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStacks_2.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6d677c34f0715af2049abef7a479d1362760a0c089ff741d9ac0beed56849251", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStacks_3.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "afe46889f0ec4537b13694f164343440b1fcb0334c539a5a7ec895d36fcf7953", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/placebo_recordings/.gitkeep", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/conftest.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "909818cefd5093894a41494d1e43bd625538f57821375a564c52fe4219960967", + "format": 
1 + }, + { + "name": "tests/unit/plugins/modules/test_cloudformation.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f24e08c02194adda4bcc6c939f681cc875914bc10de485063f1c40dfdadf9060", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ec2_ami.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "762878ffaa25c727b5fcd8b90760db542ec5cf002ee282a6be0ff604c5e4bcee", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ec2_key.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ca3e264c626a2c424a3b4a3cfc86c315d62a8591a24f04771efacf9402dc54d2", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ec2_security_group.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "94f4e13d071974d2c863c95a49a35433d792d90ea366555075bdf06ed64956fe", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ec2_vpc_dhcp_option.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7191e49cc2abf2ae41072bb9ac132127e5834a752f643daf62d5b1b5f1538c53", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_kms_key.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8c44eae044b09869d3e5ea7c16b745a20ee55dee06e62cc641f06c8399756c2d", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_lambda_layer.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fbe92e5cdbbf6d42c7ebc4ff4c32a36cb6dda920b9d7d9bd59ac86de19030b8a", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_lambda_layer_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a67ed0b1e3987a0ab4d8ebd260c9e421293cf566dd824816ecd417b93c80bb21", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_s3_object.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d2a76b990be73994a3b4153d18cec4887b4b2e55031159ec92bc3736d79b7055", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/utils.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b68f9ac9c8f02f1e87b0294a125adb102c718f6e3e5f856ec3401b2b890003cf", + "format": 1 + }, + { + "name": "tests/unit/utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/utils/amazon_placebo_fixtures.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "64958b54e3404669d340a120f6b2c7ae79f323e6c930289514eba4569d1586c1", + "format": 1 + }, + { + "name": "tests/unit/constraints.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "591bc7bcc41461d47b97991b920ce187502c20e877eb412259f6797a1a7388f2", + "format": 1 + }, + { + "name": "tests/unit/requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eef01a3e82340df07adca400d1c793de2ee8feab98bbbdeeff594ed73dd4c0fa", + "format": 1 + }, + { + "name": "tests/.gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "20a8e9084418efa961e370c0800d96034cd78e142a0e0ccb17edba348ada4a5f", + "format": 1 + }, + { + "name": "tests/config.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9d75ecdecbd76691b04ec2d5fcf9241a4366801e6a1e5db09785453cd429c862", + "format": 1 + }, + { + "name": ".coveragerc", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "954b1d6e8841b793b93cb103455cc97a3cedcc6e1dc40c26f2b790b73e8e730b", + "format": 1 + }, + { + "name": ".gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"5a00777ca107231dc822535458402764507be2cf2efa433ea184bb2163e07027", + "format": 1 + }, + { + "name": "CHANGELOG.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b9f325505698f93d86a3d23f8139738d665583291230be8acc51ac88982f7801", + "format": 1 + }, + { + "name": "CONTRIBUTING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "70d31e3dface97408b2568c3e252e03e2b7cc1fc487e1200975cb2320550c98a", + "format": 1 + }, + { + "name": "COPYING", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0ae0485a5bd37a63e63603596417e4eb0e653334fa6c7f932ca3a0e85d4af227", + "format": 1 + }, + { + "name": "PSF-license.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "83b042fc7d6aca0f10d68e45efa56b9bc0a1496608e7e7728fe09d1a534a054a", + "format": 1 + }, + { + "name": "README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "312913ed024a3f5845df674521732eed936d5574036530ceace784752e34e2bc", + "format": 1 + }, + { + "name": "bindep.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "87c61ee29c6b14665943e7f7ffc4ce51c3e79e70b209659161b278bca45abb12", + "format": 1 + }, + { + "name": "requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "69d234edeaedcedfa2e796dc5f0f9ddabad4bfb3959100d8814a07cedf702c2f", + "format": 1 + }, + { + "name": "test-requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "246aeb6a58b0b9f432898b9965fed8527303c575c94661299678bf42df8a5f3e", + "format": 1 + }, + { + "name": "tox.ini", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7137e2bf64959ff133e1df3727f968635829ad10219ca5cce28f72f90d1b57a9", + "format": 1 + } + ], + "format": 1 +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/MANIFEST.json b/ansible_collections/amazon/aws/MANIFEST.json new file mode 100644 index 000000000..5870a24cb --- /dev/null +++ b/ansible_collections/amazon/aws/MANIFEST.json @@ -0,0 +1,32 @@ +{ + "collection_info": { + "namespace": "amazon", + "name": "aws", + "version": "5.5.1", + "authors": [ + "Ansible (https://github.com/ansible)" + ], + "readme": "README.md", + "tags": [ + "amazon", + "aws", + "cloud" + ], + "description": null, + "license": [], + "license_file": "COPYING", + "dependencies": {}, + "repository": "https://github.com/ansible-collections/amazon.aws", + "documentation": "https://ansible-collections.github.io/amazon.aws/branch/stable-5/collections/amazon/aws/index.html", + "homepage": "https://github.com/ansible-collections/amazon.aws", + "issues": "https://github.com/ansible-collections/amazon.aws/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc" + }, + "file_manifest_file": { + "name": "FILES.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9b0faf817d09dbc9f1f00f4c34cd47ad9322ac54da584e7b259fd6275a425b57", + "format": 1 + }, + "format": 1 +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/PSF-license.txt b/ansible_collections/amazon/aws/PSF-license.txt new file mode 100644 index 000000000..35acd7fb5 --- /dev/null +++ b/ansible_collections/amazon/aws/PSF-license.txt @@ -0,0 +1,48 @@ +PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 +-------------------------------------------- + +1. This LICENSE AGREEMENT is between the Python Software Foundation +("PSF"), and the Individual or Organization ("Licensee") accessing and +otherwise using this software ("Python") in source or binary form and +its associated documentation. + +2. 
Subject to the terms and conditions of this License Agreement, PSF hereby +grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, +analyze, test, perform and/or display publicly, prepare derivative works, +distribute, and otherwise use Python alone or in any derivative version, +provided, however, that PSF's License Agreement and PSF's notice of copyright, +i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021 Python Software Foundation; +All Rights Reserved" are retained in Python alone or in any derivative version +prepared by Licensee. + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python. + +4. PSF is making Python available to Licensee on an "AS IS" +basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any +relationship of agency, partnership, or joint venture between PSF and +Licensee. This License Agreement does not grant permission to use PSF +trademarks or trade name in a trademark sense to endorse or promote +products or services of Licensee, or any third party. + +8. By copying, installing or otherwise using Python, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. diff --git a/ansible_collections/amazon/aws/README.md b/ansible_collections/amazon/aws/README.md new file mode 100644 index 000000000..99373e145 --- /dev/null +++ b/ansible_collections/amazon/aws/README.md @@ -0,0 +1,131 @@ +# Amazon AWS Collection +The Ansible Amazon AWS collection includes a variety of Ansible content to help automate the management of AWS resources. This collection is maintained by the Ansible cloud team. + +AWS-related modules and plugins supported by the Ansible community are in the [community.aws](https://github.com/ansible-collections/community.aws/) collection. + +## Ansible version compatibility + +Tested with Ansible Core 2.12 and 2.13 releases, and the current development version of Ansible. Ansible Core versions before 2.11.0 are not supported; in particular, Ansible Core 2.10 and Ansible 2.9 are not supported. + +Use amazon.aws 4.x.y if you are using Ansible 2.9 or Ansible Core 2.10. + +## Python version compatibility + +This collection depends on the AWS SDK for Python (Boto3 and Botocore). Due to the +[AWS SDK Python Support Policy](https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/), +this collection requires Python 3.6 or greater.
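+ +A quick way to confirm that the Python interpreter Ansible runs under meets this requirement, and that a usable AWS SDK is importable from it, is an ad-hoc call to a module from this collection; the collection's modules check the SDK versions at runtime and fail with an explicit error message when they are missing or too old. A minimal sketch, assuming AWS credentials are already configured in your environment: + + ansible localhost -m amazon.aws.aws_caller_info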
+ +Amazon has also announced the end of support for +[Python less than 3.7](https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/). +As such, support for Python versions below 3.7 in this collection has been deprecated and will be removed in a release +after 2023-05-31. + +## AWS SDK version compatibility + +Starting with the 2.0.0 releases of amazon.aws and community.aws, it is generally the collection's policy to support the versions of `botocore` and `boto3` that were released 12 months prior to the most recent major collection release, following semantic versioning (for example, 2.0.0, 3.0.0). + +Version 5.0.0 of this collection supports `boto3 >= 1.18.0` and `botocore >= 1.21.0`. + +All support for the original AWS SDK `boto` was removed in release 4.0.0. + +## Included content + +See the complete list of collection content in the [Plugin Index](https://ansible-collections.github.io/amazon.aws/branch/stable-5/collections/amazon/aws/index.html#plugin-index). + + + +## Installing this collection + +You can install the AWS collection with the Ansible Galaxy CLI: + + ansible-galaxy collection install amazon.aws + +You can also include it in a `requirements.yml` file and install it with `ansible-galaxy collection install -r requirements.yml`, using the format: + +```yaml +--- +collections: + - name: amazon.aws +``` + +A specific version of the collection can be installed by using the `version` keyword in the `requirements.yml` file: + +```yaml +--- +collections: + - name: amazon.aws + version: 3.1.1 +``` + +The Python module dependencies are not installed by `ansible-galaxy`. They can +be manually installed using pip: + + pip install -r requirements.txt + +or: + + pip install boto3 botocore + +## Using this collection + +You can either call modules by their Fully Qualified Collection Name (FQCN), such as `amazon.aws.ec2_instance`, or you can call modules by their short name if you list the `amazon.aws` collection in the playbook's `collections` keyword: + +```yaml +--- + - name: Set up an instance for testing + amazon.aws.ec2_instance: + name: '{{ resource_prefix }}' + instance_type: t2.nano + image_id: "{{ (amis.images | sort(attribute='creation_date') | last).image_id }}" + wait: yes + volumes: + - device_name: /dev/xvda + ebs: + volume_size: 8 + delete_on_termination: true + register: instance +``` + + +### See Also: + +* [Amazon Web Services Guide](https://docs.ansible.com/ansible/latest/scenario_guides/guide_aws.html) +* [Ansible Using collections](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html) for more details. + +## Contributing to this collection + +We welcome community contributions to this collection. If you find problems, please open an issue or create a PR against the [Amazon AWS collection repository](https://github.com/ansible-collections/amazon.aws). +See [Contributing to Ansible-maintained collections](https://docs.ansible.com/ansible/devel/community/contributing_maintained_collections.html#contributing-maintained-collections) for more details.
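+ +If you would like to validate a change locally before opening a PR, the collection's sanity and unit tests (the `tests/unit` tree listed in `FILES.json` above) can be run with `ansible-test`. A minimal sketch, assuming the repository is checked out under `ansible_collections/amazon/aws` and Docker is available for the optional `--docker` isolation flag: + + ansible-test sanity --docker + ansible-test units --docker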
+ +You can also join us on: + +- Libera.Chat IRC - the ``#ansible-aws`` [irc.libera.chat](https://libera.chat/) channel + +### More information about contributing + +- [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html) - Details on contributing to Ansible +- [Contributing to Collections](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections) - How to check out collection git repositories correctly +- [Guidelines for Ansible Amazon AWS module development](https://docs.ansible.com/ansible/latest/dev_guide/platforms/aws_guidelines.html) +- [Getting Started With AWS Ansible Module Development and Community Contribution](https://www.ansible.com/blog/getting-started-with-aws-ansible-module-development) + +## Release notes + +See the [rendered changelog](https://ansible-collections.github.io/amazon.aws/branch/stable-5/collections/amazon/aws/docsite/CHANGELOG.html) or the [raw generated changelog](https://github.com/ansible-collections/amazon.aws/tree/stable-5/CHANGELOG.rst). + +## Roadmap + + + +## More information + +- [Ansible Collection overview](https://github.com/ansible-collections/overview) +- [Ansible User guide](https://docs.ansible.com/ansible/latest/user_guide/index.html) +- [Ansible Developer guide](https://docs.ansible.com/ansible/latest/dev_guide/index.html) +- [Ansible Collection Developer Guide](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html) +- [Ansible Community code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) + +## Licensing + +GNU General Public License v3.0 or later. + +See [COPYING](https://www.gnu.org/licenses/gpl-3.0.txt) for the full text. diff --git a/ansible_collections/amazon/aws/bindep.txt b/ansible_collections/amazon/aws/bindep.txt new file mode 100644 index 000000000..1f29b25b4 --- /dev/null +++ b/ansible_collections/amazon/aws/bindep.txt @@ -0,0 +1,4 @@ +# Needed by the ec2_key integration tests (generating EC2 format fingerprint) +openssl [test platform:rpm] +gcc [test platform:rpm] +python3-devel [test platform:rpm] diff --git a/ansible_collections/amazon/aws/changelogs/changelog.yaml b/ansible_collections/amazon/aws/changelogs/changelog.yaml new file mode 100644 index 000000000..2daf440bb --- /dev/null +++ b/ansible_collections/amazon/aws/changelogs/changelog.yaml @@ -0,0 +1,1734 @@ +ancestor: null +releases: + 1.1.0: + changes: + breaking_changes: + - aws_s3 - can now delete versioned buckets even when they are not empty - set + mode to delete to remove a versioned bucket and everything in it. + bugfixes: + - aws_ec2 - fix idempotency when managing tags + - aws_ec2 - fix idempotency when metrics are enabled + - aws_s3 - Delete objects and delete markers so versioned buckets can be removed. + - aws_s3 - Try to wait for the bucket to exist before setting the access control + list. + - cloudformation_info - Fix a KeyError when returning information about the stack(s). + - ec2_asg - Ensure "wait" is honored during replace operations + - ec2_launch_template - Update output to include latest_version and default_version, + matching the documentation + - ec2_transit_gateway - Use AWSRetry before ClientError is handled when describing + transit gateways + - ec2_transit_gateway - fixed issue where auto_attach set to yes was not being + honored (https://github.com/ansible/ansible/issues/61907) + - ec2_vol - fix filtering bug + - s3_bucket - Accept XNotImplemented response to support NetApp StorageGRID.
+ deprecated_features: + - cloudformation - The ``template_format`` option had no effect since Ansible + 2.3 and will be removed after 2022-06-01 + - cloudformation - the ``template_format`` option has been deprecated and will + be removed in a later release. It has been ignored by the module since Ansible + 2.3. + - data_pipeline - The ``version`` option had no effect and will be removed + after 2022-06-01 + - ec2 - in a later release, the ``group`` and ``group_id`` options will become + mutually exclusive. Currently ``group_id`` is ignored if you pass both. + - ec2_ami - The ``no_device`` alias ``NoDevice`` has been deprecated and will + be removed after 2022-06-01 + - ec2_ami - The ``virtual_name`` alias ``VirtualName`` has been deprecated and + will be removed after 2022-06-01 + - ec2_eip - The ``wait_timeout`` option had no effect and will be removed after + 2022-06-01 + - ec2_key - The ``wait_timeout`` option had no effect and will be removed after + 2022-06-01 + - ec2_key - The ``wait`` option had no effect and will be removed after 2022-06-01 + - ec2_key - the ``wait_timeout`` option has been deprecated and will be removed + in a later release. It has had no effect since Ansible 2.5. + - ec2_key - the ``wait`` option has been deprecated and will be removed in a + later release. It has had no effect since Ansible 2.5. + - ec2_lc - The ``associate_public_ip_address`` option had no effect and will + be removed after 2022-06-01 + - ec2_tag - deprecate the ``list`` option in favor of ec2_tag_info + - ec2_tag - support for ``list`` as a state has been deprecated and will be + removed in a later release. The ``ec2_tag_info`` module can be used to fetch the + tags on an EC2 resource. + major_changes: + - ec2 module_utils - The ``AWSRetry`` decorator no longer catches ``NotFound`` + exceptions by default. ``NotFound`` exceptions need to be explicitly added + using ``catch_extra_error_codes``. Some AWS modules may see an increase in + transient failures due to AWS's eventual consistency model. + minor_changes: + - Add ``aws_security_token``, ``aws_endpoint_url`` and ``endpoint_url`` aliases + to improve AWS module parameter naming consistency. + - Add support for ``aws_ca_bundle`` to boto3 based AWS modules + - Add support for configuring boto3 profiles using ``AWS_PROFILE`` and ``AWS_DEFAULT_PROFILE`` + - Added check_mode support to aws_az_info + - Added check_mode support to ec2_eni_info + - Added check_mode support to ec2_snapshot_info + - ansible_dict_to_boto3_filter_list - convert integers and bools to strings + before using them in filters. + - aws_direct_connect_virtual_interface - add direct_connect_gateway_id parameter. + This field is only applicable in private VIF cases (public=False) and is mutually + exclusive with virtual_gateway_id. + - cloudformation - Return change_set_id in the cloudformation output if a change + set was created. + - ec2 - deprecate allowing both group and group_id - currently we ignore group_id + if both are passed. + - ec2_ami_info - allow integer and bool values for filtering images (https://github.com/ansible/ansible/issues/43570). + - ec2_asg - Add support for Max Instance Lifetime + - ec2_asg - Add the ability to use mixed_instance_policy in launch template + driven autoscaling groups + - ec2_asg - Migrated to AnsibleAWSModule
+ - ec2_vol_info - Code cleanup and use of the AWSRetry decorator to improve stability + - ec2_vpc_net - Enable IPv6 CIDR assignment + fragments: + - 107_info_check_mode.yml + - 108-ec2_vol-deprecate-list.yml + - 27800-ec2_vpc_net-ipv6-support.yml + - 28-ec2_ami_info_int_bool_filter.yml + - 52_direct_connect_gateway_id.yml + - 54435_aws_s3_fix_removing_versioned_buckets.yaml + - 61279-ec2_launch_template-output.yml + - 61284-ec2_asg-idempotency.yml + - 61735-wait-for-s3-bucket-to-exist-before-modifying.yaml + - 61933-ec2_transit_gateway-honor-auto_attach-setting.yaml + - 62290-fix-cloudformation_info-KeyError.yaml + - 63752-cloudformation-return-changeset-id.yaml + - 64230-deprecate-unused.yml + - 64368-deprecate-unused.yml + - 65555-amazon-sanity-required.yml + - 65960-ec2_vol-filtering-bugfix.yml + - 66840-ec2_tag-deprecate-list.yaml + - 66863-ec2_asg-max_instance_lifetime-and-honor-wait-on-replace.yaml + - 66966-ec2-group-and-group_id.yml + - 66979-ec2_vol_info-ansibleawsmodule.yaml + - 67045-ec2_asg_mixed_instance_policy.yml + - 67247-fix-ec2_transit_gateway-retries.yaml + - 67462-s3_bucket-accept-storagegrid-response.yaml + - 93-deprecate-accidental.yml + - 99-awsmodule.yml + - porting-guide.yml + release_date: '2020-08-13' + 1.2.0: + changes: + bugfixes: + - ec2 module_utils - Ensure boto3 verify parameter isn't overridden by setting + a profile (https://github.com/ansible-collections/amazon.aws/issues/129) + - 's3_bucket - Ceph compatibility: treat error code NoSuchTagSetError used by + Ceph synonymously to NoSuchTagSet used by AWS' + deprecated_features: + - All AWS Modules - ``aws_access_key``, ``aws_secret_key`` and ``security_token`` + will be made mutually exclusive with ``profile`` after 2022-06-01. + minor_changes: + - ec2 module_utils - Update ``ec2_connect`` (boto2) behaviour so that ``ec2_url`` + overrides ``region``. + - module_utils.core - Support passing arbitrary extra keys to fail_json_aws, + matching capabilities of fail_json. + fragments: + - 121-ec2_url-resolution-order.yaml + - 129-verify_overridden.yml + - 140-fail_json_aws_keys.yml + - 151-deprecate-profile-credential-combination.yml + - 71484-ceph-tag-set-compat.yaml + release_date: '2020-08-28' + 1.2.1: + changes: + minor_changes: + - ec2_eni - Add support for tagging. + - ec2_eni - Port ec2_eni module to boto3 and add an integration test suite. + - ec2_eni_info - Add retries on transient AWS failures. + - ec2_eni_info - Add support for providing an ENI ID. + fragments: + - 141-ec2_eni-boto3.yml + release_date: '2020-10-07' + 1.3.0: + changes: + bugfixes: + - ec2 - Code fix so module can create ec2 instances with ``ec2_volume_iops`` + option (https://github.com/ansible-collections/amazon.aws/pull/177). + - ec2 - ignore terminated instances and instances that are shutting down when + starting and stopping (https://github.com/ansible-collections/amazon.aws/issues/146). + - ec2_group - Fixes error handling during tagging failures (https://github.com/ansible-collections/amazon.aws/issues/210). + - ec2_group_info - Code fix so module works with Python 3.8 (make dict immutable + in loop) (https://github.com/ansible-collections/amazon.aws/pull/181) + minor_changes: + - aws_caller_info - add AWSRetry decorator to automatically retry on common + temporary failures (https://github.com/ansible-collections/amazon.aws/pull/208) + - aws_s3 - Add support for uploading templated content (https://github.com/ansible-collections/amazon.aws/pull/20). 
+ - aws_secret - add "on_missing" and "on_denied" option (https://github.com/ansible-collections/amazon.aws/pull/122). + - ec2_ami - Add retries for ratelimiting related errors (https://github.com/ansible-collections/amazon.aws/pull/195). + - ec2_ami - fixed and streamlined ``max_attempts`` logic when waiting for AMI + creation to finish (https://github.com/ansible-collections/amazon.aws/pull/194). + - ec2_ami - increased default ``wait_timeout`` to 1200 seconds (https://github.com/ansible-collections/amazon.aws/pull/194). + - ec2_ami_info - Add retries for ratelimiting related errors (https://github.com/ansible-collections/amazon.aws/pull/195). + - ec2_eni - Improve reliability of the module by adding waiters and performing + lookups by ENI ID rather than repeated searches (https://github.com/ansible-collections/amazon.aws/pull/180). + - ec2_eni_info - Improve reliability of the module by adding waiters and performing + lookups by ENI ID rather than repeated searches (https://github.com/ansible-collections/amazon.aws/pull/180). + - ec2_group - add AWSRetry decorator to automatically retry on common temporary + failures (https://github.com/ansible-collections/amazon.aws/pull/207) + - ec2_group_info - add AWSRetry decorator to automatically retry on common temporary + failures (https://github.com/ansible-collections/amazon.aws/pull/207) + - ec2_snapshot_info - add AWSRetry decorator to automatically retry on common + temporary failures (https://github.com/ansible-collections/amazon.aws/pull/208) + - ec2_vol - Add automatic retries on AWS rate limit errors (https://github.com/ansible-collections/amazon.aws/pull/199). + - ec2_vol - ported ec2_vol to use boto3 (https://github.com/ansible-collections/amazon.aws/pull/53). + - ec2_vpc_dhcp_option_info - add AWSRetry decorator to automatically retry on + common temporary failures (https://github.com/ansible-collections/amazon.aws/pull/208) + - module_utils/core - add helper function ``scrub_none_parameters`` to remove + params set to ``None`` (https://github.com/ansible-collections/community.aws/issues/251). + - module_utils/waiters - Add retries to our waiters for the same failure codes + that we retry with AWSRetry (https://github.com/ansible-collections/amazon.aws/pull/185) + - s3_bucket - Add support for managing the ``public_access`` settings (https://github.com/ansible-collections/amazon.aws/pull/171). + fragments: + - 122-aws_secret-add-on_missing-and-on_denied-option.yml + - 171-s3_bucket-public_access.yml + - 177-fix-ec2-volume-creation-issue-with-iops.yaml + - 180-ec2_eni-stabilisation.yml + - 181-ec2-group-info-python-fix.yaml + - 184-scrub-none-params.yaml + - 185-waiter-retry-failures.yml + - 194-ec2-ami-max-attempts.yaml + - 195-ec2_ami-retries.yml + - 197-ignore-terminated-instances.yaml + - 199-ec2_vol-retries.yml + - 20-aws_s3-content.yml + - 207-ec2_group-retries.yml + - 208-info-retries.yaml + - 211-fix-error-handling-during-tagging-failure.yaml + - 53-ec2_vol-boto3-port.yml + release_date: '2020-12-10' + 1.4.0: + changes: + bugfixes: + - ec2_vol - a creation or update now returns a structure with an up to date + list of tags (https://github.com/ansible-collections/amazon.aws/pull/241). + minor_changes: + - aws_ec2 - Add hostname options concatenation + - aws_ec2 inventory plugin - avoid a superfluous import of ``ansible.utils.display.Display`` + (https://github.com/ansible-collections/amazon.aws/pull/226). 
+ - aws_ec2 module - Replace inverse aws instance-state-name filters !terminated, + !shutting-down in favor of positive filters pending, running, stopping, stopped. + Issue 235. (https://github.com/ansible-collections/amazon.aws/pull/237) + - aws_secret - add ``bypath`` functionality (https://github.com/ansible-collections/amazon.aws/pull/192). + - ec2_key - add AWSRetry decorator to automatically retry on common temporary + failures (https://github.com/ansible-collections/amazon.aws/pull/213). + - ec2_vol - Add support for gp3 volumes and support for modifying existing volumes + (https://github.com/ansible-collections/amazon.aws/issues/55). + - module_utils/elbv2 - add logic to compare_rules to suit Values list nested + within dicts unique to each field type. Fixes issue (https://github.com/ansible-collections/amazon.aws/issues/187) + - various AWS plugins and module_utils - Cleanup unused imports (https://github.com/ansible-collections/amazon.aws/pull/217). + fragments: + - 188-httprequestmethodconfig-keyerror.yaml + - 192-aws_secret-bypath-option.yaml + - 213-ec2_key-retries.yml + - 215-gp3-and-change-support-for-ec2_vol.yaml + - 217-duplicate-imports.yml + - 226_avoid_extra_Display_import.yaml + - 237_replace_inverse_ec2_aws_filter.yaml + - 241_ec2_vol-returns-an-up-to-date-tag-dict-of-the-volume.yaml + - 25-aws_ec2-hostname-options-concatenation.yaml + release_date: '2021-02-05' + 1.4.1: + changes: + minor_changes: + - module_utils - the ipaddress module utility has been vendored into this collection. This + eliminates the collection dependency on ansible.netcommon (which had removed + the library in its 2.0 release). The ipaddress library is provided for internal + use in this collection only. (https://github.com/ansible-collections/amazon.aws/issues/273). + fragments: + - 273-vendor-ipaddress-utility.yml + release_date: '2021-03-05' + 1.5.0: + changes: + bugfixes: + - ec2_vol - create or update now preserves the existing tags, including Name + (https://github.com/ansible-collections/amazon.aws/issues/229) + - ec2_vol - fix exception when platform information isn't available (https://github.com/ansible-collections/amazon.aws/issues/305). + minor_changes: + - AWS inventory plugins - use shared HAS_BOTO3 helper rather than copying code + (https://github.com/ansible-collections/amazon.aws/pull/288). + - AWS lookup plugins - use shared HAS_BOTO3 helper rather than copying code + (https://github.com/ansible-collections/amazon.aws/pull/288). + - aws_account_attribute - add retries on common AWS failures (https://github.com/ansible-collections/amazon.aws/pull/295). + - aws_ec2 inventory - expose a new configuration key ``use_contrib_script_compatible_ec2_tag_keys`` + to reproduce a behavior of the old ``ec2.py`` inventory script. With this + option enabled, each tag is exposed using an ``ec2_tag_TAGNAME`` key (https://github.com/ansible-collections/amazon.aws/pull/331). + - aws_ec2 inventory - expose two new keys called ``include_filters`` and ``exclude_filters`` + to give the user the ability to compose an inventory with multiple queries + (https://github.com/ansible-collections/amazon.aws/pull/328). + - aws_ec2 inventory plugin - Added support for using Jinja2 templates in the + authentication fields (https://github.com/ansible-collections/amazon.aws/pull/57). + - cloudformation - added support for StackPolicyDuringUpdateBody (https://github.com/ansible-collections/amazon.aws/pull/155).
+ - ec2_metadata_facts - add support for IMDSv2 (https://github.com/ansible-collections/amazon.aws/pull/43). + - ec2_snapshot_info - add the ``max_results`` and ``next_token_id`` options + (https://github.com/ansible-collections/amazon.aws/pull/321). + - ec2_tag - use common code for tagging resources (https://github.com/ansible-collections/amazon.aws/pull/309). + - ec2_tag_info - use common code for tagging resources (https://github.com/ansible-collections/amazon.aws/pull/309). + - ec2_vol - add the ``purge_tags`` option (https://github.com/ansible-collections/amazon.aws/pull/242). + - ec2_vol - use common code for tagging resources (https://github.com/ansible-collections/amazon.aws/pull/309). + - ec2_vpc_net - use a custom waiter which can handle API rate limiting (https://github.com/ansible-collections/amazon.aws/pull/270). + - ec2_vpc_subnet - use AWSRetry decorator to more consistently handle API rate + limiting (https://github.com/ansible-collections/amazon.aws/pull/270). + - ec2_vpc_subnet - use common code for tagging resources (https://github.com/ansible-collections/amazon.aws/pull/309). + - module_utils.cloudfront_facts - linting cleanup (https://github.com/ansible-collections/amazon.aws/pull/291). + - module_utils.ec2 - linting cleanup (https://github.com/ansible-collections/amazon.aws/pull/291). + - module_utils/core - add a helper function ``normalize_boto3_result`` (https://github.com/ansible-collections/amazon.aws/pull/271). + - module_utils/core - add parameter ``descend_into_lists`` to ``scrub_none_parameters`` + helper function (https://github.com/ansible-collections/amazon.aws/pull/262). + - module_utils/ec2 - added additional helper functions for tagging EC2 resources + (https://github.com/ansible-collections/amazon.aws/pull/309). + - sanity tests - add ignore.txt for 2.12 (https://github.com/ansible-collections/amazon.aws/pull/315). + fragments: + - 155-support-for-StackPolicyDuringUpdateBody.yaml + - 241_ec2_vol-add_purge_tags_parameter.yaml + - 242_ec2_vol-preset-the-name-tag-of-the-volume.yaml + - 261-scrub-params-descend-into-lists.yml + - 270-vpc-waiters.yaml + - 271-normalize_boto3_result.yml + - 288-has_boto3.yml + - 291-lint-cleanup.yml + - 295-aws_account_attribute-awsretry.yml + - 305-ec2_vol-no-platform.yml + - 309-ec2_tags.yml + - 321-ec2_snapshot_info-add_max_results_and_next_token_parameters.yaml + - 328_aws_ec2_inventory_includes_entries_matching.yaml + - 331_aws_ec2_inventory_use_contrib_script_compatible_ec2_tag_keys.yaml + - 43-ec2_metadata_facts-IMDSv2.yml + - 57-aws_ec2-support-for-templates.yml + - ignore_212.yml + release_date: '2021-04-27' + 2.0.0: + changes: + breaking_changes: + - 'ec2_instance - instance wait for state behaviour has changed. If plays require + the old behavior of waiting for the instance monitoring status to become ``OK`` + when launching a new instance, the action will need to specify ``state: started`` + (https://github.com/ansible-collections/amazon.aws/pull/481).' + - ec2_snapshot - support for waiting indefinitely has been dropped, the new default + is 10 minutes (https://github.com/ansible-collections/amazon.aws/pull/356). + - ec2_vol_info - the ``attachment_set`` return value is now a list of attachments, + to support volumes with Multi-Attach enabled. (https://github.com/ansible-collections/amazon.aws/pull/362). + - ec2_vpc_dhcp_option - The module has been refactored to use boto3. Keys and + value types returned by the module are now consistent, which is a change from + the previous behaviour.
A ``purge_tags`` option has been added, which defaults + to ``True``. (https://github.com/ansible-collections/amazon.aws/pull/252) + - ec2_vpc_dhcp_option_info - Now preserves case for tag keys in return value. + (https://github.com/ansible-collections/amazon.aws/pull/252) + - module_utils.core - The boto3 switch has been removed from the region parameter + (https://github.com/ansible-collections/amazon.aws/pull/287). + - module_utils/compat - vendored copy of ipaddress removed (https://github.com/ansible-collections/amazon.aws/pull/461). + - module_utils/core - updated the ``scrub_none_parameters`` function so that + ``descend_into_lists`` is set to ``True`` by default (https://github.com/ansible-collections/amazon.aws/pull/297). + bugfixes: + - aws_s3 - Fix upload permission when an S3 bucket ACL policy requires a particular + canned ACL (https://github.com/ansible-collections/amazon.aws/pull/318) + - ec2_ami - Fix an issue when creating an AMI with the ``no_device`` parameter (https://github.com/ansible-collections/amazon.aws/pull/386) + - ec2_instance - ``ec2_instance`` was waiting on EC2 instance monitoring status + to be ``OK`` when launching a new instance. This could cause a play to wait + multiple minutes for AWS's monitoring to complete status checks (https://github.com/ansible-collections/amazon.aws/pull/481). + - ec2_snapshot - Fix an issue when capturing a snapshot of a volume without + tags (https://github.com/ansible-collections/amazon.aws/pull/383) + - ec2_vol - Fixes ``changed`` status when ``modify_volume`` is used, but no + new disk is being attached. The module incorrectly reported that no change + had occurred even when disks had been modified (iops, throughput, type, etc.). + (https://github.com/ansible-collections/amazon.aws/issues/482). + - ec2_vol - fix the ``iops`` setting and enforce correct usage of the ``iops``/``throughput`` parameters (https://github.com/ansible-collections/amazon.aws/pull/334) + - inventory - ``include_filters`` is no longer ignored when ``filters`` is + not set (https://github.com/ansible-collections/amazon.aws/issues/457). + - s3_bucket - Fix error handling when attempting to set a feature that is not + implemented (https://github.com/ansible-collections/amazon.aws/pull/391). + - s3_bucket - Gracefully handle ``NotImplemented`` exceptions when fetching + encryption settings (https://github.com/ansible-collections/amazon.aws/issues/390). + deprecated_features: + - ec2 - the boto based ``ec2`` module has been deprecated in favour of the boto3 + based ``ec2_instance`` module. The ``ec2`` module will be removed in release + 4.0.0 (https://github.com/ansible-collections/amazon.aws/pull/424). + - ec2_vpc_dhcp_option - The ``new_config`` return key has been deprecated and + will be removed in a future release. It will be replaced by ``dhcp_config``. Both + values are returned in the interim. (https://github.com/ansible-collections/amazon.aws/pull/252) + major_changes: + - amazon.aws collection - Due to the AWS SDKs announcing the end of support + for Python versions lower than 3.6 (https://boto3.amazonaws.com/v1/documentation/api/1.17.64/guide/migrationpy3.html) + this collection now requires Python 3.6+ (https://github.com/ansible-collections/amazon.aws/pull/298). + - amazon.aws collection - The amazon.aws collection has dropped support for + ``botocore<1.18.0`` and ``boto3<1.15.0``. Most modules will continue to work + with older versions of the AWS SDK; however, compatibility with older versions + of the SDK is not guaranteed and will not be tested.
When using older versions + of the SDK, a warning will be emitted by Ansible (https://github.com/ansible-collections/amazon.aws/pull/502). + - ec2_instance - The module has been migrated from the ``community.aws`` collection. + Playbooks using the Fully Qualified Collection Name for this module should + be updated to use ``amazon.aws.ec2_instance``. + - ec2_instance_info - The module has been migrated from the ``community.aws`` + collection. Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.ec2_instance_info``. + - ec2_vpc_endpoint - The module has been migrated from the ``community.aws`` + collection. Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.ec2_vpc_endpoint``. + - ec2_vpc_endpoint_facts - The module has been migrated from the ``community.aws`` + collection. Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.ec2_vpc_endpoint_info``. + - ec2_vpc_endpoint_info - The module has been migrated from the ``community.aws`` + collection. Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.ec2_vpc_endpoint_info``. + - ec2_vpc_endpoint_service_info - The module has been migrated from the ``community.aws`` + collection. Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.ec2_vpc_endpoint_service_info``. + - ec2_vpc_igw - The module has been migrated from the ``community.aws`` collection. + Playbooks using the Fully Qualified Collection Name for this module should + be updated to use ``amazon.aws.ec2_vpc_igw``. + - ec2_vpc_igw_facts - The module has been migrated from the ``community.aws`` + collection. Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.ec2_vpc_igw_info``. + - ec2_vpc_igw_info - The module has been migrated from the ``community.aws`` + collection. Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.ec2_vpc_igw_info``. + - ec2_vpc_nat_gateway - The module has been migrated from the ``community.aws`` + collection. Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.ec2_vpc_nat_gateway``. + - ec2_vpc_nat_gateway_facts - The module has been migrated from the ``community.aws`` + collection. Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.ec2_vpc_nat_gateway_info``. + - ec2_vpc_nat_gateway_info - The module has been migrated from the ``community.aws`` + collection. Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.ec2_vpc_nat_gateway_info``. + - ec2_vpc_route_table - The module has been migrated from the ``community.aws`` + collection. Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.ec2_vpc_route_table``. + - ec2_vpc_route_table_facts - The module has been migrated from the ``community.aws`` + collection. Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.ec2_vpc_route_table_info``. + - ec2_vpc_route_table_info - The module has been migrated from the ``community.aws`` + collection.
Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.ec2_vpc_route_table_info``. + minor_changes: + - aws_ec2 - use a generator rather than list comprehension (https://github.com/ansible-collections/amazon.aws/pull/465). + - aws_s3 - Tests for compatibility with older versions of the AWS SDKs have + been removed (https://github.com/ansible-collections/amazon.aws/pull/442). + - aws_s3 - add ``tags`` and ``purge_tags`` features for an S3 object (https://github.com/ansible-collections/amazon.aws/pull/335) + - aws_s3 - new mode to copy an existing object to another bucket (https://github.com/ansible-collections/amazon.aws/pull/359). + - aws_secret - added support for gracefully handling deleted secrets (https://github.com/ansible-collections/amazon.aws/pull/455). + - aws_ssm - add "on_missing" and "on_denied" options (https://github.com/ansible-collections/amazon.aws/pull/370). + - cloudformation - Tests for compatibility with older versions of the AWS SDKs + have been removed (https://github.com/ansible-collections/amazon.aws/pull/442). + - ec2_ami - ensure tags are propagated to the snapshot(s) when creating an AMI + (https://github.com/ansible-collections/amazon.aws/pull/437). + - ec2_eni - fix idempotency when ``security_groups`` attribute is specified + (https://github.com/ansible-collections/amazon.aws/pull/337). + - ec2_eni - timeout increased when waiting for ENIs to finish detaching (https://github.com/ansible-collections/amazon.aws/pull/501). + - ec2_group - Tests for compatibility with older versions of the AWS SDKs have + been removed (https://github.com/ansible-collections/amazon.aws/pull/442). + - ec2_group - use a generator rather than list comprehension (https://github.com/ansible-collections/amazon.aws/pull/465). + - ec2_group - use system ipaddress module, available with Python >= 3.3, instead + of vendored copy (https://github.com/ansible-collections/amazon.aws/pull/461). + - ec2_instance - Tests for compatibility with older versions of the AWS SDKs + have been removed (https://github.com/ansible-collections/amazon.aws/pull/442). + - ec2_instance - add ``throughput`` parameter for gp3 volume types (https://github.com/ansible-collections/amazon.aws/pull/433). + - ec2_instance - add support for controlling metadata options (https://github.com/ansible-collections/amazon.aws/pull/414). + - ec2_instance - remove unnecessary raise when exiting with a failure (https://github.com/ansible-collections/amazon.aws/pull/460). + - ec2_instance_info - Tests for compatibility with older versions of the AWS + SDKs have been removed (https://github.com/ansible-collections/amazon.aws/pull/442).
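The ``ec2_instance`` entries above add gp3 ``throughput`` support and metadata-option controls. A sketch combining the two; the AMI ID and sizes are placeholders:

```yaml
- name: Launch an instance that requires IMDSv2 and tunes its gp3 root volume
  amazon.aws.ec2_instance:
    name: example-instance
    instance_type: t3.micro
    image_id: ami-0123456789abcdef0   # placeholder AMI ID
    metadata_options:
      http_tokens: required           # enforce IMDSv2 session tokens
    volumes:
      - device_name: /dev/sda1
        ebs:
          volume_type: gp3
          volume_size: 20
          throughput: 200             # the new gp3 throughput parameter
```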
+ - ec2_snapshot - migrated to use the boto3 Python library (https://github.com/ansible-collections/amazon.aws/pull/356). + - ec2_spot_instance_info - Added a new module that describes the specified Spot + Instance requests (https://github.com/ansible-collections/amazon.aws/pull/487). + - ec2_vol - add parameter ``multi_attach`` to support Multi-Attach on volume + creation/update (https://github.com/ansible-collections/amazon.aws/pull/362). + - ec2_vol - relax the boto3/botocore requirements and only require botocore + 1.19.27 for modifying the ``throughput`` parameter (https://github.com/ansible-collections/amazon.aws/pull/346). + - ec2_vpc_dhcp_option - Now also returns a boto3-style resource description + in the ``dhcp_options`` result key. This includes any tags for the ``dhcp_options_id`` + and has the same format as the current return value of ``ec2_vpc_dhcp_option_info``. + (https://github.com/ansible-collections/amazon.aws/pull/252) + - ec2_vpc_dhcp_option_info - Now also returns a user-friendly ``dhcp_config`` + key that matches the historical ``new_config`` key from ec2_vpc_dhcp_option, + and alleviates the need to use ``items2dict(key_name='key', value_name='values')`` + when parsing the output of the module. (https://github.com/ansible-collections/amazon.aws/pull/252) + - ec2_vpc_subnet - Tests for compatibility with older versions of the AWS SDKs + have been removed (https://github.com/ansible-collections/amazon.aws/pull/442). + - integration tests - remove the dependency on the ``community.general`` collection + (https://github.com/ansible-collections/amazon.aws/pull/361). + - module_utils/waiter - add RDS cluster ``cluster_available`` waiter (https://github.com/ansible-collections/amazon.aws/pull/464). + - module_utils/waiter - add RDS cluster ``cluster_deleted`` waiter (https://github.com/ansible-collections/amazon.aws/pull/464). + - module_utils/waiter - add Route53 ``resource_record_sets_changed`` waiter + (https://github.com/ansible-collections/amazon.aws/pull/350). + - s3_bucket - Tests for compatibility with older versions of the AWS SDKs have + been removed (https://github.com/ansible-collections/amazon.aws/pull/442). + - s3_bucket - add new option ``object_ownership`` to configure object ownership + (https://github.com/ansible-collections/amazon.aws/pull/311) + - s3_bucket - updated to use HeadBucket instead of ListBucket when testing for + bucket existence (https://github.com/ansible-collections/amazon.aws/pull/357).
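A short sketch of the new ``object_ownership`` option on ``s3_bucket`` noted above; the bucket name is illustrative:

```yaml
- name: Create a bucket where the bucket owner takes ownership of uploaded objects
  amazon.aws.s3_bucket:
    name: example-ownership-bucket   # illustrative bucket name
    state: present
    object_ownership: BucketOwnerPreferred
```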
+ fragments: + - 252_boto3_refactor_ec2_vpc_dhcp_option.yaml + - 290-lint-cleanup.yml + - 297-scrub_none_parameters-descend-default.yml + - 298-python3.6.yml + - 311-s3_bucket-allow-object-ownership-configuration.yaml + - 318-s3-upload-acl.yml + - 334-ec2_vol-iops-and-throughput-issues.yaml + - 335-aws_s3-tagging-object-feature.yaml + - 337-ec2_eni-fix-idempotency-security-groups.yml + - 346-ec2_vol-boto3-requirements.yml + - 350-route53-waiter.yml + - 356-ec2_snapshot-boto3-migration.yml + - 357-s3_bucket-use-head.yml + - 359-aws_s3-add-copy-mode.yml + - 361-drop-community.general-support-for-integration.tests.yml + - 362-ec2_vol-add-multi-attach-parameter.yml + - 370-aws_ssm-add-on_missing-and-on_denied-option.yml + - 383_ec2_snapshot_tags.yml + - 386_ec2_ami_no_device.yml + - 391-s3_bucket-enc_notimplemented.yml + - 414-ec2_instance-support-controlling-metadata-options.yml + - 424-deprecate-ec2.yml + - 433-ec2_instance-throughput.yml + - 437-ec2_ami-propagate-tags-to-snapshot.yml + - 442-boto3-minimums.yml + - 455-lookup_aws_secret-deleted.yml + - 460-pylint.yml + - 461-ipaddress.yml + - 464-rds_cluster-waiter.yml + - 465-pylint.yml + - 481-ec2_instance-wait_sanity.yml + - 483-ec2_vol_fix_returned_changed_var.yml + - 487-ec2_spot_instance_info-add-new-module.yml + - 501-ec2_eni-timeout.yml + - include_filters_with_filter.yaml + - migrate_ec2_instance.yml + - migrate_ec2_vpc_endpoint.yml + - migrate_ec2_vpc_igw.yml + - migrate_ec2_vpc_nat_gateway.yml + - migrate_ec2_vpc_route_table.yml + modules: + - description: request, stop, reboot or cancel spot instance + name: ec2_spot_instance + namespace: '' + - description: Gather information about ec2 spot instance requests + name: ec2_spot_instance_info + namespace: '' + release_date: '2021-09-03' + 2.1.0: + changes: + bugfixes: + - AWS action group - added missing ``ec2_instance_facts`` entry (https://github.com/ansible-collections/amazon.aws/issues/557) + - ec2_ami - fix problem when creating an AMI from an instance with ephemeral + volumes (https://github.com/ansible-collections/amazon.aws/issues/511). + - ec2_instance - ensure that ec2_instance falls back to the ``tag:Name`` filter + when neither a filter nor a name parameter is passed (https://github.com/ansible-collections/amazon.aws/issues/526). + - s3_bucket - update error handling to better support DigitalOcean Spaces (https://github.com/ansible-collections/amazon.aws/issues/508). + deprecated_features: + - elb_classic_lb - setting of the ``ec2_elb`` fact has been deprecated and will + be removed in release 4.0.0 of the collection. The module now returns ``elb`` + which can be accessed using the register keyword (https://github.com/ansible-collections/amazon.aws/pull/552). + minor_changes: + - aws_service_ip_ranges - add new option ``ipv6_prefixes`` to get only IPv6 + addresses and prefixes for Amazon services (https://github.com/ansible-collections/amazon.aws/pull/430) + - cloudformation - fix detection when there are no changes. Sometimes when there + are no changes, the change set will have status ``FAILED`` with the StatusReason + ``No updates are to be performed`` (https://github.com/ansible-collections/amazon.aws/pull/507). + - ec2_ami - add check_mode support (https://github.com/ansible-collections/amazon.aws/pull/516). + - ec2_ami - use module_util helper for tagging AMIs (https://github.com/ansible-collections/amazon.aws/pull/520).
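The ``aws_service_ip_ranges`` entry above adds an ``ipv6_prefixes`` option. A minimal lookup sketch:

```yaml
# Return only the IPv6 prefixes published for S3 in us-east-1.
- name: Show the IPv6 ranges for a service
  ansible.builtin.debug:
    msg: "{{ lookup('amazon.aws.aws_service_ip_ranges', region='us-east-1', service='S3', ipv6_prefixes=True, wantlist=True) }}"
```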
+ - ec2_ami - when creating an AMI from an instance pass the tagging options at + creation time (https://github.com/ansible-collections/amazon.aws/pull/551). + - ec2_elb_lb - module renamed to ``elb_classic_lb`` (https://github.com/ansible-collections/amazon.aws/pull/377). + - ec2_eni - add check mode support (https://github.com/ansible-collections/amazon.aws/pull/534). + - ec2_eni - use module_util helper for tagging ENIs (https://github.com/ansible-collections/amazon.aws/pull/522). + - ec2_instance - use module_util helpers for tagging (https://github.com/ansible-collections/amazon.aws/pull/527). + - ec2_key - add support for tagging key pairs (https://github.com/ansible-collections/amazon.aws/pull/548). + - ec2_snapshot - add check_mode support (https://github.com/ansible-collections/amazon.aws/pull/512). + - ec2_vol - add check_mode support (https://github.com/ansible-collections/amazon.aws/pull/509). + - ec2_vpc_dhcp_option - use module_util helpers for tagging (https://github.com/ansible-collections/amazon.aws/pull/531). + - ec2_vpc_endpoint - added ``vpc_endpoint_security_groups`` parameter to support + defining the security group attached to an interface endpoint (https://github.com/ansible-collections/amazon.aws/pull/544). + - ec2_vpc_endpoint - added ``vpc_endpoint_subnets`` parameter to support defining + the subnet attached to an interface or gateway endpoint (https://github.com/ansible-collections/amazon.aws/pull/544). + - ec2_vpc_endpoint - use module_util helper for tagging (https://github.com/ansible-collections/amazon.aws/pull/525). + - ec2_vpc_endpoint - use module_util helpers for tagging (https://github.com/ansible-collections/amazon.aws/pull/531). + - ec2_vpc_igw - use module_util helper for tagging (https://github.com/ansible-collections/amazon.aws/pull/523). + - ec2_vpc_igw - use module_util helpers for tagging (https://github.com/ansible-collections/amazon.aws/pull/531). + - ec2_vpc_nat_gateway - use module_util helper for tagging (https://github.com/ansible-collections/amazon.aws/pull/524). + - ec2_vpc_nat_gateway - use module_util helpers for tagging (https://github.com/ansible-collections/amazon.aws/pull/531). + - elb_classic_lb - added retries on common AWS temporary API failures (https://github.com/ansible-collections/amazon.aws/pull/377). + - elb_classic_lb - added support for check_mode (https://github.com/ansible-collections/amazon.aws/pull/377). + - elb_classic_lb - added support for wait during creation (https://github.com/ansible-collections/amazon.aws/pull/377). + - elb_classic_lb - added support for wait during instance addition and removal + (https://github.com/ansible-collections/amazon.aws/pull/377). + - elb_classic_lb - migrated to boto3 SDK (https://github.com/ansible-collections/amazon.aws/pull/377). + - elb_classic_lb - various error messages changed due to refactor (https://github.com/ansible-collections/amazon.aws/pull/377). + - module_utils.ec2 - moved generic tagging helpers into module_utils.tagging + (https://github.com/ansible-collections/amazon.aws/pull/527). + - module_utils.tagging - add new helper to generate TagSpecification lists (https://github.com/ansible-collections/amazon.aws/pull/527). 
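The ``ec2_vpc_endpoint`` parameters above pin an interface endpoint to specific subnets and security groups. A sketch with placeholder IDs:

```yaml
- name: Create an SSM interface endpoint on chosen subnets and security groups
  amazon.aws.ec2_vpc_endpoint:
    state: present
    vpc_id: vpc-0123456789abcdef0            # placeholder VPC ID
    service: com.amazonaws.us-east-1.ssm
    vpc_endpoint_type: Interface
    vpc_endpoint_subnets:
      - subnet-0123456789abcdef0             # placeholder subnet ID
    vpc_endpoint_security_groups:
      - sg-0123456789abcdef0                 # placeholder security group ID
```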
+ fragments: + - 377-ec2_elb_lb-boto3.yml + - 430-add_support_for_ipv6_addresses.yml + - 507-fix_cloudformation_changeset_detection.yml + - 508-s3_bucket-digital_ocean.yml + - 509-ec2_vol_add_check_mode_support.yml + - 512-ec2_snapshot_add_check_mode_support.yml.yml + - 516-ec2_ami_add_check_mode_support.yml + - 520-ec2_ami-tagging.yml + - 522-ec2_eni-tagging.yml + - 523-ec2_vpc_igw-tagging.yml + - 524-ec2_vpc_nat_gateway-tagging.yml + - 525-ec2_vpc_endpoint-tagging.yml + - 526-ec2_instance_search_tags.yml + - 527-ec2_instance-tagging.yml + - 531-use_tags_handlers.yml + - 534-ec2_eni_add_check_mode_support.yml + - 544-vpc-endpoint-add-subnets-sg-option.yml + - 548-ec2_key-tagging.yml + - 551-ec2_ami-tag-on-create.yml + - 552-elb_classic_lb-fact.yml + - 557-action_group-missing-entry.yml + release_date: '2021-11-11' + 2.2.0: + changes: + bugfixes: + - aws_ec2 inventory - use the iam_role_arn configuration parameter to assume + the role before trying to call DescribeRegions if the regions configuration + is not set and the provided AWS credentials lack sufficient privileges to perform + the DescribeRegions action (https://github.com/ansible-collections/amazon.aws/issues/566). + - ec2_vol - Sets the Iops value in req_obj even if the iops value has not changed, + to allow modifying volume types that require passing an iops value to boto. + (https://github.com/ansible-collections/amazon.aws/pull/606) + - ec2_vol - changing a volume from a type that does not support IOPS (like ``standard``) + to a type that does (like ``gp3``) fails (https://github.com/ansible-collections/amazon.aws/issues/626). + - ec2_vpc_igw - fix 'NoneType' object is not subscriptable error (https://github.com/ansible-collections/amazon.aws/pull/691). + - ec2_vpc_igw - use paginator for describe internet gateways and add retry to + fix NoneType object is not subscriptable error (https://github.com/ansible-collections/amazon.aws/pull/695). + - elb_classic_lb - handle security_group_ids when providing security_group_names + and fix broken tasks in the integration tests (https://github.com/ansible-collections/amazon.aws/pull/592). + minor_changes: + - ec2_instance - add count parameter support (https://github.com/ansible-collections/amazon.aws/pull/539). + fragments: + - 539-ec2_instance_add_count_param.yml + - 592-elb_classic_lb-handle-sg-ids-fix-tests.yml + - 606-ec2_vol-set-iops-even-if-unchanged-for-boto-req.yml + - 619-inventory-aws_ec2-assume-arn-role-when-listing-regions.yml + - 626-ec2_vol-iops-when-source-does-not-have-iops.yml + - 691-ec2_vpc_igw-fix-null-igw-error.yml + - 695-ec2_vpc_igw-fix-nonetype-with-paginator.yml + release_date: '2022-03-22' + 3.0.0: + changes: + breaking_changes: + - aws_caller_facts - Remove deprecated ``aws_caller_facts`` alias. Please use + ``aws_caller_info`` instead. + - cloudformation_facts - Remove deprecated ``cloudformation_facts`` alias. Please + use ``cloudformation_info`` instead. + - ec2_ami_facts - Remove deprecated ``ec2_ami_facts`` alias. Please use ``ec2_ami_info`` + instead. + - ec2_eni_facts - Remove deprecated ``ec2_eni_facts`` alias. Please use ``ec2_eni_info`` + instead. + - ec2_group_facts - Remove deprecated ``ec2_group_facts`` alias. Please use + ``ec2_group_info`` instead. + - ec2_instance_facts - Remove deprecated ``ec2_instance_facts`` alias. Please + use ``ec2_instance_info`` instead. + - ec2_snapshot_facts - Remove deprecated ``ec2_snapshot_facts`` alias. Please + use ``ec2_snapshot_info`` instead. + - ec2_vol_facts - Remove deprecated ``ec2_vol_facts`` alias.
Please use ``ec2_vol_info`` + instead. + - ec2_vpc_dhcp_option_facts - Remove deprecated ``ec2_vpc_dhcp_option_facts`` + alias. Please use ``ec2_vpc_dhcp_option_info`` instead. + - ec2_vpc_endpoint_facts - Remove deprecated ``ec2_vpc_endpoint_facts`` alias. Please + use ``ec2_vpc_endpoint_info`` instead. + - ec2_vpc_igw_facts - Remove deprecated ``ec2_vpc_igw_facts`` alias. Please + use ``ec2_vpc_igw_info`` instead. + - ec2_vpc_nat_gateway_facts - Remove deprecated ``ec2_vpc_nat_gateway_facts`` + alias. Please use ``ec2_vpc_nat_gateway_info`` instead. + - ec2_vpc_net_facts - Remove deprecated ``ec2_vpc_net_facts`` alias. Please + use ``ec2_vpc_net_info`` instead. + - ec2_vpc_route_table_facts - Remove deprecated ``ec2_vpc_route_table_facts`` + alias. Please use ``ec2_vpc_route_table_info`` instead. + - ec2_vpc_subnet_facts - Remove deprecated ``ec2_vpc_subnet_facts`` alias. Please + use ``ec2_vpc_subnet_info`` instead. + deprecated_features: + - module_utils - support for the original AWS SDK ``boto`` has been deprecated + in favour of the ``boto3``/``botocore`` SDK. All ``boto`` based modules have + either been deprecated or migrated to ``botocore``, and the remaining support + code in module_utils will be removed in release 4.0.0 of the amazon.aws collection. + Any modules outside of the amazon.aws and community.aws collections based + on the ``boto`` library will need to be migrated to the ``boto3``/``botocore`` + libraries (https://github.com/ansible-collections/amazon.aws/pull/575). + major_changes: + - amazon.aws collection - The amazon.aws collection has dropped support for + ``botocore<1.19.0`` and ``boto3<1.16.0``. Most modules will continue to work + with older versions of the AWS SDK; however, compatibility with older versions + of the SDK is not guaranteed and will not be tested. When using older versions + of the SDK, a warning will be emitted by Ansible (https://github.com/ansible-collections/amazon.aws/pull/574). + minor_changes: + - ec2_instance - add count parameter support (https://github.com/ansible-collections/amazon.aws/pull/539). + fragments: + - 539-ec2_instance_add_count_param.yml + - 574-botocore-1-19-0.yml + - 575-deprecate-boto.yml + - remove_deprecated_facts.yml + release_date: '2021-12-06' + 3.1.0: + changes: + bugfixes: + - Various modules and plugins - use vendored version of ``distutils.version`` + instead of the deprecated Python standard library ``distutils`` (https://github.com/ansible-collections/amazon.aws/pull/599). + - aws_acm - No longer raises a ResourceNotFound exception while retrieving ACM + certificates. + - aws_s3 - fix the exception raised when using the module to copy from source to destination + and the key is missing from the source (https://github.com/ansible-collections/amazon.aws/issues/602). + - ec2_instance - Add a condition to handle the default ``instance_type`` value + to fix instance creation breaking when using a launch template (https://github.com/ansible-collections/amazon.aws/pull/587). + - ec2_key - add support for ED25519 key type (https://github.com/ansible-collections/amazon.aws/issues/572). + - ec2_vol - Sets the Iops value in req_obj even if the iops value has not changed, + to allow modifying volume types that require passing an iops value to boto. + (https://github.com/ansible-collections/amazon.aws/pull/606) + - elb_classic_lb - handle security_group_ids when providing security_group_names + and fix broken tasks in the integration tests (https://github.com/ansible-collections/amazon.aws/pull/592).
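The ``ec2_key`` entry above adds ED25519 support via the ``key_type`` option. A minimal sketch:

```yaml
- name: Create an ED25519 key pair instead of the default RSA type
  amazon.aws.ec2_key:
    name: example-ed25519-key   # illustrative key name
    key_type: ed25519
```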
+ - s3_bucket - Enable the management of bucket-level ACLs (https://github.com/ansible-collections/amazon.aws/issues/573). + deprecated_features: + - ec2_instance - The default value for ``instance_type`` has been deprecated; in + a future release you must set an ``instance_type`` or a ``launch_template`` (https://github.com/ansible-collections/amazon.aws/pull/587). + minor_changes: + - add new parameters ``hostvars_prefix`` and ``hostvars_suffix`` for the inventory plugins + aws_ec2 and aws_rds (https://github.com/ansible-collections/amazon.aws/issues/535). + - aws_s3 - Add ``validate_bucket_name`` option, to control bucket name validation + (https://github.com/ansible-collections/amazon.aws/pull/615). + - aws_s3 - add a ``latest`` choice to the ``overwrite`` parameter to get the latest object + on S3 (https://github.com/ansible-collections/amazon.aws/pull/595). + - ec2_vol - add support for OutpostArn param (https://github.com/ansible-collections/amazon.aws/pull/597). + - ec2_vol - tag volume on creation (https://github.com/ansible-collections/amazon.aws/pull/603). + - ec2_vpc_route_table - add support for IPv6 in creating route tables (https://github.com/ansible-collections/amazon.aws/pull/601). + - s3_bucket - Add ``validate_bucket_name`` option, to control bucket name validation + (https://github.com/ansible-collections/amazon.aws/pull/615). + fragments: + - 587-ec2_instance-default-instance-type-launch-template.yml + - 592-elb_classic_lb-handle-sg-ids-fix-tests.yml + - 593-aws_s3-fix-copy-when-missing-key.yml + - 595-aws_s3-add-latest-choice-on-overwrite-parameter.yml + - 597-ec2_vol-add-outpostarn-support.yml + - 601-ec2_vpc_route_table-ipv6-support.yml + - 603-ec2_vol-add-tags-on-creation.yml + - 606-ec2_vol-set-iops-even-if-unchanged-for-boto-req.yml + - 611-s3_bucket-add-support-for-acl.yml + - 614-ec2_key-add-support-for-ed25519-key-type.yml + - 615-s3-validate_bucket_name.yml + - 619-aws_ec2-aws_rds-add-support-for-hostvars_prefix-and-hostvars_suffix.yml + - 646-acm-resource-not-found.yml + - disutils.version.yml + release_date: '2022-02-10' + 3.1.1: + changes: + minor_changes: + - bump the release version of the amazon.aws collection from 3.1.0 to 3.1.1 + because of a bug that occurred while uploading to Galaxy. + fragments: + - bump_release_version.yml + release_date: '2022-02-15' + 3.2.0: + changes: + bugfixes: + - aws_ec2 inventory - use the iam_role_arn configuration parameter to assume + the role before trying to call DescribeRegions if the regions configuration + is not set and the provided AWS credentials lack sufficient privileges to perform + the DescribeRegions action (https://github.com/ansible-collections/amazon.aws/issues/566). + - ec2_vol - changing a volume from a type that does not support IOPS (like ``standard``) + to a type that does (like ``gp3``) fails (https://github.com/ansible-collections/amazon.aws/issues/626). + - ec2_vpc_igw - fix 'NoneType' object is not subscriptable error (https://github.com/ansible-collections/amazon.aws/pull/691). + - ec2_vpc_igw - use paginator for describe internet gateways and add retry to + fix NoneType object is not subscriptable error (https://github.com/ansible-collections/amazon.aws/pull/695). + - ec2_vpc_net - In check mode, ensure the module does not change the configuration. + Handle the case when the Amazon-provided ipv6 block is enabled, then disabled, then + enabled again. Do not disable IPv6 CIDR association (using the Amazon pool) if + the ``ipv6_cidr`` property is not present in the task.
If the VPC already exists and + the ``ipv6_cidr`` property is not specified, retain the current configuration (https://github.com/ansible-collections/amazon.aws/pull/631). + minor_changes: + - aws_secret - add pagination for ``bypath`` functionality (https://github.com/ansible-collections/amazon.aws/pull/591). + - ec2_instance - Fix the scope of the deprecation warning so that it is not shown when ``state`` + is ``absent`` (https://github.com/ansible-collections/amazon.aws/pull/719). + - ec2_vpc_route_table - support associating internet gateways (https://github.com/ansible-collections/amazon.aws/pull/690). + - module_utils.elbv2 - Add support for alb specific attributes and compare_elb_attributes + method to support check_mode in module_utils.elbv2 (https://github.com/ansible-collections/amazon.aws/pull/696). + - s3_bucket - Add support for enforced bucket owner object ownership (https://github.com/ansible-collections/amazon.aws/pull/694). + fragments: + - 591-aws_secrets-handle-pagination.yml + - 619-inventory-aws_ec2-assume-arn-role-when-listing-regions.yml + - 626-ec2_vol-iops-when-source-does-not-have-iops.yml + - 631-ec2_vpc_net-check_mode.yml + - 690-ec2_vpc_route_table-associate-igw.yml + - 691-ec2_vpc_igw-fix-null-igw-error.yml + - 694-s3_bucket-owner_enforcement.yml + - 695-ec2_vpc_igw-fix-nonetype-with-paginator.yml + - 696-elbv2-support-alb-attributes.yml + - 719-ec2_instance-fix-deprecation-warning-scope.yml + release_date: '2022-04-05' + 3.3.0: + changes: + bugfixes: + - Include ``PSF-license.txt`` file for ``plugins/module_utils/_version.py``. + - aws_account_attribute lookup plugin - fix linting errors in documentation + data (https://github.com/ansible-collections/amazon.aws/pull/701). + - aws_ec2 inventory plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701). + - aws_rds inventory plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701). + - aws_resource_actions callback plugin - fix linting errors in documentation + data (https://github.com/ansible-collections/amazon.aws/pull/701). + - aws_secret lookup plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701). + - aws_service_ip_ranges lookup plugin - fix linting errors in documentation + data (https://github.com/ansible-collections/amazon.aws/pull/701). + - aws_ssm - Fix environment variables for client configuration (e.g., AWS_PROFILE, + AWS_ACCESS_KEY_ID) (https://github.com/ansible-collections/amazon.aws/pull/837). + - aws_ssm lookup plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701). + - ec2_instance - fix the module being broken on Python 3.8 (dict keys modified + during iteration) (https://github.com/ansible-collections/amazon.aws/issues/709). + - module_utils.rds - Add a waiter for promoting a read replica to fix an idempotency + issue (https://github.com/ansible-collections/amazon.aws/pull/714). + - module_utils.rds - Catch InvalidDBSecurityGroupStateFault when modifying a + db instance (https://github.com/ansible-collections/amazon.aws/pull/776). + - module_utils.s3 - Update validate_bucket_name minimum length to 3 (https://github.com/ansible-collections/amazon.aws/pull/802). + minor_changes: + - aws_ec2 inventory - Allow for literal strings in hostname that don't match + filter parameters in ec2 describe-instances (https://github.com/ansible-collections/amazon.aws/pull/826).
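The 3.2.0 ``aws_secret`` entry above notes that ``bypath`` retrieval now paginates; the calling convention is unchanged, as in this sketch (the path prefix is illustrative):

```yaml
# Fetch every secret stored under a path prefix in a single lookup.
- name: Read a tree of secrets
  ansible.builtin.debug:
    msg: "{{ lookup('amazon.aws.aws_secret', 'myapp/config/', bypath=True) }}"
```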
+ - aws_ssm - Add support for ``endpoint`` parameter (https://github.com/ansible-collections/amazon.aws/pull/837). + - module_utils.rds - add retry_codes to get_rds_method_attribute return data + to use in call_method and add unit tests (https://github.com/ansible-collections/amazon.aws/pull/776). + - module_utils.rds - refactor to utilize get_rds_method_attribute return data + (https://github.com/ansible-collections/amazon.aws/pull/776). + - module_utils - add new aliases ``aws_session_token`` and ``session_token`` + to the ``security_token`` parameter to be more in-line with the boto SDK (https://github.com/ansible-collections/amazon.aws/pull/631). + - module_utils.rds - Add support and unit tests for addition/removal of IAM + roles to/from a db instance in module_utils.rds with waiters (https://github.com/ansible-collections/amazon.aws/pull/714). + fragments: + - 655-aws_ec2-aws_rds-add-support-for-hostvars_prefix-and-hostvars_suffix.yml + - 709-ec_2_instance-python-3-8-compatibility.yml + - 714-module_util_rds-support-iam-roles-add-waiters.yml + - 776-module_util_rds-add-extra-retry-codes-refactor.yml + - 802-update-s3-module_util-validate_bucket_name-to-accept-3-character-bucket-name.yml + - 826-inventory-aws_ec2-allow-literal-string-in-hostname.yml + - 837-aws_ssm-envars.yml + - psf-license.yml + - validate-plugins.yml + release_date: '2022-05-26' + 3.3.1: + release_date: '2022-06-22' + 3.4.0: + changes: + bugfixes: + - elb_application_lb - fix ``KeyError`` when balancing across two Target Groups + (https://github.com/ansible-collections/community.aws/issues/1089). + - elb_classic_lb - fix ``'NoneType' object has no attribute`` bug when creating + a new ELB in check mode with a health check (https://github.com/ansible-collections/amazon.aws/pull/915). + - elb_classic_lb - fix ``'NoneType' object has no attribute`` bug when creating + a new ELB using security group names (https://github.com/ansible-collections/amazon.aws/issues/914). + minor_changes: + - ec2_instance - expanded the use of the automatic retries on temporary failures + (https://github.com/ansible-collections/amazon.aws/issues/927). + fragments: + - 1089-elb_application_lb-ForwardConfig-KeyError.yml + - 914-elb_classic_lb-security_group_names.yml + - 927-ec2_instance-retries.yml + release_date: '2022-08-02' + 3.5.0: + changes: + bugfixes: + - ec2_metadata_facts - fix ``'NoneType' object is not callable`` exception when + using Ansible 2.13+ (https://github.com/ansible-collections/amazon.aws/issues/942). + minor_changes: + - ec2_security_group - set type as ``list`` for rules->group_name as it can + accept both ``str`` and ``list`` (https://github.com/ansible-collections/amazon.aws/pull/971). + release_summary: 'Following the release of amazon.aws 5.0.0, 3.5.0 is a bugfix + release and the final planned release for the 3.x series. + + ' + fragments: + - 638-ec2_security_group_group_name_datatype.yml + - 943-ec2_metadata_facts-fix-NoneType-callable.yml + - RELEASE-3.5.0.yml + - unit-tests_test_rds_py37_only.yaml + release_date: '2022-10-06' + 4.0.0: + changes: + breaking_changes: + - Tags beginning with ``aws:`` will not be removed when purging tags; these + tags are reserved by Amazon and may not be updated or deleted (https://github.com/ansible-collections/amazon.aws/issues/817). + - amazon.aws collection - the ``profile`` parameter is now mutually exclusive + with the ``aws_access_key``, ``aws_secret_key`` and ``security_token`` parameters + (https://github.com/ansible-collections/amazon.aws/pull/834).
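Given the ``profile`` exclusivity change above, a task must pick one credential style; a sketch of the profile-only form (the profile name is illustrative):

```yaml
- name: Authenticate with a named profile only
  amazon.aws.aws_caller_info:
    profile: my-dev-profile   # mutually exclusive with aws_access_key/aws_secret_key/security_token
```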
+ - aws_az_info - the module alias ``aws_az_facts`` was deprecated in Ansible + 2.9 and has now been removed (https://github.com/ansible-collections/amazon.aws/pull/832). + - aws_s3 - the default value for ``overwrite`` has been changed to ``different`` + instead of ``always`` so that the module is idempotent by default (https://github.com/ansible-collections/amazon.aws/issues/811). + - aws_ssm - ``on_denied`` and ``on_missing`` now both default to ``error``, for consistency + with both aws_secret and the base Lookup class (https://github.com/ansible-collections/amazon.aws/issues/617). + - ec2 - The ``ec2`` module has been removed in release 4.0.0 and replaced by + the ``ec2_instance`` module (https://github.com/ansible-collections/amazon.aws/pull/630). + - ec2_vpc_igw_info - The default value for ``convert_tags`` has been changed + to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/835). + - elb_classic_lb - the ``ec2_elb`` fact has been removed (https://github.com/ansible-collections/amazon.aws/pull/827). + - module_utils - Support for the original AWS SDK, aka ``boto``, has been removed, + including all relevant helper functions. All modules should now use the ``boto3``/``botocore`` + AWS SDK (https://github.com/ansible-collections/amazon.aws/pull/630) + bugfixes: + - ec2_group - fix uncaught exception when running with ``--diff`` and ``--check`` + to create a new security group (https://github.com/ansible-collections/amazon.aws/issues/440). + - ec2_instance - Add a condition to handle the default ``instance_type`` value + to fix instance creation breaking when using a launch template (https://github.com/ansible-collections/amazon.aws/pull/587). + - ec2_instance - raise an error when lacking permission to stop an instance when + ``state`` is set to ``rebooted`` (https://github.com/ansible-collections/amazon.aws/pull/671). + - ec2_vpc_igw - use gateway_id rather than filters to paginate if possible to + fix 'NoneType' object is not subscriptable error (https://github.com/ansible-collections/amazon.aws/pull/766). + - ec2_vpc_net - fix a bug where CIDR configuration would be updated in check + mode (https://github.com/ansible/ansible/issues/62678). + - ec2_vpc_net - fix a bug where the module would get stuck if DNS options were + updated in check mode (https://github.com/ansible/ansible/issues/62677). + - elb_classic_lb - modify the return value of _format_listeners method to resolve + a failure creating HTTPS listeners (https://github.com/ansible-collections/amazon.aws/pull/860). + deprecated_features: + - aws_s3 - The ``S3_URL`` alias for the s3_url option has been deprecated and + will be removed in release 5.0.0 (https://github.com/ansible-collections/community.aws/pull/795). + - ec2_ami - The ``DeviceName`` alias for the device_name option has been deprecated + and will be removed in release 5.0.0 (https://github.com/ansible-collections/community.aws/pull/795). + - ec2_ami - The ``NoDevice`` alias for the no_device option has been deprecated + and will be removed in release 5.0.0 (https://github.com/ansible-collections/community.aws/pull/795). + - ec2_ami - The ``VirtualName`` alias for the virtual_name option has been deprecated + and will be removed in release 5.0.0 (https://github.com/ansible-collections/community.aws/pull/795). + - ec2_ami - the current default value of ``False`` for ``purge_tags`` has been + deprecated and will be updated in release 5.0.0 to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/846).
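To keep today's behaviour across the default flip announced above, ``purge_tags`` can be pinned explicitly. A sketch with a placeholder AMI ID:

```yaml
- name: Add tags to an AMI without purging unmanaged tags
  amazon.aws.ec2_ami:
    image_id: ami-0123456789abcdef0   # placeholder AMI ID
    state: present
    tags:
      Environment: dev
    purge_tags: false                 # becomes the non-default choice in release 5.0.0
```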
+ - ec2_instance - The default value for ``instance_type`` has been deprecated; in + a future release you must set an ``instance_type`` or a ``launch_template`` (https://github.com/ansible-collections/amazon.aws/pull/587). + - ec2_instance - the current default value of ``False`` for ``purge_tags`` has + been deprecated and will be updated in release 5.0.0 to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/849). + - ec2_key - the current default value of ``False`` for ``purge_tags`` has been + deprecated and will be updated in release 5.0.0 to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/846). + - ec2_vol - the current default value of ``False`` for ``purge_tags`` has been + deprecated and will be updated in release 5.0.0 to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/846). + - ec2_vpc_dhcp_option_info - The ``DhcpOptionIds`` alias for the dhcp_option_ids + option has been deprecated and will be removed in release 5.0.0 (https://github.com/ansible-collections/community.aws/pull/795). + - ec2_vpc_dhcp_option_info - The ``DryRun`` alias for the dry_run option has + been deprecated and will be removed in release 5.0.0 (https://github.com/ansible-collections/community.aws/pull/795). + - ec2_vpc_endpoint - the current default value of ``False`` for ``purge_tags`` + has been deprecated and will be updated in release 5.0.0 to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/846). + - ec2_vpc_net - the current default value of ``False`` for ``purge_tags`` has + been deprecated and will be updated in release 5.0.0 to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/848). + - ec2_vpc_route_table - the current default value of ``False`` for ``purge_tags`` + has been deprecated and will be updated in release 5.0.0 to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/846). + - s3_bucket - The ``S3_URL`` alias for the s3_url option has been deprecated + and will be removed in release 5.0.0 (https://github.com/ansible-collections/community.aws/pull/795). + - s3_object - Support for creation and deletion of S3 buckets has been deprecated. Please + use the ``amazon.aws.s3_bucket`` module to create and delete buckets (https://github.com/ansible-collections/amazon.aws/pull/869). + major_changes: + - amazon.aws collection - The amazon.aws collection has dropped support for + ``botocore<1.20.0`` and ``boto3<1.17.0``. Most modules will continue to work + with older versions of the AWS SDK; however, compatibility with older versions + of the SDK is not guaranteed and will not be tested. When using older versions + of the SDK, a warning will be emitted by Ansible (https://github.com/ansible-collections/amazon.aws/pull/574). + minor_changes: + - aws_s3 - Add ``validate_bucket_name`` option, to control bucket name validation + (https://github.com/ansible-collections/amazon.aws/pull/615). + - aws_s3 - The ``aws_s3`` module has been renamed to ``s3_object`` (https://github.com/ansible-collections/amazon.aws/pull/869). + - aws_s3 - ``resource_tags`` has been added as an alias for the ``tags`` parameter + (https://github.com/ansible-collections/amazon.aws/pull/845). + - ec2_eni - Change parameter ``device_index`` data type to string when passing + to ``describe_network_interfaces`` api call (https://github.com/ansible-collections/amazon.aws/pull/877). + - ec2_eni - ``resource_tags`` has been added as an alias for the ``tags`` parameter + (https://github.com/ansible-collections/amazon.aws/pull/845).
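The ``resource_tags`` alias above is accepted anywhere ``tags`` is. An ``ec2_eni`` sketch with a placeholder ID:

```yaml
- name: Tag an existing ENI via the new resource_tags alias
  amazon.aws.ec2_eni:
    eni_id: eni-0123456789abcdef0   # placeholder ENI ID
    resource_tags:                  # alias of the tags parameter
      Owner: network-team
```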
+ - ec2_group - add ``egress_rules`` as an alias for ``rules_egress`` (https://github.com/ansible-collections/amazon.aws/pull/878). + - ec2_group - add ``purge_egress_rules`` as an alias for ``purge_rules_egress`` + (https://github.com/ansible-collections/amazon.aws/pull/878). + - ec2_instance - Add missing ``metadata_options`` parameters (https://github.com/ansible-collections/amazon.aws/pull/715). + - ec2_key - ``resource_tags`` has been added as an alias for the ``tags`` parameter + (https://github.com/ansible-collections/amazon.aws/pull/845). + - ec2_vpc_net - add support for managing VPCs by ID (https://github.com/ansible-collections/amazon.aws/pull/848). + - ec2_vpc_subnet - add support for OutpostArn param (https://github.com/ansible-collections/amazon.aws/pull/598). + - elb_classic_lb - ``resource_tags`` has been added as an alias for the ``tags`` + parameter (https://github.com/ansible-collections/amazon.aws/pull/845). + - s3_bucket - Add ``validate_bucket_name`` option, to control bucket name validation + (https://github.com/ansible-collections/amazon.aws/pull/615). + - s3_bucket - ``resource_tags`` has been added as an alias for the ``tags`` + parameter (https://github.com/ansible-collections/amazon.aws/pull/845). + removed_features: + - cloudformation - the ``template_format`` option has been removed. It has been + ignored by the module since Ansible 2.3 (https://github.com/ansible-collections/amazon.aws/pull/833). + - ec2_key - the ``wait_timeout`` option had no effect, was deprecated in release + 1.0.0, and has now been removed (https://github.com/ansible-collections/amazon.aws/pull/830). + - ec2_key - the ``wait`` option had no effect, was deprecated in release 1.0.0, + and has now been removed (https://github.com/ansible-collections/amazon.aws/pull/830). + - ec2_tag - the previously deprecated state ``list`` has been removed. To list + tags on an EC2 resource the ``ec2_tag_info`` module can be used (https://github.com/ansible-collections/amazon.aws/pull/829). + - ec2_vol - the previously deprecated state ``list`` has been removed. To list + volumes the ``ec2_vol_info`` module can be used (https://github.com/ansible-collections/amazon.aws/pull/828). + - module_utils.batch - the class ``ansible_collections.amazon.aws.plugins.module_utils.batch.AWSConnection`` + has been removed. Please use ``AnsibleAWSModule.client()`` instead (https://github.com/ansible-collections/amazon.aws/pull/831). 
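With ``state=list`` removed from ``ec2_tag`` as noted above, listing moves to the info module. A sketch with a placeholder instance ID:

```yaml
- name: List the tags on an instance
  amazon.aws.ec2_tag_info:
    resource: i-0123456789abcdef0   # placeholder instance ID
  register: instance_tags
```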
+ fragments: + - 108-ec2_vol-state-list-remove.yml + - 151-profile-mutually-exclusive.yml + - 318-ec2_vpc_igw_info-convert-tags.yml + - 552-elb_classic_lb-fact-remove.yml + - 57613-facts.yml + - 587-ec2_instance-default-instance-type-launch-template.yml + - 598-ec2_vpc_subnet-add-outpostarn-support.yml + - 615-s3-validate_bucket_name.yml + - 617-aws_ssm-on_missing-and-on-denied-now-default-to-error.yml + - 630-remove-boto.yml + - 64230-ec2_key-remove-unused.yml + - 64368-cloudformation.yml + - 66840-ec2_tag-remove-list.yml + - 67191-remove-AWSConnection.yml + - 715-ec2-instance-metadata-options.yml + - 756-ec2_instance-raise-an-error-when-missing-permission.yml + - 766-ec2_vpc_igw-use-InternetGatewayIds-not-filters.yml + - 795-deprecate-aliases.yml + - 811-aws_s3-ovewrite-default.yml + - 817-skip_purge_aws.yaml + - 845-tagging.yml + - 846-tagging-deprecate.yml + - 848-ec2_vpc_net-tagging-and-id.yml + - 849-ec2_instance-tagging-deprecate.yml + - 860-elb_classic_lb-create-https-listeners.yml + - 869-s3_object.yml + - 877-ec2_eni-device_index-data-change-type-to-str.yml + - 878-ec2_group.yml + - release-4--botocore.yml + release_date: '2022-06-22' + 4.1.0: + changes: + bugfixes: + - aws_ec2 - ensure the correct number of hosts is returned when tags are used + as hostnames (https://github.com/ansible-collections/amazon.aws/pull/862). + - elb_application_lb - fix ``KeyError`` when balancing across two Target Groups + (https://github.com/ansible-collections/community.aws/issues/1089). + - elb_classic_lb - fix ``'NoneType' object has no attribute`` bug when creating + a new ELB in check mode with a health check (https://github.com/ansible-collections/amazon.aws/pull/915). + - elb_classic_lb - fix ``'NoneType' object has no attribute`` bug when creating + a new ELB using security group names (https://github.com/ansible-collections/amazon.aws/issues/914). + deprecated_features: + - amazon.aws collection - due to the AWS SDKs announcing the end of support + for Python versions lower than 3.7 (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/) + support for Python versions lower than 3.7 by this collection has been deprecated and + will be removed in a release after 2023-05-31 (https://github.com/ansible-collections/amazon.aws/pull/935). + minor_changes: + - ec2_instance - expanded the use of the automatic retries on temporary failures + (https://github.com/ansible-collections/amazon.aws/issues/927). + - s3_bucket - updated module to enable support for setting S3 Bucket Keys for + SSE-KMS (https://github.com/ansible-collections/amazon.aws/pull/882). + fragments: + - 1089-elb_application_lb-ForwardConfig-KeyError.yml + - 862-aws_ec2-hostnames.yml + - 882-s3_bucket-bucket-keys.yml + - 914-elb_classic_lb-security_group_names.yml + - 927-ec2_instance-retries.yml + - python.yml + release_date: '2022-08-02' + 4.2.0: + changes: + deprecated_features: + - module_utils.cloud - removal of ``CloudRetry.backoff`` has been delayed + until release 6.0.0. It is recommended to update custom modules to use ``jittered_backoff`` + or ``exponential_backoff`` instead (https://github.com/ansible-collections/amazon.aws/pull/951). + minor_changes: + - ec2_security_group - set type as ``list`` for rules->group_name as it can + accept both ``str`` and ``list`` (https://github.com/ansible-collections/amazon.aws/pull/971). + - various modules - linting fixups (https://github.com/ansible-collections/amazon.aws/pull/953).
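Per the ``ec2_security_group`` typing change above, ``group_name`` inside a rule may be a single string or a list. A sketch with illustrative group names:

```yaml
- name: Allow HTTPS from two other security groups via a list-valued group_name
  amazon.aws.ec2_security_group:
    name: example-sg
    description: demo rules
    rules:
      - proto: tcp
        ports:
          - 443
        group_name:          # a plain string is also accepted here
          - example-sg-a
          - example-sg-b
```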
+ fragments: + - 638-ec2_security_group_group_name_datatype.yml + - 951-cloudretry.yml + - 965-linting.yml + release_date: '2022-09-14' + 4.3.0: + changes: + bugfixes: + - ec2_metadata_facts - fix ``'NoneType' object is not callable`` exception when + using Ansible 2.13+ (https://github.com/ansible-collections/amazon.aws/issues/942). + - 'module_utils/cloud - Fix ``ValueError: ansible_collections.amazon.aws.plugins.module_utils.core.__spec__ + is None`` error on Ansible 2.9 (https://github.com/ansible-collections/amazon.aws/issues/1083).' + minor_changes: + - ec2_instance - expanded the use of the automatic retries to ``InsufficientInstanceCapacity`` + (https://github.com/ansible-collections/amazon.aws/issues/1038). + release_summary: 'The amazon.aws 4.3.0 release includes a number of minor bug + fixes and improvements. + + Following the release of amazon.aws 5.0.0, backports to the 4.x series will + be limited to + + security issues and bugfixes. + + ' + fragments: + - 1038-ec2-insufficient-capacity.yml + - 1083-__spec__-is-None.yml + - 943-ec2_metadata_facts-fix-NoneType-callable.yml + - RELEASE-4.3.0.yml + - tests-cloud.yml + - unit-tests_test_rds_py37_only.yaml + release_date: '2022-10-06' + 5.0.0: + changes: + breaking_changes: + - amazon.aws collection - Support for ansible-core < 2.11 has been dropped (https://github.com/ansible-collections/amazon.aws/pull/1087). + - amazon.aws collection - The amazon.aws collection has dropped support for + ``botocore<1.21.0`` and ``boto3<1.18.0``. Most modules will continue to work + with older versions of the AWS SDK; however, compatibility with older versions + of the SDK is not guaranteed and will not be tested. When using older versions + of the SDK, a warning will be emitted by Ansible (https://github.com/ansible-collections/amazon.aws/pull/934). + - doc_fragments - remove minimum collection requirements from doc_fragments/aws.py + and allow pulling those from doc_fragments/aws_boto3.py instead (https://github.com/ansible-collections/amazon.aws/pull/985). + - ec2_ami - the default value for ``purge_tags`` has been changed from ``False`` + to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/916). + - ec2_ami - the parameter aliases ``DeviceName``, ``VirtualName`` and ``NoDevice`` + were previously deprecated and have been removed, please use ``device_name``, + ``virtual_name`` and ``no_device`` instead (https://github.com/ansible-collections/amazon.aws/pull/913). + - ec2_eni_info - the mutual exclusivity of the ``eni_id`` and ``filters`` parameters + is now enforced, previously ``filters`` would be ignored if ``eni_id`` was + set (https://github.com/ansible-collections/amazon.aws/pull/954). + - ec2_instance - the default value for ``purge_tags`` has been changed from + ``False`` to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/916). + - ec2_key - the default value for ``purge_tags`` has been changed from ``False`` + to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/916). + - ec2_vol - the default value for ``purge_tags`` has been changed from ``False`` + to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/916). + - ec2_vpc_dhcp_option_info - the parameter aliases ``DhcpOptionIds`` and ``DryRun`` + were previously deprecated and have been removed, please use ``dhcp_options_ids`` + and ``dry_run`` instead (https://github.com/ansible-collections/amazon.aws/pull/913).
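Following the ``ec2_eni_info`` change above, pass exactly one of ``eni_id`` or ``filters``; mixing them is now rejected. A sketch with a placeholder ID:

```yaml
- name: Look up a single ENI by ID (do not combine with filters)
  amazon.aws.ec2_eni_info:
    eni_id: eni-0123456789abcdef0   # placeholder ENI ID
```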
+      - ec2_vpc_endpoint - the default value for ``purge_tags`` has been changed from ``False`` to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/916).
+      - ec2_vpc_net - the default value for ``purge_tags`` has been changed from ``False`` to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/916).
+      - ec2_vpc_route_table - the default value for ``purge_tags`` has been changed from ``False`` to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/916).
+      - s3_bucket - the previously deprecated alias ``S3_URL`` for the ``s3_url`` parameter has been removed. Playbooks should be updated to use ``s3_url`` (https://github.com/ansible-collections/amazon.aws/pull/908).
+      - s3_object - the previously deprecated alias ``S3_URL`` for the ``s3_url`` parameter has been removed. Playbooks should be updated to use ``s3_url`` (https://github.com/ansible-collections/amazon.aws/pull/908).
+      bugfixes:
+      - aws_ec2 - address a regression introduced in 4.1.0 (https://github.com/ansible-collections/amazon.aws/pull/862) that caused the presence of duplicated hosts in the inventory.
+      - cloudtrail - Fix ``KeyError`` caused by looking up ``TagList`` instead of ``TagsList`` (https://github.com/ansible-collections/amazon.aws/issues/1088).
+      - ec2_instance - Only show the deprecation warning for the default value of ``instance_type`` when ``count`` or ``exact_count`` are specified (https://github.com/ansible-collections/amazon.aws/issues/980).
+      - ec2_metadata_facts - fix ``'NoneType' object is not callable`` exception when using Ansible 2.13+ (https://github.com/ansible-collections/amazon.aws/issues/942).
+      - module_utils/botocore - fix ``object has no attribute 'fail'`` error in error handling (https://github.com/ansible-collections/amazon.aws/pull/1045).
+      - module_utils/elbv2 - fixes ``KeyError`` when using ``UseExistingClientSecret`` rather than ``ClientSecret`` (https://github.com/ansible-collections/amazon.aws/pull/940).
+      - module_utils/elbv2 - improvements to idempotency when comparing listeners (https://github.com/ansible-collections/community.aws/issues/604).
+      - s3_object - also use ``ignore_nonexistent_bucket`` when listing a bucket (https://github.com/ansible-collections/amazon.aws/issues/966).
+      deprecated_features:
+      - amazon.aws collection - due to the AWS SDKs announcing the end of support for Python less than 3.7 (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/) support for Python less than 3.7 by this collection has been deprecated and will be removed in a release after 2023-05-31 (https://github.com/ansible-collections/amazon.aws/pull/935).
+      - inventory/aws_ec2 - the ``include_extra_api_calls`` option is now deprecated, its value is silently ignored (https://github.com/ansible-collections/amazon.aws/pull/1097).
+      major_changes:
+      - autoscaling_group - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.autoscaling_group``.
+      - autoscaling_group_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.autoscaling_group_info``.
+      - cloudtrail - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.cloudtrail``.
+ - cloudwatch_metric_alarm - The module has been migrated from the ``community.aws`` + collection. Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.cloudwatch_metric_alarm``. + - cloudwatchevent_rule - The module has been migrated from the ``community.aws`` + collection. Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.cloudwatchevent_rule``. + - cloudwatchlogs_log_group - The module has been migrated from the ``community.aws`` + collection. Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.cloudwatchlogs_log_group``. + - cloudwatchlogs_log_group_info - The module has been migrated from the ``community.aws`` + collection. Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.cloudwatchlogs_log_group_info``. + - cloudwatchlogs_log_group_metric_filter - The module has been migrated from + the ``community.aws`` collection. Playbooks using the Fully Qualified Collection + Name for this module should be updated to use ``amazon.aws.cloudwatchlogs_log_group_metric_filter``. + - ec2_eip - The module has been migrated from the ``community.aws`` collection. + Playbooks using the Fully Qualified Collection Name for this module should + be updated to use ``amazon.aws.ec2_eip``. + - ec2_eip_info - The module has been migrated from the ``community.aws`` collection. + Playbooks using the Fully Qualified Collection Name for this module should + be updated to use ``amazon.aws.ec2_eip_info``. + - elb_application_lb - The module has been migrated from the ``community.aws`` + collection. Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.elb_application_lb``. + - elb_application_lb_info - The module has been migrated from the ``community.aws`` + collection. Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.elb_application_lb_info``. + - execute_lambda - The module has been migrated from the ``community.aws`` collection. + Playbooks using the Fully Qualified Collection Name for this module should + be updated to use ``amazon.aws.execute_lambda``. + - iam_policy - The module has been migrated from the ``community.aws`` collection. + Playbooks using the Fully Qualified Collection Name for this module should + be updated to use ``amazon.aws.iam_policy``. + - iam_policy_info - The module has been migrated from the ``community.aws`` + collection. Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.iam_policy_info``. + - iam_user - The module has been migrated from the ``community.aws`` collection. + Playbooks using the Fully Qualified Collection Name for this module should + be updated to use ``amazon.aws.iam_user``. + - iam_user_info - The module has been migrated from the ``community.aws`` collection. + Playbooks using the Fully Qualified Collection Name for this module should + be updated to use ``amazon.aws.iam_user_info``. + - kms_key - The module has been migrated from the ``community.aws`` collection. + Playbooks using the Fully Qualified Collection Name for this module should + be updated to use ``amazon.aws.kms_key``. + - kms_key_info - The module has been migrated from the ``community.aws`` collection. 
+ Playbooks using the Fully Qualified Collection Name for this module should + be updated to use ``amazon.aws.kms_key_info``. + - lambda - The module has been migrated from the ``community.aws`` collection. + Playbooks using the Fully Qualified Collection Name for this module should + be updated to use ``amazon.aws.lambda``. + - lambda_alias - The module has been migrated from the ``community.aws`` collection. + Playbooks using the Fully Qualified Collection Name for this module should + be updated to use ``amazon.aws.lambda_alias``. + - lambda_event - The module has been migrated from the ``community.aws`` collection. + Playbooks using the Fully Qualified Collection Name for this module should + be updated to use ``amazon.aws.lambda_event``. + - lambda_execute - The module has been migrated from the ``community.aws`` collection. + Playbooks using the Fully Qualified Collection Name for this module should + be updated to use ``amazon.aws.lambda_execute``. + - lambda_info - The module has been migrated from the ``community.aws`` collection. + Playbooks using the Fully Qualified Collection Name for this module should + be updated to use ``amazon.aws.lambda_info``. + - lambda_policy - The module has been migrated from the ``community.aws`` collection. + Playbooks using the Fully Qualified Collection Name for this module should + be updated to use ``amazon.aws.lambda_policy``. + - rds_cluster - The module has been migrated from the ``community.aws`` collection. + Playbooks using the Fully Qualified Collection Name for this module should + be updated to use ``amazon.aws.rds_cluster``. + - rds_cluster_info - The module has been migrated from the ``community.aws`` + collection. Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.rds_cluster_info``. + - rds_cluster_snapshot - The module has been migrated from the ``community.aws`` + collection. Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.rds_cluster_snapshot``. + - rds_instance - The module has been migrated from the ``community.aws`` collection. + Playbooks using the Fully Qualified Collection Name for this module should + be updated to use ``amazon.aws.rds_instance``. + - rds_instance_info - The module has been migrated from the ``community.aws`` + collection. Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.rds_instance_info``. + - rds_instance_snapshot - The module has been migrated from the ``community.aws`` + collection. Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.rds_instance_snapshot``. + - rds_option_group - The module has been migrated from the ``community.aws`` + collection. Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.rds_option_group``. + - rds_option_group_info - The module has been migrated from the ``community.aws`` + collection. Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.rds_option_group_info``. + - rds_param_group - The module has been migrated from the ``community.aws`` + collection. Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.rds_param_group``. + - rds_snapshot_info - The module has been migrated from the ``community.aws`` + collection. 
Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.rds_snapshot_info``.
+      - rds_subnet_group - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.rds_subnet_group``.
+      - route53 - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.route53``.
+      - route53_health_check - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.route53_health_check``.
+      - route53_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.route53_info``.
+      - route53_zone - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.route53_zone``.
+      minor_changes:
+      - Ability to record and replay the API interaction of a module for testing purposes. The feature is showcased with an example (https://github.com/ansible-collections/amazon.aws/pull/998).
+      - Remove the empty __init__.py files from the distribution, they are no longer required (https://github.com/ansible-collections/amazon.aws/pull/1018).
+      - amazon.aws modules - the ``ec2_url`` parameter has been renamed to ``endpoint_url`` for consistency, ``ec2_url`` remains as an alias (https://github.com/ansible-collections/amazon.aws/pull/992).
+      - aws_caller_info - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/968).
+      - aws_ec2 - introduce the ``allow_duplicated_hosts`` configuration key (https://github.com/ansible-collections/amazon.aws/pull/1026).
+      - cloudformation - avoid catching ``Exception``, catch more specific errors instead (https://github.com/ansible-collections/amazon.aws/pull/968).
+      - cloudwatch_metric_alarm_info - Added a new module that describes CloudWatch metric alarms (https://github.com/ansible-collections/amazon.aws/pull/988).
+      - ec2_group - The ``ec2_group`` module has been renamed to ``ec2_security_group``, ``ec2_group`` remains as an alias (https://github.com/ansible-collections/amazon.aws/pull/897).
+      - ec2_group_info - The ``ec2_group_info`` module has been renamed to ``ec2_security_group_info``, ``ec2_group_info`` remains as an alias (https://github.com/ansible-collections/amazon.aws/pull/897).
+      - ec2_instance - Add hibernation_options and volumes->ebs->encrypted keys to support stopping and hibernating instances (https://github.com/ansible-collections/amazon.aws/pull/972).
+      - ec2_instance - expanded the use of the automatic retries to ``InsufficientInstanceCapacity`` (https://github.com/ansible-collections/amazon.aws/issues/1038).
+      - ec2_metadata_facts - avoid catching ``Exception``, catch more specific errors instead (https://github.com/ansible-collections/amazon.aws/pull/968).
+      - ec2_security_group - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/968).
+      - ec2_vpc_endpoint - avoid catching ``Exception``, catch more specific errors instead (https://github.com/ansible-collections/amazon.aws/pull/968).
+      - ec2_vpc_nat_gateway - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/968).
+      - ec2_vpc_net_info - handle classic link check for shared VPCs by throwing a warning instead of an error (https://github.com/ansible-collections/amazon.aws/pull/984).
+      - module_utils/acm - Move to jittered backoff (https://github.com/ansible-collections/amazon.aws/pull/946).
+      - module_utils/elbv2 - ensures that ``ip_address_type`` is set on creation rather than re-setting it after creation (https://github.com/ansible-collections/amazon.aws/pull/940).
+      - module_utils/elbv2 - uses new waiters with retries for temporary failures (https://github.com/ansible-collections/amazon.aws/pull/940).
+      - module_utils/waf - Move to jittered backoff (https://github.com/ansible-collections/amazon.aws/pull/946).
+      - module_utils/waiters - Add waiters to manage eks_nodegroup module (https://github.com/ansible-collections/community.aws/pull/1415).
+      - s3_bucket - ``rgw`` was added as an alias for the ``ceph`` parameter for consistency with the ``s3_object`` module (https://github.com/ansible-collections/amazon.aws/pull/994).
+      - s3_bucket - the ``s3_url`` parameter was merged into the ``endpoint_url`` parameter, ``s3_url`` remains as an alias (https://github.com/ansible-collections/amazon.aws/pull/994).
+      - s3_object - added the ``sig_v4`` parameter, enabling the user to opt in to signature version 4 for download/get operations (https://github.com/ansible-collections/amazon.aws/pull/1014).
+      - s3_object - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/968).
+      - s3_object - the ``rgw`` parameter was renamed to ``ceph`` for consistency with the ``s3_bucket`` module, ``rgw`` remains as an alias (https://github.com/ansible-collections/amazon.aws/pull/994).
+      - s3_object - the ``s3_url`` parameter was merged into the ``endpoint_url`` parameter, ``s3_url`` remains as an alias (https://github.com/ansible-collections/amazon.aws/pull/994).
+      - s3_object - updated module to add support for handling file upload to a bucket with ACL disabled (https://github.com/ansible-collections/amazon.aws/pull/921).
+      - s3_object_info - Added a new module that describes S3 objects (https://github.com/ansible-collections/amazon.aws/pull/977).
+      release_summary: In this release we promoted many community modules to Red Hat supported status. Those modules have been moved from the community.aws to the amazon.aws collection. This release also brings some new features, bugfixes, breaking changes and deprecated features. The amazon.aws collection has dropped support for ``botocore<1.21.0`` and ``boto3<1.18.0``. Support for ``ansible-core<2.11`` has also been dropped.
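Since the 5.0.0 entries above revolve around module migrations, a before/after playbook sketch may help; it is purely illustrative, and ``example-trail`` and ``example-logs`` are placeholder values, not names from this changelog:

```yaml
# Hypothetical FQCN update for a migrated module (cloudtrail is one of the
# modules listed above); only the collection prefix changes.

# Before 5.0.0:
- name: Ensure a CloudTrail trail exists
  community.aws.cloudtrail:
    state: present
    name: example-trail
    s3_bucket_name: example-logs

# From 5.0.0 onwards:
- name: Ensure a CloudTrail trail exists
  amazon.aws.cloudtrail:
    state: present
    name: example-trail
    s3_bucket_name: example-logs
```

The same one-line prefix substitution should apply to every module in the migration list; the task arguments themselves are not affected by the move.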
+    fragments:
+    - 1014-add-support-for-signature-version-4-to-the-s3_object-geturl-mode.yml
+    - 1038-ec2-insufficient-capacity.yml
+    - 1045-botocore_fail.yml
+    - 1047-docsite-changelog.yml
+    - 1055-bools.yml
+    - 1061-legacy_aliases.yml
+    - 1061-sanity.yml
+    - 1070-gitleaks-1.yml
+    - 1073-gitleaks-2.yml
+    - 1087-old-ansible.yml
+    - 1088-fix_key_error_TagsList_cloudtrail.yml
+    - 1094-revert.yml
+    - 604-elb_network_lb.yml
+    - 897-ec2_security_group.yml
+    - 908-s3_url.yml
+    - 913-deprecations.yml
+    - 916-purge_tags.yml
+    - 921-s3_object-handle-file-upload-to-acl-disabled-bucket.yml
+    - 943-ec2_metadata_facts-fix-NoneType-callable.yml
+    - 946-retries.yml
+    - 954-linting.yml
+    - 966-ignore_nonexistent_bucket_list.yml
+    - 968-sanity.yml
+    - 972-ec2_instance-stop-hibernate.yml
+    - 975-waiters-eks_nodegroup.yml
+    - 977-s3_object_info-add-new-module.yml
+    - 978-bools.yml
+    - 979-ec2_instance_tests_split.yaml
+    - 980-instance-type-deprecation-warning.yml
+    - 984-ec2_vpc_net_info_shared_vpc.yml
+    - 985-cleanup_doc_fragments.yml
+    - 988-cloudwatch_metric_alarm_info-add-new-module.yml
+    - 992-ec2_url.yml
+    - 994-s3_url.yml
+    - botocore.yml
+    - ec2_instance-test-duration.yaml
+    - inventory-aws_ec2-avoid-py38-syntax.yaml
+    - inventory-aws_ec2-include_extra_api_calls-is-deprecated.yaml
+    - inventory-aws_ec2_unit-tests.yaml
+    - inventory-multi-hosts.yaml
+    - migrate_autoscaling_group.yml
+    - migrate_cloudtrail.yml
+    - migrate_cloudtrail_tests.yml
+    - migrate_cloudwatch_metric_alarm.yml
+    - migrate_cloudwatchevent.yml
+    - migrate_cloudwatchlogs.yml
+    - migrate_ec2_eip.yml
+    - migrate_elb_application_lb.yml
+    - migrate_iam_policy.yml
+    - migrate_iam_user.yml
+    - migrate_kms_key.yml
+    - migrate_lambda.yml
+    - migrate_rds_cluster.yml
+    - migrate_rds_instance.yml
+    - migrate_rds_option_group.yml
+    - migrate_rds_param_group.yml
+    - migrate_rds_snapshot.yml
+    - migrate_rds_subnet_group.yml
+    - migrate_route53.yml
+    - migrate_route53_module_utils.yml
+    - placebo_record.yaml
+    - python.yml
+    - release_summary.yml
+    - remove__init__.py_file.yaml
+    - test-python3-unicode-string.yaml
+    - tests-cloud.yml
+    - unit-tests_test_rds_py37_only.yaml
+    modules:
+    - description: Gather information about trails in AWS CloudTrail.
+      name: cloudtrail_info
+      namespace: ''
+    - description: Gather information about the alarms for the specified metric
+      name: cloudwatch_metric_alarm_info
+      namespace: ''
+    - description: Gather information about objects in S3
+      name: s3_object_info
+      namespace: ''
+    release_date: '2022-10-04'
+  5.0.1:
+    changes:
+      bugfixes:
+      - ec2_vpc_net_info - fix KeyError (https://github.com/ansible-collections/amazon.aws/pull/1109).
+      - ec2_vpc_net_info - remove hardcoded ``ClassicLinkEnabled`` parameter when request for ``ClassicLinkDnsSupported`` failed (https://github.com/ansible-collections/amazon.aws/pull/1109).
+      - s3_object - be more defensive when checking the results of ``s3.get_bucket_ownership_controls`` (https://github.com/ansible-collections/amazon.aws/issues/1115).
+    fragments:
+    - 1109-ec2_vpc_net_info_keyerror.yml
+    - 1115-s3_object-scaleway.yml
+    release_date: '2022-10-06'
+  5.0.2:
+    changes:
+      bugfixes:
+      - ec2_metadata_facts - fixed ``AttributeError`` (https://github.com/ansible-collections/amazon.aws/issues/1134).
+    fragments:
+    - 1134-ec2_metadata_facts-AttributeError.yml
+    release_date: '2022-10-10'
+  5.1.0:
+    changes:
+      bugfixes:
+      - ec2_instance - fixes ``Invalid type for parameter TagSpecifications, value None`` error when tags aren't specified (https://github.com/ansible-collections/amazon.aws/issues/1148).
+      - module_utils.transformations - ensure that ``map_complex_type`` still returns transformed items if items exist that are not in the type_map (https://github.com/ansible-collections/amazon.aws/pull/1163).
+      deprecated_features:
+      - amazon.aws collection - Support for the ``EC2_ACCESS_KEY`` environment variable has been deprecated and will be removed in a release after 2024-12-01. Please use the ``access_key`` parameter or ``AWS_ACCESS_KEY_ID`` environment variable instead (https://github.com/ansible-collections/amazon.aws/pull/1172).
+      - amazon.aws collection - Support for the ``EC2_REGION`` environment variable has been deprecated and will be removed in a release after 2024-12-01. Please use the ``region`` parameter or ``AWS_REGION`` environment variable instead (https://github.com/ansible-collections/amazon.aws/pull/1172).
+      - amazon.aws collection - Support for the ``EC2_SECRET_KEY`` environment variable has been deprecated and will be removed in a release after 2024-12-01. Please use the ``secret_key`` parameter or ``AWS_SECRET_ACCESS_KEY`` environment variable instead (https://github.com/ansible-collections/amazon.aws/pull/1172).
+      - amazon.aws collection - Support for the ``EC2_SECURITY_TOKEN`` environment variable has been deprecated and will be removed in a release after 2024-12-01. Please use the ``session_token`` parameter or ``AWS_SESSION_TOKEN`` environment variable instead (https://github.com/ansible-collections/amazon.aws/pull/1172).
+      - amazon.aws collection - Support for the ``EC2_URL`` and ``S3_URL`` environment variables has been deprecated and will be removed in a release after 2024-12-01. Please use the ``endpoint_url`` parameter or ``AWS_ENDPOINT_URL`` environment variable instead (https://github.com/ansible-collections/amazon.aws/pull/1172).
+      - amazon.aws collection - The ``access_token`` alias for the ``session_token`` parameter has been deprecated and will be removed in a release after 2024-12-01. Please use the ``session_token`` name instead (https://github.com/ansible-collections/amazon.aws/pull/1172).
+      - amazon.aws collection - The ``aws_security_token`` alias for the ``session_token`` parameter has been deprecated and will be removed in a release after 2024-12-01. Please use the ``session_token`` name instead (https://github.com/ansible-collections/amazon.aws/pull/1172).
+      - amazon.aws collection - The ``ec2_access_key`` alias for the ``access_key`` parameter has been deprecated and will be removed in a release after 2024-12-01. Please use the ``access_key`` name instead (https://github.com/ansible-collections/amazon.aws/pull/1172).
+      - amazon.aws collection - The ``ec2_region`` alias for the ``region`` parameter has been deprecated and will be removed in a release after 2024-12-01. Please use the ``region`` name instead (https://github.com/ansible-collections/amazon.aws/pull/1172).
+      - amazon.aws collection - The ``ec2_secret_key`` alias for the ``secret_key`` parameter has been deprecated and will be removed in a release after 2024-12-01. Please use the ``secret_key`` name instead (https://github.com/ansible-collections/amazon.aws/pull/1172).
+      - amazon.aws collection - The ``security_token`` alias for the ``session_token`` parameter has been deprecated and will be removed in a release after 2024-12-01. Please use the ``session_token`` name instead (https://github.com/ansible-collections/amazon.aws/pull/1172).
+      - 'ec2_security_group - support for passing nested lists to ``cidr_ip`` and ``cidr_ipv6`` has been deprecated. Nested lists can be passed through the ``flatten`` filter instead, for example ``cidr_ip: ''{{ my_cidrs | flatten }}''`` (https://github.com/ansible-collections/amazon.aws/pull/1213).'
+      - module_utils.url - ``ansible_collections.amazon.aws.module_utils.urls`` is believed to be unused and has been deprecated; it will be removed in release 7.0.0.
+      minor_changes:
+      - amazon.aws collection - The ``aws_access_key`` parameter has been renamed to ``access_key``, ``access_key`` was previously an alias for this parameter and ``aws_access_key`` remains as an alias. This change should have no observable effect for users outside the module/plugin documentation (https://github.com/ansible-collections/amazon.aws/pull/1172).
+      - amazon.aws collection - The ``aws_secret_key`` parameter has been renamed to ``secret_key``, ``secret_key`` was previously an alias for this parameter and ``aws_secret_key`` remains as an alias. This change should have no observable effect for users outside the module/plugin documentation (https://github.com/ansible-collections/amazon.aws/pull/1172).
+      - amazon.aws collection - The ``security_token`` parameter has been renamed to ``session_token``; ``security_token`` remains as an alias. This change should have no observable effect for users outside the module/plugin documentation (https://github.com/ansible-collections/amazon.aws/pull/1172).
+      - aws_account_attribute lookup plugin - use ``missing_required_lib`` for a more consistent error message when boto3/botocore is not available (https://github.com/ansible-collections/amazon.aws/pull/1152).
+      - aws_ec2 inventory - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181).
+      - aws_ec2 inventory plugin - use ``missing_required_lib`` for a more consistent error message when boto3/botocore is not available (https://github.com/ansible-collections/amazon.aws/pull/1152).
+      - aws_rds inventory plugin - use ``missing_required_lib`` for a more consistent error message when boto3/botocore is not available (https://github.com/ansible-collections/amazon.aws/pull/1152).
+      - aws_secret lookup plugin - use ``missing_required_lib`` for a more consistent error message when boto3/botocore is not available (https://github.com/ansible-collections/amazon.aws/pull/1152).
+      - aws_ssm lookup plugin - use ``missing_required_lib`` for a more consistent error message when boto3/botocore is not available (https://github.com/ansible-collections/amazon.aws/pull/1152).
+      - ec2_instance - minor fix for launching an instance in a specified AZ when ``vpc_subnet_id`` is not provided (https://github.com/ansible-collections/amazon.aws/pull/1150).
+      - ec2_instance - refactor ``tower_callback`` code to handle parameter validation as part of the argument specification (https://github.com/ansible-collections/amazon.aws/pull/1199).
+      - ec2_instance - the ``instance_role`` parameter has been renamed to ``iam_instance_profile`` to better reflect what it is, ``instance_role`` remains as an alias (https://github.com/ansible-collections/amazon.aws/pull/1151).
+      - ec2_instance - the ``tower_callback`` parameter has been renamed to ``aap_callback``, ``tower_callback`` remains as an alias. This change should have no observable effect for users outside the module documentation (https://github.com/ansible-collections/amazon.aws/pull/1199).
+      - s3_object_info - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181).
+      release_summary: This release brings some minor changes, bugfixes, security fixes and deprecated features.
+      security_fixes:
+      - ec2_instance - fixes leak of password into logs when using ``tower_callback.windows=True`` and ``tower_callback.set_password`` (https://github.com/ansible-collections/amazon.aws/pull/1199).
+    fragments:
+    - 1148-build_run_instance_spec.yml
+    - 1150-ec2_instance-fix-instance-creation-in-az.yml
+    - 1152-missing-botocore.yml
+    - 1163-map_complex_type.yml
+    - 1172-credentials_parameters.yml
+    - 1181-stable-5-linting.yml
+    - 20221010-test-times.yml
+    - 20221021-ec2_instance-tower_callback.yml
+    - 20221026-pytest-forked.yml
+    - 20221027-ec2_security_group-nested.yml
+    - DEPRECATE-module_utils-urls.yml
+    - summary.yml
+    - version_added-5.yml
+    - workflows-add-intersphinx.yml
+    release_date: '2022-10-28'
+  5.2.0:
+    changes:
+      bugfixes:
+      - aws_rds - fixes a bug in the RDS inventory plugin where the config file was ignored (https://github.com/ansible-collections/amazon.aws/issues/1304).
+      - lambda - fix flaky integration test which assumes there are no other lambdas in the account (https://github.com/ansible-collections/amazon.aws/issues/1277).
+      minor_changes:
+      - amazon.aws collection - refactoring of code to use the argument specification ``fallback`` when falling back to environment variables for security credentials and AWS connection details (https://github.com/ansible-collections/amazon.aws/pull/1174).
+      - rds_instance - Split up the integration test-suite into a series of smaller tests (https://github.com/ansible-collections/amazon.aws/pull/1185).
+      - rds_instance - add support for gp3 storage type (https://github.com/ansible-collections/amazon.aws/pull/1266).
+      release_summary: A minor release containing bugfixes for the ``ec2_eni_info`` module and the ``aws_rds`` inventory plugin, as well as improvements to the ``rds_instance`` module.
+    fragments:
+    - 1174-module_params.yml
+    - 1266-rds_instance_gp3_support.yaml
+    - 1277-lambda-int-test-other-lambdas.yml
+    - 1283-aws_ec2_inventory_compose_doc_examples.yml
+    - 1304-aws_rds-config.yml
+    - 20230103-sanity-ec2_eni_info.yml
+    - 20230106-ec2_vol.yml
+    - integration_tests_max_duration_increase.yaml
+    - rds_instance_split_the_integration_test-suite.yaml
+    - rds_mariadb_version-10.6.10.yml
+    - release.yml
+    release_date: '2023-01-24'
+  5.3.0:
+    changes:
+      bugfixes:
+      - cloudtrail - support disabling encryption using ``kms_key_id`` (https://github.com/ansible-collections/amazon.aws/pull/1384).
+      - ec2_key - fix issue when trying to update existing key pair with the same key material (https://github.com/ansible-collections/amazon.aws/pull/1383).
+      - module_utils/elbv2 - fix change detection by adding default values for ``Scope`` and ``SessionTimeout`` parameters in ``authenticate-oidc`` rules (https://github.com/ansible-collections/amazon.aws/pull/1270).
+      - module_utils/elbv2 - respect ``UseExistingClientSecret`` parameter in ``authenticate-oidc`` rules (https://github.com/ansible-collections/amazon.aws/pull/1270).
+      - revert breaking change introduced in 5.2.0 when passing credentials through a mix of environment variables and parameters (https://github.com/ansible-collections/amazon.aws/issues/1353).
+      - s3_bucket - empty bucket policy was throwing a ``JSONDecodeError`` - deal with it gracefully instead (https://github.com/ansible-collections/amazon.aws/pull/1368).
+      deprecated_features:
+      - support for passing both profile and security tokens through a mix of environment variables and parameters has been deprecated and support will be removed in release 6.0.0. After release 6.0.0 it will only be possible to pass either a profile or security tokens, regardless of the mechanism used to pass them. To explicitly block a parameter coming from an environment variable pass an empty string as the parameter value. Support for passing profile and security tokens together was originally deprecated in release 1.2.0, however it was only partially implemented in release 5.0.0 (https://github.com/ansible-collections/amazon.aws/pull/1355).
+      minor_changes:
+      - ec2_instance - more consistently return ``instances`` information (https://github.com/ansible-collections/amazon.aws/pull/964).
+      - ec2_instance - remove unused import (https://github.com/ansible-collections/amazon.aws/pull/1350).
+      - ec2_key - Add unit-test coverage (https://github.com/ansible-collections/amazon.aws/pull/1288).
+      - ec2_vpc_nat_gateway - ensure allocation_id is defined before potential access (https://github.com/ansible-collections/amazon.aws/pull/1350).
+      - route53_zone - added support for associating multiple VPCs to route53 hosted zones (https://github.com/ansible-collections/amazon.aws/pull/1300).
+      - s3_bucket - add option to support creation of buckets with object lock enabled (https://github.com/ansible-collections/amazon.aws/pull/1372).
+      release_summary: This release brings some minor changes, bugfixes, and deprecated features.
+    fragments:
+    - 1226-defaults.yml
+    - 1270-elbv2-fixes.yml
+    - 1299-route53_zone-multi-vpc.yml
+    - 1347-s3-object-lock-enabled.yml
+    - 1353-revert-env-fallback.yml
+    - 1357-subnet-example.yml
+    - 1368-empty_bucket_policy.yml
+    - 1383-ec2_key-fix-idempotency-issue-when-creating-existing-key-with-same-key-material.yaml
+    - 1384-cloudtrail-disable_encryption.yml
+    - 1395-s3-encryption.yml
+    - 1400-typo.yml
+    - 20230204-sanity.yml
+    - 20230220-inventory_docs_ec2_aws.yml
+    - 389-ssm_parameter-versioned_test.yml
+    - 964-ec2_instance-return-instances.yml
+    - release_summary.yml
+    - unit-tests_test_ec2_key_only.yaml
+    release_date: '2023-03-05'
+  5.4.0:
+    changes:
+      bugfixes:
+      - ec2_metadata_facts - fix ``AttributeError`` when running the ec2_metadata_facts module on Python 2 managed nodes (https://github.com/ansible-collections/amazon.aws/issues/1358).
+      - ec2_vol - handle ec2_vol.tags when the associated instance already exists (https://github.com/ansible-collections/amazon.aws/pull/1071).
+      - rds_instance - Fixed ``TypeError`` when tagging RDS DB with storage type ``gp3`` (https://github.com/ansible-collections/amazon.aws/pull/1437).
+      - route53_info - Add new return key ``health_check_observations`` for health check operations (https://github.com/ansible-collections/amazon.aws/pull/1419).
+      - route53_info - Fixed ``KeyError`` when getting status or failure_reason of a health check (https://github.com/ansible-collections/amazon.aws/pull/1419).
+      minor_changes:
+      - ec2_spot_instance - add parameter ``terminate_instances`` to support terminating instances associated with spot requests (https://github.com/ansible-collections/amazon.aws/pull/1402).
+      - route53_health_check - added support for enabling Latency graphs (MeasureLatency) during creation of a Route53 Health Check (https://github.com/ansible-collections/amazon.aws/pull/1201).
+      release_summary: This minor release brings bugfixes and minor new features.
+    fragments:
+    - 1071-ec2_vol_tags_idempotent.yaml
+    - 1201-route53_health_check-add-support-for-latency-graphs.yml
+    - 1358-ec2_metadata_facts.yml
+    - 1402-ec2_spot_instance-ability-to-terminate-instances.yml
+    - 1419-route53_info-fix-keyerror-for-healthcheck-operations.yml
+    - 1437-rds_instance-gp3-tagging-bugfix.yml
+    - 5.4.0-release.yml
+    - 5.4.0-route53_health_check.yml
+    release_date: '2023-03-27'
+  5.5.0:
+    changes:
+      bugfixes:
+      - cloudwatch_metric_alarm - Don't consider ``StateTransitionedTimestamp`` in change detection (https://github.com/ansible-collections/amazon.aws/pull/1440).
+      - ec2_instance - Pick up ``app_callback -> set_password`` rather than ``app_callback -> set_passwd`` (https://github.com/ansible-collections/amazon.aws/issues/1449).
+      - lambda_info - Do not convert environment variables to snake_case when querying lambda config (https://github.com/ansible-collections/amazon.aws/pull/1457).
+      - rds_instance - fix type of ``promotion_tier`` as passed to the APIs (https://github.com/ansible-collections/amazon.aws/pull/1475).
+      minor_changes:
+      - Add connectivity_type to ec2_vpc_nat_gateway module (https://github.com/ansible-collections/amazon.aws/pull/1267).
+      - cloudwatch - Add metrics and extended_statistic keys to cloudwatch module (https://github.com/ansible-collections/amazon.aws/pull/1133).
+      - ec2_ami - add support for BootMode, TpmSupport, UefiData params (https://github.com/ansible-collections/amazon.aws/pull/1037).
+      - ec2_metadata_facts - added support to query instance tags in metadata (https://github.com/ansible-collections/amazon.aws/pull/1186).
+      - kms_key - Add multi_region option to create_key (https://github.com/ansible-collections/amazon.aws/pull/1290).
+      - lambda - add support for function layers when creating or updating a lambda function (https://github.com/ansible-collections/amazon.aws/pull/1118).
+      - lambda_event - Added support to set FunctionResponseTypes when creating lambda event source mappings (https://github.com/ansible-collections/amazon.aws/pull/1209).
+      - module_utils/elbv2 - removed compatibility code for ``botocore < 1.10.30`` (https://github.com/ansible-collections/amazon.aws/pull/1477).
+      - rds_cluster - New ``engine_mode`` parameter (https://github.com/ansible-collections/amazon.aws/pull/941).
+      - rds_cluster - add new options (e.g., ``db_cluster_instance_class``, ``allocated_storage``, ``storage_type``, ``iops``) (https://github.com/ansible-collections/amazon.aws/pull/1191).
+      - rds_cluster - update list of supported engines with ``mysql`` and ``postgres`` (https://github.com/ansible-collections/amazon.aws/pull/1191).
+      - s3_bucket - ensure ``public_access`` is configured before updating policies (https://github.com/ansible-collections/amazon.aws/pull/1511).
+      release_summary: This release contains a number of bugfixes, new features and new modules. This is the last planned minor release prior to the release of version 6.0.0.
+    fragments:
+    - 1037-ec2_ami-add-support-for-boot_mode-tpm_support-uefi_data.yml
+    - 1133-add_metrics_cloudwatch.yml
+    - 1186-ec2_metadata_facts-query-instance-metadata-tags.yml
+    - 1191-rds_cluster-new_options.yml
+    - 1209-lambda_event-add-support-for-function_response_types.yml
+    - 1258-ec2_instance.yml
+    - 1267-ec2_vpc_nat_gateway_connectivitytype.yml
+    - 1290-create_multi_region_key.yml
+    - 1440-cloudwatch_metric_alarm-fix-change-detection.yml
+    - 1457-lambda_info-fix-env-var-in-output.yml
+    - 1474-ec2_vol.yml
+    - 1475-rds_instance-promotion-tier.yml
+    - 1477-elbv2-botocore.yml
+    - 1505-ec2_instance_test_fixes.yml
+    - 1511-s3_bucket-public_access.yml
+    - 20230424-ec2_instance-app_callback.yml
+    - 20230502-rds_cluster-engine.yml
+    - 20230503-rds_cluster-engine-rds_cluster_snapshot.yml
+    - fix_version_added.yml
+    - lambda-add-support-for-layers.yml
+    - rds_cluster_engine_mode.yaml
+    - release-summary.yml
+    modules:
+    - description: Creates an AWS Lambda layer or deletes an AWS Lambda layer version
+      name: lambda_layer
+      namespace: ''
+    - description: List lambda layer or lambda layer versions
+      name: lambda_layer_info
+      namespace: ''
+    release_date: '2023-05-04'
+  5.5.1:
+    changes:
+      bugfixes:
+      - autoscaling_group - fix ValidationError when describing an autoscaling group that has more than 20 target groups attached to it by breaking the request into chunks (https://github.com/ansible-collections/amazon.aws/pull/1593).
+      - autoscaling_group_info - fix ValidationError when describing an autoscaling group that has more than 20 target groups attached to it by breaking the request into chunks (https://github.com/ansible-collections/amazon.aws/pull/1593).
+      - aws_account_attribute - raise correct ``AnsibleLookupError`` rather than ``AnsibleError`` (https://github.com/ansible-collections/amazon.aws/issues/1528).
+      - aws_secret - raise correct ``AnsibleLookupError`` rather than ``AnsibleError`` (https://github.com/ansible-collections/amazon.aws/issues/1528).
+      - aws_service_ip_ranges - raise correct ``AnsibleLookupError`` rather than ``AnsibleError`` (https://github.com/ansible-collections/amazon.aws/issues/1528).
+      - aws_ssm - raise correct ``AnsibleLookupError`` rather than ``AnsibleError`` (https://github.com/ansible-collections/amazon.aws/issues/1528).
+      - ec2_instance - fix check_mode issue when adding network interfaces (https://github.com/ansible-collections/amazon.aws/issues/1403).
+      - elb_application_lb - fix missing attributes on creation of ALB. The ``create_or_update_alb()`` was including ALB-specific attributes when updating an existing ALB but not when creating a new ALB (https://github.com/ansible-collections/amazon.aws/issues/1510).
+      release_summary: 'This release brings a few bugfixes.
+ + ' + fragments: + - 1510-elb_application_lb-fix-alb-specific-attributes-not-added-on-create.yml + - 1528-lookup-error.yml + - 1578-rds-instance-docs.yml + - 1593-autoscaling_group_info-20-target-groups-per-call.yml + - ec2_instance-eni-attach-idempotency.yml + - endpoint.yml + - release-summary.yml + - test-reqs.yml + release_date: '2023-06-07' diff --git a/ansible_collections/amazon/aws/changelogs/config.yaml b/ansible_collections/amazon/aws/changelogs/config.yaml new file mode 100644 index 000000000..026952159 --- /dev/null +++ b/ansible_collections/amazon/aws/changelogs/config.yaml @@ -0,0 +1,29 @@ +changelog_filename_template: ../CHANGELOG.rst +changelog_filename_version_depth: 0 +changes_file: changelog.yaml +changes_format: combined +keep_fragments: false +mention_ancestor: true +new_plugins_after_name: removed_features +notesdir: fragments +prelude_section_name: release_summary +prelude_section_title: Release Summary +sections: +- - major_changes + - Major Changes +- - minor_changes + - Minor Changes +- - breaking_changes + - Breaking Changes / Porting Guide +- - deprecated_features + - Deprecated Features +- - removed_features + - Removed Features (previously deprecated) +- - security_fixes + - Security Fixes +- - bugfixes + - Bugfixes +- - known_issues + - Known Issues +title: amazon.aws +trivial_section_name: trivial diff --git a/ansible_collections/amazon/aws/changelogs/fragments/.keep b/ansible_collections/amazon/aws/changelogs/fragments/.keep new file mode 100644 index 000000000..e69de29bb diff --git a/ansible_collections/amazon/aws/docs/docsite/extra-docs.yml b/ansible_collections/amazon/aws/docs/docsite/extra-docs.yml new file mode 100644 index 000000000..aeb152847 --- /dev/null +++ b/ansible_collections/amazon/aws/docs/docsite/extra-docs.yml @@ -0,0 +1,14 @@ +--- +sections: + - title: Changelog + toctree: + - CHANGELOG + - title: Scenario Guide + toctree: + - guide_aws + - title: Module Development Guidelines + toctree: + - dev_guidelines + - title: Dynamic Inventory Plugin Guide + toctree: + - aws_ec2_guide diff --git a/ansible_collections/amazon/aws/docs/docsite/links.yml b/ansible_collections/amazon/aws/docs/docsite/links.yml new file mode 100644 index 000000000..ce667b367 --- /dev/null +++ b/ansible_collections/amazon/aws/docs/docsite/links.yml @@ -0,0 +1,41 @@ +--- +# based on https://github.com/ansible-collections/collection_template/blob/main/docs/docsite/links.yml +# +# This will make sure that plugin and module documentation gets Edit on GitHub links +# that allow users to directly create a PR for this plugin or module in GitHub's UI. +# Remove this section if the collection repository is not on GitHub, or if you do not want this +# functionality for your collection. +edit_on_github: + repository: ansible-collections/amazon.aws + branch: main + # If your collection root (the directory containing galaxy.yml) does not coincide with your + # repository's root, you have to specify the path to the collection root here. For example, + # if the collection root is in a subdirectory ansible_collections/community/REPO_NAME + # in your repository, you have to set path_prefix to 'ansible_collections/community/REPO_NAME'. + path_prefix: '' + +# Here you can add arbitrary extra links. Please keep the number of links down to a +# minimum! Also please keep the description short, since this will be the text put on +# a button. +# +# Also note that some links are automatically added from information in galaxy.yml. +# The following are automatically added: +# 1. 
A link to the issue tracker (if `issues` is specified);
+# 2. A link to the homepage (if `homepage` is specified and does not equal the `documentation` or `repository` link);
+# 3. A link to the collection's repository (if `repository` is specified).

+# extra_links:
+#   - description:
+#     url:

+# Specify communication channels for your collection. We suggest not specifying more
+# than one place for communication per communication tool to avoid confusion.
+communication:
+  matrix_rooms:
+    - topic: General usage and support questions
+      room: '#aws:ansible.im'
+  irc_channels:
+    - topic: General usage and support questions
+      network: Libera
+      channel: '#ansible-aws'
diff --git a/ansible_collections/amazon/aws/docs/docsite/rst/CHANGELOG.rst b/ansible_collections/amazon/aws/docs/docsite/rst/CHANGELOG.rst
new file mode 100644
index 000000000..6e07527c1
--- /dev/null
+++ b/ansible_collections/amazon/aws/docs/docsite/rst/CHANGELOG.rst
@@ -0,0 +1,1013 @@
+========================
+amazon.aws Release Notes
+========================
+
+.. contents:: Topics
+
+
+v5.5.1
+======
+
+Release Summary
+---------------
+
+This release brings a few bugfixes.
+
+
+Bugfixes
+--------
+
+- autoscaling_group - fix ValidationError when describing an autoscaling group that has more than 20 target groups attached to it by breaking the request into chunks (https://github.com/ansible-collections/amazon.aws/pull/1593).
+- autoscaling_group_info - fix ValidationError when describing an autoscaling group that has more than 20 target groups attached to it by breaking the request into chunks (https://github.com/ansible-collections/amazon.aws/pull/1593).
+- aws_account_attribute - raise correct ``AnsibleLookupError`` rather than ``AnsibleError`` (https://github.com/ansible-collections/amazon.aws/issues/1528).
+- aws_secret - raise correct ``AnsibleLookupError`` rather than ``AnsibleError`` (https://github.com/ansible-collections/amazon.aws/issues/1528).
+- aws_service_ip_ranges - raise correct ``AnsibleLookupError`` rather than ``AnsibleError`` (https://github.com/ansible-collections/amazon.aws/issues/1528).
+- aws_ssm - raise correct ``AnsibleLookupError`` rather than ``AnsibleError`` (https://github.com/ansible-collections/amazon.aws/issues/1528).
+- ec2_instance - fix check_mode issue when adding network interfaces (https://github.com/ansible-collections/amazon.aws/issues/1403).
+- elb_application_lb - fix missing attributes on creation of ALB. The ``create_or_update_alb()`` was including ALB-specific attributes when updating an existing ALB but not when creating a new ALB (https://github.com/ansible-collections/amazon.aws/issues/1510).
+
+v5.5.0
+======
+
+Release Summary
+---------------
+
+This release contains a number of bugfixes, new features and new modules. This is the last planned minor release prior to the release of version 6.0.0.
+
+
+Minor Changes
+-------------
+
+- Add connectivity_type to ec2_vpc_nat_gateway module (https://github.com/ansible-collections/amazon.aws/pull/1267).
+- cloudwatch - Add metrics and extended_statistic keys to cloudwatch module (https://github.com/ansible-collections/amazon.aws/pull/1133).
+- ec2_ami - add support for BootMode, TpmSupport, UefiData params (https://github.com/ansible-collections/amazon.aws/pull/1037).
+- ec2_metadata_facts - added support to query instance tags in metadata (https://github.com/ansible-collections/amazon.aws/pull/1186).
+- kms_key - Add multi_region option to create_key (https://github.com/ansible-collections/amazon.aws/pull/1290).
+- lambda - add support for function layers when creating or updating a lambda function (https://github.com/ansible-collections/amazon.aws/pull/1118).
+- lambda_event - Added support to set FunctionResponseTypes when creating lambda event source mappings (https://github.com/ansible-collections/amazon.aws/pull/1209).
+- module_utils/elbv2 - removed compatibility code for ``botocore < 1.10.30`` (https://github.com/ansible-collections/amazon.aws/pull/1477).
+- rds_cluster - New ``engine_mode`` parameter (https://github.com/ansible-collections/amazon.aws/pull/941).
+- rds_cluster - add new options (e.g., ``db_cluster_instance_class``, ``allocated_storage``, ``storage_type``, ``iops``) (https://github.com/ansible-collections/amazon.aws/pull/1191).
+- rds_cluster - update list of supported engines with ``mysql`` and ``postgres`` (https://github.com/ansible-collections/amazon.aws/pull/1191).
+- s3_bucket - ensure ``public_access`` is configured before updating policies (https://github.com/ansible-collections/amazon.aws/pull/1511).
+
+Bugfixes
+--------
+
+- cloudwatch_metric_alarm - Don't consider ``StateTransitionedTimestamp`` in change detection (https://github.com/ansible-collections/amazon.aws/pull/1440).
+- ec2_instance - Pick up ``app_callback -> set_password`` rather than ``app_callback -> set_passwd`` (https://github.com/ansible-collections/amazon.aws/issues/1449).
+- lambda_info - Do not convert environment variables to snake_case when querying lambda config (https://github.com/ansible-collections/amazon.aws/pull/1457).
+- rds_instance - fix type of ``promotion_tier`` as passed to the APIs (https://github.com/ansible-collections/amazon.aws/pull/1475).
+
+New Modules
+-----------
+
+- lambda_layer - Creates an AWS Lambda layer or deletes an AWS Lambda layer version
+- lambda_layer_info - List lambda layer or lambda layer versions
+
+v5.4.0
+======
+
+Release Summary
+---------------
+
+This minor release brings bugfixes and minor new features.
+
+Minor Changes
+-------------
+
+- ec2_spot_instance - add parameter ``terminate_instances`` to support terminating instances associated with spot requests (https://github.com/ansible-collections/amazon.aws/pull/1402).
+- route53_health_check - added support for enabling Latency graphs (MeasureLatency) during creation of a Route53 Health Check (https://github.com/ansible-collections/amazon.aws/pull/1201).
+
+Bugfixes
+--------
+
+- ec2_metadata_facts - fix ``AttributeError`` when running the ec2_metadata_facts module on Python 2 managed nodes (https://github.com/ansible-collections/amazon.aws/issues/1358).
+- ec2_vol - handle ec2_vol.tags when the associated instance already exists (https://github.com/ansible-collections/amazon.aws/pull/1071).
+- rds_instance - Fixed ``TypeError`` when tagging RDS DB with storage type ``gp3`` (https://github.com/ansible-collections/amazon.aws/pull/1437).
+- route53_info - Add new return key ``health_check_observations`` for health check operations (https://github.com/ansible-collections/amazon.aws/pull/1419).
+- route53_info - Fixed ``KeyError`` when getting status or failure_reason of a health check (https://github.com/ansible-collections/amazon.aws/pull/1419).
+
+v5.3.0
+======
+
+Release Summary
+---------------
+
+This release brings some minor changes, bugfixes, and deprecated features.
+
+Minor Changes
+-------------
+
+- ec2_instance - more consistently return ``instances`` information (https://github.com/ansible-collections/amazon.aws/pull/964).
+- ec2_instance - remove unused import (https://github.com/ansible-collections/amazon.aws/pull/1350).
+- ec2_key - Add unit-test coverage (https://github.com/ansible-collections/amazon.aws/pull/1288).
+- ec2_vpc_nat_gateway - ensure allocation_id is defined before potential access (https://github.com/ansible-collections/amazon.aws/pull/1350).
+- route53_zone - added support for associating multiple VPCs to route53 hosted zones (https://github.com/ansible-collections/amazon.aws/pull/1300).
+- s3_bucket - add option to support creation of buckets with object lock enabled (https://github.com/ansible-collections/amazon.aws/pull/1372).
+
+Deprecated Features
+-------------------
+
+- support for passing both profile and security tokens through a mix of environment variables and parameters has been deprecated and support will be removed in release 6.0.0. After release 6.0.0 it will only be possible to pass either a profile or security tokens, regardless of the mechanism used to pass them. To explicitly block a parameter coming from an environment variable pass an empty string as the parameter value. Support for passing profile and security tokens together was originally deprecated in release 1.2.0, however it was only partially implemented in release 5.0.0 (https://github.com/ansible-collections/amazon.aws/pull/1355).
+
+Bugfixes
+--------
+
+- cloudtrail - support disabling encryption using ``kms_key_id`` (https://github.com/ansible-collections/amazon.aws/pull/1384).
+- ec2_key - fix issue when trying to update existing key pair with the same key material (https://github.com/ansible-collections/amazon.aws/pull/1383).
+- module_utils/elbv2 - fix change detection by adding default values for ``Scope`` and ``SessionTimeout`` parameters in ``authenticate-oidc`` rules (https://github.com/ansible-collections/amazon.aws/pull/1270).
+- module_utils/elbv2 - respect ``UseExistingClientSecret`` parameter in ``authenticate-oidc`` rules (https://github.com/ansible-collections/amazon.aws/pull/1270).
+- revert breaking change introduced in 5.2.0 when passing credentials through a mix of environment variables and parameters (https://github.com/ansible-collections/amazon.aws/issues/1353).
+- s3_bucket - empty bucket policy was throwing a ``JSONDecodeError`` - deal with it gracefully instead (https://github.com/ansible-collections/amazon.aws/pull/1368).
+
+v5.2.0
+======
+
+Release Summary
+---------------
+
+A minor release containing bugfixes for the ``ec2_eni_info`` module and the ``aws_rds`` inventory plugin, as well as improvements to the ``rds_instance`` module.
+
+
+Minor Changes
+-------------
+
+- amazon.aws collection - refactoring of code to use the argument specification ``fallback`` when falling back to environment variables for security credentials and AWS connection details (https://github.com/ansible-collections/amazon.aws/pull/1174).
+- rds_instance - Split up the integration test-suite into a series of smaller tests (https://github.com/ansible-collections/amazon.aws/pull/1185).
+- rds_instance - add support for gp3 storage type (https://github.com/ansible-collections/amazon.aws/pull/1266).
+
+Bugfixes
+--------
+
+- aws_rds - fixes a bug in the RDS inventory plugin where the config file was ignored (https://github.com/ansible-collections/amazon.aws/issues/1304).
+- lambda - fix flaky integration test which assumes there are no other lambdas in the account (https://github.com/ansible-collections/amazon.aws/issues/1277).
+
+v5.1.0
+======
+
+Release Summary
+---------------
+
+This release brings some minor changes, bugfixes, security fixes and deprecated features.
+
+Minor Changes
+-------------
+
+- amazon.aws collection - The ``aws_access_key`` parameter has been renamed to ``access_key``, ``access_key`` was previously an alias for this parameter and ``aws_access_key`` remains as an alias. This change should have no observable effect for users outside the module/plugin documentation (https://github.com/ansible-collections/amazon.aws/pull/1172).
+- amazon.aws collection - The ``aws_secret_key`` parameter has been renamed to ``secret_key``, ``secret_key`` was previously an alias for this parameter and ``aws_secret_key`` remains as an alias. This change should have no observable effect for users outside the module/plugin documentation (https://github.com/ansible-collections/amazon.aws/pull/1172).
+- amazon.aws collection - The ``security_token`` parameter has been renamed to ``session_token``; ``security_token`` remains as an alias. This change should have no observable effect for users outside the module/plugin documentation (https://github.com/ansible-collections/amazon.aws/pull/1172).
+- aws_account_attribute lookup plugin - use ``missing_required_lib`` for a more consistent error message when boto3/botocore is not available (https://github.com/ansible-collections/amazon.aws/pull/1152).
+- aws_ec2 inventory - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181).
+- aws_ec2 inventory plugin - use ``missing_required_lib`` for a more consistent error message when boto3/botocore is not available (https://github.com/ansible-collections/amazon.aws/pull/1152).
+- aws_rds inventory plugin - use ``missing_required_lib`` for a more consistent error message when boto3/botocore is not available (https://github.com/ansible-collections/amazon.aws/pull/1152).
+- aws_secret lookup plugin - use ``missing_required_lib`` for a more consistent error message when boto3/botocore is not available (https://github.com/ansible-collections/amazon.aws/pull/1152).
+- aws_ssm lookup plugin - use ``missing_required_lib`` for a more consistent error message when boto3/botocore is not available (https://github.com/ansible-collections/amazon.aws/pull/1152).
+- ec2_instance - minor fix for launching an instance in a specified AZ when ``vpc_subnet_id`` is not provided (https://github.com/ansible-collections/amazon.aws/pull/1150).
+- ec2_instance - refactor ``tower_callback`` code to handle parameter validation as part of the argument specification (https://github.com/ansible-collections/amazon.aws/pull/1199).
+- ec2_instance - the ``instance_role`` parameter has been renamed to ``iam_instance_profile`` to better reflect what it is, ``instance_role`` remains as an alias (https://github.com/ansible-collections/amazon.aws/pull/1151).
+- ec2_instance - the ``tower_callback`` parameter has been renamed to ``aap_callback``, ``tower_callback`` remains as an alias. This change should have no observable effect for users outside the module documentation (https://github.com/ansible-collections/amazon.aws/pull/1199).
+- s3_object_info - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181).
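As a hedged illustration of the three credential parameter renames at the top of this list, the task below passes credentials under their new names; the environment-variable lookups are placeholders for however credentials are actually supplied:

```yaml
# Illustrative sketch: access_key, secret_key and session_token are the new
# primary names as of 5.1.0; aws_access_key, aws_secret_key and
# security_token remain as aliases, so existing playbooks keep working.
- name: Check which account and ARN the credentials resolve to
  amazon.aws.aws_caller_info:
    access_key: "{{ lookup('env', 'AWS_ACCESS_KEY_ID') }}"
    secret_key: "{{ lookup('env', 'AWS_SECRET_ACCESS_KEY') }}"
    session_token: "{{ lookup('env', 'AWS_SESSION_TOKEN') }}"
  register: caller
```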
+
+Deprecated Features
+-------------------
+
+- amazon.aws collection - Support for the ``EC2_ACCESS_KEY`` environment variable has been deprecated and will be removed in a release after 2024-12-01. Please use the ``access_key`` parameter or ``AWS_ACCESS_KEY_ID`` environment variable instead (https://github.com/ansible-collections/amazon.aws/pull/1172).
+- amazon.aws collection - Support for the ``EC2_REGION`` environment variable has been deprecated and will be removed in a release after 2024-12-01. Please use the ``region`` parameter or ``AWS_REGION`` environment variable instead (https://github.com/ansible-collections/amazon.aws/pull/1172).
+- amazon.aws collection - Support for the ``EC2_SECRET_KEY`` environment variable has been deprecated and will be removed in a release after 2024-12-01. Please use the ``secret_key`` parameter or ``AWS_SECRET_ACCESS_KEY`` environment variable instead (https://github.com/ansible-collections/amazon.aws/pull/1172).
+- amazon.aws collection - Support for the ``EC2_SECURITY_TOKEN`` environment variable has been deprecated and will be removed in a release after 2024-12-01. Please use the ``session_token`` parameter or ``AWS_SESSION_TOKEN`` environment variable instead (https://github.com/ansible-collections/amazon.aws/pull/1172).
+- amazon.aws collection - Support for the ``EC2_URL`` and ``S3_URL`` environment variables has been deprecated and will be removed in a release after 2024-12-01. Please use the ``endpoint_url`` parameter or ``AWS_ENDPOINT_URL`` environment variable instead (https://github.com/ansible-collections/amazon.aws/pull/1172).
+- amazon.aws collection - The ``access_token`` alias for the ``session_token`` parameter has been deprecated and will be removed in a release after 2024-12-01. Please use the ``session_token`` name instead (https://github.com/ansible-collections/amazon.aws/pull/1172).
+- amazon.aws collection - The ``aws_security_token`` alias for the ``session_token`` parameter has been deprecated and will be removed in a release after 2024-12-01. Please use the ``session_token`` name instead (https://github.com/ansible-collections/amazon.aws/pull/1172).
+- amazon.aws collection - The ``ec2_access_key`` alias for the ``access_key`` parameter has been deprecated and will be removed in a release after 2024-12-01. Please use the ``access_key`` name instead (https://github.com/ansible-collections/amazon.aws/pull/1172).
+- amazon.aws collection - The ``ec2_region`` alias for the ``region`` parameter has been deprecated and will be removed in a release after 2024-12-01. Please use the ``region`` name instead (https://github.com/ansible-collections/amazon.aws/pull/1172).
+- amazon.aws collection - The ``ec2_secret_key`` alias for the ``secret_key`` parameter has been deprecated and will be removed in a release after 2024-12-01. Please use the ``secret_key`` name instead (https://github.com/ansible-collections/amazon.aws/pull/1172).
+- amazon.aws collection - The ``security_token`` alias for the ``session_token`` parameter has been deprecated and will be removed in a release after 2024-12-01. Please use the ``session_token`` name instead (https://github.com/ansible-collections/amazon.aws/pull/1172).
+- ec2_security_group - support for passing nested lists to ``cidr_ip`` and ``cidr_ipv6`` has been deprecated. Nested lists can be passed through the ``flatten`` filter instead, for example ``cidr_ip: '{{ my_cidrs | flatten }}'``; see the sketch after this list (https://github.com/ansible-collections/amazon.aws/pull/1213).
+- module_utils.urls - ``ansible_collections.amazon.aws.module_utils.urls`` is believed to be unused; it has been deprecated and will be removed in release 7.0.0.
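+
+A minimal sketch of the ``flatten`` usage from the deprecation note above (``my_cidrs`` is a hypothetical variable holding nested lists of CIDR ranges):
+
+.. code-block:: yaml
+
+   - name: Allow HTTPS from a set of CIDR ranges
+     amazon.aws.ec2_security_group:
+       name: example-sg
+       description: Example security group
+       rules:
+         - proto: tcp
+           ports:
+             - 443
+           # nested lists are deprecated, so flatten them explicitly
+           cidr_ip: "{{ my_cidrs | flatten }}"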
+
+Security Fixes
+--------------
+
+- ec2_instance - fixes leak of password into logs when using ``tower_callback.windows=True`` and ``tower_callback.set_password`` (https://github.com/ansible-collections/amazon.aws/pull/1199).
+
+Bugfixes
+--------
+
+- ec2_instance - fixes ``Invalid type for parameter TagSpecifications, value None`` error when tags aren't specified (https://github.com/ansible-collections/amazon.aws/issues/1148).
+- module_utils.transformations - ensure that ``map_complex_type`` still returns transformed items if items exist that are not in the type_map (https://github.com/ansible-collections/amazon.aws/pull/1163).
+
+v5.0.2
+======
+
+Bugfixes
+--------
+
+- ec2_metadata_facts - fixed ``AttributeError`` (https://github.com/ansible-collections/amazon.aws/issues/1134).
+
+v5.0.1
+======
+
+Bugfixes
+--------
+
+- ec2_vpc_net_info - fix ``KeyError`` (https://github.com/ansible-collections/amazon.aws/pull/1109).
+- ec2_vpc_net_info - remove hardcoded ``ClassicLinkEnabled`` parameter when request for ``ClassicLinkDnsSupported`` failed (https://github.com/ansible-collections/amazon.aws/pull/1109).
+- s3_object - be more defensive when checking the results of ``s3.get_bucket_ownership_controls`` (https://github.com/ansible-collections/amazon.aws/issues/1115).
+
+v5.0.0
+======
+
+Release Summary
+---------------
+
+In this release we promoted many community modules to Red Hat supported status. Those modules have been moved from the community.aws to the amazon.aws collection. This release also brings some new features, bugfixes, breaking changes and deprecated features. The amazon.aws collection has dropped support for ``botocore<1.21.0`` and ``boto3<1.18.0``. Support for ``ansible-core<2.11`` has also been dropped.
+
+Major Changes
+-------------
+
+- autoscaling_group - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.autoscaling_group``.
+- autoscaling_group_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.autoscaling_group_info``.
+- cloudtrail - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.cloudtrail``.
+- cloudwatch_metric_alarm - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.cloudwatch_metric_alarm``.
+- cloudwatchevent_rule - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.cloudwatchevent_rule``.
+- cloudwatchlogs_log_group - The module has been migrated from the ``community.aws`` collection.
Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.cloudwatchlogs_log_group``. +- cloudwatchlogs_log_group_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.cloudwatchlogs_log_group_info``. +- cloudwatchlogs_log_group_metric_filter - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.cloudwatchlogs_log_group_metric_filter``. +- ec2_eip - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.ec2_eip``. +- ec2_eip_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.ec2_eip_info``. +- elb_application_lb - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.elb_application_lb``. +- elb_application_lb_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.elb_application_lb_info``. +- execute_lambda - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.execute_lambda``. +- iam_policy - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_policy``. +- iam_policy_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_policy_info``. +- iam_user - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_user``. +- iam_user_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_user_info``. +- kms_key - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.kms_key``. +- kms_key_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.kms_key_info``. +- lambda - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.lambda``. +- lambda_alias - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.lambda_alias``. +- lambda_event - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.lambda_event``. 
+- lambda_execute - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.lambda_execute``. +- lambda_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.lambda_info``. +- lambda_policy - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.lambda_policy``. +- rds_cluster - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.rds_cluster``. +- rds_cluster_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.rds_cluster_info``. +- rds_cluster_snapshot - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.rds_cluster_snapshot``. +- rds_instance - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.rds_instance``. +- rds_instance_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.rds_instance_info``. +- rds_instance_snapshot - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.rds_instance_snapshot``. +- rds_option_group - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.rds_option_group``. +- rds_option_group_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.rds_option_group_info``. +- rds_param_group - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.rds_param_group``. +- rds_snapshot_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.rds_snapshot_info``. +- rds_subnet_group - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.rds_subnet_group``. +- route53 - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.route53``. +- route53_health_check - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.route53_health_check``. +- route53_info - The module has been migrated from the ``community.aws`` collection. 
Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.route53_info``.
+- route53_zone - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.route53_zone``.
+
+Minor Changes
+-------------
+
+- Ability to record and replay the API interaction of a module for testing purposes. Showcase the feature with an example (https://github.com/ansible-collections/amazon.aws/pull/998).
+- Remove the empty ``__init__.py`` files from the distribution, they are no longer required (https://github.com/ansible-collections/amazon.aws/pull/1018).
+- amazon.aws modules - the ``ec2_url`` parameter has been renamed to ``endpoint_url`` for consistency, ``ec2_url`` remains as an alias (https://github.com/ansible-collections/amazon.aws/pull/992).
+- aws_caller_info - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/968).
+- aws_ec2 - introduce the ``allow_duplicated_hosts`` configuration key (https://github.com/ansible-collections/amazon.aws/pull/1026).
+- cloudformation - avoid catching ``Exception``, catch more specific errors instead (https://github.com/ansible-collections/amazon.aws/pull/968).
+- cloudwatch_metric_alarm_info - Added a new module that describes CloudWatch metric alarms (https://github.com/ansible-collections/amazon.aws/pull/988).
+- ec2_group - The ``ec2_group`` module has been renamed to ``ec2_security_group``, ``ec2_group`` remains as an alias (https://github.com/ansible-collections/amazon.aws/pull/897).
+- ec2_group_info - The ``ec2_group_info`` module has been renamed to ``ec2_security_group_info``, ``ec2_group_info`` remains as an alias (https://github.com/ansible-collections/amazon.aws/pull/897).
+- ec2_instance - Add ``hibernation_options`` and ``volumes->ebs->encrypted`` keys to support stop-hibernate on instances (https://github.com/ansible-collections/amazon.aws/pull/972).
+- ec2_instance - expanded the use of the automatic retries to ``InsufficientInstanceCapacity`` (https://github.com/ansible-collections/amazon.aws/issues/1038).
+- ec2_metadata_facts - avoid catching ``Exception``, catch more specific errors instead (https://github.com/ansible-collections/amazon.aws/pull/968).
+- ec2_security_group - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/968).
+- ec2_vpc_endpoint - avoid catching ``Exception``, catch more specific errors instead (https://github.com/ansible-collections/amazon.aws/pull/968).
+- ec2_vpc_nat_gateway - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/968).
+- ec2_vpc_net_info - handle classic link check for shared VPCs by throwing a warning instead of an error (https://github.com/ansible-collections/amazon.aws/pull/984).
+- module_utils/acm - Move to jittered backoff (https://github.com/ansible-collections/amazon.aws/pull/946).
+- module_utils/elbv2 - ensures that ``ip_address_type`` is set on creation rather than re-setting it after creation (https://github.com/ansible-collections/amazon.aws/pull/940).
+- module_utils/elbv2 - uses new waiters with retries for temporary failures (https://github.com/ansible-collections/amazon.aws/pull/940).
+- module_utils/waf - Move to jittered backoff (https://github.com/ansible-collections/amazon.aws/pull/946).
+- module_utils/waiters - Add waiters to manage eks_nodegroup module (https://github.com/ansible-collections/community.aws/pull/1415).
+- s3_bucket - ``rgw`` was added as an alias for the ``ceph`` parameter for consistency with the ``s3_object`` module (https://github.com/ansible-collections/amazon.aws/pull/994).
+- s3_bucket - the ``s3_url`` parameter was merged into the ``endpoint_url`` parameter, ``s3_url`` remains as an alias (https://github.com/ansible-collections/amazon.aws/pull/994).
+- s3_object - added the ``sig_v4`` parameter, enabling the user to opt in to signature version 4 for download/get operations; see the sketch after this list (https://github.com/ansible-collections/amazon.aws/pull/1014).
+- s3_object - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/968).
+- s3_object - the ``rgw`` parameter was renamed to ``ceph`` for consistency with the ``s3_bucket`` module, ``rgw`` remains as an alias (https://github.com/ansible-collections/amazon.aws/pull/994).
+- s3_object - the ``s3_url`` parameter was merged into the ``endpoint_url`` parameter, ``s3_url`` remains as an alias (https://github.com/ansible-collections/amazon.aws/pull/994).
+- s3_object - updated module to add support for handling file upload to a bucket with ACL disabled (https://github.com/ansible-collections/amazon.aws/pull/921).
+- s3_object_info - Added a new module that describes S3 Objects (https://github.com/ansible-collections/amazon.aws/pull/977).
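+
+A usage sketch for the new ``sig_v4`` option (bucket and key names are hypothetical):
+
+.. code-block:: yaml
+
+   - name: Download an object, opting in to signature version 4
+     amazon.aws.s3_object:
+       bucket: example-bucket
+       object: /reports/latest.csv
+       dest: /tmp/latest.csv
+       mode: get
+       sig_v4: true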
+
+Breaking Changes / Porting Guide
+--------------------------------
+
+- amazon.aws collection - Support for ansible-core < 2.11 has been dropped (https://github.com/ansible-collections/amazon.aws/pull/1087).
+- amazon.aws collection - The amazon.aws collection has dropped support for ``botocore<1.21.0`` and ``boto3<1.18.0``. Most modules will continue to work with older versions of the AWS SDK, however compatibility with older versions of the SDK is not guaranteed and will not be tested. When using older versions of the SDK a warning will be emitted by Ansible (https://github.com/ansible-collections/amazon.aws/pull/934).
+- doc_fragments - remove minimum collection requirements from doc_fragments/aws.py and allow pulling those from doc_fragments/aws_boto3.py instead (https://github.com/ansible-collections/amazon.aws/pull/985).
+- ec2_ami - the default value for ``purge_tags`` has been changed from ``False`` to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/916).
+- ec2_ami - the parameter aliases ``DeviceName``, ``VirtualName`` and ``NoDevice`` were previously deprecated and have been removed, please use ``device_name``, ``virtual_name`` and ``no_device`` instead (https://github.com/ansible-collections/amazon.aws/pull/913).
+- ec2_eni_info - the mutual exclusivity of the ``eni_id`` and ``filters`` parameters is now enforced, previously ``filters`` would be ignored if ``eni_id`` was set (https://github.com/ansible-collections/amazon.aws/pull/954).
+- ec2_instance - the default value for ``purge_tags`` has been changed from ``False`` to ``True``; to keep the old behaviour see the sketch after this list (https://github.com/ansible-collections/amazon.aws/pull/916).
+- ec2_key - the default value for ``purge_tags`` has been changed from ``False`` to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/916).
+- ec2_vol - the default value for ``purge_tags`` has been changed from ``False`` to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/916).
+- ec2_vpc_dhcp_option_info - the parameter aliases ``DhcpOptionIds`` and ``DryRun`` were previously deprecated and have been removed, please use ``dhcp_options_ids`` and ``dry_run`` instead (https://github.com/ansible-collections/amazon.aws/pull/913).
+- ec2_vpc_endpoint - the default value for ``purge_tags`` has been changed from ``False`` to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/916).
+- ec2_vpc_net - the default value for ``purge_tags`` has been changed from ``False`` to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/916).
+- ec2_vpc_route_table - the default value for ``purge_tags`` has been changed from ``False`` to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/916).
+- s3_bucket - the previously deprecated alias ``S3_URL`` for the ``s3_url`` parameter has been removed. Playbooks should be updated to use ``s3_url`` (https://github.com/ansible-collections/amazon.aws/pull/908).
+- s3_object - the previously deprecated alias ``S3_URL`` for the ``s3_url`` parameter has been removed. Playbooks should be updated to use ``s3_url`` (https://github.com/ansible-collections/amazon.aws/pull/908).
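+
+For playbooks that relied on the old tag behaviour, purging can be disabled explicitly, as in this sketch (the instance name and tags are placeholders):
+
+.. code-block:: yaml
+
+   - name: Add a tag without purging existing ones
+     amazon.aws.ec2_instance:
+       name: example-instance
+       state: present
+       purge_tags: false   # restores the pre-5.0.0 default
+       tags:
+         Owner: example-team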
+
+Deprecated Features
+-------------------
+
+- amazon.aws collection - due to the AWS SDKs announcing the end of support for Python less than 3.7 (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/) support for Python less than 3.7 by this collection has been deprecated and will be removed in a release after 2023-05-31 (https://github.com/ansible-collections/amazon.aws/pull/935).
+- inventory/aws_ec2 - the ``include_extra_api_calls`` option is now deprecated, its value is silently ignored (https://github.com/ansible-collections/amazon.aws/pull/1097).
+
+Bugfixes
+--------
+
+- aws_ec2 - address a regression introduced in 4.1.0 (https://github.com/ansible-collections/amazon.aws/pull/862) that caused the presence of duplicated hosts in the inventory.
+- cloudtrail - fix key error by renaming ``TagList`` to ``TagsList`` (https://github.com/ansible-collections/amazon.aws/issues/1088).
+- ec2_instance - Only show the deprecation warning for the default value of ``instance_type`` when ``count`` or ``exact_count`` are specified (https://github.com//issues/980).
+- ec2_metadata_facts - fix ``'NoneType' object is not callable`` exception when using Ansible 2.13+ (https://github.com/ansible-collections/amazon.aws/issues/942).
+- module_utils/botocore - fix ``object has no attribute 'fail'`` error in error handling (https://github.com/ansible-collections/amazon.aws/pull/1045).
+- module_utils/elbv2 - fixes ``KeyError`` when using ``UseExistingClientSecret`` rather than ``ClientSecret`` (https://github.com/ansible-collections/amazon.aws/pull/940).
+- module_utils/elbv2 - improvements to idempotency when comparing listeners (https://github.com/ansible-collections/community.aws/issues/604).
+- s3_object - also use ``ignore_nonexistent_bucket`` when listing a bucket (https://github.com/ansible-collections/amazon.aws/issues/966).
+
+New Modules
+-----------
+
+- cloudtrail_info - Gather information about trails in AWS CloudTrail.
+- cloudwatch_metric_alarm_info - Gather information about the alarms for the specified metric.
+- s3_object_info - Gather information about objects in S3.
+
+v4.3.0
+======
+
+Release Summary
+---------------
+
+The amazon.aws 4.3.0 release includes a number of minor bug fixes and improvements. Following the release of amazon.aws 5.0.0, backports to the 4.x series will be limited to security issues and bugfixes.
+
+
+Minor Changes
+-------------
+
+- ec2_instance - expanded the use of the automatic retries to ``InsufficientInstanceCapacity`` (https://github.com/ansible-collections/amazon.aws/issues/1038).
+
+Bugfixes
+--------
+
+- ec2_metadata_facts - fix ``'NoneType' object is not callable`` exception when using Ansible 2.13+ (https://github.com/ansible-collections/amazon.aws/issues/942).
+- module_utils/cloud - Fix ``ValueError: ansible_collections.amazon.aws.plugins.module_utils.core.__spec__ is None`` error on Ansible 2.9 (https://github.com/ansible-collections/amazon.aws/issues/1083).
+
+v4.2.0
+======
+
+Minor Changes
+-------------
+
+- ec2_security_group - set type as ``list`` for ``rules->group_name`` as it can accept both ``str`` and ``list`` (https://github.com/ansible-collections/amazon.aws/pull/971).
+- various modules - linting fixups (https://github.com/ansible-collections/amazon.aws/pull/953).
+
+Deprecated Features
+-------------------
+
+- module_utils.cloud - removal of ``CloudRetry.backoff`` has been delayed until release 6.0.0. It is recommended to update custom modules to use ``jittered_backoff`` or ``exponential_backoff`` instead (https://github.com/ansible-collections/amazon.aws/pull/951).
+
+v4.1.0
+======
+
+Minor Changes
+-------------
+
+- ec2_instance - expanded the use of the automatic retries on temporary failures (https://github.com/ansible-collections/amazon.aws/issues/927).
+- s3_bucket - updated module to enable support for setting S3 Bucket Keys for SSE-KMS (https://github.com/ansible-collections/amazon.aws/pull/882).
+
+Deprecated Features
+-------------------
+
+- amazon.aws collection - due to the AWS SDKs announcing the end of support for Python less than 3.7 (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/) support for Python less than 3.7 by this collection has been deprecated and will be removed in a release after 2023-05-31 (https://github.com/ansible-collections/amazon.aws/pull/935).
+
+Bugfixes
+--------
+
+- aws_ec2 - ensure the correct number of hosts are returned when tags as hostnames are used (https://github.com/ansible-collections/amazon.aws/pull/862).
+- elb_application_lb - fix ``KeyError`` when balancing across two Target Groups (https://github.com/ansible-collections/community.aws/issues/1089).
+- elb_classic_lb - fix ``'NoneType' object has no attribute`` bug when creating a new ELB in check mode with a health check (https://github.com/ansible-collections/amazon.aws/pull/915).
+- elb_classic_lb - fix ``'NoneType' object has no attribute`` bug when creating a new ELB using security group names (https://github.com/ansible-collections/amazon.aws/issues/914).
+
+v4.0.0
+======
+
+Major Changes
+-------------
+
+- amazon.aws collection - The amazon.aws collection has dropped support for ``botocore<1.20.0`` and ``boto3<1.17.0``. Most modules will continue to work with older versions of the AWS SDK, however compatibility with older versions of the SDK is not guaranteed and will not be tested. When using older versions of the SDK a warning will be emitted by Ansible (https://github.com/ansible-collections/amazon.aws/pull/574).
+
+Minor Changes
+-------------
+
+- aws_s3 - Add ``validate_bucket_name`` option, to control bucket name validation (https://github.com/ansible-collections/amazon.aws/pull/615).
+- aws_s3 - The ``aws_s3`` module has been renamed to ``s3_object`` (https://github.com/ansible-collections/amazon.aws/pull/869).
+- aws_s3 - ``resource_tags`` has been added as an alias for the ``tags`` parameter (https://github.com/ansible-collections/amazon.aws/pull/845).
+- ec2_eni - Change parameter ``device_index`` data type to string when passing to the ``describe_network_interfaces`` API call (https://github.com/ansible-collections/amazon.aws/pull/877).
+- ec2_eni - ``resource_tags`` has been added as an alias for the ``tags`` parameter (https://github.com/ansible-collections/amazon.aws/pull/845).
+- ec2_group - add ``egress_rules`` as an alias for ``rules_egress`` (https://github.com/ansible-collections/amazon.aws/pull/878).
+- ec2_group - add ``purge_egress_rules`` as an alias for ``purge_rules_egress`` (https://github.com/ansible-collections/amazon.aws/pull/878).
+- ec2_instance - Add missing ``metadata_options`` parameters (https://github.com/ansible-collections/amazon.aws/pull/715).
+- ec2_key - ``resource_tags`` has been added as an alias for the ``tags`` parameter (https://github.com/ansible-collections/amazon.aws/pull/845).
+- ec2_vpc_net - add support for managing VPCs by ID (https://github.com/ansible-collections/amazon.aws/pull/848).
+- ec2_vpc_subnet - add support for OutpostArn param (https://github.com/ansible-collections/amazon.aws/pull/598).
+- elb_classic_lb - ``resource_tags`` has been added as an alias for the ``tags`` parameter (https://github.com/ansible-collections/amazon.aws/pull/845).
+- s3_bucket - Add ``validate_bucket_name`` option, to control bucket name validation (https://github.com/ansible-collections/amazon.aws/pull/615).
+- s3_bucket - ``resource_tags`` has been added as an alias for the ``tags`` parameter (https://github.com/ansible-collections/amazon.aws/pull/845).
+
+Breaking Changes / Porting Guide
+--------------------------------
+
+- Tags beginning with ``aws:`` will not be removed when purging tags; these tags are reserved by Amazon and may not be updated or deleted (https://github.com/ansible-collections/amazon.aws/issues/817).
+- amazon.aws collection - the ``profile`` parameter is now mutually exclusive with the ``aws_access_key``, ``aws_secret_key`` and ``security_token`` parameters (https://github.com/ansible-collections/amazon.aws/pull/834).
+- aws_az_info - the module alias ``aws_az_facts`` was deprecated in Ansible 2.9 and has now been removed (https://github.com/ansible-collections/amazon.aws/pull/832).
+- aws_s3 - the default value for ``overwrite`` has been changed to ``different`` instead of ``always`` so that the module is idempotent by default (https://github.com/ansible-collections/amazon.aws/issues/811).
+- aws_ssm - ``on_denied`` and ``on_missing`` now both default to ``error``, for consistency with both ``aws_secret`` and the base ``Lookup`` class (https://github.com/ansible-collections/amazon.aws/issues/617).
+- ec2 - The ``ec2`` module has been removed in release 4.0.0 and replaced by the ``ec2_instance`` module (https://github.com/ansible-collections/amazon.aws/pull/630).
+- ec2_vpc_igw_info - The default value for ``convert_tags`` has been changed to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/835).
+- elb_classic_lb - the ``ec2_elb`` fact has been removed (https://github.com/ansible-collections/amazon.aws/pull/827).
+- module_utils - Support for the original AWS SDK (aka ``boto``) has been removed, including all relevant helper functions. All modules should now use the ``boto3``/``botocore`` AWS SDK (https://github.com/ansible-collections/amazon.aws/pull/630).
+
+Deprecated Features
+-------------------
+
+- aws_s3 - The ``S3_URL`` alias for the ``s3_url`` option has been deprecated and will be removed in release 5.0.0 (https://github.com/ansible-collections/community.aws/pull/795).
+- ec2_ami - The ``DeviceName`` alias for the ``device_name`` option has been deprecated and will be removed in release 5.0.0 (https://github.com/ansible-collections/community.aws/pull/795).
+- ec2_ami - The ``NoDevice`` alias for the ``no_device`` option has been deprecated and will be removed in release 5.0.0 (https://github.com/ansible-collections/community.aws/pull/795).
+- ec2_ami - The ``VirtualName`` alias for the ``virtual_name`` option has been deprecated and will be removed in release 5.0.0 (https://github.com/ansible-collections/community.aws/pull/795).
+- ec2_ami - the current default value of ``False`` for ``purge_tags`` has been deprecated and will be updated in release 5.0.0 to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/846).
+- ec2_instance - The default value for ``instance_type`` has been deprecated, in a future release you must set an ``instance_type`` or a ``launch_template`` (https://github.com/ansible-collections/amazon.aws/pull/587).
+- ec2_instance - the current default value of ``False`` for ``purge_tags`` has been deprecated and will be updated in release 5.0.0 to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/849).
+- ec2_key - the current default value of ``False`` for ``purge_tags`` has been deprecated and will be updated in release 5.0.0 to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/846).
+- ec2_vol - the current default value of ``False`` for ``purge_tags`` has been deprecated and will be updated in release 5.0.0 to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/846).
+- ec2_vpc_dhcp_option_info - The ``DhcpOptionIds`` alias for the ``dhcp_options_ids`` option has been deprecated and will be removed in release 5.0.0 (https://github.com/ansible-collections/community.aws/pull/795).
+- ec2_vpc_dhcp_option_info - The ``DryRun`` alias for the ``dry_run`` option has been deprecated and will be removed in release 5.0.0 (https://github.com/ansible-collections/community.aws/pull/795).
+- ec2_vpc_endpoint - the current default value of ``False`` for ``purge_tags`` has been deprecated and will be updated in release 5.0.0 to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/846).
+- ec2_vpc_net - the current default value of ``False`` for ``purge_tags`` has been deprecated and will be updated in release 5.0.0 to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/848).
+- ec2_vpc_route_table - the current default value of ``False`` for ``purge_tags`` has been deprecated and will be updated in release 5.0.0 to ``True`` (https://github.com/ansible-collections/amazon.aws/pull/846).
+- s3_bucket - The ``S3_URL`` alias for the ``s3_url`` option has been deprecated and will be removed in release 5.0.0 (https://github.com/ansible-collections/community.aws/pull/795).
+- s3_object - Support for creation and deletion of S3 buckets has been deprecated. Please use the ``amazon.aws.s3_bucket`` module to create and delete buckets (https://github.com/ansible-collections/amazon.aws/pull/869).
+
+Removed Features (previously deprecated)
+----------------------------------------
+
+- cloudformation - the ``template_format`` option has been removed. It has been ignored by the module since Ansible 2.3 (https://github.com/ansible-collections/amazon.aws/pull/833).
+- ec2_key - the ``wait_timeout`` option had no effect, was deprecated in release 1.0.0, and has now been removed (https://github.com/ansible-collections/amazon.aws/pull/830).
+- ec2_key - the ``wait`` option had no effect, was deprecated in release 1.0.0, and has now been removed (https://github.com/ansible-collections/amazon.aws/pull/830).
+- ec2_tag - the previously deprecated state ``list`` has been removed. To list tags on an EC2 resource the ``ec2_tag_info`` module can be used (https://github.com/ansible-collections/amazon.aws/pull/829).
+- ec2_vol - the previously deprecated state ``list`` has been removed. To list volumes the ``ec2_vol_info`` module can be used (https://github.com/ansible-collections/amazon.aws/pull/828).
+- module_utils.batch - the class ``ansible_collections.amazon.aws.plugins.module_utils.batch.AWSConnection`` has been removed. Please use ``AnsibleAWSModule.client()`` instead (https://github.com/ansible-collections/amazon.aws/pull/831).
+
+Bugfixes
+--------
+
+- ec2_group - fix uncaught exception when running with ``--diff`` and ``--check`` to create a new security group (https://github.com/ansible-collections/amazon.aws/issues/440).
+- ec2_instance - Add a condition to handle the default ``instance_type`` value, fixing a breakage when creating an instance with a launch template (https://github.com/ansible-collections/amazon.aws/pull/587).
+- ec2_instance - raise an error when missing permission to stop instance when ``state`` is set to ``rebooted`` (https://github.com/ansible-collections/amazon.aws/pull/671).
+- ec2_vpc_igw - use ``gateway_id`` rather than filters to paginate if possible, to fix a ``'NoneType' object is not subscriptable`` error (https://github.com/ansible-collections/amazon.aws/pull/766).
+- ec2_vpc_net - fix a bug where CIDR configuration would be updated in check mode (https://github.com/ansible/ansible/issues/62678).
+- ec2_vpc_net - fix a bug where the module would get stuck if DNS options were updated in check mode (https://github.com/ansible/ansible/issues/62677).
+- elb_classic_lb - modify the return value of the ``_format_listeners`` method to resolve a failure creating HTTPS listeners (https://github.com/ansible-collections/amazon.aws/pull/860).
+
+v3.5.0
+======
+
+Release Summary
+---------------
+
+Following the release of amazon.aws 5.0.0, 3.5.0 is a bugfix release and the final planned release for the 3.x series.
+
+
+Minor Changes
+-------------
+
+- ec2_security_group - set type as ``list`` for ``rules->group_name`` as it can accept both ``str`` and ``list`` (https://github.com/ansible-collections/amazon.aws/pull/971).
+
+Bugfixes
+--------
+
+- ec2_metadata_facts - fix ``'NoneType' object is not callable`` exception when using Ansible 2.13+ (https://github.com/ansible-collections/amazon.aws/issues/942).
+
+v3.4.0
+======
+
+Minor Changes
+-------------
+
+- ec2_instance - expanded the use of the automatic retries on temporary failures (https://github.com/ansible-collections/amazon.aws/issues/927).
+
+Bugfixes
+--------
+
+- elb_application_lb - fix ``KeyError`` when balancing across two Target Groups (https://github.com/ansible-collections/community.aws/issues/1089).
+- elb_classic_lb - fix ``'NoneType' object has no attribute`` bug when creating a new ELB in check mode with a health check (https://github.com/ansible-collections/amazon.aws/pull/915).
+- elb_classic_lb - fix ``'NoneType' object has no attribute`` bug when creating a new ELB using security group names (https://github.com/ansible-collections/amazon.aws/issues/914).
+
+v3.3.1
+======
+
+v3.3.0
+======
+
+Minor Changes
+-------------
+
+- aws_ec2 inventory - Allow for literal strings in hostname that don't match filter parameters in ec2 describe-instances (https://github.com/ansible-collections/amazon.aws/pull/826).
+- aws_ssm - Add support for ``endpoint`` parameter (https://github.com/ansible-collections/amazon.aws/pull/837).
+- module_utils.rds - add retry_codes to get_rds_method_attribute return data to use in call_method and add unit tests (https://github.com/ansible-collections/amazon.aws/pull/776).
+- module_utils.rds - refactor to utilize get_rds_method_attribute return data (https://github.com/ansible-collections/amazon.aws/pull/776).
+- module_utils - add new aliases ``aws_session_token`` and ``session_token`` to the ``security_token`` parameter to be more in-line with the boto SDK (https://github.com/ansible-collections/amazon.aws/pull/631).
+- module_utils.rds - Add support and unit tests for addition/removal of IAM roles to/from a db instance in module_utils.rds with waiters (https://github.com/ansible-collections/amazon.aws/pull/714).
+
+Bugfixes
+--------
+
+- Include ``PSF-license.txt`` file for ``plugins/module_utils/_version.py``.
+- aws_account_attribute lookup plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701).
+- aws_ec2 inventory plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701).
+- aws_rds inventory plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701).
+- aws_resource_actions callback plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701).
+- aws_secret lookup plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701).
+- aws_service_ip_ranges lookup plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701).
+- aws_ssm - Fix environment variables for client configuration (e.g., AWS_PROFILE, AWS_ACCESS_KEY_ID) (https://github.com/ansible-collections/amazon.aws/pull/837).
+- aws_ssm lookup plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701).
+- ec2_instance - fix ``ec2_instance`` module broken in Python 3.8 - dict keys modified during iteration (https://github.com/ansible-collections/amazon.aws/issues/709).
+- module_utils.rds - Add waiter for promoting read replica to fix idempotency issue (https://github.com/ansible-collections/amazon.aws/pull/714).
+- module_utils.rds - Catch InvalidDBSecurityGroupStateFault when modifying a db instance (https://github.com/ansible-collections/amazon.aws/pull/776).
+- module_utils.s3 - Update validate_bucket_name minimum length to 3 (https://github.com/ansible-collections/amazon.aws/pull/802).
+
+v3.2.0
+======
+
+Minor Changes
+-------------
+
+- aws_secret - add pagination for ``bypath`` functionality (https://github.com/ansible-collections/amazon.aws/pull/591).
+- ec2_instance - Fix scope of deprecation warning to not show warning when ``state`` is ``absent`` (https://github.com/ansible-collections/amazon.aws/pull/719).
+- ec2_vpc_route_table - support associating internet gateways (https://github.com/ansible-collections/amazon.aws/pull/690).
+- module_utils.elbv2 - Add support for ALB-specific attributes and the ``compare_elb_attributes`` method to support check_mode in module_utils.elbv2 (https://github.com/ansible-collections/amazon.aws/pull/696).
+- s3_bucket - Add support for enforced bucket owner object ownership (https://github.com/ansible-collections/amazon.aws/pull/694).
+
+Bugfixes
+--------
+
+- aws_ec2 inventory - use the iam_role_arn configuration parameter to assume the role before trying to call DescribeRegions if the regions configuration is not set and the AWS credentials provided do not have enough privilege to perform the DescribeRegions action (https://github.com/ansible-collections/amazon.aws/issues/566).
+- ec2_vol - changing a volume from a type that does not support IOPS (like ``standard``) to a type that does (like ``gp3``) fails (https://github.com/ansible-collections/amazon.aws/issues/626).
+- ec2_vpc_igw - fix ``'NoneType' object is not subscriptable`` error (https://github.com/ansible-collections/amazon.aws/pull/691).
+- ec2_vpc_igw - use paginator for describe internet gateways and add retry to fix ``'NoneType' object is not subscriptable`` error (https://github.com/ansible-collections/amazon.aws/pull/695).
+- ec2_vpc_net - In check mode, ensure the module does not change the configuration. Handle the case when the Amazon-provided ipv6 block is enabled, then disabled, then enabled again. Do not disable IPv6 CIDR association (using the Amazon pool) if the ``ipv6_cidr`` property is not present in the task. If the VPC already exists and ``ipv6_cidr`` is not set, retain the current configuration (https://github.com/ansible-collections/amazon.aws/pull/631).
+
+v3.1.1
+======
+
+Minor Changes
+-------------
+
+- bump the release version of the amazon.aws collection from 3.1.0 to 3.1.1 because of a bug that occurred while uploading to Galaxy.
+
+v3.1.0
+======
+
+Minor Changes
+-------------
+
+- add new parameters ``hostvars_prefix`` and ``hostvars_suffix`` for inventory plugins aws_ec2 and aws_rds (https://github.com/ansible-collections/amazon.aws/issues/535).
+- aws_s3 - Add ``validate_bucket_name`` option, to control bucket name validation (https://github.com/ansible-collections/amazon.aws/pull/615).
+- aws_s3 - add ``latest`` choice on the ``overwrite`` parameter to get the latest object on S3 (https://github.com/ansible-collections/amazon.aws/pull/595).
+- ec2_vol - add support for OutpostArn param (https://github.com/ansible-collections/amazon.aws/pull/597).
+- ec2_vol - tag volume on creation (https://github.com/ansible-collections/amazon.aws/pull/603).
+- ec2_vpc_route_table - add support for IPv6 in creating route tables (https://github.com/ansible-collections/amazon.aws/pull/601).
+- s3_bucket - Add ``validate_bucket_name`` option, to control bucket name validation (https://github.com/ansible-collections/amazon.aws/pull/615).
+
+Deprecated Features
+-------------------
+
+- ec2_instance - The default value for ``instance_type`` has been deprecated, in a future release you must set an ``instance_type`` or a ``launch_template`` (https://github.com/ansible-collections/amazon.aws/pull/587).
+
+Bugfixes
+--------
+
+- Various modules and plugins - use vendored version of ``distutils.version`` instead of the deprecated Python standard library ``distutils`` (https://github.com/ansible-collections/amazon.aws/pull/599).
+- aws_acm - No longer raising ``ResourceNotFound`` exception while retrieving ACM certificates.
+- aws_s3 - fix exception raised when using the module to copy from source to destination and the key is missing from source (https://github.com/ansible-collections/amazon.aws/issues/602).
+- ec2_instance - Add a condition to handle the default ``instance_type`` value, fixing a breakage when creating an instance with a launch template (https://github.com/ansible-collections/amazon.aws/pull/587).
+- ec2_key - add support for ED25519 key type (https://github.com/ansible-collections/amazon.aws/issues/572).
+- ec2_vol - Sets the ``Iops`` value in ``req_obj`` even if the iops value has not changed, to allow modifying volume types that require passing an iops value to boto (https://github.com/ansible-collections/amazon.aws/pull/606).
+- elb_classic_lb - handle ``security_group_ids`` when providing ``security_group_names`` and fix broken tasks in integration test (https://github.com/ansible-collections/amazon.aws/pull/592).
+- s3_bucket - Enable the management of bucket-level ACLs (https://github.com/ansible-collections/amazon.aws/issues/573).
+
+v3.0.0
+======
+
+Major Changes
+-------------
+
+- amazon.aws collection - The amazon.aws collection has dropped support for ``botocore<1.19.0`` and ``boto3<1.16.0``. Most modules will continue to work with older versions of the AWS SDK, however compatibility with older versions of the SDK is not guaranteed and will not be tested. When using older versions of the SDK a warning will be emitted by Ansible (https://github.com/ansible-collections/amazon.aws/pull/574).
+
+Minor Changes
+-------------
+
+- ec2_instance - add count parameter support (https://github.com/ansible-collections/amazon.aws/pull/539).
+
+Breaking Changes / Porting Guide
+--------------------------------
+
+- aws_caller_facts - Remove deprecated ``aws_caller_facts`` alias. Please use ``aws_caller_info`` instead (see the sketch after this list).
+- cloudformation_facts - Remove deprecated ``cloudformation_facts`` alias. Please use ``cloudformation_info`` instead.
+- ec2_ami_facts - Remove deprecated ``ec2_ami_facts`` alias. Please use ``ec2_ami_info`` instead.
+- ec2_eni_facts - Remove deprecated ``ec2_eni_facts`` alias. Please use ``ec2_eni_info`` instead.
+- ec2_group_facts - Remove deprecated ``ec2_group_facts`` alias. Please use ``ec2_group_info`` instead.
+- ec2_instance_facts - Remove deprecated ``ec2_instance_facts`` alias. Please use ``ec2_instance_info`` instead.
+- ec2_snapshot_facts - Remove deprecated ``ec2_snapshot_facts`` alias. Please use ``ec2_snapshot_info`` instead.
+- ec2_vol_facts - Remove deprecated ``ec2_vol_facts`` alias. Please use ``ec2_vol_info`` instead.
+- ec2_vpc_dhcp_option_facts - Remove deprecated ``ec2_vpc_dhcp_option_facts`` alias. Please use ``ec2_vpc_dhcp_option_info`` instead.
+- ec2_vpc_endpoint_facts - Remove deprecated ``ec2_vpc_endpoint_facts`` alias. Please use ``ec2_vpc_endpoint_info`` instead.
+- ec2_vpc_igw_facts - Remove deprecated ``ec2_vpc_igw_facts`` alias. Please use ``ec2_vpc_igw_info`` instead.
+- ec2_vpc_nat_gateway_facts - Remove deprecated ``ec2_vpc_nat_gateway_facts`` alias. Please use ``ec2_vpc_nat_gateway_info`` instead.
+- ec2_vpc_net_facts - Remove deprecated ``ec2_vpc_net_facts`` alias. Please use ``ec2_vpc_net_info`` instead.
+- ec2_vpc_route_table_facts - Remove deprecated ``ec2_vpc_route_table_facts`` alias. Please use ``ec2_vpc_route_table_info`` instead.
+- ec2_vpc_subnet_facts - Remove deprecated ``ec2_vpc_subnet_facts`` alias. Please use ``ec2_vpc_subnet_info`` instead.
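+
+As a porting sketch for the removed ``_facts`` aliases above, a task switches to the matching ``_info`` module (the filter value is illustrative):
+
+.. code-block:: yaml
+
+   # previously: amazon.aws.ec2_ami_facts
+   - name: Look up AMIs owned by this account
+     amazon.aws.ec2_ami_info:
+       owners: self
+     register: ami_info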
+
+Deprecated Features
+-------------------
+
+- module_utils - support for the original AWS SDK ``boto`` has been deprecated in favour of the ``boto3``/``botocore`` SDK. All ``boto`` based modules have either been deprecated or migrated to ``botocore``, and the remaining support code in module_utils will be removed in release 4.0.0 of the amazon.aws collection. Any modules outside of the amazon.aws and community.aws collections based on the ``boto`` library will need to be migrated to the ``boto3``/``botocore`` libraries (https://github.com/ansible-collections/amazon.aws/pull/575).
+
+v2.2.0
+======
+
+Minor Changes
+-------------
+
+- ec2_instance - add count parameter support (https://github.com/ansible-collections/amazon.aws/pull/539).
+
+Bugfixes
+--------
+
+- aws_ec2 inventory - use the iam_role_arn configuration parameter to assume the role before trying to call DescribeRegions if the regions configuration is not set and the AWS credentials provided do not have enough privilege to perform the DescribeRegions action (https://github.com/ansible-collections/amazon.aws/issues/566).
+- ec2_vol - Sets the ``Iops`` value in ``req_obj`` even if the iops value has not changed, to allow modifying volume types that require passing an iops value to boto (https://github.com/ansible-collections/amazon.aws/pull/606).
+- ec2_vol - changing a volume from a type that does not support IOPS (like ``standard``) to a type that does (like ``gp3``) fails (https://github.com/ansible-collections/amazon.aws/issues/626).
+- ec2_vpc_igw - fix ``'NoneType' object is not subscriptable`` error (https://github.com/ansible-collections/amazon.aws/pull/691).
+- ec2_vpc_igw - use paginator for describe internet gateways and add retry to fix ``'NoneType' object is not subscriptable`` error (https://github.com/ansible-collections/amazon.aws/pull/695).
+- elb_classic_lb - handle ``security_group_ids`` when providing ``security_group_names`` and fix broken tasks in integration test (https://github.com/ansible-collections/amazon.aws/pull/592).
+
+v2.1.0
+======
+
+Minor Changes
+-------------
+
+- aws_service_ip_ranges - add new option ``ipv6_prefixes`` to get only IPv6 addresses and prefixes for Amazon services (https://github.com/ansible-collections/amazon.aws/pull/430).
+- cloudformation - fix detection when there are no changes. Sometimes when there are no changes, the change set will have a status of ``FAILED`` with the StatusReason ``No updates are to be performed`` (https://github.com/ansible-collections/amazon.aws/pull/507).
+- ec2_ami - add check_mode support (https://github.com/ansible-collections/amazon.aws/pull/516).
+- ec2_ami - use module_util helper for tagging AMIs (https://github.com/ansible-collections/amazon.aws/pull/520).
+- ec2_ami - when creating an AMI from an instance pass the tagging options at creation time (https://github.com/ansible-collections/amazon.aws/pull/551).
+- ec2_elb_lb - module renamed to ``elb_classic_lb`` (https://github.com/ansible-collections/amazon.aws/pull/377).
+- ec2_eni - add check mode support (https://github.com/ansible-collections/amazon.aws/pull/534).
+- ec2_eni - use module_util helper for tagging ENIs (https://github.com/ansible-collections/amazon.aws/pull/522).
+- ec2_instance - use module_util helpers for tagging (https://github.com/ansible-collections/amazon.aws/pull/527).
+- ec2_key - add support for tagging key pairs (https://github.com/ansible-collections/amazon.aws/pull/548).
+- ec2_snapshot - add check_mode support (https://github.com/ansible-collections/amazon.aws/pull/512).
+- ec2_vol - add check_mode support (https://github.com/ansible-collections/amazon.aws/pull/509).
+- ec2_vpc_dhcp_option - use module_util helpers for tagging (https://github.com/ansible-collections/amazon.aws/pull/531).
+- ec2_vpc_endpoint - added ``vpc_endpoint_security_groups`` parameter to support defining the security group attached to an interface endpoint (https://github.com/ansible-collections/amazon.aws/pull/544).
+- ec2_vpc_endpoint - added ``vpc_endpoint_subnets`` parameter to support defining the subnet attached to an interface or gateway endpoint (https://github.com/ansible-collections/amazon.aws/pull/544).
+- ec2_vpc_endpoint - use module_util helper for tagging (https://github.com/ansible-collections/amazon.aws/pull/525).
+- ec2_vpc_endpoint - use module_util helpers for tagging (https://github.com/ansible-collections/amazon.aws/pull/531).
+- ec2_vpc_igw - use module_util helper for tagging (https://github.com/ansible-collections/amazon.aws/pull/523).
+- ec2_vpc_igw - use module_util helpers for tagging (https://github.com/ansible-collections/amazon.aws/pull/531).
+- ec2_vpc_nat_gateway - use module_util helper for tagging (https://github.com/ansible-collections/amazon.aws/pull/524).
+- ec2_vpc_nat_gateway - use module_util helpers for tagging (https://github.com/ansible-collections/amazon.aws/pull/531).
+- elb_classic_lb - added retries on common AWS temporary API failures (https://github.com/ansible-collections/amazon.aws/pull/377).
+- elb_classic_lb - added support for check_mode (https://github.com/ansible-collections/amazon.aws/pull/377).
+- elb_classic_lb - added support for wait during creation (https://github.com/ansible-collections/amazon.aws/pull/377).
+- elb_classic_lb - added support for wait during instance addition and removal (https://github.com/ansible-collections/amazon.aws/pull/377).
+- elb_classic_lb - migrated to boto3 SDK (https://github.com/ansible-collections/amazon.aws/pull/377).
+- elb_classic_lb - various error messages changed due to refactor (https://github.com/ansible-collections/amazon.aws/pull/377).
+- module_utils.ec2 - moved generic tagging helpers into module_utils.tagging (https://github.com/ansible-collections/amazon.aws/pull/527).
+- module_utils.tagging - add new helper to generate TagSpecification lists (https://github.com/ansible-collections/amazon.aws/pull/527).
+
+Deprecated Features
+-------------------
+
+- ec2_classic_lb - setting of the ``ec2_elb`` fact has been deprecated and will be removed in release 4.0.0 of the collection. The module now returns ``elb`` which can be accessed using the register keyword, as in the sketch at the end of this section (https://github.com/ansible-collections/amazon.aws/pull/552).
+
+Bugfixes
+--------
+
+- AWS action group - added missing ``ec2_instance_facts`` entry (https://github.com/ansible-collections/amazon.aws/issues/557).
+- ec2_ami - fix problem when creating an AMI from an instance with ephemeral volumes (https://github.com/ansible-collections/amazon.aws/issues/511).
+- ec2_instance - ensure that ec2_instance falls back to the tag(Name) parameter when no filter and no name parameter is passed (https://github.com/ansible-collections/amazon.aws/issues/526).
+- s3_bucket - update error handling to better support DigitalOcean Space (https://github.com/ansible-collections/amazon.aws/issues/508).
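+
+A sketch of the new return value access pattern mentioned under Deprecated Features (the load balancer name, zone, and ports are illustrative):
+
+.. code-block:: yaml
+
+   - name: Create a classic ELB and capture its description
+     amazon.aws.elb_classic_lb:
+       name: example-elb
+       state: present
+       zones:
+         - us-east-1a
+       listeners:
+         - protocol: http
+           load_balancer_port: 80
+           instance_port: 80
+     register: result
+
+   - name: Use the returned elb data instead of the old ec2_elb fact
+     ansible.builtin.debug:
+       var: result.elb.dns_name
+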
+ +v2.0.0 +====== + +Major Changes +------------- + +- amazon.aws collection - Due to the AWS SDKs announcing the end of support for Python less than 3.6 (https://boto3.amazonaws.com/v1/documentation/api/1.17.64/guide/migrationpy3.html) this collection now requires Python 3.6+ (https://github.com/ansible-collections/amazon.aws/pull/298). +- amazon.aws collection - The amazon.aws collection has dropped support for ``botocore<1.18.0`` and ``boto3<1.15.0``. Most modules will continue to work with older versions of the AWS SDK, however compatibility with older versions of the SDK is not guaranteed and will not be tested. When using older versions of the SDK a warning will be emitted by Ansible (https://github.com/ansible-collections/amazon.aws/pull/502). +- ec2_instance - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.ec2_instance``. +- ec2_instance_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.ec2_instance_info``. +- ec2_vpc_endpoint - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.ec2_vpc_endpoint``. +- ec2_vpc_endpoint_facts - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.ec2_vpc_endpoint_info``. +- ec2_vpc_endpoint_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.ec2_vpc_endpoint_info``. +- ec2_vpc_endpoint_service_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.ec2_vpc_endpoint_service_info``. +- ec2_vpc_igw - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.ec2_vpc_igw``. +- ec2_vpc_igw_facts - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.ec2_vpc_igw_facts``. +- ec2_vpc_igw_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.ec2_vpc_igw_info``. +- ec2_vpc_nat_gateway - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.ec2_vpc_nat_gateway``. +- ec2_vpc_nat_gateway_facts - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.ec2_vpc_nat_gateway_info``. +- ec2_vpc_nat_gateway_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.ec2_vpc_nat_gateway_info``. +- ec2_vpc_route_table - The module has been migrated from the ``community.aws`` collection. 
Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.ec2_vpc_route_table``.
+- ec2_vpc_route_table_facts - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.ec2_vpc_route_table_facts``.
+- ec2_vpc_route_table_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.ec2_vpc_route_table_info``.
+
+Minor Changes
+-------------
+
+- aws_ec2 - use a generator rather than list comprehension (https://github.com/ansible-collections/amazon.aws/pull/465).
+- aws_s3 - Tests for compatibility with older versions of the AWS SDKs have been removed (https://github.com/ansible-collections/amazon.aws/pull/442).
+- aws_s3 - add ``tags`` and ``purge_tags`` features for an S3 object (https://github.com/ansible-collections/amazon.aws/pull/335)
+- aws_s3 - new mode to copy an existing object to another bucket (https://github.com/ansible-collections/amazon.aws/pull/359).
+- aws_secret - added support for gracefully handling deleted secrets (https://github.com/ansible-collections/amazon.aws/pull/455).
+- aws_ssm - add "on_missing" and "on_denied" option (https://github.com/ansible-collections/amazon.aws/pull/370).
+- cloudformation - Tests for compatibility with older versions of the AWS SDKs have been removed (https://github.com/ansible-collections/amazon.aws/pull/442).
+- ec2_ami - ensure tags are propagated to the snapshot(s) when creating an AMI (https://github.com/ansible-collections/amazon.aws/pull/437).
+- ec2_eni - fix idempotency when ``security_groups`` attribute is specified (https://github.com/ansible-collections/amazon.aws/pull/337).
+- ec2_eni - timeout increased when waiting for ENIs to finish detaching (https://github.com/ansible-collections/amazon.aws/pull/501).
+- ec2_group - Tests for compatibility with older versions of the AWS SDKs have been removed (https://github.com/ansible-collections/amazon.aws/pull/442).
+- ec2_group - use a generator rather than list comprehension (https://github.com/ansible-collections/amazon.aws/pull/465).
+- ec2_group - use system ipaddress module, available with Python >= 3.3, instead of vendored copy (https://github.com/ansible-collections/amazon.aws/pull/461).
+- ec2_instance - Tests for compatibility with older versions of the AWS SDKs have been removed (https://github.com/ansible-collections/amazon.aws/pull/442).
+- ec2_instance - add ``throughput`` parameter for gp3 volume types (https://github.com/ansible-collections/amazon.aws/pull/433).
+- ec2_instance - add support for controlling metadata options (https://github.com/ansible-collections/amazon.aws/pull/414).
+- ec2_instance - remove unnecessary raise when exiting with a failure (https://github.com/ansible-collections/amazon.aws/pull/460).
+- ec2_instance_info - Tests for compatibility with older versions of the AWS SDKs have been removed (https://github.com/ansible-collections/amazon.aws/pull/442).
+- ec2_snapshot - migrated to use the boto3 python library (https://github.com/ansible-collections/amazon.aws/pull/356).
+- ec2_spot_instance_info - Added a new module that describes the specified Spot Instance requests (https://github.com/ansible-collections/amazon.aws/pull/487).
+- ec2_vol - add parameter ``multi_attach`` to support Multi-Attach on volume creation/update (https://github.com/ansible-collections/amazon.aws/pull/362).
+- ec2_vol - relax the boto3/botocore requirements and only require botocore 1.19.27 for modifying the ``throughput`` parameter (https://github.com/ansible-collections/amazon.aws/pull/346).
+- ec2_vpc_dhcp_option - Now also returns a boto3-style resource description in the ``dhcp_options`` result key. This includes any tags for the ``dhcp_options_id`` and has the same format as the current return value of ``ec2_vpc_dhcp_option_info``. (https://github.com/ansible-collections/amazon.aws/pull/252)
+- ec2_vpc_dhcp_option_info - Now also returns a user-friendly ``dhcp_config`` key that matches the historical ``new_config`` key from ec2_vpc_dhcp_option, and alleviates the need to use ``items2dict(key_name='key', value_name='values')`` when parsing the output of the module. (https://github.com/ansible-collections/amazon.aws/pull/252)
+- ec2_vpc_subnet - Tests for compatibility with older versions of the AWS SDKs have been removed (https://github.com/ansible-collections/amazon.aws/pull/442).
+- integration tests - remove dependency on collection ``community.general`` (https://github.com/ansible-collections/amazon.aws/pull/361).
+- module_utils/waiter - add RDS cluster ``cluster_available`` waiter (https://github.com/ansible-collections/amazon.aws/pull/464).
+- module_utils/waiter - add RDS cluster ``cluster_deleted`` waiter (https://github.com/ansible-collections/amazon.aws/pull/464).
+- module_utils/waiter - add Route53 ``resource_record_sets_changed`` waiter (https://github.com/ansible-collections/amazon.aws/pull/350).
+- s3_bucket - Tests for compatibility with older versions of the AWS SDKs have been removed (https://github.com/ansible-collections/amazon.aws/pull/442).
+- s3_bucket - add new option ``object_ownership`` to configure object ownership (https://github.com/ansible-collections/amazon.aws/pull/311)
+- s3_bucket - updated to use HeadBucket instead of ListBucket when testing for bucket existence (https://github.com/ansible-collections/amazon.aws/pull/357).
+
+Breaking Changes / Porting Guide
+--------------------------------
+
+- ec2_instance - instance wait for state behaviour has changed.
If plays require the old behavior of waiting for the instance monitoring status to become ``OK`` when launching a new instance, the action will need to specify ``state: started`` (https://github.com/ansible-collections/amazon.aws/pull/481). +- ec2_snapshot - support for waiting indefinitely has been dropped, new default is 10 minutes (https://github.com/ansible-collections/amazon.aws/pull/356). +- ec2_vol_info - return ``attachment_set`` is now a list of attachments with Multi-Attach support on disk. (https://github.com/ansible-collections/amazon.aws/pull/362). +- ec2_vpc_dhcp_option - The module has been refactored to use boto3. Keys and value types returned by the module are now consistent, which is a change from the previous behaviour. A ``purge_tags`` option has been added, which defaults to ``True``. (https://github.com/ansible-collections/amazon.aws/pull/252) +- ec2_vpc_dhcp_option_info - Now preserves case for tag keys in return value. (https://github.com/ansible-collections/amazon.aws/pull/252) +- module_utils.core - The boto3 switch has been removed from the region parameter (https://github.com/ansible-collections/amazon.aws/pull/287). +- module_utils/compat - vendored copy of ipaddress removed (https://github.com/ansible-collections/amazon.aws/pull/461). +- module_utils/core - updated the ``scrub_none_parameters`` function so that ``descend_into_lists`` is set to ``True`` by default (https://github.com/ansible-collections/amazon.aws/pull/297). + +Deprecated Features +------------------- + +- ec2 - the boto based ``ec2`` module has been deprecated in favour of the boto3 based ``ec2_instance`` module. The ``ec2`` module will be removed in release 4.0.0 (https://github.com/ansible-collections/amazon.aws/pull/424). +- ec2_vpc_dhcp_option - The ``new_config`` return key has been deprecated and will be removed in a future release. It will be replaced by ``dhcp_config``. Both values are returned in the interim. (https://github.com/ansible-collections/amazon.aws/pull/252) + +Bugfixes +-------- + +- aws_s3 - Fix upload permission when an S3 bucket ACL policy requires a particular canned ACL (https://github.com/ansible-collections/amazon.aws/pull/318) +- ec2_ami - Fix ami issue when creating an ami with no_device parameter (https://github.com/ansible-collections/amazon.aws/pull/386) +- ec2_instance - ``ec2_instance`` was waiting on EC2 instance monitoring status to be ``OK`` when launching a new instance. This could cause a play to wait multiple minutes for AWS's monitoring to complete status checks (https://github.com/ansible-collections/amazon.aws/pull/481). +- ec2_snapshot - Fix snapshot issue when capturing a snapshot of a volume without tags (https://github.com/ansible-collections/amazon.aws/pull/383) +- ec2_vol - Fixes ``changed`` status when ``modify_volume`` is used, but no new disk is being attached. The module incorrectly reported that no change had occurred even when disks had been modified (iops, throughput, type, etc.). (https://github.com/ansible-collections/amazon.aws/issues/482). +- ec2_vol - fix iops setting and enforce iops/throughput parameters usage (https://github.com/ansible-collections/amazon.aws/pull/334) +- inventory - ``include_filters`` won't be ignored anymore if ``filters`` is not set (https://github.com/ansible-collections/amazon.aws/issues/457). +- s3_bucket - Fix error handling when attempting to set a feature that is not implemented (https://github.com/ansible-collections/amazon.aws/pull/391). 
+- s3_bucket - Gracefully handle ``NotImplemented`` exceptions when fetching encryption settings (https://github.com/ansible-collections/amazon.aws/issues/390).
+
+New Modules
+-----------
+
+- ec2_spot_instance - request, stop, reboot or cancel spot instance
+- ec2_spot_instance_info - Gather information about ec2 spot instance requests
+
+v1.5.0
+======
+
+Minor Changes
+-------------
+
+- AWS inventory plugins - use shared HAS_BOTO3 helper rather than copying code (https://github.com/ansible-collections/amazon.aws/pull/288).
+- AWS lookup plugins - use shared HAS_BOTO3 helper rather than copying code (https://github.com/ansible-collections/amazon.aws/pull/288).
+- aws_account_attribute - add retries on common AWS failures (https://github.com/ansible-collections/amazon.aws/pull/295).
+- aws_ec2 inventory - expose a new configuration key ``use_contrib_script_compatible_ec2_tag_keys`` to reproduce a behavior of the old ``ec2.py`` inventory script. With this option enabled, each tag is exposed using an ``ec2_tag_TAGNAME`` key (https://github.com/ansible-collections/amazon.aws/pull/331).
+- aws_ec2 inventory - expose two new keys called ``include_filters`` and ``exclude_filters`` to give the user the ability to compose an inventory with multiple queries (https://github.com/ansible-collections/amazon.aws/pull/328).
+- aws_ec2 inventory plugin - Added support for using Jinja2 templates in the authentication fields (https://github.com/ansible-collections/amazon.aws/pull/57).
+- cloudformation - added support for StackPolicyDuringUpdateBody (https://github.com/ansible-collections/amazon.aws/pull/155).
+- ec2_metadata_facts - add support for IMDSv2 (https://github.com/ansible-collections/amazon.aws/pull/43).
+- ec2_snapshot_info - add the ``max_results`` along with ``next_token_id`` option (https://github.com/ansible-collections/amazon.aws/pull/321).
+- ec2_tag - use common code for tagging resources (https://github.com/ansible-collections/amazon.aws/pull/309).
+- ec2_tag_info - use common code for tagging resources (https://github.com/ansible-collections/amazon.aws/pull/309).
+- ec2_vol - add the ``purge_tags`` option (https://github.com/ansible-collections/amazon.aws/pull/242).
+- ec2_vol - use common code for tagging resources (https://github.com/ansible-collections/amazon.aws/pull/309).
+- ec2_vpc_net - use a custom waiter which can handle API rate limiting (https://github.com/ansible-collections/amazon.aws/pull/270).
+- ec2_vpc_subnet - use AWSRetry decorator to more consistently handle API rate limiting (https://github.com/ansible-collections/amazon.aws/pull/270).
+- ec2_vpc_subnet - use common code for tagging resources (https://github.com/ansible-collections/amazon.aws/pull/309).
+- module_utils.cloudfront_facts - linting cleanup (https://github.com/ansible-collections/amazon.aws/pull/291).
+- module_utils.ec2 - linting cleanup (https://github.com/ansible-collections/amazon.aws/pull/291).
+- module_utils/core - add a helper function ``normalize_boto3_result`` (https://github.com/ansible-collections/amazon.aws/pull/271).
+- module_utils/core - add parameter ``descend_into_lists`` to ``scrub_none_parameters`` helper function (https://github.com/ansible-collections/amazon.aws/pull/262).
+- module_utils/ec2 - added additional helper functions for tagging EC2 resources (https://github.com/ansible-collections/amazon.aws/pull/309).
+- sanity tests - add ignore.txt for 2.12 (https://github.com/ansible-collections/amazon.aws/pull/315).
+
+Bugfixes
+--------
+
+- ec2_vol - create or update now preserves the existing tags, including Name (https://github.com/ansible-collections/amazon.aws/issues/229)
+- ec2_vol - fix exception when platform information isn't available (https://github.com/ansible-collections/amazon.aws/issues/305).
+
+v1.4.1
+======
+
+Minor Changes
+-------------
+
+- module_utils - the ipaddress module utility has been vendored into this collection. This eliminates the collection dependency on ansible.netcommon (which had removed the library in its 2.0 release). The ipaddress library is provided for internal use in this collection only. (https://github.com/ansible-collections/amazon.aws/issues/273)
+
+v1.4.0
+======
+
+Minor Changes
+-------------
+
+- aws_ec2 - Add hostname options concatenation
+- aws_ec2 inventory plugin - avoid a superfluous import of ``ansible.utils.display.Display`` (https://github.com/ansible-collections/amazon.aws/pull/226).
+- aws_ec2 module - Replace inverse aws instance-state-name filters !terminated, !shutting-down in favor of positive filters pending, running, stopping, stopped. Issue 235. (https://github.com/ansible-collections/amazon.aws/pull/237)
+- aws_secret - add ``bypath`` functionality (https://github.com/ansible-collections/amazon.aws/pull/192).
+- ec2_key - add AWSRetry decorator to automatically retry on common temporary failures (https://github.com/ansible-collections/amazon.aws/pull/213).
+- ec2_vol - Add support for gp3 volumes and support for modifying existing volumes (https://github.com/ansible-collections/amazon.aws/issues/55).
+- module_utils/elbv2 - add logic to compare_rules to suit Values list nested within dicts unique to each field type. Fixes issue (https://github.com/ansible-collections/amazon.aws/issues/187)
+- various AWS plugins and module_utils - Cleanup unused imports (https://github.com/ansible-collections/amazon.aws/pull/217).
+
+Bugfixes
+--------
+
+- ec2_vol - a creation or update now returns a structure with an up to date list of tags (https://github.com/ansible-collections/amazon.aws/pull/241).
+
+v1.3.0
+======
+
+Minor Changes
+-------------
+
+- aws_caller_info - add AWSRetry decorator to automatically retry on common temporary failures (https://github.com/ansible-collections/amazon.aws/pull/208)
+- aws_s3 - Add support for uploading templated content (https://github.com/ansible-collections/amazon.aws/pull/20).
+- aws_secret - add "on_missing" and "on_denied" option (https://github.com/ansible-collections/amazon.aws/pull/122).
+- ec2_ami - Add retries for ratelimiting related errors (https://github.com/ansible-collections/amazon.aws/pull/195).
+- ec2_ami - fixed and streamlined ``max_attempts`` logic when waiting for AMI creation to finish (https://github.com/ansible-collections/amazon.aws/pull/194).
+- ec2_ami - increased default ``wait_timeout`` to 1200 seconds (https://github.com/ansible-collections/amazon.aws/pull/194).
+- ec2_ami_info - Add retries for ratelimiting related errors (https://github.com/ansible-collections/amazon.aws/pull/195).
+- ec2_eni - Improve reliability of the module by adding waiters and performing lookups by ENI ID rather than repeated searches (https://github.com/ansible-collections/amazon.aws/pull/180).
+- ec2_eni_info - Improve reliability of the module by adding waiters and performing lookups by ENI ID rather than repeated searches (https://github.com/ansible-collections/amazon.aws/pull/180).
+- ec2_group - add AWSRetry decorator to automatically retry on common temporary failures (https://github.com/ansible-collections/amazon.aws/pull/207)
+- ec2_group_info - add AWSRetry decorator to automatically retry on common temporary failures (https://github.com/ansible-collections/amazon.aws/pull/207)
+- ec2_snapshot_info - add AWSRetry decorator to automatically retry on common temporary failures (https://github.com/ansible-collections/amazon.aws/pull/208)
+- ec2_vol - Add automatic retries on AWS rate limit errors (https://github.com/ansible-collections/amazon.aws/pull/199).
+- ec2_vol - ported ec2_vol to use boto3 (https://github.com/ansible-collections/amazon.aws/pull/53).
+- ec2_vpc_dhcp_option_info - add AWSRetry decorator to automatically retry on common temporary failures (https://github.com/ansible-collections/amazon.aws/pull/208)
+- module_utils/core - add helper function ``scrub_none_parameters`` to remove params set to ``None`` (https://github.com/ansible-collections/community.aws/issues/251).
+- module_utils/waiters - Add retries to our waiters for the same failure codes that we retry with AWSRetry (https://github.com/ansible-collections/amazon.aws/pull/185)
+- s3_bucket - Add support for managing the ``public_access`` settings (https://github.com/ansible-collections/amazon.aws/pull/171).
+
+Bugfixes
+--------
+
+- ec2 - Code fix so module can create ec2 instances with ``ec2_volume_iops`` option (https://github.com/ansible-collections/amazon.aws/pull/177).
+- ec2 - ignore terminated instances and instances that are shutting down when starting and stopping (https://github.com/ansible-collections/amazon.aws/issues/146).
+- ec2_group - Fixes error handling during tagging failures (https://github.com/ansible-collections/amazon.aws/issues/210).
+- ec2_group_info - Code fix so module works with Python 3.8 (make dict immutable in loop) (https://github.com/ansible-collections/amazon.aws/pull/181)
+
+v1.2.1
+======
+
+Minor Changes
+-------------
+
+- ec2_eni - Add support for tagging.
+- ec2_eni - Port ec2_eni module to boto3 and add an integration test suite.
+- ec2_eni_info - Add retries on transient AWS failures.
+- ec2_eni_info - Add support for providing an ENI ID.
+
+v1.2.0
+======
+
+Minor Changes
+-------------
+
+- ec2 module_utils - Update ``ec2_connect`` (boto2) behaviour so that ``ec2_url`` overrides ``region``.
+- module_utils.core - Support passing arbitrary extra keys to fail_json_aws, matching capabilities of fail_json.
+
+Deprecated Features
+-------------------
+
+- All AWS Modules - ``aws_access_key``, ``aws_secret_key`` and ``security_token`` will be made mutually exclusive with ``profile`` after 2022-06-01.
+
+Bugfixes
+--------
+
+- ec2 module_utils - Ensure boto3 verify parameter isn't overridden by setting a profile (https://github.com/ansible-collections/amazon.aws/issues/129)
+- s3_bucket - Ceph compatibility: treat error code NoSuchTagSetError used by Ceph synonymously with NoSuchTagSet used by AWS
+
+v1.1.0
+======
+
+Major Changes
+-------------
+
+- ec2 module_utils - The ``AWSRetry`` decorator no longer catches ``NotFound`` exceptions by default. ``NotFound`` exceptions need to be explicitly added using ``catch_extra_error_codes``. Some AWS modules may see an increase in transient failures due to AWS's eventual consistency model.
+
+Minor Changes
+-------------
+
+- Add ``aws_security_token``, ``aws_endpoint_url`` and ``endpoint_url`` aliases to improve AWS module parameter naming consistency.
+- Add support for ``aws_ca_bundle`` to boto3 based AWS modules
+- Add support for configuring boto3 profiles using ``AWS_PROFILE`` and ``AWS_DEFAULT_PROFILE``
+- Added check_mode support to aws_az_info
+- Added check_mode support to ec2_eni_info
+- Added check_mode support to ec2_snapshot_info
+- ansible_dict_to_boto3_filter_list - convert integers and bools to strings before using them in filters.
+- aws_direct_connect_virtual_interface - add direct_connect_gateway_id parameter. This field is only applicable in private VIF cases (public=False) and is mutually exclusive to virtual_gateway_id.
+- cloudformation - Return change_set_id in the cloudformation output if a change set was created.
+- ec2 - deprecate allowing both group and group_id - currently we ignore group_id if both are passed.
+- ec2_ami_info - allow integer and bool values for filtering images (https://github.com/ansible/ansible/issues/43570).
+- ec2_asg - Add support for Max Instance Lifetime
+- ec2_asg - Add the ability to use mixed_instance_policy in launch template driven autoscaling groups
+- ec2_asg - Migrated to AnsibleAWSModule
+- ec2_placement_group - make ``name`` a required field.
+- ec2_vol_info - Code cleanup and use of the AWSRetry decorator to improve stability
+- ec2_vpc_net - Enable IPv6 CIDR assignment
+
+Breaking Changes / Porting Guide
+--------------------------------
+
+- aws_s3 - can now delete versioned buckets even when they are not empty - set mode to delete to delete a versioned bucket and everything in it.
+
+Deprecated Features
+-------------------
+
+- cloudformation - The ``template_format`` option had no effect since Ansible 2.3 and will be removed after 2022-06-01
+- cloudformation - the ``template_format`` option has been deprecated and will be removed in a later release. It has been ignored by the module since Ansible 2.3.
+- data_pipeline - The ``version`` option had no effect and will be removed after 2022-06-01
+- ec2 - in a later release, the ``group`` and ``group_id`` options will become mutually exclusive. Currently ``group_id`` is ignored if you pass both.
+- ec2_ami - The ``no_device`` alias ``NoDevice`` has been deprecated and will be removed after 2022-06-01
+- ec2_ami - The ``virtual_name`` alias ``VirtualName`` has been deprecated and will be removed after 2022-06-01
+- ec2_eip - The ``wait_timeout`` option had no effect and will be removed after 2022-06-01
+- ec2_key - The ``wait_timeout`` option had no effect and will be removed after 2022-06-01
+- ec2_key - The ``wait`` option had no effect and will be removed after 2022-06-01
+- ec2_key - the ``wait_timeout`` option has been deprecated and will be removed in a later release. It has had no effect since Ansible 2.5.
+- ec2_key - the ``wait`` option has been deprecated and will be removed in a later release. It has had no effect since Ansible 2.5.
+- ec2_lc - The ``associate_public_ip_address`` option had no effect and will be removed after 2022-06-01
+- ec2_tag - deprecate the ``list`` option in favor of ec2_tag_info
+- ec2_tag - support for ``list`` as a state has been deprecated and will be removed in a later release. The ``ec2_tag_info`` can be used to fetch the tags on an EC2 resource.
+
+Bugfixes
+--------
+
+- aws_ec2 - fix idempotency when managing tags
+- aws_ec2 - fix idempotency when metrics are enabled
+- aws_s3 - Delete objects and delete markers so versioned buckets can be removed.
+- aws_s3 - Try to wait for the bucket to exist before setting the access control list.
+- cloudformation_info - Fix a KeyError returning information about the stack(s).
+- ec2_asg - Ensure "wait" is honored during replace operations
+- ec2_launch_template - Update output to include latest_version and default_version, matching the documentation
+- ec2_transit_gateway - Use AWSRetry before ClientError is handled when describing transit gateways
+- ec2_transit_gateway - fixed issue where auto_attach set to yes was not being honored (https://github.com/ansible/ansible/issues/61907)
+- ec2_vol - fix filtering bug
+- s3_bucket - Accept XNotImplemented response to support NetApp StorageGRID.
diff --git a/ansible_collections/amazon/aws/docs/docsite/rst/aws_ec2_guide.rst b/ansible_collections/amazon/aws/docs/docsite/rst/aws_ec2_guide.rst
new file mode 100644
index 000000000..3891aec2e
--- /dev/null
+++ b/ansible_collections/amazon/aws/docs/docsite/rst/aws_ec2_guide.rst
@@ -0,0 +1,590 @@
+.. _ansible_collections.amazon.aws.docsite.dynamic_inventory:
+
+
+Dynamic Inventory Plugin
+========================
+
+A dynamic inventory plugin allows users to point at data sources to compile the inventory of hosts that Ansible uses to target tasks, either via the ``-i /path/to/file`` and/or ``-i 'host1, host2'`` command line parameters or from other configuration sources.
+
+When using Ansible with AWS, maintaining a static inventory file is impractical, because AWS frequently changes IP addresses, autoscales instances, and more.
+Once your AWS EC2 hosts are spun up, you'll probably want to talk to them again.
+With a cloud setup, it's best not to maintain a static list of cloud hostnames in text files.
+Rather, the best way to handle this is to use the ``aws_ec2`` dynamic inventory plugin.
+
+The ``aws_ec2`` dynamic inventory plugin makes API calls to AWS to get a list of inventory hosts from Amazon Web Services EC2 at runtime.
+It provides the EC2 instance details dynamically, which you can use to manage your AWS infrastructure.
+
+The plugin will also return instances that were created outside of Ansible and allow Ansible to manage them.
+
+To start using the ``aws_ec2`` dynamic inventory plugin with a YAML configuration source, create a file with the accepted filename schema documented for the plugin (a YAML configuration file that ends with ``aws_ec2.(yml|yaml)``, e.g., ``demo.aws_ec2.yml``), then add ``plugin: amazon.aws.aws_ec2``. Use the fully qualified name if the plugin is in a collection.
+
+.. _ansible_collections.amazon.aws.docsite.using_inventory_plugin:
+
+Authentication
+==============
+
+If your Ansible controller is not in AWS, authentication is handled by specifying your access and secret keys either as environment variables or as inventory plugin arguments.
+
+For environment variables:
+
+.. code-block:: bash
+
+    export AWS_ACCESS_KEY_ID='AK123'
+    export AWS_SECRET_ACCESS_KEY='abc123'
+
+The ``AWS_SECURITY_TOKEN`` environment variable can also be used, but is only supported for backward compatibility; it has been superseded by ``AWS_SESSION_TOKEN``.
+A session token is only needed when you are using temporary credentials.
+
+Or you can set ``aws_access_key``, ``aws_secret_key``, and ``security_token`` inside the inventory configuration file.
+
+.. code-block:: yaml
+
+    # demo.aws_ec2.yml
+    plugin: amazon.aws.aws_ec2
+
+    # The access key for your AWS account.
+    aws_access_key:
+    # The secret access key for your AWS account.
+    aws_secret_key:
+
+If you use different credentials for different tools or applications, you can use profiles.
+
+The ``profile`` argument is mutually exclusive with the ``aws_access_key``, ``aws_secret_key`` and ``security_token`` options.
+When no credentials are explicitly provided, the AWS SDK (boto3) which Ansible uses will fall back to its configuration files (typically the shared credentials file, ``~/.aws/credentials``).
+You can change the location of the shared credentials file by setting the ``AWS_SHARED_CREDENTIALS_FILE`` environment variable.
+
+.. code-block:: yaml
+
+    # demo.aws_ec2.yml
+    plugin: amazon.aws.aws_ec2
+
+    # Attach the default AWS profile
+    aws_profile: default
+
+    # You could use Jinja2 to attach the AWS profile from the environment variable.
+    aws_profile: "{{ lookup('env', 'AWS_PROFILE') | default('dev-profile', true) }}"
+
+You can also set your AWS profile as an environment variable:
+
+.. code-block:: bash
+
+    export AWS_PROFILE='test-profile'
+
+
+If your Ansible controller is running on an EC2 instance with an assigned IAM Role, the credentials may be omitted.
+See the documentation for the controller `for more details `_.
+
+You can also use the ARN of an IAM role to assume to perform the inventory lookup.
+This can be useful for connecting across different accounts, or to limit user access.
+To do so, you should specify the ``iam_role_arn``.
+You should still provide AWS credentials with enough privilege to perform the AssumeRole action.
+
+.. code-block:: yaml
+
+    # demo.aws_ec2.yml
+    plugin: amazon.aws.aws_ec2
+
+    iam_role_arn: arn:aws:iam::1234567890:role/assumed-ansible
+
+
+Minimal Example
+===============
+
+Fetch all hosts in us-east-1. The hostname is the public DNS name if it exists, otherwise the private IP address.
+
+.. code-block:: yaml
+
+    # demo.aws_ec2.yml
+    plugin: amazon.aws.aws_ec2
+
+    # This sets the region. If empty (the default), this will include all regions, except possibly
+    # restricted ones like us-gov-west-1 and cn-north-1.
+    regions:
+      - us-east-1
+
+After providing any required options, you can view the populated inventory with ``ansible-inventory -i demo.aws_ec2.yml --graph``:
+
+.. code-block:: text
+
+    @all:
+      |--@aws_ec2:
+      |  |--ip-10-210-0-189.ec2.internal
+      |  |--ip-10-210-0-195.ec2.internal
+      |--@ungrouped:
+
+
+Allowed Options
+===============
+
+Some of the ``aws_ec2`` dynamic inventory plugin options are explained in detail below. For a full list see `the plugin documentation `_.
+
+``hostnames``
+-------------
+
+The ``hostnames`` option provides different settings to choose how the hostname will be displayed.
+
+Some examples are shown below:
+
+.. code-block:: yaml
+
+    hostnames:
+      # This option allows displaying the public ip addresses.
+      - ip-address
+
+      # This option allows displaying the private ip addresses using `tag:Name` as a prefix.
+      # `name` can be one of the options specified in http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options.
+      - name: 'private-ip-address'
+        separator: '_'
+        prefix: 'tag:Name'
+
+      # Using literal values for the hostname.
+      # The hostname will be aws-test_literal.
+      - name: 'test_literal'
+        separator: '-'
+        prefix: 'aws'
+
+      # To use tags as hostnames use the syntax `tag:Name=Value` to use the hostname `Name_Value`, or
+      # `tag:Name` to use the value of the Name tag. If the value provided does not exist in the above options,
+      # it will be used as a literal string.
+      - name: 'tag:Tag1=Test1,Tag2=Test2'
+
+      # Use the dns-name attribute as hostname.
+      - dns-name
+
+      # You can also specify a list in order of precedence for hostname variables.
+      - ip-address
+      - dns-name
+      - tag:Name
+      - private-ip-address
+
+By default, the inventory will only return the first match among the ``hostnames`` entries.
+You may want to get all the potential matches in your inventory; note that this also implies you will get
+duplicated entries. To switch to this behavior, set the ``allow_duplicated_hosts`` configuration key to ``True``.
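+
+For example, to collect every matching hostname for each instance rather than only the first match, you can combine ``allow_duplicated_hosts`` with a list of ``hostnames``. A minimal sketch (the region is illustrative):
+
+.. code-block:: yaml
+
+    # demo.aws_ec2.yml
+    plugin: amazon.aws.aws_ec2
+    regions:
+      - us-east-1
+    # Return every matching entry from the hostnames list, not just the first one.
+    allow_duplicated_hosts: True
+    hostnames:
+      - dns-name
+      - private-ip-address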
+
+``keyed_groups``
+----------------
+
+You can create dynamic groups using host variables with the ``keyed_groups`` option. ``keyed_groups`` takes a ``prefix`` and a ``key``; the prefix is concatenated with the value of the key to form the name of the host group.
+
+Some examples are shown below:
+
+.. code-block:: yaml
+
+    keyed_groups:
+      # This creates host groups based on architecture.
+      - prefix: arch
+        key: architecture
+
+      # This creates a host group for hosts with the `x86_64` architecture only.
+      - prefix: arch
+        key: architecture
+        value:
+          'x86_64'
+
+      # This creates host groups based on availability zone.
+      - prefix: az
+        key: placement.availability_zone
+
+      # If the EC2 tag Name had the value `redhat` the tag variable would be: `tag_Name_redhat`.
+      # Similarly, if a tag existed for an AWS EC2 instance as `Applications` with the value of `nodejs` the
+      # variable would be: `tag_Applications_nodejs`.
+      - prefix: tag
+        key: tags
+
+      # This creates host groups using instance_type, e.g., `instance_type_z3_tiny`.
+      - prefix: instance_type
+        key: instance_type
+
+      # This creates host groups using security_groups id, e.g., a `security_groups_sg_abcd1234` group for each security group.
+      - key: 'security_groups|json_query("[].group_id")'
+        prefix: 'security_groups'
+
+      # This creates a host group for each value of the Application tag.
+      - key: tags.Application
+        separator: ''
+
+      # This creates a host group per region e.g., `aws_region_us_east_2`.
+      - key: placement.region
+        prefix: aws_region
+
+      # This creates host groups based on the value of a custom tag `Role` and adds them to a metagroup called `project`.
+      - key: tags['Role']
+        prefix: foo
+        parent_group: "project"
+
+      # This creates a common parent group for all EC2 availability zones.
+      - key: placement.availability_zone
+        parent_group: all_ec2_zones
+
+      # This creates a group per distro (distro_CentOS, distro_Debian) and assigns the hosts that have matching values to it,
+      # using the default separator "_".
+      - prefix: distro
+        key: ansible_distribution
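+
+With the architecture example above, the resulting inventory graph might look like this (a sketch; the hosts and architectures depend on your account):
+
+.. code-block:: text
+
+    @all:
+      |--@arch_x86_64:
+      |  |--ip-10-210-0-189.ec2.internal
+      |  |--ip-10-210-0-195.ec2.internal
+      |--@aws_ec2:
+      |  |--ip-10-210-0-189.ec2.internal
+      |  |--ip-10-210-0-195.ec2.internal
+      |--@ungrouped: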
+
+``groups``
+----------
+
+It is also possible to create groups using the ``groups`` option.
+
+Some examples are shown below:
+
+.. code-block:: yaml
+
+    groups:
+      # This creates two groups - `redhat` and `ubuntu` - based on the value of the `Environment` tag.
+      # These conditionals are expressed using Jinja2 syntax.
+      redhat: "'Production' in tags.Environment"
+      ubuntu: "'PreProduction' in tags.Environment"
+
+      # This creates a `libvpc` group based on a specific condition on `vpc_id`.
+      libvpc: vpc_id == 'vpc-####'
+
+
+``compose``
+-----------
+
+``compose`` creates and modifies host variables from Jinja2 expressions.
+
+.. code-block:: yaml
+
+    compose:
+      # This sets the ansible_host variable to connect with the private IP address without changing the hostname.
+      ansible_host: private_ip_address
+
+      # This sets the location_vars variable as a dictionary with location as a key.
+      location_vars:
+        location: "east_coast"
+        server_type: "ansible_hostname | regex_replace ('(.{6})(.{2}).*', '\\2')"
+
+      # This sets the location variable.
+      location: "'east_coast'"
+
+      # This lets you connect over SSM to the instance id.
+      ansible_host: instance_id
+      ansible_connection: 'community.aws.aws_ssm'
+
+      # This defines combinations of host servers, IP addresses, and related SSH private keys.
+      ansible_host: private_ip_address
+      ansible_user: centos
+      ansible_ssh_private_key_file: /path/to/private_key_file
+
+      # This sets the ec2_security_group_ids variable.
+      ec2_security_group_ids: security_groups | map(attribute='group_id') | list | join(',')
+
+      # Host variables that are strings need to be wrapped with two sets of quotes.
+      # See https://docs.ansible.com/ansible/latest/plugins/inventory.html#using-inventory-plugins for details.
+      ansible_connection: '"community.aws.aws_ssm"'
+      ansible_user: '"ssm-user"'
+
+
+``include_filters`` and ``exclude_filters``
+-------------------------------------------
+
+The ``include_filters`` and ``exclude_filters`` options give you the ability to compose the inventory with several queries (see `available filters `_).
+
+.. code-block:: yaml
+
+    # This includes everything in the inventory that has the following tags.
+    include_filters:
+      - tag:Project:
+          - 'planets'
+      - tag:Environment:
+          - 'demo'
+
+    # This excludes everything from the inventory that has the following tag:Name.
+    exclude_filters:
+      - tag:Name:
+          - '{{ resource_prefix }}_3'
+
+
+``filters``
+-----------
+
+``filters`` are used to select AWS EC2 instances based on conditions (see `available filters `_).
+
+.. code-block:: yaml
+
+    filters:
+      # This selects only running instances with the `Environment` tag set to `dev`.
+      tag:Environment: dev
+      instance-state-name: running
+
+      # This selects only instances with the `Environment` tag set to `dev` or `qa` and a specific security group id.
+      tag:Environment:
+        - dev
+        - qa
+      instance.group-id: sg-xxxxxxxx
+
+      # This selects only instances with the `Name` tag fulfilling specific conditions.
+      tag:Name:
+        - dev-*
+        - share-resource
+        - hotfix
+
+
+``use_contrib_script_compatible_ec2_tag_keys`` and ``use_contrib_script_compatible_sanitization``
+--------------------------------------------------------------------------------------------------
+
+When ``use_contrib_script_compatible_ec2_tag_keys`` is ``True``, it exposes the host tags with ``ec2_tag_TAGNAME`` keys, like the old ``ec2.py`` inventory script did.
+
+By default the ``aws_ec2`` plugin applies a general group name sanitization to create safe and usable group names for use in Ansible.
+
+``use_contrib_script_compatible_sanitization`` allows you to override that, in an effort to ease migration from the old inventory script; it matches the sanitization of groups when the script's ``replace_dash_in_groups`` option is set to ``False``.
+To replicate the behavior of ``replace_dash_in_groups = True`` with constructed groups, you will need to replace hyphens with underscores via the ``regex_replace`` filter for those entries.
+
+For this to work you should also turn off the ``TRANSFORM_INVALID_GROUP_CHARS`` setting, otherwise the core engine will just apply the standard sanitization on top.
+
+This is not the default, as such names break certain functionality: not all characters are valid Python identifiers, which group names end up being used as.
+
+The use of this feature is discouraged, and we advise migrating to the new tags structure.
+
+.. code-block:: yaml
+
+    # demo.aws_ec2.yml
+    plugin: amazon.aws.aws_ec2
+    regions:
+      - us-east-1
+    filters:
+      tag:Name:
+        - 'instance-*'
+    hostnames:
+      - tag:Name
+    use_contrib_script_compatible_sanitization: True
+    use_contrib_script_compatible_ec2_tag_keys: True
+
+After providing any required options, you can view the populated inventory with ``ansible-inventory -i demo.aws_ec2.yml --list``:
+
+.. code-block:: text
+
+    {
+        "_meta": {
+            "hostvars": {
+                "instance-01": {
+                    "aws_ami_launch_index_ec2": 0,
+                    "aws_architecture_ec2": "x86_64",
+                    ...
+                    "ebs_optimized": false,
+                    "ec2_tag_Environment": "dev",
+                    "ec2_tag_Name": "instance-01",
+                    "ec2_tag_Tag1": "Test1",
+                    "ec2_tag_Tag2": "Test2",
+                    "ena_support": true,
+                    "enclave_options": {
+                        "enabled": false
+                    },
+                    ...
+                },
+                "instance-02": {
+                    ...
+                    "ebs_optimized": false,
+                    "ec2_tag_Environment": "dev",
+                    "ec2_tag_Name": "instance-02",
+                    "ec2_tag_Tag1": "Test3",
+                    "ec2_tag_Tag2": "Test4",
+                    "ena_support": true,
+                    "enclave_options": {
+                        "enabled": false
+                    },
+                    ...
+                }
+            }
+        },
+        "all": {
+            "children": [
+                "aws_ec2",
+                "ungrouped"
+            ]
+        },
+        "aws_ec2": {
+            "hosts": [
+                "instance-01",
+                "instance-02"
+            ]
+        }
+    }
+
+
+``hostvars_prefix`` and ``hostvars_suffix``
+-------------------------------------------
+
+``hostvars_prefix`` and ``hostvars_suffix`` allow you to set a prefix and a suffix for host variables.
+
+.. code-block:: yaml
+
+    # demo.aws_ec2.yml
+    plugin: amazon.aws.aws_ec2
+    regions:
+      - us-east-1
+    filters:
+      tag:Name:
+        - 'instance-*'
+    hostvars_prefix: 'aws_'
+    hostvars_suffix: '_ec2'
+    hostnames:
+      - tag:Name
+
+Now the output of ``ansible-inventory -i demo.aws_ec2.yml --list``:
+
+.. code-block:: text
+
+    {
+        "_meta": {
+            "hostvars": {
+                "instance-01": {
+                    "aws_ami_launch_index_ec2": 0,
+                    "aws_architecture_ec2": "x86_64",
+                    "aws_block_device_mappings_ec2": [
+                        {
+                            "device_name": "/dev/sda1",
+                            "ebs": {
+                                "attach_time": "2022-06-27T09:04:57+00:00",
+                                "delete_on_termination": true,
+                                "status": "attached",
+                                "volume_id": "vol-06e065bca44e6eae5"
+                            }
+                        }
+                    ],
+                    "aws_capacity_reservation_specification_ec2": {
+                        "capacity_reservation_preference": "open"
+                    },
+                    ...
+                },
+                "instance-02": {
+                    ...
+                }
+            }
+        },
+        "all": {
+            "children": [
+                "aws_ec2",
+                "ungrouped"
+            ]
+        },
+        "aws_ec2": {
+            "hosts": [
+                "instance-01",
+                "instance-02"
+            ]
+        }
+    }
+
+
+``strict`` and ``strict_permissions``
+-------------------------------------
+
+``strict: False`` will skip instead of producing an error if there are missing facts.
+
+``strict_permissions: False`` will ignore 403 errors rather than failing.
+
+
+``cache``
+---------
+
+The ``aws_ec2`` inventory plugin supports caching. It can use the general settings for the fact cache defined in the ``ansible.cfg`` file's ``[defaults]`` section or the inventory-specific settings defined in the ``[inventory]`` section.
+You can also define plugin-specific cache settings in the config file:
+
+.. code-block:: yaml
+
+    # demo.aws_ec2.yml
+    plugin: amazon.aws.aws_ec2
+    # This enables the cache.
+    cache: yes
+    # Plugin to be used.
+    cache_plugin: jsonfile
+    cache_timeout: 7200
+    # Location where files are stored in the cache.
+    cache_connection: /tmp/aws_inventory
+    cache_prefix: aws_ec2
+
+Here is an example of setting inventory caching with some fact caching defaults for the cache plugin used and the timeout in an ``ansible.cfg`` file:
+
+.. code-block:: ini
+
+    [defaults]
+    fact_caching = ansible.builtin.jsonfile
+    fact_caching_connection = /tmp/ansible_facts
+    cache_timeout = 3600
+
+    [inventory]
+    cache = yes
+    cache_connection = /tmp/ansible_inventory
+
+
+Complex Example
+===============
+
+Here is a complex ``aws_ec2`` example utilizing some of the previously listed options:
+
+.. code-block:: yaml
+
+    # demo.aws_ec2.yml
+    plugin: amazon.aws.aws_ec2
+    regions:
+      - us-east-1
+      - us-east-2
+    keyed_groups:
+      # add hosts to tag_Name_value groups for each aws_ec2 host's tags.Name variable.
+      - key: tags.Name
+        prefix: tag_Name_
+        separator: ""
+    groups:
+      # add hosts to the group development if any of the dictionary's keys or values is the word 'dev'.
+      development: "'dev' in (tags|list)"
+    filters:
+      tag:Name:
+        - 'instance-01'
+        - 'instance-03'
+    include_filters:
+      - tag:Name:
+          - 'instance-02'
+          - 'instance-04'
+    exclude_filters:
+      - tag:Name:
+          - 'instance-03'
+          - 'instance-04'
+    hostnames:
+      # You can also specify a list in order of precedence for hostname variables.
+      - ip-address
+      - dns-name
+      - tag:Name
+      - private-ip-address
+    compose:
+      # This sets the `ansible_host` variable to connect with the private IP address without changing the hostname.
+      ansible_host: private_ip_address
+
+If a host does not have the variables in the configuration above (i.e. ``tags.Name``, ``tags``, ``private_ip_address``), the host will not be added to groups other than those that the inventory plugin creates, and the ``ansible_host`` host variable will not be modified.
+
+Now the output of ``ansible-inventory -i demo.aws_ec2.yml --graph``:
+
+.. code-block:: text
+
+    @all:
+      |--@aws_ec2:
+      |  |--instance-01
+      |  |--instance-02
+      |--@tag_Name_instance_01:
+      |  |--instance-01
+      |--@tag_Name_instance_02:
+      |  |--instance-02
+      |--@ungrouped:
+
+
+Using Dynamic Inventory Inside Playbook
+=======================================
+
+If you want to use the dynamic inventory inside a playbook, you just need to reference the group name in the ``hosts`` field, as shown below.
+
+.. code-block:: yaml
+
+    ---
+    - name: Ansible Test Playbook
+      gather_facts: false
+      hosts: tag_Name_instance_02
+
+      tasks:
+        - name: Run Shell Command
+          command: echo "Hello World"
+
+You can then run the playbook with, for example, ``ansible-playbook -i demo.aws_ec2.yml playbook.yml``.
diff --git a/ansible_collections/amazon/aws/docs/docsite/rst/dev_guidelines.rst b/ansible_collections/amazon/aws/docs/docsite/rst/dev_guidelines.rst
new file mode 100644
index 000000000..f105cc78a
--- /dev/null
+++ b/ansible_collections/amazon/aws/docs/docsite/rst/dev_guidelines.rst
@@ -0,0 +1,1050 @@
+.. _ansible_collections.amazon.aws.docsite.dev_guide_intro:
+
+****************************************************
+Guidelines for Ansible Amazon AWS module development
+****************************************************
+
+The Ansible AWS collection (on `Galaxy `_, source code `repository `_) is maintained by the Ansible AWS Working Group. For further information see the `AWS working group community page `_. If you are planning to contribute AWS modules to Ansible then getting in touch with the working group is a good way to start, especially because a similar module may already be under development.
+
+.. contents::
+   :local:
+
+.. _ansible_collections.amazon.aws.docsite.dev_python:
+
+Requirements
+============
+
+Python Compatibility
+--------------------
+
+AWS content in Ansible 2.9 and 1.x collection releases supported Python 2.7 and newer.
+
+Starting with the 2.0 releases of both collections, Python 2.7 support will be ended in accordance with AWS' `end of Python 2.7 support `_.
Contributions to both collections that target the 2.0 or later collection releases can be written to support Python 3.6+ syntax.
+
+SDK Version Support
+-------------------
+
+Starting with the 2.0 releases of both collections, it is generally the policy to support the versions of botocore and boto3 that were released 12 months prior to the most recent major collection release, following semantic versioning (for example, 2.0.0, 3.0.0).
+
+Features and functionality that require newer versions of the SDK can be contributed provided they are noted in the module documentation:
+
+.. code-block:: yaml
+
+  DOCUMENTATION = '''
+  ---
+  module: ec2_vol
+  options:
+    throughput:
+      description:
+        - Volume throughput in MB/s.
+        - This parameter is only valid for gp3 volumes.
+        - Valid range is from 125 to 1000.
+        - Requires at least botocore version 1.19.27.
+      type: int
+      version_added: 1.4.0
+  '''
+
+And handled using the ``botocore_at_least`` helper method:
+
+.. code-block:: python
+
+  if module.params.get('throughput'):
+      if not module.botocore_at_least("1.19.27"):
+          module.fail_json(msg="botocore >= 1.19.27 is required to set the throughput for a volume")
+
+Starting with the 4.0 releases of both collections, all support for the original boto SDK has been dropped. AWS modules must be written using the botocore and boto3 SDKs.
+
+.. _ansible_collections.amazon.aws.docsite.dev_module_maint:
+
+Maintaining existing modules
+============================
+
+Changelogs
+----------
+
+A changelog fragment must be added to any PR that changes functionality or fixes
+a bug. More information about changelog fragments can be found in the
+`Making your PR merge-worthy section of the Ansible Development Cycle documentation`.
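+
+A fragment is a small YAML file placed in ``changelogs/fragments/``. A minimal sketch (the
+file name, section and wording are illustrative):
+
+.. code-block:: yaml
+
+  # changelogs/fragments/my_module-fix-frooble.yml
+  bugfixes:
+    - my_module - fixed handling of the frooble parameter.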
+
+Breaking Changes
+----------------
+
+Changes that are likely to break existing playbooks using the AWS collections should be
+avoided, should only be made in a major release, and where practical should be
+preceded by a deprecation cycle of at least 1 full major release. Deprecations
+may be backported to the stable branches.
+
+For example:
+
+- A deprecation added in release 3.0.0 may be removed in release 4.0.0.
+- A deprecation added in release 1.2.0 may be removed in release 3.0.0.
+
+Breaking changes include:
+
+- Removing a parameter.
+- Making a parameter ``required``.
+- Updating the default value of a parameter.
+- Changing or removing an existing return value.
+
+Adding new features
+-------------------
+
+Try to keep backward compatibility with versions of boto3/botocore that are at least a year old.
+This means that if you want to implement functionality that uses a new feature of boto3/botocore,
+it should only fail if that feature is explicitly used, with a message stating the missing feature
+and the minimum required version of botocore. (Feature support is usually defined in botocore and then
+used by boto3.)
+
+.. code-block:: python
+
+  module = AnsibleAWSModule(
+      argument_spec=argument_spec,
+      ...
+  )
+
+  if module.params.get('scope') == 'managed':
+      module.require_botocore_at_least('1.23.23', reason='to list managed rules')
+
+.. _ansible_collections.amazon.aws.docsite.dev_backports:
+
+Release policy and backporting merged PRs
+-----------------------------------------
+
+All amazon.aws and community.aws PRs must be merged to the ``main`` branch first. After a PR has
+been accepted and merged to the ``main`` branch, it can be backported to the stable branches.
+
+The ``main`` branch is a staging location for the next major version (X+1) of the collections and
+may include breaking changes.
+
+General backport policy:
+
+- New features, deprecations and minor changes can be backported to the latest stable release.
+- Bugfixes can be backported to the 2 latest stable releases.
+- Security fixes should be backported to at least the 2 latest stable releases.
+
+Where necessary, additional CI related changes may be introduced to older stable branches to
+ensure CI continues to function.
+
+The simplest mechanism for backporting PRs is by adding the ``backport-Y`` label to a PR. Once the
+PR has been merged the patchback bot will attempt to automatically create a backport PR.
+
+.. _ansible_collections.amazon.aws.docsite.dev_module_create:
+
+Creating new AWS modules
+========================
+
+When writing a new module it is important to think about the scope of the module. In general, try
+to do one thing and do it well.
+
+Where the Amazon APIs provide a distinction between dependent resources, such as S3 buckets and S3
+objects, this is often a good divider between modules. Additionally, resources which have a
+many-to-many relationship with another resource, such as IAM managed policies and IAM roles, are
+often best managed by two separate modules.
+
+While it's possible to write an ``s3`` module which manages all things related to S3, thoroughly
+testing and maintaining such a module is difficult. Similarly, while it would be possible to
+write a module that manages the base EC2 security group resource, and a second module to manage the
+rules on the security group, this would be contrary to what users of the module might anticipate.
+
+There is no hard and fast right answer, but it's important to think about it, and Amazon have often
+done this work for you when designing their APIs.
+
+Naming your module
+------------------
+
+Module names should include the name of the resource being managed and be prefixed with the AWS API
+that the module is based on. Where examples of a prefix don't already exist, a good rule of thumb is
+to use whatever client name you use with boto3 as a starting point.
+
+Unless something is a well known abbreviation of a major component of AWS (for example, VPC or ELB)
+avoid further abbreviating names and don't create new abbreviations independently.
+
+Where an AWS API primarily manages a single resource, the module managing this resource can be
+named as just the name of the API. However, consider using ``instance`` or ``cluster`` for clarity
+if Amazon refers to them using these names.
+
+Examples:
+
+- ``ec2_instance``
+- ``s3_object`` (previously named ``aws_s3``, but is primarily for manipulating S3 objects)
+- ``elb_classic_lb`` (previously ``ec2_elb_lb``, but is part of the ELB API, not EC2)
+- ``networkfirewall_rule_group``
+- ``networkfirewall`` (while this could be called ``networkfirewall_firewall`` the second firewall is redundant and the API is focused around creating these firewall resources)
+
+Note: Prior to the collections being split from Ansible Core, it was common to use ``aws_`` as a
+prefix to disambiguate services with a generic name, such as ``aws_secret``. This is no longer
+necessary, and the ``aws_`` prefix is reserved for services with a very broad effect where
+referencing the AWS API might cause confusion. For example, ``aws_region_info``, which
+connects to EC2 but provides global information about the regions enabled in an account for all
+services.
+
+Use boto3 and AnsibleAWSModule
+------------------------------
+
+All new AWS modules must use boto3/botocore and ``AnsibleAWSModule``.
+
+``AnsibleAWSModule`` greatly simplifies exception handling and library
+management, reducing the amount of boilerplate code. If you cannot
+use ``AnsibleAWSModule`` as a base, you must document the reason and request an exception to this rule.
+
+Importing botocore and boto3
+----------------------------
+
+The ``ansible_collections.amazon.aws.plugins.module_utils.ec2`` module and the
+``ansible_collections.amazon.aws.plugins.module_utils.core`` module both
+automatically import boto3 and botocore. If boto3 is missing from the system then the variable
+``HAS_BOTO3`` will be set to false. Normally, this means that modules don't need to import
+boto3 directly. There is no need to check ``HAS_BOTO3`` when using AnsibleAWSModule
+as the module does that check:
+
+.. code-block:: python
+
+  from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+  try:
+      import botocore
+  except ImportError:
+      pass  # handled by AnsibleAWSModule
+
+or:
+
+.. code-block:: python
+
+  from ansible.module_utils.basic import AnsibleModule
+  from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3
+  try:
+      import botocore
+  except ImportError:
+      pass  # handled by imported HAS_BOTO3
+
+  def main():
+
+      if not HAS_BOTO3:
+          module.fail_json(msg='boto3 and botocore are required for this module')
+
+Supporting Module Defaults
+--------------------------
+
+The existing AWS modules support using :ref:`module_defaults ` for common
+authentication parameters. To do the same for your new module, add an entry for it in
+``meta/runtime.yml``. These entries take the form of:
+
+.. code-block:: yaml
+
+  action_groups:
+    aws:
+      ...
+      aws_example_module
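+
+Users can then supply the common parameters once for every module in the group via
+``module_defaults``. A minimal sketch of a playbook using the group (``aws_example_module`` is
+the placeholder module name from above; the region and profile values are illustrative):
+
+.. code-block:: yaml
+
+  - hosts: localhost
+    module_defaults:
+      # Applies these parameters to every module in the amazon.aws "aws" action group.
+      group/amazon.aws.aws:
+        region: us-east-1
+        profile: dev-profile
+    tasks:
+      - amazon.aws.aws_example_module:
+          name: example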
+
+Module behavior
+---------------
+
+To reduce the chance of breaking changes occurring when new features are added,
+the module should avoid modifying the resource attribute when a parameter is
+not explicitly set in a task.
+
+By convention, when a parameter is explicitly set in a task, the module should
+set the resource attribute to match what was set in the task. In some cases,
+such as tags or associations, it can be helpful to add an additional parameter
+which can be set to change the behavior from replacive to additive. However, the
+default behavior should still be replacive rather than additive.
+
+See the `Dealing with tags`
+section for an example with ``tags`` and ``purge_tags``.
+
+.. _ansible_collections.amazon.aws.docsite.dev_module_connection:
+
+Connecting to AWS
+=================
+
+AnsibleAWSModule provides the ``resource`` and ``client`` helper methods for obtaining boto3 connections.
+These handle some of the more esoteric connection options, such as security tokens and boto profiles.
+
+If using the basic AnsibleModule then you should use ``get_aws_connection_info`` and then ``boto3_conn``
+to connect to AWS as these handle the same range of connection options.
+
+These helpers also check for missing profiles or a region not set when it needs to be, so you don't have to.
+
+An example of connecting to ec2 is shown below. Note that unlike boto, there is no ``NoAuthHandlerFound``
+exception; instead, an ``AuthFailure`` exception will be thrown when you use the
+connection. To ensure that authorization, parameter validation and permissions errors are all caught,
+you should catch ``ClientError`` and ``BotoCoreError`` exceptions with every boto3 connection call.
+See the exception handling section below:
+
+.. code-block:: python
+
+  module.client('ec2')
+
+or for the higher level ec2 resource:
+
+.. code-block:: python
+
+  module.resource('ec2')
+
+
+An example of the older style connection used for modules based on AnsibleModule rather than AnsibleAWSModule:
+
+.. code-block:: python
+
+  region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
+  connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
+
+
+Common Documentation Fragments for Connection Parameters
+--------------------------------------------------------
+
+There are four :ref:`common documentation fragments `
+that should be included into almost all AWS modules:
+
+* ``aws`` - contains the common boto3 connection parameters
+* ``ec2`` - contains the common region parameter required for many AWS modules
+* ``boto3`` - contains the minimum requirements for the collection
+* ``tags`` - contains the common tagging parameters used by many AWS modules
+
+These fragments should be used rather than re-documenting these properties to ensure consistency
+and that the more esoteric connection options are documented. For example:
+
+.. code-block:: python
+
+  DOCUMENTATION = '''
+  module: my_module
+  # some lines omitted here
+  extends_documentation_fragment:
+    - amazon.aws.aws
+    - amazon.aws.ec2
+    - amazon.aws.boto3
+  '''
+
+.. _ansible_collections.amazon.aws.docsite.dev_exceptions:
+
+Handling exceptions
+===================
+
+You should wrap any boto3 or botocore call in a try block. If an exception is thrown, then there
+are a number of possibilities for handling it.
+
+* Catch the general ``ClientError`` or look for a specific error code with
+  ``is_boto3_error_code``.
+* Use ``aws_module.fail_json_aws()`` to report the module failure in a standard way.
+* Retry using AWSRetry.
+* Use ``fail_json()`` to report the failure without using ``ansible_collections.amazon.aws.plugins.module_utils.core``.
+* Do something custom in the case where you know how to handle the exception.
+
+For more information on botocore exception handling see the `botocore error documentation `_.
+
+Using is_boto3_error_code
+-------------------------
+
+To use ``ansible_collections.amazon.aws.plugins.module_utils.core.is_boto3_error_code`` to catch a single
+AWS error code, call it in place of ``ClientError`` in your except clauses. In
+this example, *only* the ``InvalidGroup.NotFound`` error code will be caught,
+and any other error will be raised for handling elsewhere in the program.
+
+.. code-block:: python
+
+  try:
+      info = connection.describe_security_groups(**kwargs)
+  except is_boto3_error_code('InvalidGroup.NotFound'):
+      pass
+  else:
+      do_something(info)  # do something with the info that was successfully returned
+
+Using fail_json_aws()
+---------------------
+
+In the AnsibleAWSModule there is a special method, ``module.fail_json_aws()``, for nice reporting of
+exceptions. Call this on your exception and it will report the error together with a traceback for
+use in Ansible verbose mode.
+
+You should use the AnsibleAWSModule for all new modules, unless that is not possible.
+
+.. code-block:: python
+
+    from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+    # Set up module parameters
+    # module params code here
+
+    # Connect to AWS
+    # connection code here
+
+    # Make a call to AWS
+    name = module.params.get('name')
+    try:
+        result = connection.describe_frooble(FroobleName=name)
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+        module.fail_json_aws(e, msg="Couldn't obtain frooble %s" % name)
+
+Note that it should normally be acceptable to catch all normal exceptions here, however if you
+expect anything other than botocore exceptions you should test that everything works as expected.
+
+If you need to perform an action based on the error boto3 returned, use the error code and the
+``is_boto3_error_code()`` helper.
+
+.. code-block:: python
+
+    # Make a call to AWS
+    name = module.params.get('name')
+    try:
+        result = connection.describe_frooble(FroobleName=name)
+    except is_boto3_error_code('FroobleNotFound'):
+        workaround_failure()  # This is an error that we can work around
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg="Couldn't obtain frooble %s" % name)
+
+Using fail_json() and avoiding ansible_collections.amazon.aws.plugins.module_utils.core
+---------------------------------------------------------------------------------------
+
+Boto3 provides lots of useful information when an exception is thrown, so pass this to the user
+along with the message.
+
+.. code-block:: python
+
+    from ansible.module_utils.ec2 import HAS_BOTO3
+    try:
+        import botocore
+    except ImportError:
+        pass  # caught by imported HAS_BOTO3
+
+    # Connect to AWS
+    # connection code here
+
+    # Make a call to AWS
+    name = module.params.get('name')
+    try:
+        result = connection.describe_frooble(FroobleName=name)
+    except botocore.exceptions.ClientError as e:
+        module.fail_json(msg="Couldn't obtain frooble %s: %s" % (name, str(e)),
+                         exception=traceback.format_exc(),
+                         **camel_dict_to_snake_dict(e.response))
+
+Note: we use ``str(e)`` rather than ``e.message`` as the latter doesn't
+work with Python 3.
+
+If you need to perform an action based on the error boto3 returned, use the error code.
+
+.. code-block:: python
+
+    # Make a call to AWS
+    name = module.params.get('name')
+    try:
+        result = connection.describe_frooble(FroobleName=name)
+    except botocore.exceptions.ClientError as e:
+        if e.response['Error']['Code'] == 'FroobleNotFound':
+            workaround_failure()  # This is an error that we can work around
+        else:
+            module.fail_json(msg="Couldn't obtain frooble %s: %s" % (name, str(e)),
+                             exception=traceback.format_exc(),
+                             **camel_dict_to_snake_dict(e.response))
+    except botocore.exceptions.BotoCoreError as e:
+        module.fail_json(msg="Couldn't obtain frooble %s: %s" % (name, str(e)),
+                         exception=traceback.format_exc())
+
+.. _ansible_collections.amazon.aws.docsite.dev_ratelimits:
+
+API throttling (rate limiting) and pagination
+=============================================
+
+For methods that return a lot of results, boto3 often provides
+`paginators `_. If the method
+you're calling has ``NextToken`` or ``Marker`` parameters, you should probably
+check whether a paginator exists (the top of each boto3 service reference page has a link
+to Paginators, if the service has any). To use paginators, obtain a paginator object,
+call ``paginator.paginate`` with the appropriate arguments and then call ``build_full_result``.
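+As an illustration, here is a minimal sketch of the pattern, assuming an EC2 ``client`` and
+using the real ``describe_security_groups`` paginator (the filter values are illustrative):
+
+.. code-block:: python
+
+    # Obtain a paginator for the paginated describe call
+    paginator = client.get_paginator('describe_security_groups')
+
+    # paginate() takes the same arguments as the underlying call;
+    # build_full_result() walks every page and aggregates the results
+    result = paginator.paginate(
+        Filters=[{'Name': 'vpc-id', 'Values': ['vpc-0123456789abcdef0']}]
+    ).build_full_result()
+
+    groups = result['SecurityGroups']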
+
+Any time that you are calling the AWS API a lot, you may experience API throttling,
+and there is an ``AWSRetry`` decorator that can be used to ensure backoff. Because
+exception handling could interfere with the retry working properly (as AWSRetry needs to
+catch throttling exceptions to work correctly), you need to provide a backoff function
+and then put exception handling around the backoff function.
+
+You can use ``exponential_backoff`` or ``jittered_backoff`` strategies - see
+the cloud ``module_utils`` (``lib/ansible/module_utils/cloud.py``)
+and `AWS Architecture blog `_ for more details.
+
+The combination of these two approaches is then:
+
+.. code-block:: python
+
+    @AWSRetry.jittered_backoff(retries=5, delay=5)
+    def describe_some_resource_with_backoff(client, **kwargs):
+        paginator = client.get_paginator('describe_some_resource')
+        return paginator.paginate(**kwargs).build_full_result()['SomeResource']
+
+    def describe_some_resource(client, module):
+        filters = ansible_dict_to_boto3_filter_list(module.params['filters'])
+        try:
+            return describe_some_resource_with_backoff(client, Filters=filters)
+        except botocore.exceptions.ClientError as e:
+            module.fail_json_aws(e, msg="Could not describe some resource")
+
+Prior to Ansible 2.10, if the underlying ``describe_some_resource`` API call threw
+a ``ResourceNotFound`` exception, ``AWSRetry`` would take this as a cue to retry until
+it is not thrown (this is so that when creating a resource, we can just retry until it
+exists). This default was changed and it is now necessary to explicitly request
+this behaviour. This can be done by using the ``catch_extra_error_codes``
+argument on the decorator.
+
+.. code-block:: python
+
+    @AWSRetry.jittered_backoff(retries=5, delay=5, catch_extra_error_codes=['ResourceNotFound'])
+    def describe_some_resource_retry_missing(client, **kwargs):
+        return client.describe_some_resource(ResourceName=kwargs['name'])['Resources']
+
+    def describe_some_resource(client, module):
+        name = module.params.get('name')
+        try:
+            return describe_some_resource_retry_missing(client, name=name)
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            module.fail_json_aws(e, msg="Could not describe resource %s" % name)
+
+To make use of AWSRetry easier, it can now be wrapped around any call from a client
+returned by ``AnsibleAWSModule``. To add retries to a client, create the client with a
+retry decorator:
+
+.. code-block:: python
+
+    module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+
+Any calls from that client can be made to use the decorator passed at call-time
+using the ``aws_retry`` argument. By default, no retries are used.
+
+.. code-block:: python
+
+    ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+    ec2.describe_instances(InstanceIds=['i-123456789'], aws_retry=True)
+
+    # equivalent with normal AWSRetry
+    @AWSRetry.jittered_backoff(retries=10)
+    def describe_instances(client, **kwargs):
+        return client.describe_instances(**kwargs)
+
+    describe_instances(module.client('ec2'), InstanceIds=['i-123456789'])
+
+The call will be retried the specified number of times, so the calling functions
+don't need to be wrapped in the backoff decorator.
+
+The ``retries``, ``delay`` and ``max_delay`` parameters used by
+``AWSRetry.jittered_backoff`` can also be made customizable through module
+parameters; see the ``cloudformation`` module for an example. To keep all Amazon
+modules uniform, prefix these module parameters with ``backoff_``, so ``retries``
+becomes ``backoff_retries``, and likewise ``backoff_delay`` and ``backoff_max_delay``.
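+The following is a rough sketch of that convention; the parameter names follow the
+``backoff_`` prefix rule above, but the defaults and the ``ec2`` client are only illustrative:
+
+.. code-block:: python
+
+    argument_spec.update(
+        dict(
+            backoff_retries=dict(type='int', default=10),
+            backoff_delay=dict(type='int', default=3),
+            backoff_max_delay=dict(type='int', default=30),
+        )
+    )
+
+    # Build the retry decorator from the module parameters and attach it to the client
+    retry_decorator = AWSRetry.jittered_backoff(
+        retries=module.params['backoff_retries'],
+        delay=module.params['backoff_delay'],
+        max_delay=module.params['backoff_max_delay'],
+    )
+    client = module.client('ec2', retry_decorator=retry_decorator)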
+
+.. _ansible_collections.amazon.aws.docsite.dev_return:
+
+Returning Values
+================
+
+When you make a call using boto3, you will probably get back some useful information that you
+should return in the module. As well as information related to the call itself, you will also have
+some response metadata. It is OK to return this to the user as well, as they may find it useful.
+
+Boto3 returns most keys in CamelCase. Ansible follows Python standards for naming variables and usage.
+There is a useful helper function called ``camel_dict_to_snake_dict`` that allows for an easy conversion
+of the boto3 response to snake_case. It resides in ``module_utils/common/dict_transformations``.
+
+You should use this helper function and avoid changing the names of values returned by Boto3.
+E.g. if boto3 returns a value called 'SecretAccessKey' do not change it to 'AccessKey'.
+
+There is an optional parameter, ``ignore_list``, which is used to avoid converting a sub-tree
+of a dict. This is particularly useful for tags, where keys are case-sensitive.
+
+.. code-block:: python
+
+    # Make a call to AWS
+    resource = connection.aws_call()
+
+    # Convert resource response to snake_case
+    snaked_resource = camel_dict_to_snake_dict(resource, ignore_list=['Tags'])
+
+    # Return the resource details to the user without modifying tags
+    module.exit_json(changed=True, some_resource=snaked_resource)
+
+Note: The returned key representing the details of the specific resource (``some_resource`` above)
+should be a sensible approximation of the resource name. For example, ``volume`` for ``ec2_vol``,
+``volumes`` for ``ec2_vol_info``.
+
+Tags
+----
+
+Tags should be returned as a dictionary of key/value pairs, with each key being the tag's
+key and each value being the tag's value. It should be noted, however, that boto3 often returns
+tags as a list of dictionaries.
+
+There is a helper function in ``module_utils/ec2.py`` called ``boto3_tag_list_to_ansible_dict``
+(discussed in detail below in the `Helper functions`_ section) that allows for an easy conversion
+from boto3's returned tag list to the desired dictionary of tags to be returned by the module.
+
+Below is a full example of getting the result of an AWS call and returning the expected values:
+
+.. code-block:: python
+
+    # Make a call to AWS
+    result = connection.aws_call()
+
+    # Make result snake_case without modifying tags
+    snaked_result = camel_dict_to_snake_dict(result, ignore_list=['Tags'])
+
+    # Convert the boto3 list of tag dicts to a plain dict of tags
+    snaked_result['tags'] = boto3_tag_list_to_ansible_dict(result.get('Tags', []))
+
+    # Return the result to the user
+    module.exit_json(changed=True, **snaked_result)
+
+Info modules
+------------
+
+Info modules that can return information on multiple resources should return a list of
+dictionaries, with each dictionary containing information about that particular resource
+(i.e. ``security_groups`` in ``ec2_group_info``).
+
+In cases where the _info module only returns information on a singular resource
+(i.e. ``ec2_tag_info``), a singular dictionary should be returned as opposed to a list
+of dictionaries.
+
+In cases where the _info module returns no instances, an empty list ``[]`` should be returned.
+
+Keys in the returned dictionaries should follow the guidelines above and use snake_case.
+If a return value can be used as a parameter for its corresponding main module, the key should
+match either the parameter name itself, or an alias of that parameter.
+
+The following is an example of improper usage of a sample info module with its respective main module:
+
+.. code-block:: yaml
+
+    "security_groups": {
+        {
+            "description": "Created by ansible integration tests",
+            "group_id": "sg-050dba5c3520cba71",
+            "group_name": "ansible-test-87988625-unknown5c5f67f3ad09-icmp-1",
+            "ip_permissions": [],
+            "ip_permissions_egress": [],
+            "owner_id": "721066863947",
+            "tags": [
+                {
+                    "Key": "Tag_One",
+                    "Value": "Tag_One_Value"
+                },
+            ],
+            "vpc_id": "vpc-0cbc2380a326b8a0d"
+        }
+    }
+
+The sample output above shows a few mistakes in the sample security group info module:
+
+* ``security_groups`` is a dict of dicts, not a list of dicts.
+* ``tags`` appears to be directly returned from boto3, since they're a list of dicts.
+
+The following is what the sample output would look like, with the mistakes corrected:
+
+.. code-block:: yaml
+
+    "security_groups": [
+        {
+            "description": "Created by ansible integration tests",
+            "group_id": "sg-050dba5c3520cba71",
+            "group_name": "ansible-test-87988625-unknown5c5f67f3ad09-icmp-1",
+            "ip_permissions": [],
+            "ip_permissions_egress": [],
+            "owner_id": "721066863947",
+            "tags": {
+                "Tag_One": "Tag_One_Value",
+            },
+            "vpc_id": "vpc-0cbc2380a326b8a0d"
+        }
+    ]
+
+Deprecating return values
+-------------------------
+
+If changes need to be made to current return values, the new/"correct" keys should be
+returned **in addition to** the existing keys to preserve compatibility with existing
+playbooks. A deprecation should be added to the return values being replaced, initially
+placed at least 2 years out, on the 1st of a month.
+
+For example:
+
+.. code-block:: python
+
+    # Deprecate old `iam_user` return key to be replaced by `user` introduced on 2022-04-10
+    module.deprecate("The 'iam_user' return key is deprecated and will be replaced by 'user'. Both values are returned for now.",
+                     date='2024-05-01', collection_name='community.aws')
+
+.. _ansible_collections.amazon.aws.docsite.dev_policies:
+
+Dealing with IAM JSON policy
+============================
+
+If your module accepts IAM JSON policies then set the type to 'json' in the module spec. For
+example:
+
+.. code-block:: python
+
+    argument_spec.update(
+        dict(
+            policy=dict(required=False, default=None, type='json'),
+        )
+    )
+
+Note that AWS is unlikely to return the policy in the same order that it was submitted. Therefore,
+use the ``compare_policies`` helper function which handles this variance.
+
+``compare_policies`` takes two dictionaries, recursively sorts them, makes them hashable
+for comparison, and returns ``True`` if they are different.
+
+.. code-block:: python
+
+    import json
+
+    from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies
+
+    # some lines skipped here
+
+    # Get the policy from AWS
+    current_policy = json.loads(aws_object.get_policy())
+    user_policy = json.loads(module.params.get('policy'))
+
+    # Compare the user submitted policy to the current policy ignoring order
+    if compare_policies(user_policy, current_policy):
+        # Update the policy
+        aws_object.set_policy(user_policy)
+    else:
+        # Nothing to do
+        pass
+
+.. _ansible_collections.amazon.aws.docsite.dev_tags:
+
+Dealing with tags
+=================
+
+AWS has a concept of resource tags. Usually the boto3 API has separate calls
+for tagging and untagging a resource. For example, the EC2 API has
+``create_tags`` and ``delete_tags`` calls.
+
+When adding tagging support, Ansible AWS modules should add a ``tags`` parameter
+that defaults to ``None`` and a ``purge_tags`` parameter that defaults to
+``True``.
+
+.. code-block:: python
+
+    argument_spec.update(
+        dict(
+            tags=dict(type='dict', required=False, default=None),
+            purge_tags=dict(type='bool', required=False, default=True),
+        )
+    )
+
+When the ``purge_tags`` parameter is set to ``True`` **and** the ``tags``
+parameter is explicitly set in the task, then any tags not explicitly set in
+``tags`` should be removed.
+
+If the ``tags`` parameter is not set then tags should not be modified, even if
+``purge_tags`` is set to ``True``. This means that removing all tags requires
+``tags`` be explicitly set to an empty dictionary ``{}`` in the Ansible task.
+
+There is a helper function ``compare_aws_tags`` to ease dealing with tags. It
+compares two dictionaries, the current tags and the desired tags, and returns
+the tags to set and the tags to delete. See the `Helper functions`_ section below
+for more detail.
+
+There is also a documentation fragment ``amazon.aws.tags`` which should be
+included when adding tagging support.
+
+.. _ansible_collections.amazon.aws.docsite.dev_helpers:
+
+Helper functions
+================
+
+Along with the connection functions in the Ansible ``ec2.py`` module_utils, there are some other
+useful functions detailed below.
+
+camel_dict_to_snake_dict
+------------------------
+
+boto3 returns results in a dict. The keys of the dict are in CamelCase format. In keeping with
+Ansible format, this function will convert the keys to snake_case.
+
+``camel_dict_to_snake_dict`` takes an optional parameter called ``ignore_list`` which is a list of
+keys not to convert (this is usually useful for the ``tags`` dict, whose child keys should remain
+with case preserved).
+
+Another optional parameter is ``reversible``. By default, ``HTTPEndpoint`` is converted to ``http_endpoint``,
+which would then be converted by ``snake_dict_to_camel_dict`` to ``HttpEndpoint``.
+Passing ``reversible=True`` converts ``HTTPEndpoint`` to ``h_t_t_p_endpoint``, which converts back to ``HTTPEndpoint``.
+
+snake_dict_to_camel_dict
+------------------------
+
+``snake_dict_to_camel_dict`` converts snake_cased keys to camel case. By default, because it was
+first introduced for ECS purposes, this converts to dromedaryCase. An optional
+parameter called ``capitalize_first``, which defaults to ``False``, can be used to convert to CamelCase.
+
+ansible_dict_to_boto3_filter_list
+---------------------------------
+
+Converts an Ansible dict of filters to a boto3-friendly list of filter dicts. This is useful for
+any boto3 ``_facts`` modules.
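+For example, a short sketch (the filter name is a standard EC2 filter; the surrounding
+``client`` is assumed to already exist):
+
+.. code-block:: python
+
+    from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+
+    # {'instance-state-name': 'running'} becomes
+    # [{'Name': 'instance-state-name', 'Values': ['running']}]
+    filters = ansible_dict_to_boto3_filter_list({'instance-state-name': 'running'})
+    response = client.describe_instances(Filters=filters)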
+
+boto_exception
+--------------
+
+Pass an exception returned from boto or boto3, and this function will consistently get the
+message from the exception.
+
+Deprecated: use ``AnsibleAWSModule``'s ``fail_json_aws`` instead.
+
+boto3_tag_list_to_ansible_dict
+------------------------------
+
+Converts a boto3 tag list to an Ansible dict. Boto3 returns tags as a list of dicts containing keys
+called 'Key' and 'Value' by default. These key names can be overridden when calling the function.
+For example, if you have already converted your list of tags to snake_case, you may want to pass
+lowercase key names instead, in other words, 'key' and 'value'.
+
+This function converts the list into a single dict where the dict key is the tag key and the dict
+value is the tag value.
+
+ansible_dict_to_boto3_tag_list
+------------------------------
+
+Opposite of above. Converts an Ansible dict to a boto3 tag list of dicts. You can again override
+the key names used if 'Key' and 'Value' are not suitable.
+
+get_ec2_security_group_ids_from_names
+-------------------------------------
+
+Pass this function a list of security group names, or a combination of security group names and
+IDs, and it will return a list of IDs. You should also pass the VPC ID if known, because
+security group names are not necessarily unique across VPCs.
+
+compare_policies
+----------------
+
+Pass two dicts of policies and this function will return ``True`` if there are any meaningful
+differences. It recursively sorts the dicts and makes them hashable before comparison.
+
+This method should be used any time policies are being compared so that a change in order
+doesn't result in unnecessary changes.
+
+compare_aws_tags
+----------------
+
+Pass two dicts of tags and an optional purge parameter, and this function will return a dict
+containing the key/value pairs you need to set and a list of tag key names that you need to remove.
+Purge is ``True`` by default. If purge is ``False`` then any existing tags will not be modified.
+
+This function is useful when using boto3 ``add_tags`` and ``remove_tags`` functions. Be sure to use the
+other helper function ``boto3_tag_list_to_ansible_dict`` to get an appropriate tag dict before
+calling this function. Since the AWS APIs are not uniform (for example, EC2 is different from Lambda),
+this will work without modification for some services (such as Lambda), while others may need the
+values modified before use (such as EC2, which requires the tags to unset to be in the form
+``[{'Key': key1}, {'Key': key2}]``).
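+The following is a rough sketch of the typical flow for an EC2 resource; it assumes
+``current_tag_list`` came from a describe call and that ``resource_id`` and ``client``
+already exist:
+
+.. code-block:: python
+
+    from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (
+        ansible_dict_to_boto3_tag_list,
+        boto3_tag_list_to_ansible_dict,
+        compare_aws_tags,
+    )
+
+    # Convert the boto3 tag list into a dict before comparing
+    current_tags = boto3_tag_list_to_ansible_dict(current_tag_list)
+    tags_to_set, tags_to_unset = compare_aws_tags(
+        current_tags,
+        module.params['tags'],
+        purge_tags=module.params['purge_tags'],
+    )
+    if tags_to_unset:
+        # EC2 expects the tags to unset as [{'Key': key1}, {'Key': key2}]
+        client.delete_tags(Resources=[resource_id],
+                           Tags=[{'Key': key} for key in tags_to_unset])
+    if tags_to_set:
+        client.create_tags(Resources=[resource_id],
+                           Tags=ansible_dict_to_boto3_tag_list(tags_to_set))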
+
+.. _ansible_collections.amazon.aws.docsite.dev_tests:
+
+Integration Tests for AWS Modules
+=================================
+
+All new AWS modules should include integration tests to ensure that any changes in AWS APIs that
+affect the module are detected. At a minimum this should cover the key API calls and check that the
+documented return values are present in the module result.
+
+For general information on running the integration tests see the :ref:`Integration Tests page of the
+Module Development Guide `, especially the section on configuration for cloud tests.
+
+The integration tests for your module should be added in ``test/integration/targets/MODULE_NAME``.
+
+You must also have an aliases file in ``test/integration/targets/MODULE_NAME/aliases``. This file serves
+two purposes. First, it marks the test as an AWS test, causing the test framework to make AWS
+credentials available during the test run. Second, it puts the test in a test group, causing it to
+be run in the continuous integration build.
+
+Tests for new modules should be added to the ``cloud/aws`` group. In general, just copy
+an existing aliases file such as the `aws_s3 tests aliases file `_.
+
+Custom SDK versions for Integration Tests
+-----------------------------------------
+
+By default, integration tests will run against the earliest supported version of
+the AWS SDK. The current supported versions can be found in
+``tests/integration/constraints.txt`` and should not be updated. Where a module
+needs access to a later version of the SDK, this can be installed by depending on
+the ``setup_botocore_pip`` role and setting the ``botocore_version`` variable in
+the ``meta/main.yml`` file for your tests.
+
+.. code-block:: yaml
+
+    dependencies:
+      - role: setup_botocore_pip
+        vars:
+          botocore_version: "1.20.24"
+
+Creating EC2 instances in Integration Tests
+-------------------------------------------
+
+When started, the integration tests will be passed ``aws_region`` as an extra var.
+Any resources created should be created in this region; this includes EC2
+instances. Since AMIs are region-specific, there is a role which will query the
+APIs for an AMI to use and set the ``ec2_ami_id`` fact. This role can be included
+by adding the ``setup_ec2_facts`` role as a dependency in the ``meta/main.yml``
+file for your tests.
+
+.. code-block:: yaml
+
+    dependencies:
+      - role: setup_ec2_facts
+
+The ``ec2_ami_id`` fact can then be used in the tests.
+
+.. code-block:: yaml
+
+    - name: Create launch configuration 1
+      community.aws.ec2_lc:
+        name: '{{ resource_prefix }}-lc1'
+        image_id: '{{ ec2_ami_id }}'
+        assign_public_ip: yes
+        instance_type: '{{ ec2_instance_type }}'
+        security_groups: '{{ sg.group_id }}'
+        volumes:
+          - device_name: /dev/xvda
+            volume_size: 10
+            volume_type: gp2
+            delete_on_termination: true
+
+To improve test result reproducibility across regions, tests should use this
+role and the fact it provides to choose an AMI to use.
+
+Resource naming in Integration Tests
+------------------------------------
+
+AWS has a range of limitations for the name of resources. Where possible,
+resource names should include a string which makes the resource names unique
+to the test.
+
+The ``ansible-test`` tool used for running the integration tests provides two
+helpful extra vars: ``resource_prefix`` and ``tiny_prefix``, which are unique to the
+test set and should generally be used as part of the name. ``resource_prefix`` will
+generate a prefix based on the host the test is being run on. Sometimes this may
+result in a resource name that exceeds the character limit allowed by AWS. In these
+cases, ``tiny_prefix`` will provide a 12-character randomly generated prefix.
+
+AWS Credentials for Integration Tests
+-------------------------------------
+
+The testing framework handles running the test with appropriate AWS credentials; these are made
+available to your test in the following variables:
+
+* ``aws_region``
+* ``aws_access_key``
+* ``aws_secret_key``
+* ``security_token``
+
+All invocations of AWS modules in the test should set these parameters. To avoid duplicating these
+for every call, it's preferable to use :ref:`module_defaults <module_defaults>`. For example:
+
+.. code-block:: yaml
+
+    - name: set connection information for aws modules and run tasks
+      module_defaults:
+        group/aws:
+          aws_access_key: "{{ aws_access_key }}"
+          aws_secret_key: "{{ aws_secret_key }}"
+          security_token: "{{ security_token | default(omit) }}"
+          region: "{{ aws_region }}"
+
+      block:
+
+        - name: Do Something
+          ec2_instance:
+            ... params ...
+
+        - name: Do Something Else
+          ec2_instance:
+            ... params ...
+
+AWS Permissions for Integration Tests
+-------------------------------------
+
+As explained in the :ref:`Integration Test guide `,
+there are defined IAM policies in `mattclay/aws-terminator `_ that contain the necessary permissions
+to run the AWS integration tests.
+
+If your module interacts with a new service or otherwise requires new permissions, tests will fail when you submit a pull request and the
+`Ansibullbot `_ will tag your PR as needing revision.
+We do not automatically grant additional permissions to the roles used by the continuous integration builds.
+You will need to raise a Pull Request against `mattclay/aws-terminator `_ to add them.
+
+If your PR has test failures, check carefully to be certain the failure is only due to missing permissions. If you've ruled out other sources of failure, add a comment with the ``ready_for_review``
+tag and explain that it's due to missing permissions.
+
+Your pull request cannot be merged until the tests are passing. If your pull request is failing due to missing permissions,
+you must collect the minimum IAM permissions required to run the tests.
+
+There are two ways to figure out which IAM permissions you need for your PR to pass:
+
+* Start with the most permissive IAM policy, run the tests to collect information about which resources your tests actually use, then construct a policy based on that output. This approach only works on modules that use ``AnsibleAWSModule``.
+* Start with the least permissive IAM policy, run the tests to discover a failure, add permissions for the resource that addresses that failure, then repeat. If your module uses ``AnsibleModule`` instead of ``AnsibleAWSModule``, you must use this approach.
+
+To start with the most permissive IAM policy:
+
+1) `Create an IAM policy `_ that allows all actions (set ``Action`` and ``Resource`` to ``*``).
+2) Run your tests locally with this policy. On AnsibleAWSModule-based modules, the ``debug_botocore_endpoint_logs`` option is automatically set to ``yes``, so you should see a list of AWS ACTIONS after the PLAY RECAP showing all the permissions used. If your tests use a boto/AnsibleModule module, you must start with the least permissive policy (see below).
+3) Modify your policy to allow only the actions your tests use. Restrict account, region, and prefix where possible. Wait a few minutes for your policy to update.
+4) Run the tests again with a user or role that allows only the new policy.
+5) If the tests fail, troubleshoot (see tips below), modify the policy, run the tests again, and repeat the process until the tests pass with a restrictive policy.
+6) Open a pull request proposing the minimum required policy to the `CI policies `_.
+
+To start from the least permissive IAM policy:
+
+1) Run the integration tests locally with no IAM permissions.
+2) Examine the error when the tests reach a failure.
+
+   a) If the error message indicates the action used in the request, add the action to your policy.
+   b) If the error message does not indicate the action used in the request:
+
+      - Usually the action is a CamelCase version of the method name - for example, for an ec2 client the method ``describe_security_groups`` correlates to the action ``ec2:DescribeSecurityGroups``.
+      - Refer to the documentation to identify the action.
+
+   c) If the error message indicates the resource ARN used in the request, limit the action to that resource.
+   d) If the error message does not indicate the resource ARN used:
+
+      - Determine if the action can be restricted to a resource by examining the documentation.
+      - If the action can be restricted, use the documentation to construct the ARN and add it to the policy.
+
+3) Add the action or resource that caused the failure to `an IAM policy `_. Wait a few minutes for your policy to update.
+4) Run the tests again with this policy attached to your user or role.
+5) If the tests still fail at the same place with the same error you will need to troubleshoot (see tips below). If the first test passes, repeat steps 2 and 3 for the next error. Repeat the process until the tests pass with a restrictive policy.
+6) Open a pull request proposing the minimum required policy to the `CI policies `_.
+
+Troubleshooting IAM policies
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- When you make changes to a policy, wait a few minutes for the policy to update before re-running the tests.
+- Use the `policy simulator `_ to verify that each action (limited by resource when applicable) in your policy is allowed.
+- If you're restricting actions to certain resources, replace resources temporarily with ``*``. If the tests pass with wildcard resources, there is a problem with the resource definition in your policy.
+- If the initial troubleshooting above doesn't provide any more insight, AWS may be using additional undisclosed resources and actions.
+- Examine the AWS FullAccess policy for the service for clues.
+- Re-read the AWS documentation, especially the list of `Actions, Resources and Condition Keys `_ for the various AWS services.
+- Look at the `cloudonaut `_ documentation as a troubleshooting cross-reference.
+- Use a search engine.
+- Ask in the #ansible-aws chat channel (using Matrix at ansible.im or using IRC at `irc.libera.chat `_).
+
+Unsupported Integration tests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+There are a limited number of reasons why it may not be practical to run integration
+tests for a module within CI. Where these apply, you should add the keyword
+``unsupported`` to the aliases file in ``test/integration/targets/MODULE_NAME/aliases``,
+as shown in the example after this list.
+
+Some cases where tests should be marked as unsupported:
+
+1) The tests take longer than 10 or 15 minutes to complete.
+2) The tests create expensive resources.
+3) The tests create inline policies.
+4) The tests require the existence of external resources.
+5) The tests manage account-level security policies such as the password policy or AWS Organizations.
+
+Where one of these reasons applies, you should open a pull request proposing the minimum required policy to the
+`unsupported test policies `_.
+
+Unsupported integration tests will not be automatically run by CI. However, the
+necessary policies should be available so that the tests can be manually run by
+someone performing a PR review or writing a patch.
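+For example, a minimal ``test/integration/targets/MODULE_NAME/aliases`` file for a test that
+cannot run in CI might look like the following sketch (whether the ``cloud/aws`` group is still
+listed alongside ``unsupported`` depends on the test):
+
+.. code-block:: none
+
+    cloud/aws
+    unsupported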
diff --git a/ansible_collections/amazon/aws/docs/docsite/rst/guide_aws.rst b/ansible_collections/amazon/aws/docs/docsite/rst/guide_aws.rst
new file mode 100644
index 000000000..a05d21221
--- /dev/null
+++ b/ansible_collections/amazon/aws/docs/docsite/rst/guide_aws.rst
@@ -0,0 +1,302 @@
+.. _ansible_collections.amazon.aws.docsite.aws_intro:
+
+*************************
+Amazon Web Services Guide
+*************************
+
+The ``amazon.aws`` collection contains a number of modules and plugins for controlling Amazon Web Services (AWS). This guide explains how to use the modules and inventory scripts to automate your AWS resources with Ansible.
+
+.. contents::
+   :local:
+
+Requirements for the AWS modules are minimal.
+
+All of the modules require and are tested against recent versions of botocore and boto3. Starting with the 2.0 AWS collection releases, it is generally the policy of the collections to support the versions of these libraries released 12 months prior to the most recent major collection revision. Individual modules may require a more recent library version to support specific features, or may require the boto library; check the module documentation for the minimum required version for each module. You must have the boto3 Python module installed on your control machine. You can install these modules from your OS distribution or using the Python package installer: ``pip install boto3``.
+
+Starting with the 2.0 releases of both collections, Python 2.7 support will be ended in accordance with AWS' `end of Python 2.7 support `_ and Python 3.6 or greater will be required.
+
+Whereas classically Ansible will execute tasks in its host loop against multiple remote machines, most cloud-control steps occur on your local machine with reference to the regions to control.
+
+In your playbook steps we'll typically be using the following pattern for provisioning steps::
+
+    - hosts: localhost
+      gather_facts: False
+      tasks:
+        - ...
+
+.. _ansible_collections.amazon.aws.docsite.aws_authentication:
+
+Authentication
+``````````````
+
+Authentication with the AWS-related modules is handled by either
+specifying your access and secret key as environment variables or as module arguments.
+
+For environment variables::
+
+    export AWS_ACCESS_KEY_ID='AK123'
+    export AWS_SECRET_ACCESS_KEY='abc123'
+
+For storing these in a vars_file, ideally encrypted with ansible-vault::
+
+    ---
+    aws_access_key: "--REMOVED--"
+    aws_secret_key: "--REMOVED--"
+
+Note that if you store your credentials in a vars_file, you need to refer to them in each AWS module. For example::
+
+    - amazon.aws.ec2_instance:
+        aws_access_key: "{{ aws_access_key }}"
+        aws_secret_key: "{{ aws_secret_key }}"
+        key_name: "example-ssh-key"
+        image_id: "..."
+
+Or they can be specified using "module_defaults" at the top of a playbook::
+
+    # demo_setup.yml
+
+    - hosts: localhost
+      module_defaults:
+        group/aws:
+          aws_access_key: '{{ aws_access_key }}'
+          aws_secret_key: '{{ aws_secret_key }}'
+          region: '{{ region }}'
+      tasks:
+        - amazon.aws.ec2_instance:
+            key_name: "example-ssh-key"
+            image_id: "..."
+
+Credentials can also be accessed from a `Credentials Profile `_::
+
+    - amazon.aws.ec2_instance:
+        aws_profile: default
+        key_name: "example-ssh-key"
+        image_id: "..."
+
+.. _ansible_collections.amazon.aws.docsite.aws_provisioning:
+
+Provisioning
+````````````
+
+The ec2_instance module provisions and de-provisions instances within EC2.
+
+An example of creating an instance with a public IP assigned follows.
+
+The "name" parameter will create a "tag:Name" on the instance. Additional tags can be specified with the "tags" parameter::
+
+    # demo_setup.yml
+
+    - hosts: localhost
+      gather_facts: False
+
+      tasks:
+
+        - name: Provision an EC2 instance with a public IP address
+          amazon.aws.ec2_instance:
+            name: Demo
+            key_name: "example-ssh-key"
+            vpc_subnet_id: subnet-5ca1ab1e
+            instance_type: c5.large
+            security_group: default
+            network:
+              assign_public_ip: true
+            image_id: ami-123456
+            tags:
+              Environment: Testing
+          register: result
+
+The data about the instance that has been created is saved by the "register" keyword in the variable named "result".
+
+From this, we'll use the add_host module to dynamically create a host group consisting of these new instances. This facilitates performing configuration actions on the hosts immediately in a subsequent task::
+
+    # demo_setup.yml
+
+    - hosts: localhost
+      gather_facts: False
+
+      tasks:
+
+        - name: Provision an EC2 instance with a public IP address
+          amazon.aws.ec2_instance:
+            name: Demo
+            key_name: "example-ssh-key"
+            vpc_subnet_id: subnet-5ca1ab1e
+            instance_type: c5.large
+            security_group: default
+            network:
+              assign_public_ip: true
+            image_id: ami-123456
+            tags:
+              Environment: Testing
+          register: result
+
+        - name: Add all instance public IPs to host group
+          add_host: hostname={{ item.public_ip }} groups=ec2hosts
+          loop: "{{ result.instances }}"
+
+With the host group now created, a second play at the bottom of the same provisioning playbook file might now have some configuration steps::
+
+    # demo_setup.yml
+
+    - name: Provision a set of instances
+      hosts: localhost
+      # ... AS ABOVE ...
+
+    - hosts: ec2hosts
+      name: configuration play
+      user: ec2-user
+      gather_facts: true
+
+      tasks:
+
+        - name: Check NTP service
+          service: name=ntpd state=started
+
+.. _ansible_collections.amazon.aws.docsite.aws_security_groups:
+
+Security Groups
+```````````````
+
+Security groups on AWS are stateful. The response of a request from your instance is allowed to flow in regardless of inbound security group rules and vice-versa.
+If you want to allow traffic only to the AWS S3 service, you need to fetch the current IP ranges of AWS S3 for one region and apply them as an egress rule::
+
+    - name: fetch raw ip ranges for aws s3
+      set_fact:
+        raw_s3_ranges: "{{ lookup('aws_service_ip_ranges', region='eu-central-1', service='S3', wantlist=True) }}"
+
+    - name: prepare list structure for ec2_group module
+      set_fact:
+        s3_ranges: "{{ s3_ranges | default([]) + [{'proto': 'all', 'cidr_ip': item, 'rule_desc': 'S3 Service IP range'}] }}"
+      loop: "{{ raw_s3_ranges }}"
+
+    - name: set S3 IP ranges to egress rules
+      ec2_group:
+        name: aws_s3_ip_ranges
+        description: allow outgoing traffic to aws S3 service
+        region: eu-central-1
+        state: present
+        vpc_id: vpc-123456
+        purge_rules: true
+        purge_rules_egress: true
+        rules: []
+        rules_egress: "{{ s3_ranges }}"
+        tags:
+          Name: aws_s3_ip_ranges
+
+.. _ansible_collections.amazon.aws.docsite.aws_host_inventory:
+
+Host Inventory
+``````````````
+
+Once your nodes are spun up, you'll probably want to talk to them again. With a cloud setup, it's best not to maintain a static list of cloud hostnames
+in text files. Rather, the best way to handle this is to use the aws_ec2 inventory plugin. See :ref:`dynamic_inventory`.
+
+The plugin will also return instances that were created outside of Ansible and allow Ansible to manage them.
+
+.. _ansible_collections.amazon.aws.docsite.aws_tags_and_groups:
+
+Tags And Groups And Variables
+`````````````````````````````
+
+When using the inventory plugin, you can configure extra inventory structure based on the metadata returned by AWS.
+
+For instance, you might use ``keyed_groups`` to create groups from instance tags::
+
+    plugin: amazon.aws.aws_ec2
+    keyed_groups:
+      - prefix: tag
+        key: tags
+
+You can then target all instances with a "class" tag where the value is "webserver" in a play::
+
+    - hosts: tag_class_webserver
+      tasks:
+        - ping
+
+You can also use these groups with 'group_vars' to set variables that are automatically applied to matching instances.
+
+.. _ansible_collections.amazon.aws.docsite.aws_pull:
+
+Autoscaling with Ansible Pull
+`````````````````````````````
+
+Amazon Autoscaling features automatically increase or decrease capacity based on load. There are also Ansible modules shown in the cloud documentation that
+can configure autoscaling policy.
+
+When nodes come online, it may not be sufficient to wait for the next cycle of an ansible command to come along and configure that node.
+
+To handle this, pre-bake machine images which contain the necessary ansible-pull invocation. Ansible-pull is a command line tool that fetches a playbook from a git server and runs it locally.
+
+One of the challenges of this approach is that there needs to be a centralized way to store data about the results of pull commands in an autoscaling context.
+For this reason, the autoscaling solution provided in the next section can be a better approach.
+
+Read :ref:`ansible-pull` for more information on pull-mode playbooks.
+
+.. _ansible_collections.amazon.aws.docsite.aws_autoscale:
+
+Autoscaling with Ansible Automation Platform
+````````````````````````````````````````````
+
+`Ansible Automation Platform (AAP) `_
+also contains a very nice feature for auto-scaling use cases. In this mode, a simple curl script can call
+a defined URL and the server will "dial out" to the requester and configure an instance that is spinning up. This can be a great way
+to reconfigure ephemeral nodes. See the install and product documentation for more details.
+
+A benefit of using the callback in AAP over pull mode is that job results are still centrally recorded and less information has to be shared
+with remote hosts.
+
+.. _ansible_collections.amazon.aws.docsite.aws_cloudformation_example:
+
+Ansible With (And Versus) CloudFormation
+````````````````````````````````````````
+
+CloudFormation is an Amazon technology for defining a cloud stack as a JSON or YAML document.
+
+Ansible modules provide an easier-to-use interface than CloudFormation in many cases, without defining a complex JSON/YAML document.
+This is recommended for most users.
+
+However, for users that have decided to use CloudFormation, there is an Ansible module that can be used to apply a CloudFormation template
+to Amazon.
+
+When using Ansible with CloudFormation, typically Ansible will be used with a tool like Packer to build images, and CloudFormation will launch
+those images, or Ansible will be invoked through user data once the image comes online, or a combination of the two.
+
+Please see the examples in the Ansible CloudFormation module for more details.
+
+.. _ansible_collections.amazon.aws.docsite.aws_image_build:
+
+AWS Image Building With Ansible
+```````````````````````````````
+
+Many users may want to have images boot to a more complete configuration rather than configuring them entirely after instantiation.
To do this, +one of many programs can be used with Ansible playbooks to define and upload a base image, which will then get its own AMI ID for usage with +the ec2 module or other Ansible AWS modules such as ec2_asg or the cloudformation module. Possible tools include Packer, aminator, and Ansible's +ec2_ami module. + +Generally speaking, we find most users using Packer. + +See the Packer documentation of the `Ansible local Packer provisioner `_ and `Ansible remote Packer provisioner `_. + +If you do not want to adopt Packer at this time, configuring a base-image with Ansible after provisioning (as shown above) is acceptable. + +.. _ansible_collections.amazon.aws.docsite.aws_next_steps: + +Next Steps: Explore Modules +``````````````````````````` + +Ansible ships with lots of modules for configuring a wide array of EC2 services. Browse the "Cloud" category of the module +documentation for a full list with examples. + +.. seealso:: + + :ref:`list_of_collections` + Browse existing collections, modules, and plugins + :ref:`working_with_playbooks` + An introduction to playbooks + :ref:`playbooks_delegation` + Delegation, useful for working with loud balancers, clouds, and locally executed steps. + `User Mailing List `_ + Have a question? Stop by the google group! + `irc.libera.chat `_ + #ansible IRC chat channel diff --git a/ansible_collections/amazon/aws/meta/runtime.yml b/ansible_collections/amazon/aws/meta/runtime.yml new file mode 100644 index 000000000..ea227181b --- /dev/null +++ b/ansible_collections/amazon/aws/meta/runtime.yml @@ -0,0 +1,128 @@ +--- +requires_ansible: '>=2.11.0' +action_groups: + aws: + - autoscaling_group + - autoscaling_group_info + - aws_az_info + - aws_caller_info + - aws_s3 + - cloudformation + - cloudformation_info + - cloudtrail + - cloudtrail_info + - cloudwatch_metric_alarm + - cloudwatchevent_rule + - cloudwatchevent_rule + - cloudwatchlogs_log_group + - cloudwatchlogs_log_group_info + - cloudwatchlogs_log_group_metric_filter + - cloudwatch_metric_alarm_info + - ec2_ami + - ec2_ami_info + - ec2_eip + - ec2_eip_info + - ec2_elb_lb + - ec2_eni + - ec2_eni_info + - ec2_group + - ec2_group_info + - ec2_instance + - ec2_instance_info + - ec2_key + - ec2_security_group + - ec2_security_group_info + - ec2_snapshot + - ec2_snapshot_info + - ec2_spot_instance + - ec2_spot_instance_info + - ec2_tag + - ec2_tag_info + - ec2_vol + - ec2_vol_info + - ec2_vpc_dhcp_option + - ec2_vpc_dhcp_option_info + - ec2_vpc_endpoint + - ec2_vpc_endpoint_info + - ec2_vpc_endpoint_service_info + - ec2_vpc_igw + - ec2_vpc_igw_info + - ec2_vpc_nat_gateway + - ec2_vpc_nat_gateway_info + - ec2_vpc_net + - ec2_vpc_net_info + - ec2_vpc_route_table + - ec2_vpc_route_table_info + - ec2_vpc_subnet + - ec2_vpc_subnet_info + - elb_application_lb + - elb_application_lb_info + - elb_classic_lb + - execute_lambda + - iam_policy + - iam_policy_info + - iam_user + - iam_user_info + - kms_key + - kms_key_info + - lambda + - lambda_alias + - lambda_event + - lambda_execute + - lambda_info + - lambda_layer + - lambda_layer_info + - lambda_policy + - rds_cluster + - rds_cluster_info + - rds_cluster_snapshot + - rds_instance + - rds_instance_info + - rds_instance_snapshot + - rds_option_group + - rds_option_group_info + - rds_param_group + - rds_snapshot_info + - rds_subnet_group + - route53 + - route53_health_check + - route53_info + - route53_zone + - s3_bucket + - s3_object + - s3_object_info +plugin_routing: + action: + aws_s3: + redirect: amazon.aws.s3_object + modules: + aws_kms: + # Deprecation for this 
alias should not *start* prior to 2024-09-01 + redirect: amazon.aws.kms_key + aws_kms_info: + # Deprecation for this alias should not *start* prior to 2024-09-01 + redirect: amazon.aws.kms_key_info + aws_s3: + # Deprecation for this alias should not *start* prior to 2024-09-01 + redirect: amazon.aws.s3_object + ec2_asg: + # Deprecation for this alias should not *start* prior to 2024-09-01 + redirect: amazon.aws.autoscaling_group + ec2_asg_info: + # Deprecation for this alias should not *start* prior to 2024-09-01 + redirect: amazon.aws.autoscaling_group_info + ec2_elb_lb: + # Deprecation for this alias should not *start* prior to 2024-09-01 + redirect: amazon.aws.elb_classic_lb + ec2_group: + # Deprecation for this alias should not *start* prior to 2024-09-01 + redirect: amazon.aws.ec2_security_group + ec2_group_info: + # Deprecation for this alias should not *start* prior to 2024-09-01 + redirect: amazon.aws.ec2_security_group_info + ec2_metric_alarm: + # Deprecation for this alias should not *start* prior to 2024-09-01 + redirect: amazon.aws.cloudwatch_metric_alarm + execute_lambda: + # Deprecation for this alias should not *start* prior to 2024-09-01 + redirect: amazon.aws.lambda_execute diff --git a/ansible_collections/amazon/aws/plugins/action/s3_object.py b/ansible_collections/amazon/aws/plugins/action/s3_object.py new file mode 100644 index 000000000..a78dd0bed --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/action/s3_object.py @@ -0,0 +1,75 @@ +# (c) 2012, Michael DeHaan +# (c) 2018, Will Thames +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from ansible.errors import AnsibleError, AnsibleAction, AnsibleActionFail, AnsibleFileNotFound +from ansible.module_utils._text import to_text +from ansible.plugins.action import ActionBase +from ansible.utils.vars import merge_hash + + +class ActionModule(ActionBase): + + TRANSFERS_FILES = True + + def run(self, tmp=None, task_vars=None): + ''' handler for s3_object operations + + This adds the magic that means 'src' can point to both a 'remote' file + on the 'host' or in the 'files/' lookup path on the controller. 
+ ''' + self._supports_async = True + + if task_vars is None: + task_vars = dict() + + result = super(ActionModule, self).run(tmp, task_vars) + del tmp # tmp no longer has any effect + + source = self._task.args.get('src', None) + + try: + new_module_args = self._task.args.copy() + if source: + source = os.path.expanduser(source) + + # For backward compatibility check if the file exists on the remote; it should take precedence + if not self._remote_file_exists(source): + try: + source = self._loader.get_real_file(self._find_needle('files', source), decrypt=False) + new_module_args['src'] = source + except AnsibleFileNotFound: + # module handles error message for nonexistent files + new_module_args['src'] = source + except AnsibleError as e: + raise AnsibleActionFail(to_text(e)) + + wrap_async = self._task.async_val and not self._connection.has_native_async + # execute the s3_object module with the updated args + result = merge_hash(result, self._execute_module(module_args=new_module_args, task_vars=task_vars, wrap_async=wrap_async)) + + if not wrap_async: + # remove a temporary path we created + self._remove_tmp_path(self._connection._shell.tmpdir) + + except AnsibleAction as e: + result.update(e.result) + return result diff --git a/ansible_collections/amazon/aws/plugins/callback/aws_resource_actions.py b/ansible_collections/amazon/aws/plugins/callback/aws_resource_actions.py new file mode 100644 index 000000000..551a866a3 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/callback/aws_resource_actions.py @@ -0,0 +1,71 @@ +# (C) 2018 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + name: aws_resource_actions + type: aggregate + short_description: summarizes all "resource:actions" completed + description: + - Ansible callback plugin for collecting the AWS actions completed by all boto3 modules using + AnsibleAWSModule in a playbook. Botocore endpoint logs need to be enabled for those modules, which can + be done easily by setting debug_botocore_endpoint_logs to True for group/aws using module_defaults. + requirements: + - whitelisting in configuration - see examples section below for details. 
+''' + +EXAMPLES = ''' +example: > + To enable, add this to your ansible.cfg file in the defaults block + [defaults] + callback_whitelist = aws_resource_actions +sample output: > +# +# AWS ACTIONS: ['s3:PutBucketAcl', 's3:HeadObject', 's3:DeleteObject', 's3:PutObjectAcl', 's3:CreateMultipartUpload', +# 's3:DeleteBucket', 's3:GetObject', 's3:DeleteObjects', 's3:CreateBucket', 's3:CompleteMultipartUpload', +# 's3:ListObjectsV2', 's3:HeadBucket', 's3:UploadPart', 's3:PutObject'] +# +sample output: > +# +# AWS ACTIONS: ['ec2:DescribeVpcAttribute', 'ec2:DescribeVpcClassicLink', 'ec2:ModifyVpcAttribute', 'ec2:CreateTags', +# 'sts:GetCallerIdentity', 'ec2:DescribeSecurityGroups', 'ec2:DescribeTags', 'ec2:DescribeVpcs', 'ec2:CreateVpc'] +# +''' + +from ansible.plugins.callback import CallbackBase +from ansible.module_utils._text import to_native + + +class CallbackModule(CallbackBase): + CALLBACK_VERSION = 2.8 + CALLBACK_TYPE = 'aggregate' + CALLBACK_NAME = 'amazon.aws.aws_resource_actions' + CALLBACK_NEEDS_WHITELIST = True + + def __init__(self): + self.aws_resource_actions = [] + super(CallbackModule, self).__init__() + + def extend_aws_resource_actions(self, result): + if result.get('resource_actions'): + self.aws_resource_actions.extend(result['resource_actions']) + + def runner_on_ok(self, host, res): + self.extend_aws_resource_actions(res) + + def runner_on_failed(self, host, res, ignore_errors=False): + self.extend_aws_resource_actions(res) + + def v2_runner_item_on_ok(self, result): + self.extend_aws_resource_actions(result._result) + + def v2_runner_item_on_failed(self, result): + self.extend_aws_resource_actions(result._result) + + def playbook_on_stats(self, stats): + if self.aws_resource_actions: + self.aws_resource_actions = sorted(list(to_native(action) for action in set(self.aws_resource_actions))) + self._display.display("AWS ACTIONS: {0}".format(self.aws_resource_actions)) diff --git a/ansible_collections/amazon/aws/plugins/doc_fragments/aws.py b/ansible_collections/amazon/aws/plugins/doc_fragments/aws.py new file mode 100644 index 000000000..eeff899c6 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/doc_fragments/aws.py @@ -0,0 +1,143 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2014, Will Thames +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + + # AWS only documentation fragment + DOCUMENTATION = r''' +options: + access_key: + description: + - AWS access key ID. + - See the AWS documentation for more information about access tokens + U(https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys). + - The C(AWS_ACCESS_KEY_ID), C(AWS_ACCESS_KEY) or C(EC2_ACCESS_KEY) + environment variables may also be used in decreasing order of + preference. Prior to release 6.0.0 these environment variables will be + ignored if the I(profile) parameter is passed. After release 6.0.0 + I(access_key) will always fall back to the environment variables if set. + - The I(aws_access_key) and I(profile) options are mutually exclusive. + - The I(aws_access_key_id) alias was added in release 5.1.0 for + consistency with the AWS botocore SDK. + - The I(ec2_access_key) alias has been deprecated and will be removed in a + release after 2024-12-01. + - Support for the C(EC2_ACCESS_KEY) environment variable has been + deprecated and will be removed in a release after 2024-12-01. 
+ type: str + aliases: ['aws_access_key_id', 'aws_access_key', 'ec2_access_key'] + secret_key: + description: + - AWS secret access key. + - See the AWS documentation for more information about access tokens + U(https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys). + - The C(AWS_SECRET_ACCESS_KEY), C(AWS_SECRET_KEY), or C(EC2_SECRET_KEY) + environment variables may also be used in decreasing order of + preference. Prior to release 6.0.0 these environment variables will be + ignored if the I(profile) parameter is passed. After release 6.0.0 + I(secret_key) will always fall back to the environment variables if set. + - The I(secret_key) and I(profile) options are mutually exclusive. + - The I(aws_secret_access_key) alias was added in release 5.1.0 for + consistency with the AWS botocore SDK. + - The I(ec2_secret_key) alias has been deprecated and will be removed in a + release after 2024-12-01. + - Support for the C(EC2_SECRET_KEY) environment variable has been + deprecated and will be removed in a release after 2024-12-01. + type: str + aliases: ['aws_secret_access_key', 'aws_secret_key', 'ec2_secret_key'] + session_token: + description: + - AWS STS session token for use with temporary credentials. + - See the AWS documentation for more information about access tokens + U(https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys). + - The C(AWS_SESSION_TOKEN), C(AWS_SECURITY_TOKEN) or C(EC2_SECURITY_TOKEN) + environment variables may also be used in decreasing order of preference. + Prior to release 6.0.0 these environment variables will be + ignored if the I(profile) parameter is passed. After release 6.0.0 + I(session_token) will always fall back to the environment variables if set. + - The I(security_token) and I(profile) options are mutually exclusive. + - Aliases I(aws_session_token) and I(session_token) were added in release + 3.2.0, with the parameter being renamed from I(security_token) to + I(session_token) in release 6.0.0. + - The I(security_token), I(aws_security_token), and I(access_token) + aliases have been deprecated and will be removed in a release after + 2024-12-01. + - Support for the C(EC2_SECRET_KEY) and C(AWS_SECURITY_TOKEN) environment + variables has been deprecated and will be removed in a release after + 2024-12-01. + type: str + aliases: ['aws_session_token', 'security_token', 'aws_security_token', 'access_token'] + profile: + description: + - A named AWS profile to use for authentication. + - See the AWS documentation for more information about named profiles + U(https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html). + - The C(AWS_PROFILE) environment variable may also be used. Prior to release 6.0.0 the + C(AWS_PROFILE) environment variable will be ignored if any of I(access_key), I(secret_key), + or I(session_token) are passed. After release 6.0.0 I(profile) will always fall back to the + C(AWS_PROFILE) environment variable if set. + - The I(profile) option is mutually exclusive with the I(aws_access_key), + I(aws_secret_key) and I(security_token) options. + type: str + aliases: ['aws_profile'] + + endpoint_url: + description: + - URL to connect to instead of the default AWS endpoints. While this + can be used to connection to other AWS-compatible services the + amazon.aws and community.aws collections are only tested against + AWS. 
+ - The C(AWS_URL) or C(EC2_URL) environment variables may also be used, + in decreasing order of preference. + - The I(ec2_url) and I(s3_url) aliases have been deprecated and will be + removed in a release after 2024-12-01. + - Support for the C(EC2_URL) environment variable has been deprecated and + will be removed in a release after 2024-12-01. + type: str + aliases: ['ec2_url', 'aws_endpoint_url', 's3_url' ] + aws_ca_bundle: + description: + - The location of a CA Bundle to use when validating SSL certificates. + - The C(AWS_CA_BUNDLE) environment variable may also be used. + type: path + validate_certs: + description: + - When set to C(false), SSL certificates will not be validated for + communication with the AWS APIs. + - Setting I(validate_certs=false) is strongly discouraged, as an + alternative, consider setting I(aws_ca_bundle) instead. + type: bool + default: true + aws_config: + description: + - A dictionary to modify the botocore configuration. + - Parameters can be found in the AWS documentation + U(https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html#botocore.config.Config). + type: dict + debug_botocore_endpoint_logs: + description: + - Use a C(botocore.endpoint) logger to parse the unique (rather than total) + C("resource:action") API calls made during a task, outputing the set to + the resource_actions key in the task results. Use the + C(aws_resource_action) callback to output to total list made during + a playbook. + - The C(ANSIBLE_DEBUG_BOTOCORE_LOGS) environment variable may also be used. + type: bool + default: false +notes: + - B(Caution:) For modules, environment variables and configuration files are + read from the Ansible 'host' context and not the 'controller' context. + As such, files may need to be explicitly copied to the 'host'. For lookup + and connection plugins, environment variables and configuration files are + read from the Ansible 'controller' context and not the 'host' context. + - The AWS SDK (boto3) that Ansible uses may also read defaults for credentials + and other settings, such as the region, from its configuration files in the + Ansible 'host' context (typically C(~/.aws/credentials)). + See U(https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html) + for more information. +''' diff --git a/ansible_collections/amazon/aws/plugins/doc_fragments/aws_credentials.py b/ansible_collections/amazon/aws/plugins/doc_fragments/aws_credentials.py new file mode 100644 index 000000000..73eff046e --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/doc_fragments/aws_credentials.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + + # Plugin options for AWS credentials + DOCUMENTATION = r''' +options: + aws_profile: + description: The AWS profile + type: str + aliases: [ boto_profile ] + env: + - name: AWS_DEFAULT_PROFILE + - name: AWS_PROFILE + aws_access_key: + description: The AWS access key to use. + type: str + aliases: [ aws_access_key_id ] + env: + - name: EC2_ACCESS_KEY + - name: AWS_ACCESS_KEY + - name: AWS_ACCESS_KEY_ID + aws_secret_key: + description: The AWS secret key that corresponds to the access key. 
+ type: str + aliases: [ aws_secret_access_key ] + env: + - name: EC2_SECRET_KEY + - name: AWS_SECRET_KEY + - name: AWS_SECRET_ACCESS_KEY + aws_security_token: + description: The AWS security token if using temporary access and secret keys. + type: str + env: + - name: EC2_SECURITY_TOKEN + - name: AWS_SESSION_TOKEN + - name: AWS_SECURITY_TOKEN +''' diff --git a/ansible_collections/amazon/aws/plugins/doc_fragments/aws_region.py b/ansible_collections/amazon/aws/plugins/doc_fragments/aws_region.py new file mode 100644 index 000000000..521526601 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/doc_fragments/aws_region.py @@ -0,0 +1,21 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + + # Plugin option for AWS region + DOCUMENTATION = r''' +options: + region: + description: The region for which to create the connection. + type: str + env: + - name: EC2_REGION + - name: AWS_REGION +''' diff --git a/ansible_collections/amazon/aws/plugins/doc_fragments/boto3.py b/ansible_collections/amazon/aws/plugins/doc_fragments/boto3.py new file mode 100644 index 000000000..a88e2e018 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/doc_fragments/boto3.py @@ -0,0 +1,19 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2022, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + + # Minimum requirements for the collection + DOCUMENTATION = r''' +options: {} +requirements: + - python >= 3.6 + - boto3 >= 1.18.0 + - botocore >= 1.21.0 +''' diff --git a/ansible_collections/amazon/aws/plugins/doc_fragments/ec2.py b/ansible_collections/amazon/aws/plugins/doc_fragments/ec2.py new file mode 100644 index 000000000..017652b58 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/doc_fragments/ec2.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2015, Ansible, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + + # EC2 only documentation fragment + DOCUMENTATION = r''' +options: + region: + description: + - The AWS region to use. + - For global services such as IAM, Route53 and CloudFront, I(region) + is ignored. + - The C(AWS_REGION) or C(EC2_REGION) environment variables may also + be used. + - See the Amazon AWS documentation for more information + U(http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region). + - The C(ec2_region) alias has been deprecated and will be removed in + a release after 2024-12-01 + - Support for the C(EC2_REGION) environment variable has been + deprecated and will be removed in a release after 2024-12-01. 
+    type: str
+    aliases: [ aws_region, ec2_region ]
+'''
diff --git a/ansible_collections/amazon/aws/plugins/doc_fragments/tags.py b/ansible_collections/amazon/aws/plugins/doc_fragments/tags.py
new file mode 100644
index 000000000..9d381cb8a
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/doc_fragments/tags.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2022, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+    # Standard Tagging related parameters
+    DOCUMENTATION = r'''
+options:
+  tags:
+    description:
+      - A dictionary representing the tags to be applied to the resource.
+      - If the I(tags) parameter is not set then tags will not be modified.
+    type: dict
+    required: false
+    aliases: ['resource_tags']
+  purge_tags:
+    description:
+      - If I(purge_tags=true) and I(tags) is set, existing tags will be purged
+        from the resource to match exactly what is defined by the I(tags) parameter.
+      - If the I(tags) parameter is not set then tags will not be modified, even
+        if I(purge_tags=True).
+      - Tag keys beginning with C(aws:) are reserved by Amazon and can not be
+        modified. As such they will be ignored for the purposes of the
+        I(purge_tags) parameter. See the Amazon documentation for more information
+        U(https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html#tag-conventions).
+    type: bool
+    default: true
+    required: false
+'''
+
+    # Some modules had a default of purge_tags=False; this was generally
+    # deprecated in release 4.0.0
+    DEPRECATED_PURGE = r'''
+options:
+  tags:
+    description:
+      - A dictionary representing the tags to be applied to the resource.
+      - If the I(tags) parameter is not set then tags will not be modified.
+    type: dict
+    required: false
+    aliases: ['resource_tags']
+  purge_tags:
+    description:
+      - If I(purge_tags=true) and I(tags) is set, existing tags will be purged
+        from the resource to match exactly what is defined by the I(tags) parameter.
+      - If the I(tags) parameter is not set then tags will not be modified, even
+        if I(purge_tags=True).
+      - Tag keys beginning with C(aws:) are reserved by Amazon and can not be
+        modified. As such they will be ignored for the purposes of the
+        I(purge_tags) parameter. See the Amazon documentation for more information
+        U(https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html#tag-conventions).
+      - The current default value of C(False) has been deprecated. The default
+        value will change to C(True) in release 5.0.0.
+    type: bool
+    required: false
+'''
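+
+# Example (illustrative only, not part of the fragment): a module opts in to
+# these shared tagging options by listing the fragment in its DOCUMENTATION:
+#
+#   extends_documentation_fragment:
+#     - amazon.aws.tags
+#
+# Modules still carrying the deprecated default would reference
+# amazon.aws.tags.deprecated_purge instead.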
diff --git a/ansible_collections/amazon/aws/plugins/inventory/aws_ec2.py b/ansible_collections/amazon/aws/plugins/inventory/aws_ec2.py
new file mode 100644
index 000000000..f1d069b5b
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/inventory/aws_ec2.py
@@ -0,0 +1,926 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+name: aws_ec2
+short_description: EC2 inventory source
+extends_documentation_fragment:
+  - inventory_cache
+  - constructed
+  - amazon.aws.boto3
+  - amazon.aws.aws_credentials
+description:
+  - Get inventory hosts from Amazon Web Services EC2.
+  - "The inventory file is a YAML configuration file and must end with C(aws_ec2.{yml|yaml}). Example: C(my_inventory.aws_ec2.yml)."
+notes:
+  - If no credentials are provided and the control node has an associated IAM instance profile then the
+    role will be used for authentication.
+author:
+  - Sloane Hertel (@s-hertel)
+options:
+  plugin:
+    description: Token that ensures this is a source file for the plugin.
+    required: True
+    choices: ['aws_ec2', 'amazon.aws.aws_ec2']
+  iam_role_arn:
+    description:
+      - The ARN of the IAM role to assume to perform the inventory lookup. You should still provide AWS
+        credentials with enough privilege to perform the AssumeRole action.
+  regions:
+    description:
+      - A list of regions in which to describe EC2 instances.
+      - If empty (the default) this will include all regions, except possibly restricted ones like us-gov-west-1 and cn-north-1.
+    type: list
+    elements: str
+    default: []
+  hostnames:
+    description:
+      - A list in order of precedence for hostname variables.
+    type: list
+    elements: dict
+    default: []
+    suboptions:
+      name:
+        description:
+          - Name of the host.
+          - Can be one of the options specified in U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options).
+          - To use tags as hostnames use the syntax tag:Name=Value to use the hostname Name_Value, or tag:Name to use the value of the Name tag.
+          - If the value provided does not exist in the above options, it will be used as a literal string.
+        type: str
+        required: True
+      prefix:
+        description:
+          - Prefix to prepend to I(name). Same options as I(name).
+          - If I(prefix) is specified, final hostname will be I(prefix) + I(separator) + I(name).
+        type: str
+        default: ''
+        required: False
+      separator:
+        description:
+          - Value to separate I(prefix) and I(name) when I(prefix) is specified.
+        type: str
+        default: '_'
+        required: False
+  allow_duplicated_hosts:
+    description:
+      - By default, the first name that matches an entry of the I(hostnames) list is returned.
+      - Turn this flag on if you don't mind having duplicated entries in the inventory
+        and you want to get all the hostnames that match.
+    type: bool
+    default: False
+    version_added: 5.0.0
+  filters:
+    description:
+      - A dictionary of filter value pairs.
+      - Available filters are listed here U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options).
+    type: dict
+    default: {}
+  include_filters:
+    description:
+      - A list of filters. Any instances matching at least one of the filters are included in the result.
+      - Available filters are listed here U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options).
+      - Every entry in this list triggers a search query. As such, from a performance point of view, it's better to
+        keep the list as short as possible.
+    type: list
+    elements: dict
+    default: []
+    version_added: 1.5.0
+  exclude_filters:
+    description:
+      - A list of filters. Any instances matching one of the filters are excluded from the result.
+      - The filters from C(exclude_filters) take priority over the C(include_filters) and C(filters) keys.
+      - Available filters are listed here U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options).
+      - Every entry in this list triggers a search query. As such, from a performance point of view, it's better to
+        keep the list as short as possible.
+    type: list
+    elements: dict
+    default: []
+    version_added: 1.5.0
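+  # (Note on combining the filter options above, using hypothetical values:
+  # entries inside the single `filters` dictionary are AND-ed into one query,
+  # while each element of `include_filters`/`exclude_filters` is its own query
+  # whose results are unioned, with `exclude_filters` taking precedence.)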
+  include_extra_api_calls:
+    description:
+      - Add two additional API calls for every instance to include 'persistent' and 'events' host variables.
+      - Spot instances may be persistent and instances may have associated events.
+      - The I(include_extra_api_calls) option has been deprecated and will be removed in release 6.0.0.
+    type: bool
+    default: False
+  strict_permissions:
+    description:
+      - By default if a 403 (Forbidden) error code is encountered this plugin will fail.
+      - You can set this option to False in the inventory config file which will allow 403 errors to be gracefully skipped.
+    type: bool
+    default: True
+  use_contrib_script_compatible_sanitization:
+    description:
+      - By default this plugin is using a general group name sanitization to create safe and usable group names for use in Ansible.
+        This option allows you to override that, in an effort to allow migration from the old inventory script and
+        matches the sanitization of groups when the script's ``replace_dash_in_groups`` option is set to ``False``.
+        To replicate behavior of ``replace_dash_in_groups = True`` with constructed groups,
+        you will need to replace hyphens with underscores via the regex_replace filter for those entries.
+      - For this to work you should also turn off the TRANSFORM_INVALID_GROUP_CHARS setting,
+        otherwise the core engine will just use the standard sanitization on top.
+      - This is not the default because such names break certain functionality, as not all characters are
+        valid Python identifiers (which group names end up being used as).
+    type: bool
+    default: False
+  use_contrib_script_compatible_ec2_tag_keys:
+    description:
+      - Expose the host tags with ec2_tag_TAGNAME keys like the old ec2.py inventory script.
+      - The use of this feature is discouraged and we advise migrating to the new ``tags`` structure.
+    type: bool
+    default: False
+    version_added: 1.5.0
+  hostvars_prefix:
+    description:
+      - The prefix for host variables names coming from AWS.
+    type: str
+    version_added: 3.1.0
+  hostvars_suffix:
+    description:
+      - The suffix for host variables names coming from AWS.
+    type: str
+    version_added: 3.1.0
+'''
+
+EXAMPLES = '''
+# Minimal example using environment vars or instance role credentials
+# Fetch all hosts in us-east-1, the hostname is the public DNS if it exists, otherwise the private IP address
+plugin: amazon.aws.aws_ec2
+regions:
+  - us-east-1
+
+# Example using filters, ignoring permission errors, and specifying the hostname precedence
+plugin: amazon.aws.aws_ec2
+# The values for profile, access key, secret key and token can be hardcoded like:
+boto_profile: aws_profile
+# or you could use Jinja as:
+# boto_profile: "{{ lookup('env', 'AWS_PROFILE') | default('aws_profile', true) }}"
+# Populate inventory with instances in these regions
+regions:
+  - us-east-1
+  - us-east-2
+filters:
+  # All instances with their `Environment` tag set to `dev`
+  tag:Environment: dev
+  # All dev and QA hosts (alternative form; use only one `tag:Environment`
+  # key per filters dictionary)
+  # tag:Environment:
+  #   - dev
+  #   - qa
+  instance.group-id: sg-xxxxxxxx
+# Ignores 403 errors rather than failing
+strict_permissions: False
+# Note: I(hostnames) sets the inventory_hostname. To modify ansible_host without modifying
+# inventory_hostname use compose (see example below).
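+# The `hostnames` entries below are tried in order and, unless
+# allow_duplicated_hosts is enabled, the first one that resolves wins. For a
+# hypothetical instance tagged Name=Tag1 with private IP 10.0.0.1, the first
+# entry would name it 'Name_Tag1' and the later entries would be skipped.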
+hostnames:
+  - tag:Name=Tag1,Name=Tag2 # Return specific hosts only
+  - tag:CustomDNSName
+  - dns-name
+  - name: 'tag:Name=Tag1,Name=Tag2'
+  - name: 'private-ip-address'
+    separator: '_'
+    prefix: 'tag:Name'
+  - name: 'test_literal' # Using literal values for hostname
+    separator: '-'       # Hostname will be aws-test_literal
+    prefix: 'aws'
+
+# Returns all the hostnames for a given instance
+allow_duplicated_hosts: True
+
+# Example using constructed features to create groups and set ansible_host
+plugin: amazon.aws.aws_ec2
+regions:
+  - us-east-1
+  - us-west-1
+# keyed_groups may be used to create custom groups
+strict: False
+keyed_groups:
+  # Add e.g. x86_64 hosts to an arch_x86_64 group
+  - prefix: arch
+    key: 'architecture'
+  # Add hosts to tag_Name_Value groups for each Name/Value tag pair
+  - prefix: tag
+    key: tags
+  # Add hosts to e.g. instance_type_z3_tiny
+  - prefix: instance_type
+    key: instance_type
+  # Create security_groups_sg_abcd1234 group for each SG
+  - key: 'security_groups|json_query("[].group_id")'
+    prefix: 'security_groups'
+  # Create a group for each value of the Application tag
+  - key: tags.Application
+    separator: ''
+  # Create a group per region e.g. aws_region_us_east_2
+  - key: placement.region
+    prefix: aws_region
+  # Create a group (or groups) based on the value of a custom tag "Role" and add them to a metagroup called "project"
+  - key: tags['Role']
+    prefix: foo
+    parent_group: "project"
+# Set individual variables with compose
+compose:
+  # Use the private IP address to connect to the host
+  # (note: this does not modify inventory_hostname, which is set via I(hostnames))
+  ansible_host: private_ip_address
+
+# Example using include_filters and exclude_filters to compose the inventory.
+plugin: amazon.aws.aws_ec2
+regions:
+  - us-east-1
+  - us-west-1
+include_filters:
+- tag:Name:
+  - 'my_second_tag'
+- tag:Name:
+  - 'my_third_tag'
+exclude_filters:
+- tag:Name:
+  - 'my_first_tag'
+
+# Example using groups to assign the running hosts to a group based on vpc_id
+plugin: amazon.aws.aws_ec2
+boto_profile: aws_profile
+# Populate inventory with instances in these regions
+regions:
+  - us-east-2
+filters:
+  # All instances with their state as `running`
+  instance-state-name: running
+keyed_groups:
+  - prefix: tag
+    key: tags
+compose:
+  ansible_host: public_dns_name
+groups:
+  libvpc: vpc_id == 'vpc-####'
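+
+# Example (illustrative role ARN) assuming an IAM role before the lookup
+plugin: amazon.aws.aws_ec2
+iam_role_arn: arn:aws:iam::123456789012:role/InventoryReadOnly
+regions:
+  - us-east-1
+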
+# Define prefix and suffix for host variables coming from AWS.
+plugin: amazon.aws.aws_ec2
+regions:
+  - us-east-1
+hostvars_prefix: 'aws_'
+hostvars_suffix: '_ec2'
+'''
+
+import re
+
+try:
+    import boto3
+    import botocore
+except ImportError:
+    pass  # will be captured by imported HAS_BOTO3
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import missing_required_lib
+from ansible.plugins.inventory import BaseInventoryPlugin
+from ansible.plugins.inventory import Cacheable
+from ansible.plugins.inventory import Constructable
+from ansible.template import Templar
+
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+
+
+# Each mapping associates a filter name with the tuple of keys used to walk to
+# the corresponding value in the structure returned by boto3's EC2
+# describe_instances method.
+
+instance_meta_filter_to_boto_attr = {
+    'group-id': ('Groups', 'GroupId'),
+    'group-name': ('Groups', 'GroupName'),
+    'network-interface.attachment.instance-owner-id': ('OwnerId',),
+    'owner-id': ('OwnerId',),
+    'requester-id': ('RequesterId',),
+    'reservation-id': ('ReservationId',),
+}
+
+instance_data_filter_to_boto_attr = {
+    'affinity': ('Placement', 'Affinity'),
+    'architecture': ('Architecture',),
+    'availability-zone': ('Placement', 'AvailabilityZone'),
+    'block-device-mapping.attach-time': ('BlockDeviceMappings', 'Ebs', 'AttachTime'),
+    'block-device-mapping.delete-on-termination': ('BlockDeviceMappings', 'Ebs', 'DeleteOnTermination'),
+    'block-device-mapping.device-name': ('BlockDeviceMappings', 'DeviceName'),
+    'block-device-mapping.status': ('BlockDeviceMappings', 'Ebs', 'Status'),
+    'block-device-mapping.volume-id': ('BlockDeviceMappings', 'Ebs', 'VolumeId'),
+    'client-token': ('ClientToken',),
+    'dns-name': ('PublicDnsName',),
+    'host-id': ('Placement', 'HostId'),
+    'hypervisor': ('Hypervisor',),
+    'iam-instance-profile.arn': ('IamInstanceProfile', 'Arn'),
+    'image-id': ('ImageId',),
+    'instance-id': ('InstanceId',),
+    'instance-lifecycle': ('InstanceLifecycle',),
+    'instance-state-code': ('State', 'Code'),
+    'instance-state-name': ('State', 'Name'),
+    'instance-type': ('InstanceType',),
+    'instance.group-id': ('SecurityGroups', 'GroupId'),
+    'instance.group-name': ('SecurityGroups', 'GroupName'),
+    'ip-address': ('PublicIpAddress',),
+    'kernel-id': ('KernelId',),
+    'key-name': ('KeyName',),
+    'launch-index': ('AmiLaunchIndex',),
+    'launch-time': ('LaunchTime',),
+    'monitoring-state': ('Monitoring', 'State'),
+    'network-interface.addresses.private-ip-address': ('NetworkInterfaces', 'PrivateIpAddress'),
+    'network-interface.addresses.primary': ('NetworkInterfaces', 'PrivateIpAddresses', 'Primary'),
+    'network-interface.addresses.association.public-ip': ('NetworkInterfaces', 'PrivateIpAddresses', 'Association', 'PublicIp'),
+    'network-interface.addresses.association.ip-owner-id': ('NetworkInterfaces', 'PrivateIpAddresses', 'Association', 'IpOwnerId'),
+    'network-interface.association.public-ip': ('NetworkInterfaces', 'Association', 'PublicIp'),
+    'network-interface.association.ip-owner-id': ('NetworkInterfaces', 'Association', 'IpOwnerId'),
+    'network-interface.association.allocation-id': ('ElasticGpuAssociations', 'ElasticGpuId'),
+    'network-interface.association.association-id': ('ElasticGpuAssociations', 'ElasticGpuAssociationId'),
+    'network-interface.attachment.attachment-id': ('NetworkInterfaces', 'Attachment', 'AttachmentId'),
+    'network-interface.attachment.instance-id': ('InstanceId',),
+    'network-interface.attachment.device-index': ('NetworkInterfaces', 'Attachment', 'DeviceIndex'),
+    'network-interface.attachment.status': ('NetworkInterfaces', 'Attachment', 'Status'),
+    'network-interface.attachment.attach-time': ('NetworkInterfaces', 'Attachment', 'AttachTime'),
+    'network-interface.attachment.delete-on-termination': ('NetworkInterfaces', 'Attachment', 'DeleteOnTermination'),
+    'network-interface.availability-zone': ('Placement', 'AvailabilityZone'),
+    'network-interface.description': ('NetworkInterfaces', 'Description'),
+    'network-interface.group-id': ('NetworkInterfaces', 'Groups', 'GroupId'),
+    'network-interface.group-name': ('NetworkInterfaces', 'Groups', 'GroupName'),
+    'network-interface.ipv6-addresses.ipv6-address': ('NetworkInterfaces', 'Ipv6Addresses', 'Ipv6Address'),
+    'network-interface.mac-address': ('NetworkInterfaces', 'MacAddress'),
+    'network-interface.network-interface-id': ('NetworkInterfaces', 'NetworkInterfaceId'),
+    'network-interface.owner-id': ('NetworkInterfaces', 'OwnerId'),
+    'network-interface.private-dns-name': ('NetworkInterfaces', 'PrivateDnsName'),
+    # 'network-interface.requester-id': (),
+    'network-interface.requester-managed': ('NetworkInterfaces', 'Association', 'IpOwnerId'),
+    'network-interface.status': ('NetworkInterfaces', 'Status'),
+    'network-interface.source-dest-check': ('NetworkInterfaces', 'SourceDestCheck'),
+    'network-interface.subnet-id': ('NetworkInterfaces', 'SubnetId'),
+    'network-interface.vpc-id': ('NetworkInterfaces', 'VpcId'),
+    'placement-group-name': ('Placement', 'GroupName'),
+    'platform': ('Platform',),
+    'private-dns-name': ('PrivateDnsName',),
+    'private-ip-address': ('PrivateIpAddress',),
+    'product-code': ('ProductCodes', 'ProductCodeId'),
+    'product-code.type': ('ProductCodes', 'ProductCodeType'),
+    'ramdisk-id': ('RamdiskId',),
+    'reason': ('StateTransitionReason',),
+    'root-device-name': ('RootDeviceName',),
+    'root-device-type': ('RootDeviceType',),
+    'source-dest-check': ('SourceDestCheck',),
+    'spot-instance-request-id': ('SpotInstanceRequestId',),
+    'state-reason-code': ('StateReason', 'Code'),
+    'state-reason-message': ('StateReason', 'Message'),
+    'subnet-id': ('SubnetId',),
+    'tag': ('Tags',),
+    'tag-key': ('Tags',),
+    'tag-value': ('Tags',),
+    'tenancy': ('Placement', 'Tenancy'),
+    'virtualization-type': ('VirtualizationType',),
+    'vpc-id': ('VpcId',),
+}
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
+
+    NAME = 'amazon.aws.aws_ec2'
+
+    def __init__(self):
+        super(InventoryModule, self).__init__()
+
+        self.group_prefix = 'aws_ec2_'
+
+        # credentials
+        self.boto_profile = None
+        self.aws_secret_access_key = None
+        self.aws_access_key_id = None
+        self.aws_security_token = None
+        self.iam_role_arn = None
+
+    def _compile_values(self, obj, attr):
+        '''
+            :param obj: A list or dict of instance attributes
+            :param attr: A key
+            :return The value(s) found via the attr
+        '''
+        if obj is None:
+            return
+
+        temp_obj = []
+
+        if isinstance(obj, (list, tuple)):
+            for each in obj:
+                value = self._compile_values(each, attr)
+                if value:
+                    temp_obj.append(value)
+        else:
+            temp_obj = obj.get(attr)
+
+        has_indexes = isinstance(temp_obj, (list, tuple))
+        if has_indexes and len(temp_obj) == 1:
+            return temp_obj[0]
+
+        return temp_obj
+
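+    # Illustrative walk (hypothetical data) for the filter 'instance.group-id',
+    # whose attribute chain is ('SecurityGroups', 'GroupId'):
+    #
+    #   instance               = {'SecurityGroups': [{'GroupId': 'sg-1'}]}
+    #   step 'SecurityGroups' -> {'GroupId': 'sg-1'}  (single-element list unwrapped)
+    #   step 'GroupId'        -> 'sg-1'
+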
+    def _get_boto_attr_chain(self, filter_name, instance):
+        '''
+            :param filter_name: The filter
+            :param instance: instance dict returned by boto3 ec2 describe_instances()
+        '''
+        allowed_filters = sorted(list(instance_data_filter_to_boto_attr.keys()) + list(instance_meta_filter_to_boto_attr.keys()))
+
+        # If filter not in allowed_filters -> use it as a literal string
+        if filter_name not in allowed_filters:
+            return filter_name
+
+        if filter_name in instance_data_filter_to_boto_attr:
+            boto_attr_list = instance_data_filter_to_boto_attr[filter_name]
+        else:
+            boto_attr_list = instance_meta_filter_to_boto_attr[filter_name]
+
+        instance_value = instance
+        for attribute in boto_attr_list:
+            instance_value = self._compile_values(instance_value, attribute)
+        return instance_value
+
+    def _get_credentials(self):
+        '''
+            :return A dictionary of boto client credentials
+        '''
+        boto_params = {}
+        for credential in (('aws_access_key_id', self.aws_access_key_id),
+                           ('aws_secret_access_key', self.aws_secret_access_key),
+                           ('aws_session_token', self.aws_security_token)):
+            if credential[1]:
+                boto_params[credential[0]] = credential[1]
+
+        return boto_params
+
+    def _get_connection(self, credentials, region='us-east-1'):
+        try:
+            connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region, **credentials)
+        except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+            if self.boto_profile:
+                try:
+                    connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region)
+                except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+                    raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
+            else:
+                raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
+        return connection
+
+    def _boto3_assume_role(self, credentials, region=None):
+        """
+        Assume an IAM role passed by iam_role_arn parameter
+
+        :return: a dict containing the credentials of the assumed role
+        """
+
+        iam_role_arn = self.iam_role_arn
+
+        try:
+            sts_connection = boto3.session.Session(profile_name=self.boto_profile).client('sts', region, **credentials)
+            sts_session = sts_connection.assume_role(RoleArn=iam_role_arn, RoleSessionName='ansible_aws_ec2_dynamic_inventory')
+            return dict(
+                aws_access_key_id=sts_session['Credentials']['AccessKeyId'],
+                aws_secret_access_key=sts_session['Credentials']['SecretAccessKey'],
+                aws_session_token=sts_session['Credentials']['SessionToken']
+            )
+        except botocore.exceptions.ClientError as e:
+            raise AnsibleError("Unable to assume IAM role: %s" % to_native(e))
+
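+    # Sketch (an assumption-level summary, not additional behaviour) of the
+    # credential flow in _boto3_conn() below when iam_role_arn is set:
+    #
+    #   credentials = self._get_credentials()           # static keys, if any
+    #   assumed = self._boto3_assume_role(credentials)  # one STS AssumeRole call
+    #   client = boto3.session.Session().client('ec2', region, **assumed)
+    #
+    # The assumed credentials are temporary and are not auto-refreshed, so a
+    # very long inventory refresh may need to re-assume the role.
+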
+    def _boto3_conn(self, regions):
+        '''
+            :param regions: A list of regions to create a boto3 client
+
+            Generator that yields a boto3 client and the region
+        '''
+
+        credentials = self._get_credentials()
+        iam_role_arn = self.iam_role_arn
+
+        if not regions:
+            try:
+                # as per https://boto3.amazonaws.com/v1/documentation/api/latest/guide/ec2-example-regions-avail-zones.html
+                client = self._get_connection(credentials)
+                resp = client.describe_regions()
+                regions = [x['RegionName'] for x in resp.get('Regions', [])]
+            except botocore.exceptions.NoRegionError:
+                # above seems to fail depending on boto3 version, ignore and let's try something else
+                pass
+            except is_boto3_error_code('UnauthorizedOperation') as e:  # pylint: disable=duplicate-except
+                if iam_role_arn is not None:
+                    try:
+                        # Describe regions assuming arn role
+                        assumed_credentials = self._boto3_assume_role(credentials)
+                        client = self._get_connection(assumed_credentials)
+                        resp = client.describe_regions()
+                        regions = [x['RegionName'] for x in resp.get('Regions', [])]
+                    except botocore.exceptions.NoRegionError:
+                        # above seems to fail depending on boto3 version, ignore and let's try something else
+                        pass
+                else:
+                    raise AnsibleError("Unauthorized operation: %s" % to_native(e))
+
+        # fallback to local list hardcoded in boto3 if still no regions
+        if not regions:
+            session = boto3.Session()
+            regions = session.get_available_regions('ec2')
+
+        # I give up, now you MUST give me regions
+        if not regions:
+            raise AnsibleError('Unable to get regions list from available methods, you must specify the "regions" option to continue.')
+
+        for region in regions:
+            connection = self._get_connection(credentials, region)
+            try:
+                if iam_role_arn is not None:
+                    assumed_credentials = self._boto3_assume_role(credentials, region)
+                else:
+                    assumed_credentials = credentials
+                connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region, **assumed_credentials)
+            except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+                if self.boto_profile:
+                    try:
+                        connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region)
+                    except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+                        raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
+                else:
+                    raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
+            yield connection, region
+
+    def _get_instances_by_region(self, regions, filters, strict_permissions):
+        '''
+           :param regions: a list of regions in which to describe instances
+           :param filters: a list of boto3 filter dictionaries
+           :param strict_permissions: a boolean determining whether to fail or ignore 403 error codes
+           :return A list of instance dictionaries
+        '''
+        all_instances = []
+
+        for connection, _region in self._boto3_conn(regions):
+            try:
+                # By default find non-terminated/terminating instances
+                if not any(f['Name'] == 'instance-state-name' for f in filters):
+                    filters.append({'Name': 'instance-state-name', 'Values': ['running', 'pending', 'stopping', 'stopped']})
+                paginator = connection.get_paginator('describe_instances')
+                reservations = paginator.paginate(Filters=filters).build_full_result().get('Reservations')
+                instances = []
+                for r in reservations:
+                    new_instances = r['Instances']
+                    for instance in new_instances:
+                        instance.update(self._get_reservation_details(r))
+                    instances.extend(new_instances)
+            except botocore.exceptions.ClientError as e:
+                if e.response['ResponseMetadata']['HTTPStatusCode'] == 403 and not strict_permissions:
+                    instances = []
+                else:
+                    raise AnsibleError("Failed to describe instances: %s" % to_native(e))
+            except botocore.exceptions.BotoCoreError as e:
+                raise AnsibleError("Failed to describe instances: %s" % to_native(e))
+
+            all_instances.extend(instances)
+
+        return all_instances
+
+    def _get_reservation_details(self, reservation):
+        return {
+            'OwnerId': reservation['OwnerId'],
+            'RequesterId': reservation.get('RequesterId', ''),
+            'ReservationId': reservation['ReservationId']
+        }
+
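+    # _get_tag_hostname() below resolves 'tag:' hostname preferences.
+    # Illustrative (hypothetical) results for an instance tagged
+    # {'Name': 'web1', 'Env': 'prod'}:
+    #
+    #   'tag:Name'       -> ['web1']       (value of the Name tag)
+    #   'tag:Name=web1'  -> ['Name_web1']  (tag name + '_' + value)
+    #   'tag:Name=db1'   -> []             (no match)
+    #   'tag:Name,Env'   -> ['web1', 'prod']
+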
+    @classmethod
+    def _get_tag_hostname(cls, preference, instance):
+        tag_hostnames = preference.split('tag:', 1)[1]
+        if ',' in tag_hostnames:
+            tag_hostnames = tag_hostnames.split(',')
+        else:
+            tag_hostnames = [tag_hostnames]
+
+        tags = boto3_tag_list_to_ansible_dict(instance.get('Tags', []))
+        tag_values = []
+        for v in tag_hostnames:
+            if '=' in v:
+                tag_name, tag_value = v.split('=')
+                if tags.get(tag_name) == tag_value:
+                    tag_values.append(to_text(tag_name) + "_" + to_text(tag_value))
+            else:
+                tag_value = tags.get(v)
+                if tag_value:
+                    tag_values.append(to_text(tag_value))
+        return tag_values
+
+    def _sanitize_hostname(self, hostname):
+        if ':' in to_text(hostname):
+            return self._sanitize_group_name(to_text(hostname))
+        else:
+            return to_text(hostname)
+
+    def _get_preferred_hostname(self, instance, hostnames):
+        '''
+            :param instance: an instance dict returned by boto3 ec2 describe_instances()
+            :param hostnames: a list of hostname destination variables in order of preference
+            :return the preferred identifier for the host
+        '''
+        if not hostnames:
+            hostnames = ['dns-name', 'private-dns-name']
+
+        hostname = None
+        for preference in hostnames:
+            if isinstance(preference, dict):
+                if 'name' not in preference:
+                    raise AnsibleError("A 'name' key must be defined in a hostnames dictionary.")
+                hostname = self._get_preferred_hostname(instance, [preference["name"]])
+                hostname_from_prefix = None
+                if 'prefix' in preference:
+                    hostname_from_prefix = self._get_preferred_hostname(instance, [preference["prefix"]])
+                separator = preference.get("separator", "_")
+                if hostname and hostname_from_prefix and 'prefix' in preference:
+                    hostname = hostname_from_prefix + separator + hostname
+            elif preference.startswith('tag:'):
+                tags = self._get_tag_hostname(preference, instance)
+                hostname = tags[0] if tags else None
+            else:
+                hostname = self._get_boto_attr_chain(preference, instance)
+            if hostname:
+                break
+        if hostname:
+            return self._sanitize_hostname(hostname)
+
+    def get_all_hostnames(self, instance, hostnames):
+        '''
+            :param instance: an instance dict returned by boto3 ec2 describe_instances()
+            :param hostnames: a list of hostname destination variables
+            :return all the candidates matching the expectation
+        '''
+        if not hostnames:
+            hostnames = ['dns-name', 'private-dns-name']
+
+        hostname = None
+        hostname_list = []
+        for preference in hostnames:
+            if isinstance(preference, dict):
+                if 'name' not in preference:
+                    raise AnsibleError("A 'name' key must be defined in a hostnames dictionary.")
+                hostname = self.get_all_hostnames(instance, [preference["name"]])
+                hostname_from_prefix = None
+                if 'prefix' in preference:
+                    hostname_from_prefix = self.get_all_hostnames(instance, [preference["prefix"]])
+                separator = preference.get("separator", "_")
+                if hostname and hostname_from_prefix and 'prefix' in preference:
+                    hostname = hostname_from_prefix[0] + separator + hostname[0]
+            elif preference.startswith('tag:'):
+                hostname = self._get_tag_hostname(preference, instance)
+            else:
+                hostname = self._get_boto_attr_chain(preference, instance)
+
+            if hostname:
+                if isinstance(hostname, list):
+                    for host in hostname:
+                        hostname_list.append(self._sanitize_hostname(host))
+                elif isinstance(hostname, str):
+                    hostname_list.append(self._sanitize_hostname(hostname))
+
+        return hostname_list
+
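+    # Sketch of the semantics implemented by _query() below, with hypothetical
+    # instance IDs:
+    #
+    #   exclude_filters match {'i-aaa', 'i-bbb'}  -> collected as ids_to_ignore
+    #   include_filters match [i-aaa, i-ccc]      -> queried afterwards
+    #   result: [i-ccc]   (exclusions win; duplicate matches are also dropped)
+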
+    def _query(self, regions, include_filters, exclude_filters, strict_permissions):
+        '''
+            :param regions: a list of regions to query
+            :param include_filters: a list of boto3 filter dictionaries
+            :param exclude_filters: a list of boto3 filter dictionaries
+            :param strict_permissions: a boolean determining whether to fail or ignore 403 error codes
+
+        '''
+        instances = []
+        ids_to_ignore = []
+        for filter_dict in exclude_filters:
+            for i in self._get_instances_by_region(
+                    regions,
+                    ansible_dict_to_boto3_filter_list(filter_dict),
+                    strict_permissions):
+                ids_to_ignore.append(i['InstanceId'])
+        for filter_dict in include_filters:
+            for i in self._get_instances_by_region(
+                    regions,
+                    ansible_dict_to_boto3_filter_list(filter_dict),
+                    strict_permissions):
+                if i['InstanceId'] not in ids_to_ignore:
+                    instances.append(i)
+                    ids_to_ignore.append(i['InstanceId'])
+
+        instances = sorted(instances, key=lambda x: x['InstanceId'])
+
+        return {'aws_ec2': instances}
+
+    def _populate(self, groups, hostnames, allow_duplicated_hosts=False,
+                  hostvars_prefix=None, hostvars_suffix=None,
+                  use_contrib_script_compatible_ec2_tag_keys=False):
+        for group in groups:
+            group = self.inventory.add_group(group)
+            self._add_hosts(
+                hosts=groups[group],
+                group=group,
+                hostnames=hostnames,
+                allow_duplicated_hosts=allow_duplicated_hosts,
+                hostvars_prefix=hostvars_prefix,
+                hostvars_suffix=hostvars_suffix,
+                use_contrib_script_compatible_ec2_tag_keys=use_contrib_script_compatible_ec2_tag_keys)
+            self.inventory.add_child('all', group)
+
+    @classmethod
+    def prepare_host_vars(cls, original_host_vars, hostvars_prefix=None, hostvars_suffix=None,
+                          use_contrib_script_compatible_ec2_tag_keys=False):
+        host_vars = camel_dict_to_snake_dict(original_host_vars, ignore_list=['Tags'])
+        host_vars['tags'] = boto3_tag_list_to_ansible_dict(original_host_vars.get('Tags', []))
+
+        # Allow easier grouping by region
+        host_vars['placement']['region'] = host_vars['placement']['availability_zone'][:-1]
+
+        if use_contrib_script_compatible_ec2_tag_keys:
+            for k, v in host_vars['tags'].items():
+                host_vars["ec2_tag_%s" % k] = v
+
+        if hostvars_prefix or hostvars_suffix:
+            for hostvar, hostval in host_vars.copy().items():
+                del host_vars[hostvar]
+                if hostvars_prefix:
+                    hostvar = hostvars_prefix + hostvar
+                if hostvars_suffix:
+                    hostvar = hostvar + hostvars_suffix
+                host_vars[hostvar] = hostval
+
+        return host_vars
+
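+    # Hypothetical before/after for prepare_host_vars() above, assuming
+    # hostvars_prefix='aws_' and hostvars_suffix='_ec2':
+    #
+    #   {'InstanceId': 'i-1',
+    #    'Placement': {'AvailabilityZone': 'us-east-1a'},
+    #    'Tags': [{'Key': 'Name', 'Value': 'web1'}]}
+    #   ->
+    #   {'aws_instance_id_ec2': 'i-1',
+    #    'aws_placement_ec2': {'availability_zone': 'us-east-1a', 'region': 'us-east-1'},
+    #    'aws_tags_ec2': {'Name': 'web1'}}
+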
+    def iter_entry(self, hosts, hostnames, allow_duplicated_hosts=False, hostvars_prefix=None,
+                   hostvars_suffix=None, use_contrib_script_compatible_ec2_tag_keys=False):
+        for host in hosts:
+            if allow_duplicated_hosts:
+                hostname_list = self.get_all_hostnames(host, hostnames)
+            else:
+                hostname_list = [self._get_preferred_hostname(host, hostnames)]
+            if not hostname_list or hostname_list[0] is None:
+                continue
+
+            host_vars = self.prepare_host_vars(
+                host,
+                hostvars_prefix,
+                hostvars_suffix,
+                use_contrib_script_compatible_ec2_tag_keys)
+            for name in hostname_list:
+                yield to_text(name), host_vars
+
+    def _add_hosts(self, hosts, group, hostnames, allow_duplicated_hosts=False,
+                   hostvars_prefix=None, hostvars_suffix=None, use_contrib_script_compatible_ec2_tag_keys=False):
+        '''
+            :param hosts: a list of hosts to be added to a group
+            :param group: the name of the group to which the hosts belong
+            :param hostnames: a list of hostname destination variables in order of preference
+            :param bool allow_duplicated_hosts: if true, accept same host with different names
+            :param str hostvars_prefix: starts the hostvars variable name with this prefix
+            :param str hostvars_suffix: ends the hostvars variable name with this suffix
+            :param bool use_contrib_script_compatible_ec2_tag_keys: transform the host name with the legacy naming system
+        '''
+
+        for name, host_vars in self.iter_entry(
+                hosts, hostnames,
+                allow_duplicated_hosts=allow_duplicated_hosts,
+                hostvars_prefix=hostvars_prefix,
+                hostvars_suffix=hostvars_suffix,
+                use_contrib_script_compatible_ec2_tag_keys=use_contrib_script_compatible_ec2_tag_keys):
+            self.inventory.add_host(name, group=group)
+            for k, v in host_vars.items():
+                self.inventory.set_variable(name, k, v)
+
+            # Use constructed if applicable
+
+            strict = self.get_option('strict')
+
+            # Composed variables
+            self._set_composite_vars(self.get_option('compose'), host_vars, name, strict=strict)
+
+            # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
+            self._add_host_to_composed_groups(self.get_option('groups'), host_vars, name, strict=strict)
+
+            # Create groups based on variable values and add the corresponding hosts to it
+            self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host_vars, name, strict=strict)
+
+    def _set_credentials(self, loader):
+        '''
+            :param loader: the DataLoader used to template any Jinja2 expressions in the credential options
+        '''
+
+        t = Templar(loader=loader)
+        credentials = {}
+
+        for credential_type in ['aws_profile', 'aws_access_key', 'aws_secret_key', 'aws_security_token', 'iam_role_arn']:
+            if t.is_template(self.get_option(credential_type)):
+                credentials[credential_type] = t.template(variable=self.get_option(credential_type), disable_lookups=False)
+            else:
+                credentials[credential_type] = self.get_option(credential_type)
+
+        self.boto_profile = credentials['aws_profile']
+        self.aws_access_key_id = credentials['aws_access_key']
+        self.aws_secret_access_key = credentials['aws_secret_key']
+        self.aws_security_token = credentials['aws_security_token']
+        self.iam_role_arn = credentials['iam_role_arn']
+
+        if not self.boto_profile and not (self.aws_access_key_id and self.aws_secret_access_key):
+            session = botocore.session.get_session()
+            try:
+                credentials = session.get_credentials().get_frozen_credentials()
+            except AttributeError:
+                pass
+            else:
+                self.aws_access_key_id = credentials.access_key
+                self.aws_secret_access_key = credentials.secret_key
+                self.aws_security_token = credentials.token
+
+        if not self.boto_profile and not (self.aws_access_key_id and self.aws_secret_access_key):
+            raise AnsibleError("Insufficient boto credentials found. Please provide them in your "
+                               "inventory configuration file or set them as environment variables.")
+
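+    # Credential resolution order implemented above: explicit inventory options
+    # (templated when they are Jinja2 expressions), then the botocore session
+    # fallback (environment variables, shared credential files, instance profile).
+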
+    def verify_file(self, path):
+        '''
+            :param path: the path to the inventory config file
+            :return True when the file name matches the expected aws_ec2.{yml,yaml} pattern, else False
+        '''
+        if super(InventoryModule, self).verify_file(path):
+            if path.endswith(('aws_ec2.yml', 'aws_ec2.yaml')):
+                return True
+        self.display.debug("aws_ec2 inventory filename must end with 'aws_ec2.yml' or 'aws_ec2.yaml'")
+        return False
+
+    def build_include_filters(self):
+        if self.get_option('filters'):
+            return [self.get_option('filters')] + self.get_option('include_filters')
+        elif self.get_option('include_filters'):
+            return self.get_option('include_filters')
+        else:  # no filter
+            return [{}]
+
+    def parse(self, inventory, loader, path, cache=True):
+
+        super(InventoryModule, self).parse(inventory, loader, path)
+
+        if not HAS_BOTO3:
+            raise AnsibleError(missing_required_lib('botocore and boto3'))
+
+        self._read_config_data(path)
+
+        if self.get_option('use_contrib_script_compatible_sanitization'):
+            self._sanitize_group_name = self._legacy_script_compatible_group_sanitization
+
+        self._set_credentials(loader)
+
+        # get user specifications
+        regions = self.get_option('regions')
+        include_filters = self.build_include_filters()
+        exclude_filters = self.get_option('exclude_filters')
+        hostnames = self.get_option('hostnames')
+        strict_permissions = self.get_option('strict_permissions')
+        allow_duplicated_hosts = self.get_option('allow_duplicated_hosts')
+
+        hostvars_prefix = self.get_option("hostvars_prefix")
+        hostvars_suffix = self.get_option("hostvars_suffix")
+        use_contrib_script_compatible_ec2_tag_keys = self.get_option('use_contrib_script_compatible_ec2_tag_keys')
+
+        cache_key = self.get_cache_key(path)
+        # false when refresh_cache or --flush-cache is used
+        if cache:
+            # get the user-specified directive
+            cache = self.get_option('cache')
+
+        if self.get_option('include_extra_api_calls'):
+            self.display.deprecate(
+                "The include_extra_api_calls option has been deprecated "
+                "and will be removed in release 6.0.0.",
+                date='2024-09-01', collection_name='amazon.aws')
+
+        # Generate inventory
+        cache_needs_update = False
+        if cache:
+            try:
+                results = self._cache[cache_key]
+            except KeyError:
+                # if cache expires or cache file doesn't exist
+                cache_needs_update = True
+
+        if not cache or cache_needs_update:
+            results = self._query(regions, include_filters, exclude_filters, strict_permissions)
+
+        self._populate(
+            results,
+            hostnames,
+            allow_duplicated_hosts=allow_duplicated_hosts,
+            hostvars_prefix=hostvars_prefix,
+            hostvars_suffix=hostvars_suffix,
+            use_contrib_script_compatible_ec2_tag_keys=use_contrib_script_compatible_ec2_tag_keys)
+
+        # If the cache has expired/doesn't exist or if refresh_inventory/flush cache is used
+        # when the user is using caching, update the cached inventory
+        if cache_needs_update or (not cache and self.get_option('cache')):
+            self._cache[cache_key] = results
+
+    @staticmethod
+    def _legacy_script_compatible_group_sanitization(name):
+
+        # note that while this mirrors what the script used to do, it has many issues with unicode and usability in python
+        regex = re.compile(r"[^A-Za-z0-9\_\-]")
+
+        return regex.sub('_', name)
diff --git a/ansible_collections/amazon/aws/plugins/inventory/aws_rds.py b/ansible_collections/amazon/aws/plugins/inventory/aws_rds.py
new file mode 100644
index 000000000..02f86073a
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/inventory/aws_rds.py
@@ -0,0 +1,403 @@
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+name: aws_rds
+short_description: RDS instance inventory source
+description:
+  - Get instances and clusters from Amazon Web Services RDS.
+  - Uses a YAML configuration file that ends with aws_rds.(yml|yaml).
+options:
+  regions:
+    description:
+      - A list of regions in which to describe RDS instances and clusters. Available regions are listed here
+        U(https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html).
+    default: []
+  filters:
+    description:
+      - A dictionary of filter value pairs. Available filters are listed here
+        U(https://docs.aws.amazon.com/cli/latest/reference/rds/describe-db-instances.html#options). If you filter by
+        db-cluster-id and I(include_clusters) is True it will apply to clusters as well.
+    default: {}
+  strict_permissions:
+    description:
+      - By default if an AccessDenied exception is encountered this plugin will fail. You can set strict_permissions to
+        False in the inventory config file which will allow the restrictions to be gracefully skipped.
+    type: bool
+    default: True
+  include_clusters:
+    description: Whether or not to query for Aurora clusters as well as instances.
+    type: bool
+    default: False
+  statuses:
+    description: A list of desired states for instances/clusters to be added to inventory. Set to ['all'] as a shorthand to find everything.
+    type: list
+    elements: str
+    default:
+      - creating
+      - available
+  iam_role_arn:
+    description:
+      - The ARN of the IAM role to assume to perform the inventory lookup. You should still provide
+        AWS credentials with enough privilege to perform the AssumeRole action.
+  hostvars_prefix:
+    description:
+      - The prefix for host variables names coming from AWS.
+    type: str
+    version_added: 3.1.0
+  hostvars_suffix:
+    description:
+      - The suffix for host variables names coming from AWS.
+    type: str
+    version_added: 3.1.0
+notes:
+  - Ansible versions prior to 2.10 should use the fully qualified plugin name 'amazon.aws.aws_rds'.
+extends_documentation_fragment: + - inventory_cache + - constructed + - amazon.aws.boto3 + - amazon.aws.aws_credentials +author: + - Sloane Hertel (@s-hertel) +''' + +EXAMPLES = ''' +plugin: aws_rds +regions: + - us-east-1 + - ca-central-1 +keyed_groups: + - key: 'db_parameter_groups|json_query("[].db_parameter_group_name")' + prefix: rds_parameter_group + - key: engine + prefix: rds + - key: tags + - key: region +hostvars_prefix: aws_ +hostvars_suffix: _rds +''' + +try: + import boto3 + import botocore +except ImportError: + pass # will be captured by imported HAS_BOTO3 + +from ansible.errors import AnsibleError +from ansible.module_utils._text import to_native +from ansible.module_utils.basic import missing_required_lib +from ansible.plugins.inventory import BaseInventoryPlugin +from ansible.plugins.inventory import Cacheable +from ansible.plugins.inventory import Constructable + +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3 +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict + + +class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): + + NAME = 'amazon.aws.aws_rds' + + def __init__(self): + super(InventoryModule, self).__init__() + self.credentials = {} + self.boto_profile = None + self.iam_role_arn = None + + def _get_connection(self, credentials, region='us-east-1'): + try: + connection = boto3.session.Session(profile_name=self.boto_profile).client('rds', region, **credentials) + except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e: + if self.boto_profile: + try: + connection = boto3.session.Session(profile_name=self.boto_profile).client('rds', region) + except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e: + raise AnsibleError("Insufficient credentials found: %s" % to_native(e)) + else: + raise AnsibleError("Insufficient credentials found: %s" % to_native(e)) + return connection + + def _boto3_assume_role(self, credentials, region): + """ + Assume an IAM role passed by iam_role_arn parameter + :return: a dict containing the credentials of the assumed role + """ + + iam_role_arn = self.iam_role_arn + + try: + sts_connection = boto3.session.Session(profile_name=self.boto_profile).client('sts', region, **credentials) + sts_session = sts_connection.assume_role(RoleArn=iam_role_arn, RoleSessionName='ansible_aws_rds_dynamic_inventory') + return dict( + aws_access_key_id=sts_session['Credentials']['AccessKeyId'], + aws_secret_access_key=sts_session['Credentials']['SecretAccessKey'], + aws_session_token=sts_session['Credentials']['SessionToken'] + ) + except botocore.exceptions.ClientError as e: + raise AnsibleError("Unable to assume IAM role: %s" % to_native(e)) + + def _boto3_conn(self, regions): + ''' + :param regions: A list of regions to create a boto3 client + + Generator that yields a boto3 client and the region + ''' + iam_role_arn = self.iam_role_arn + credentials = self.credentials + for region in regions: + try: + if iam_role_arn is not None: + assumed_credentials = self._boto3_assume_role(credentials, region) + else: + assumed_credentials = credentials + connection = 
boto3.session.Session(profile_name=self.boto_profile).client('rds', region, **assumed_credentials) + except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e: + if self.boto_profile: + try: + connection = boto3.session.Session(profile_name=self.boto_profile).client('rds', region) + except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e: + raise AnsibleError("Insufficient credentials found: %s" % to_native(e)) + else: + raise AnsibleError("Insufficient credentials found: %s" % to_native(e)) + yield connection, region + + def _get_hosts_by_region(self, connection, filters, strict): + + def _add_tags_for_hosts(connection, hosts, strict): + for host in hosts: + if 'DBInstanceArn' in host: + resource_arn = host['DBInstanceArn'] + else: + resource_arn = host['DBClusterArn'] + + try: + tags = connection.list_tags_for_resource(ResourceName=resource_arn)['TagList'] + except is_boto3_error_code('AccessDenied') as e: + if not strict: + tags = [] + else: + raise e + host['Tags'] = tags + + def wrapper(f, *args, **kwargs): + try: + results = f(*args, **kwargs) + if 'DBInstances' in results: + results = results['DBInstances'] + else: + results = results['DBClusters'] + _add_tags_for_hosts(connection, results, strict) + except is_boto3_error_code('AccessDenied') as e: # pylint: disable=duplicate-except + if not strict: + results = [] + else: + raise AnsibleError("Failed to query RDS: {0}".format(to_native(e))) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + raise AnsibleError("Failed to query RDS: {0}".format(to_native(e))) + return results + return wrapper + + def _get_all_hosts(self, regions, instance_filters, cluster_filters, strict, statuses, gather_clusters=False): + ''' + :param regions: a list of regions in which to describe hosts + :param instance_filters: a list of boto3 filter dictionaries + :param cluster_filters: a list of boto3 filter dictionaries + :param strict: a boolean determining whether to fail or ignore 403 error codes + :param statuses: a list of statuses that the returned hosts should match + :return A list of host dictionaries + ''' + all_instances = [] + all_clusters = [] + for connection, _region in self._boto3_conn(regions): + paginator = connection.get_paginator('describe_db_instances') + all_instances.extend( + self._get_hosts_by_region(connection, instance_filters, strict) + (paginator.paginate(Filters=instance_filters).build_full_result) + ) + if gather_clusters: + all_clusters.extend( + self._get_hosts_by_region(connection, cluster_filters, strict) + (connection.describe_db_clusters, **{'Filters': cluster_filters}) + ) + sorted_hosts = list( + sorted(all_instances, key=lambda x: x['DBInstanceIdentifier']) + + sorted(all_clusters, key=lambda x: x['DBClusterIdentifier']) + ) + return self.find_hosts_with_valid_statuses(sorted_hosts, statuses) + + def find_hosts_with_valid_statuses(self, hosts, statuses): + if 'all' in statuses: + return hosts + valid_hosts = [] + for host in hosts: + if host.get('DBInstanceStatus') in statuses: + valid_hosts.append(host) + elif host.get('Status') in statuses: + valid_hosts.append(host) + return valid_hosts + + def _populate(self, hosts): + group = 'aws_rds' + self.inventory.add_group(group) + if hosts: + self._add_hosts(hosts=hosts, group=group) + self.inventory.add_child('all', group) + + def _populate_from_source(self, source_data): + hostvars = source_data.pop('_meta', {}).get('hostvars', 
{}) + for group in source_data: + if group == 'all': + continue + else: + self.inventory.add_group(group) + hosts = source_data[group].get('hosts', []) + for host in hosts: + self._populate_host_vars([host], hostvars.get(host, {}), group) + self.inventory.add_child('all', group) + + def _get_hostname(self, host): + if host.get('DBInstanceIdentifier'): + return host['DBInstanceIdentifier'] + else: + return host['DBClusterIdentifier'] + + def _format_inventory(self, hosts): + results = {'_meta': {'hostvars': {}}} + group = 'aws_rds' + results[group] = {'hosts': []} + for host in hosts: + hostname = self._get_hostname(host) + results[group]['hosts'].append(hostname) + h = self.inventory.get_host(hostname) + results['_meta']['hostvars'][h.name] = h.vars + return results + + def _add_hosts(self, hosts, group): + ''' + :param hosts: a list of hosts to be added to a group + :param group: the name of the group to which the hosts belong + ''' + for host in hosts: + hostname = self._get_hostname(host) + host = camel_dict_to_snake_dict(host, ignore_list=['Tags']) + host['tags'] = boto3_tag_list_to_ansible_dict(host.get('tags', [])) + + # Allow easier grouping by region + if 'availability_zone' in host: + host['region'] = host['availability_zone'][:-1] + elif 'availability_zones' in host: + host['region'] = host['availability_zones'][0][:-1] + + self.inventory.add_host(hostname, group=group) + hostvars_prefix = self.get_option("hostvars_prefix") + hostvars_suffix = self.get_option("hostvars_suffix") + new_vars = dict() + for hostvar, hostval in host.items(): + if hostvars_prefix: + hostvar = hostvars_prefix + hostvar + if hostvars_suffix: + hostvar = hostvar + hostvars_suffix + new_vars[hostvar] = hostval + self.inventory.set_variable(hostname, hostvar, hostval) + host.update(new_vars) + + # Use constructed if applicable + strict = self.get_option('strict') + # Composed variables + self._set_composite_vars(self.get_option('compose'), host, hostname, strict=strict) + # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group + self._add_host_to_composed_groups(self.get_option('groups'), host, hostname, strict=strict) + # Create groups based on variable values and add the corresponding hosts to it + self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host, hostname, strict=strict) + + def _set_credentials(self): + ''' + ''' + self.boto_profile = self.get_option('aws_profile') + aws_access_key_id = self.get_option('aws_access_key') + aws_secret_access_key = self.get_option('aws_secret_key') + aws_security_token = self.get_option('aws_security_token') + self.iam_role_arn = self.get_option('iam_role_arn') + + if not self.boto_profile and not (aws_access_key_id and aws_secret_access_key): + session = botocore.session.get_session() + if session.get_credentials() is not None: + aws_access_key_id = session.get_credentials().access_key + aws_secret_access_key = session.get_credentials().secret_key + aws_security_token = session.get_credentials().token + + if not self.boto_profile and not (aws_access_key_id and aws_secret_access_key): + raise AnsibleError("Insufficient boto credentials found. 
Please provide them in your " + "inventory configuration file or set them as environment variables.") + + if aws_access_key_id: + self.credentials['aws_access_key_id'] = aws_access_key_id + if aws_secret_access_key: + self.credentials['aws_secret_access_key'] = aws_secret_access_key + if aws_security_token: + self.credentials['aws_session_token'] = aws_security_token + + def verify_file(self, path): + ''' + :param loader: an ansible.parsing.dataloader.DataLoader object + :param path: the path to the inventory config file + :return the contents of the config file + ''' + if super(InventoryModule, self).verify_file(path): + if path.endswith(('aws_rds.yml', 'aws_rds.yaml')): + return True + return False + + def parse(self, inventory, loader, path, cache=True): + super(InventoryModule, self).parse(inventory, loader, path) + + if not HAS_BOTO3: + raise AnsibleError(missing_required_lib('botocore and boto3')) + + self._read_config_data(path) + self._set_credentials() + + # get user specifications + regions = self.get_option('regions') + filters = self.get_option('filters') + strict_permissions = self.get_option('strict_permissions') + statuses = self.get_option('statuses') + include_clusters = self.get_option('include_clusters') + instance_filters = ansible_dict_to_boto3_filter_list(filters) + cluster_filters = [] + if 'db-cluster-id' in filters and include_clusters: + cluster_filters = ansible_dict_to_boto3_filter_list({'db-cluster-id': filters['db-cluster-id']}) + + cache_key = self.get_cache_key(path) + # false when refresh_cache or --flush-cache is used + if cache: + # get the user-specified directive + cache = self.get_option('cache') + + # Generate inventory + formatted_inventory = {} + cache_needs_update = False + if cache: + try: + results = self._cache[cache_key] + except KeyError: + # if cache expires or cache file doesn't exist + cache_needs_update = True + else: + self._populate_from_source(results) + + if not cache or cache_needs_update: + results = self._get_all_hosts(regions, instance_filters, cluster_filters, strict_permissions, statuses, include_clusters) + self._populate(results) + formatted_inventory = self._format_inventory(results) + + # If the cache has expired/doesn't exist or if refresh_inventory/flush cache is used + # when the user is using caching, update the cached inventory + if cache_needs_update or (not cache and self.get_option('cache')): + self._cache[cache_key] = formatted_inventory diff --git a/ansible_collections/amazon/aws/plugins/lookup/aws_account_attribute.py b/ansible_collections/amazon/aws/plugins/lookup/aws_account_attribute.py new file mode 100644 index 000000000..415b76d75 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/lookup/aws_account_attribute.py @@ -0,0 +1,136 @@ +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +name: aws_account_attribute +author: + - Sloane Hertel (@s-hertel) +extends_documentation_fragment: + - amazon.aws.boto3 + - amazon.aws.aws_credentials + - amazon.aws.aws_region +short_description: Look up AWS account attributes +description: + - Describes attributes of your AWS account. You can specify one of the listed + attribute choices or omit it to see all attributes. +options: + attribute: + description: The attribute for which to get the value(s). 
+ choices: + - supported-platforms + - default-vpc + - max-instances + - vpc-max-security-groups-per-interface + - max-elastic-ips + - vpc-max-elastic-ips + - has-ec2-classic +''' + +EXAMPLES = """ +vars: + has_ec2_classic: "{{ lookup('aws_account_attribute', attribute='has-ec2-classic') }}" + # true | false + + default_vpc_id: "{{ lookup('aws_account_attribute', attribute='default-vpc') }}" + # vpc-xxxxxxxx | none + + account_details: "{{ lookup('aws_account_attribute', wantlist='true') }}" + # {'default-vpc': ['vpc-xxxxxxxx'], 'max-elastic-ips': ['5'], 'max-instances': ['20'], + # 'supported-platforms': ['VPC', 'EC2'], 'vpc-max-elastic-ips': ['5'], 'vpc-max-security-groups-per-interface': ['5']} + +""" + +RETURN = """ +_raw: + description: + Returns a boolean when I(attribute) is check_ec2_classic. Otherwise returns the value(s) of the attribute + (or all attributes if one is not specified). +""" + +try: + import boto3 + import botocore +except ImportError: + pass # will be captured by imported HAS_BOTO3 + +from ansible.errors import AnsibleLookupError +from ansible.module_utils._text import to_native +from ansible.module_utils.basic import missing_required_lib +from ansible.plugins.lookup import LookupBase + +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3 + + +def _boto3_conn(region, credentials): + boto_profile = credentials.pop('aws_profile', None) + + try: + connection = boto3.session.Session(profile_name=boto_profile).client('ec2', region, **credentials) + except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError): + if boto_profile: + try: + connection = boto3.session.Session(profile_name=boto_profile).client('ec2', region) + except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError): + raise AnsibleLookupError("Insufficient credentials found.") + else: + raise AnsibleLookupError("Insufficient credentials found.") + return connection + + +def _get_credentials(options): + credentials = {} + credentials['aws_profile'] = options['aws_profile'] + credentials['aws_secret_access_key'] = options['aws_secret_key'] + credentials['aws_access_key_id'] = options['aws_access_key'] + if options['aws_security_token']: + credentials['aws_session_token'] = options['aws_security_token'] + + return credentials + + +@AWSRetry.jittered_backoff(retries=10) +def _describe_account_attributes(client, **params): + return client.describe_account_attributes(**params) + + +class LookupModule(LookupBase): + def run(self, terms, variables, **kwargs): + + if not HAS_BOTO3: + raise AnsibleLookupError(missing_required_lib('botocore and boto3')) + + self.set_options(var_options=variables, direct=kwargs) + boto_credentials = _get_credentials(self._options) + + region = self._options['region'] + client = _boto3_conn(region, boto_credentials) + + attribute = kwargs.get('attribute') + params = {'AttributeNames': []} + check_ec2_classic = False + if 'has-ec2-classic' == attribute: + check_ec2_classic = True + params['AttributeNames'] = ['supported-platforms'] + elif attribute: + params['AttributeNames'] = [attribute] + + try: + response = _describe_account_attributes(client, **params)['AccountAttributes'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + raise AnsibleLookupError("Failed to describe account attributes: %s" % to_native(e)) + + if check_ec2_classic: + attr = response[0] + return 
any(value['AttributeValue'] == 'EC2' for value in attr['AttributeValues']) + + if attribute: + attr = response[0] + return [value['AttributeValue'] for value in attr['AttributeValues']] + + flattened = {} + for k_v_dict in response: + flattened[k_v_dict['AttributeName']] = [value['AttributeValue'] for value in k_v_dict['AttributeValues']] + return flattened diff --git a/ansible_collections/amazon/aws/plugins/lookup/aws_secret.py b/ansible_collections/amazon/aws/plugins/lookup/aws_secret.py new file mode 100644 index 000000000..0f694cfa0 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/lookup/aws_secret.py @@ -0,0 +1,295 @@ +# Copyright: (c) 2018, Aaron Smith +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +name: aws_secret +author: + - Aaron Smith (!UNKNOWN) +extends_documentation_fragment: + - amazon.aws.boto3 + - amazon.aws.aws_credentials + - amazon.aws.aws_region + +short_description: Look up secrets stored in AWS Secrets Manager +description: + - Look up secrets stored in AWS Secrets Manager provided the caller + has the appropriate permissions to read the secret. + - Lookup is based on the secret's I(Name) value. + - Optional parameters can be passed into this lookup; I(version_id) and I(version_stage) +options: + _terms: + description: Name of the secret to look up in AWS Secrets Manager. + required: True + bypath: + description: A boolean to indicate whether the parameter is provided as a hierarchy. + default: false + type: boolean + version_added: 1.4.0 + nested: + description: A boolean to indicate the secret contains nested values. + type: boolean + default: false + version_added: 1.4.0 + version_id: + description: Version of the secret(s). + required: False + version_stage: + description: Stage of the secret version. + required: False + join: + description: + - Join two or more entries to form an extended secret. + - This is useful for overcoming the 4096 character limit imposed by AWS. + - No effect when used with I(bypath). + type: boolean + default: false + on_deleted: + description: + - Action to take if the secret has been marked for deletion. + - C(error) will raise a fatal error when the secret has been marked for deletion. + - C(skip) will silently ignore the deleted secret. + - C(warn) will skip over the deleted secret but issue a warning. + default: error + type: string + choices: ['error', 'skip', 'warn'] + version_added: 2.0.0 + on_missing: + description: + - Action to take if the secret is missing. + - C(error) will raise a fatal error when the secret is missing. + - C(skip) will silently ignore the missing secret. + - C(warn) will skip over the missing secret but issue a warning. + default: error + type: string + choices: ['error', 'skip', 'warn'] + on_denied: + description: + - Action to take if access to the secret is denied. + - C(error) will raise a fatal error when access to the secret is denied. + - C(skip) will silently ignore the denied secret. + - C(warn) will skip over the denied secret but issue a warning. 
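# Editor's illustration, not part of the upstream patch: the
# aws_account_attribute lookup above reduces DescribeAccountAttributes output
# to a {name: [values]} dict and answers 'has-ec2-classic' by scanning the
# supported-platforms attribute. The canned response below is hypothetical.
_sample = [
    {"AttributeName": "supported-platforms",
     "AttributeValues": [{"AttributeValue": "VPC"}, {"AttributeValue": "EC2"}]},
    {"AttributeName": "max-instances",
     "AttributeValues": [{"AttributeValue": "20"}]},
]
flattened = {attr["AttributeName"]: [v["AttributeValue"] for v in attr["AttributeValues"]]
             for attr in _sample}
assert flattened["max-instances"] == ["20"]
assert any(v == "EC2" for v in flattened["supported-platforms"])  # has-ec2-classic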
+ default: error + type: string + choices: ['error', 'skip', 'warn'] +''' + +EXAMPLES = r""" + - name: lookup secretsmanager secret in the current region + debug: msg="{{ lookup('amazon.aws.aws_secret', '/path/to/secrets', bypath=true) }}" + + - name: Create RDS instance with aws_secret lookup for password param + rds: + command: create + instance_name: app-db + db_engine: MySQL + size: 10 + instance_type: db.m1.small + username: dbadmin + password: "{{ lookup('amazon.aws.aws_secret', 'DbSecret') }}" + tags: + Environment: staging + + - name: skip if secret does not exist + debug: msg="{{ lookup('amazon.aws.aws_secret', 'secret-not-exist', on_missing='skip')}}" + + - name: warn if access to the secret is denied + debug: msg="{{ lookup('amazon.aws.aws_secret', 'secret-denied', on_denied='warn')}}" + + - name: lookup secretsmanager secret in the current region using the nested feature + debug: msg="{{ lookup('amazon.aws.aws_secret', 'secrets.environments.production.password', nested=true) }}" + # The secret can be queried using the following syntax: `aws_secret_object_name.key1.key2.key3`. + # If an object is of the form `{"key1":{"key2":{"key3":1}}}` the query would return the value `1`. + - name: lookup secretsmanager secret in a specific region using specified region and aws profile using nested feature + debug: > + msg="{{ lookup('amazon.aws.aws_secret', 'secrets.environments.production.password', region=region, aws_profile=aws_profile, + aws_access_key=aws_access_key, aws_secret_key=aws_secret_key, nested=true) }}" + # The secret can be queried using the following syntax: `aws_secret_object_name.key1.key2.key3`. + # If an object is of the form `{"key1":{"key2":{"key3":1}}}` the query would return the value `1`. + # Region is the AWS region where the AWS secret is stored. + # AWS_profile is the aws profile to use, that has access to the AWS secret. +""" + +RETURN = r""" +_raw: + description: + Returns the value of the secret stored in AWS Secrets Manager. 
+""" + +import json + +try: + import boto3 + import botocore +except ImportError: + pass # will be captured by imported HAS_BOTO3 + +from ansible.errors import AnsibleLookupError +from ansible.module_utils.six import string_types +from ansible.module_utils._text import to_native +from ansible.module_utils.basic import missing_required_lib +from ansible.plugins.lookup import LookupBase + +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3 + + +def _boto3_conn(region, credentials): + boto_profile = credentials.pop('aws_profile', None) + + try: + connection = boto3.session.Session(profile_name=boto_profile).client('secretsmanager', region, **credentials) + except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError): + if boto_profile: + try: + connection = boto3.session.Session(profile_name=boto_profile).client('secretsmanager', region) + except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError): + raise AnsibleLookupError("Insufficient credentials found.") + else: + raise AnsibleLookupError("Insufficient credentials found.") + return connection + + +class LookupModule(LookupBase): + def run(self, terms, variables=None, boto_profile=None, aws_profile=None, + aws_secret_key=None, aws_access_key=None, aws_security_token=None, region=None, + bypath=False, nested=False, join=False, version_stage=None, version_id=None, on_missing='error', + on_denied='error', on_deleted='error'): + ''' + :arg terms: a list of lookups to run. + e.g. ['parameter_name', 'parameter_name_too' ] + :kwarg variables: ansible variables active at the time of the lookup + :kwarg aws_secret_key: identity of the AWS key to use + :kwarg aws_access_key: AWS secret key (matching identity) + :kwarg aws_security_token: AWS session key if using STS + :kwarg decrypt: Set to True to get decrypted parameters + :kwarg region: AWS region in which to do the lookup + :kwarg bypath: Set to True to do a lookup of variables under a path + :kwarg nested: Set to True to do a lookup of nested secrets + :kwarg join: Join two or more entries to form an extended secret + :kwarg version_stage: Stage of the secret version + :kwarg version_id: Version of the secret(s) + :kwarg on_missing: Action to take if the secret is missing + :kwarg on_deleted: Action to take if the secret is marked for deletion + :kwarg on_denied: Action to take if access to the secret is denied + :returns: A list of parameter values or a list of dictionaries if bypath=True. 
+ ''' + if not HAS_BOTO3: + raise AnsibleLookupError(missing_required_lib('botocore and boto3')) + + deleted = on_deleted.lower() + if not isinstance(deleted, string_types) or deleted not in ['error', 'warn', 'skip']: + raise AnsibleLookupError('"on_deleted" must be a string and one of "error", "warn" or "skip", not %s' % deleted) + + missing = on_missing.lower() + if not isinstance(missing, string_types) or missing not in ['error', 'warn', 'skip']: + raise AnsibleLookupError('"on_missing" must be a string and one of "error", "warn" or "skip", not %s' % missing) + + denied = on_denied.lower() + if not isinstance(denied, string_types) or denied not in ['error', 'warn', 'skip']: + raise AnsibleLookupError('"on_denied" must be a string and one of "error", "warn" or "skip", not %s' % denied) + + credentials = {} + if aws_profile: + credentials['aws_profile'] = aws_profile + else: + credentials['aws_profile'] = boto_profile + credentials['aws_secret_access_key'] = aws_secret_key + credentials['aws_access_key_id'] = aws_access_key + credentials['aws_session_token'] = aws_security_token + + # fallback to IAM role credentials + if not credentials['aws_profile'] and not ( + credentials['aws_access_key_id'] and credentials['aws_secret_access_key']): + session = botocore.session.get_session() + if session.get_credentials() is not None: + credentials['aws_access_key_id'] = session.get_credentials().access_key + credentials['aws_secret_access_key'] = session.get_credentials().secret_key + credentials['aws_session_token'] = session.get_credentials().token + + client = _boto3_conn(region, credentials) + + if bypath: + secrets = {} + for term in terms: + try: + paginator = client.get_paginator('list_secrets') + paginator_response = paginator.paginate( + Filters=[{'Key': 'name', 'Values': [term]}]) + for object in paginator_response: + if 'SecretList' in object: + for secret_obj in object['SecretList']: + secrets.update({secret_obj['Name']: self.get_secret_value( + secret_obj['Name'], client, on_missing=missing, on_denied=denied)}) + secrets = [secrets] + + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + raise AnsibleLookupError("Failed to retrieve secret: %s" % to_native(e)) + else: + secrets = [] + for term in terms: + value = self.get_secret_value(term, client, + version_stage=version_stage, version_id=version_id, + on_missing=missing, on_denied=denied, on_deleted=deleted, + nested=nested) + if value: + secrets.append(value) + if join: + joined_secret = [] + joined_secret.append(''.join(secrets)) + return joined_secret + + return secrets + + def get_secret_value(self, term, client, version_stage=None, version_id=None, on_missing=None, on_denied=None, on_deleted=None, nested=False): + params = {} + params['SecretId'] = term + if version_id: + params['VersionId'] = version_id + if version_stage: + params['VersionStage'] = version_stage + if nested: + if len(term.split('.')) < 2: + raise AnsibleLookupError("Nested query must use the following syntax: `aws_secret_name..") + secret_name = term.split('.')[0] + params['SecretId'] = secret_name + + try: + response = client.get_secret_value(**params) + if 'SecretBinary' in response: + return response['SecretBinary'] + if 'SecretString' in response: + if nested: + query = term.split('.')[1:] + secret_string = json.loads(response['SecretString']) + ret_val = secret_string + for key in query: + if key in ret_val: + ret_val = ret_val[key] + else: + raise AnsibleLookupError("Successfully retrieved secret but there exists no key {0} 
in the secret".format(key)) + return str(ret_val) + else: + return response['SecretString'] + except is_boto3_error_message('marked for deletion'): + if on_deleted == 'error': + raise AnsibleLookupError("Failed to find secret %s (marked for deletion)" % term) + elif on_deleted == 'warn': + self._display.warning('Skipping, did not find secret (marked for deletion) %s' % term) + except is_boto3_error_code('ResourceNotFoundException'): # pylint: disable=duplicate-except + if on_missing == 'error': + raise AnsibleLookupError("Failed to find secret %s (ResourceNotFound)" % term) + elif on_missing == 'warn': + self._display.warning('Skipping, did not find secret %s' % term) + except is_boto3_error_code('AccessDeniedException'): # pylint: disable=duplicate-except + if on_denied == 'error': + raise AnsibleLookupError("Failed to access secret %s (AccessDenied)" % term) + elif on_denied == 'warn': + self._display.warning('Skipping, access denied for secret %s' % term) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + raise AnsibleLookupError("Failed to retrieve secret: %s" % to_native(e)) + + return None diff --git a/ansible_collections/amazon/aws/plugins/lookup/aws_service_ip_ranges.py b/ansible_collections/amazon/aws/plugins/lookup/aws_service_ip_ranges.py new file mode 100644 index 000000000..251debf40 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/lookup/aws_service_ip_ranges.py @@ -0,0 +1,90 @@ +# (c) 2016 James Turner +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +name: aws_service_ip_ranges +author: + - James Turner (!UNKNOWN) +requirements: + - must have public internet connectivity +short_description: Look up the IP ranges for services provided in AWS such as EC2 and S3. +description: + - AWS publishes IP ranges used on the public internet by EC2, S3, CloudFront, CodeBuild, Route53, and Route53 Health Checking. + - This module produces a list of all the ranges (by default) or can narrow down the list to the specified region or service. +options: + service: + description: 'The service to filter ranges by. Options: EC2, S3, CLOUDFRONT, CODEbUILD, ROUTE53, ROUTE53_HEALTHCHECKS' + region: + description: 'The AWS region to narrow the ranges to. 
Examples: us-east-1, eu-west-2, ap-southeast-1' + ipv6_prefixes: + description: 'When I(ipv6_prefixes=True) the lookup will return ipv6 addresses instead of ipv4 addresses' + version_added: 2.1.0 +''' + +EXAMPLES = """ +vars: + ec2_ranges: "{{ lookup('aws_service_ip_ranges', region='ap-southeast-2', service='EC2', wantlist=True) }}" +tasks: + +- name: "use list return option and iterate as a loop" + debug: msg="{% for cidr in ec2_ranges %}{{ cidr }} {% endfor %}" +# "52.62.0.0/15 52.64.0.0/17 52.64.128.0/17 52.65.0.0/16 52.95.241.0/24 52.95.255.16/28 54.66.0.0/16 " + +- name: "Pull S3 IP ranges, and print the default return style" + debug: msg="{{ lookup('aws_service_ip_ranges', region='us-east-1', service='S3') }}" +# "52.92.16.0/20,52.216.0.0/15,54.231.0.0/17" +""" + +RETURN = """ +_raw: + description: comma-separated list of CIDR ranges +""" + +import json + +from ansible.errors import AnsibleLookupError +from ansible.module_utils.six.moves.urllib.error import HTTPError +from ansible.module_utils.six.moves.urllib.error import URLError +from ansible.module_utils._text import to_native +from ansible.module_utils.urls import ConnectionError +from ansible.module_utils.urls import open_url +from ansible.module_utils.urls import SSLValidationError +from ansible.plugins.lookup import LookupBase + + +class LookupModule(LookupBase): + def run(self, terms, variables, **kwargs): + if "ipv6_prefixes" in kwargs and kwargs["ipv6_prefixes"]: + prefixes_label = "ipv6_prefixes" + ip_prefix_label = "ipv6_prefix" + else: + prefixes_label = "prefixes" + ip_prefix_label = "ip_prefix" + + try: + resp = open_url('https://ip-ranges.amazonaws.com/ip-ranges.json') + amazon_response = json.load(resp)[prefixes_label] + except getattr(json.decoder, 'JSONDecodeError', ValueError) as e: + # on Python 3+, json.decoder.JSONDecodeError is raised for bad + # JSON. 
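# Editor's illustration, not part of the upstream patch: the region/service
# filtering performed just below, replayed against a canned ip-ranges.json
# fragment (the sample data is hypothetical):
_ranges = {"prefixes": [
    {"ip_prefix": "52.92.16.0/20", "region": "us-east-1", "service": "S3"},
    {"ip_prefix": "52.65.0.0/16", "region": "ap-southeast-2", "service": "EC2"},
]}
candidates = (p for p in _ranges["prefixes"] if p["region"] == "us-east-1")
candidates = (p for p in candidates if p["service"] == "S3")
assert [p["ip_prefix"] for p in candidates] == ["52.92.16.0/20"]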
On 2.x it's a ValueError + raise AnsibleLookupError("Could not decode AWS IP ranges: %s" % to_native(e)) + except HTTPError as e: + raise AnsibleLookupError("Received HTTP error while pulling IP ranges: %s" % to_native(e)) + except SSLValidationError as e: + raise AnsibleLookupError("Error validating the server's certificate for: %s" % to_native(e)) + except URLError as e: + raise AnsibleLookupError("Failed to look up IP range service: %s" % to_native(e)) + except ConnectionError as e: + raise AnsibleLookupError("Error connecting to IP range service: %s" % to_native(e)) + + if 'region' in kwargs: + region = kwargs['region'] + amazon_response = (item for item in amazon_response if item['region'] == region) + if 'service' in kwargs: + service = str.upper(kwargs['service']) + amazon_response = (item for item in amazon_response if item['service'] == service) + iprange = [item[ip_prefix_label] for item in amazon_response] + return iprange diff --git a/ansible_collections/amazon/aws/plugins/lookup/aws_ssm.py b/ansible_collections/amazon/aws/plugins/lookup/aws_ssm.py new file mode 100644 index 000000000..e71808560 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/lookup/aws_ssm.py @@ -0,0 +1,286 @@ +# (c) 2016, Bill Wang +# (c) 2017, Marat Bakeev +# (c) 2018, Michael De La Rue +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +name: aws_ssm +author: + - Bill Wang (!UNKNOWN) + - Marat Bakeev (!UNKNOWN) + - Michael De La Rue (!UNKNOWN) +short_description: Get the value for an SSM parameter or all parameters under a path +description: + - Get the value for an Amazon Simple Systems Manager parameter or a hierarchy of parameters. + The first argument you pass to the lookup can either be a parameter name or a hierarchy of + parameters. Hierarchies start with a forward slash and end with the parameter name. Up to + 5 layers may be specified. + - If looking up an explicitly listed parameter by name which does not exist then the lookup + will generate an error. You can use the ```default``` filter to give a default value in + this case but must set the ```on_missing``` parameter to ```skip``` or ```warn```. You must + also set the second parameter of the ```default``` filter to ```true``` (see examples below). + - When looking up a path for parameters under it, a dictionary will be returned for each path. + If there is no parameter under that path then the lookup will generate an error. + - If the lookup fails due to lack of permissions or due to an AWS client error then the aws_ssm + lookup will generate an error. If you want to continue in this case then you will have to set up + two Ansible tasks, one which sets a variable and ignores failures and one which uses the value + of that variable with a default. See the examples below. + +options: + decrypt: + description: A boolean to indicate whether to decrypt the parameter. + default: true + type: boolean + bypath: + description: A boolean to indicate whether the parameter is provided as a hierarchy. + default: false + type: boolean + recursive: + description: A boolean to indicate whether to retrieve all parameters within a hierarchy. + default: false + type: boolean + shortnames: + description: Indicates whether to return the name only without path if using a parameter hierarchy. 
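# Editor's illustration, not part of the upstream patch: what shortnames=true
# does to a hierarchy path -- the plugin later trims everything up to the last
# slash with rfind('/'):
name = "/PATH/to/params/db_password"
assert name[name.rfind("/") + 1:] == "db_password"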
+ default: false + type: boolean + on_missing: + description: + - Action to take if the SSM parameter is missing. + - C(error) will raise a fatal error when the SSM parameter is missing. + - C(skip) will silently ignore the missing SSM parameter. + - C(warn) will skip over the missing SSM parameter but issue a warning. + default: error + type: string + choices: ['error', 'skip', 'warn'] + version_added: 2.0.0 + on_denied: + description: + - Action to take if access to the SSM parameter is denied. + - C(error) will raise a fatal error when access to the SSM parameter is denied. + - C(skip) will silently ignore the denied SSM parameter. + - C(warn) will skip over the denied SSM parameter but issue a warning. + default: error + type: string + choices: ['error', 'skip', 'warn'] + version_added: 2.0.0 + endpoint: + description: Use a custom endpoint when connecting to SSM service. + type: string + version_added: 3.3.0 +extends_documentation_fragment: + - amazon.aws.boto3 +''' + +EXAMPLES = ''' +# lookup sample: +- name: lookup ssm parameter store in the current region + debug: msg="{{ lookup('aws_ssm', 'Hello' ) }}" + +- name: lookup ssm parameter store in specified region + debug: msg="{{ lookup('aws_ssm', 'Hello', region='us-east-2' ) }}" + +- name: lookup ssm parameter store without decryption + debug: msg="{{ lookup('aws_ssm', 'Hello', decrypt=False ) }}" + +- name: lookup ssm parameter store using a specified aws profile + debug: msg="{{ lookup('aws_ssm', 'Hello', aws_profile='myprofile' ) }}" + +- name: lookup ssm parameter store using explicit aws credentials + debug: msg="{{ lookup('aws_ssm', 'Hello', aws_access_key=my_aws_access_key, aws_secret_key=my_aws_secret_key, aws_security_token=my_security_token ) }}" + +- name: lookup ssm parameter store with all options + debug: msg="{{ lookup('aws_ssm', 'Hello', decrypt=false, region='us-east-2', aws_profile='myprofile') }}" + +- name: lookup ssm parameter and fail if missing + debug: msg="{{ lookup('aws_ssm', 'missing-parameter') }}" + +- name: lookup a key which doesn't exist, returning a default ('root') + debug: msg="{{ lookup('aws_ssm', 'AdminID', on_missing="skip") | default('root', true) }}" + +- name: lookup a key which doesn't exist failing to store it in a fact + set_fact: + temp_secret: "{{ lookup('aws_ssm', '/NoAccess/hiddensecret') }}" + ignore_errors: true + +- name: show fact default to "access failed" if we don't have access + debug: msg="{{ 'the secret was:' ~ temp_secret | default('could not access secret') }}" + +- name: return a dictionary of ssm parameters from a hierarchy path + debug: msg="{{ lookup('aws_ssm', '/PATH/to/params', region='ap-southeast-2', bypath=true, recursive=true ) }}" + +- name: return a dictionary of ssm parameters from a hierarchy path with shortened names (param instead of /PATH/to/param) + debug: msg="{{ lookup('aws_ssm', '/PATH/to/params', region='ap-southeast-2', shortnames=true, bypath=true, recursive=true ) }}" + +- name: Iterate over a parameter hierarchy (one iteration per parameter) + debug: msg='Key contains {{ item.key }} , with value {{ item.value }}' + loop: '{{ lookup("aws_ssm", "/demo/", region="ap-southeast-2", bypath=True) | dict2items }}' + +- name: Iterate over multiple paths as dictionaries (one iteration per path) + debug: msg='Path contains {{ item }}' + loop: '{{ lookup("aws_ssm", "/demo/", "/demo1/", bypath=True)}}' + +- name: lookup ssm parameter warn if access is denied + debug: msg="{{ lookup('aws_ssm', 'missing-parameter', on_denied="warn" ) }}" +''' + +try: + import 
botocore +except ImportError: + pass # will be captured by imported HAS_BOTO3 + +from ansible.errors import AnsibleLookupError +from ansible.module_utils._text import to_native +from ansible.plugins.lookup import LookupBase +from ansible.utils.display import Display +from ansible.module_utils.six import string_types +from ansible.module_utils.basic import missing_required_lib + +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3 +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code + +display = Display() + + +class LookupModule(LookupBase): + def run(self, terms, variables=None, boto_profile=None, aws_profile=None, + aws_secret_key=None, aws_access_key=None, aws_security_token=None, region=None, + bypath=False, shortnames=False, recursive=False, decrypt=True, on_missing="error", + on_denied="error", endpoint=None): + ''' + :arg terms: a list of lookups to run. + e.g. ['parameter_name', 'parameter_name_too' ] + :kwarg variables: ansible variables active at the time of the lookup + :kwarg aws_secret_key: identity of the AWS key to use + :kwarg aws_access_key: AWS secret key (matching identity) + :kwarg aws_security_token: AWS session key if using STS + :kwarg decrypt: Set to True to get decrypted parameters + :kwarg region: AWS region in which to do the lookup + :kwarg bypath: Set to True to do a lookup of variables under a path + :kwarg recursive: Set to True to recurse below the path (requires bypath=True) + :kwarg on_missing: Action to take if the SSM parameter is missing + :kwarg on_denied: Action to take if access to the SSM parameter is denied + :kwarg endpoint: Endpoint for SSM client + :returns: A list of parameter values or a list of dictionaries if bypath=True. 
+ ''' + + if not HAS_BOTO3: + raise AnsibleLookupError(missing_required_lib('botocore and boto3')) + + # validate arguments 'on_missing' and 'on_denied' + if on_missing is not None and (not isinstance(on_missing, string_types) or on_missing.lower() not in ['error', 'warn', 'skip']): + raise AnsibleLookupError('"on_missing" must be a string and one of "error", "warn" or "skip", not %s' % on_missing) + if on_denied is not None and (not isinstance(on_denied, string_types) or on_denied.lower() not in ['error', 'warn', 'skip']): + raise AnsibleLookupError('"on_denied" must be a string and one of "error", "warn" or "skip", not %s' % on_denied) + + ret = [] + ssm_dict = {} + + self.params = variables + + cli_region, cli_endpoint, cli_boto_params = get_aws_connection_info(self, boto3=True) + + if region: + cli_region = region + + if endpoint: + cli_endpoint = endpoint + + # For backward compatibility + if aws_access_key: + cli_boto_params.update({'aws_access_key_id': aws_access_key}) + if aws_secret_key: + cli_boto_params.update({'aws_secret_access_key': aws_secret_key}) + if aws_security_token: + cli_boto_params.update({'aws_session_token': aws_security_token}) + if boto_profile: + cli_boto_params.update({'profile_name': boto_profile}) + if aws_profile: + cli_boto_params.update({'profile_name': aws_profile}) + + cli_boto_params.update(dict( + conn_type='client', + resource='ssm', + region=cli_region, + endpoint=cli_endpoint, + )) + + client = boto3_conn(module=self, **cli_boto_params) + + ssm_dict['WithDecryption'] = decrypt + + # Lookup by path + if bypath: + ssm_dict['Recursive'] = recursive + for term in terms: + display.vvv("AWS_ssm path lookup term: %s in region: %s" % (term, region)) + + paramlist = self.get_path_parameters(client, ssm_dict, term, on_missing.lower(), on_denied.lower()) + # Shorten parameter names. Yes, this will return + # duplicate names with different values. + if shortnames: + for x in paramlist: + x['Name'] = x['Name'][x['Name'].rfind('/') + 1:] + + display.vvvv("AWS_ssm path lookup returned: %s" % str(paramlist)) + + ret.append(boto3_tag_list_to_ansible_dict(paramlist, + tag_name_key_name="Name", + tag_value_key_name="Value")) + # Lookup by parameter name - always returns a list with one or + # no entry. 
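# Editor's illustration, not part of the upstream patch: how the bypath branch
# above folds SSM's Parameters list into one dict per path term; this mirrors
# what boto3_tag_list_to_ansible_dict does when given Name/Value as the key
# names (the sample data is hypothetical):
paramlist = [{"Name": "/demo/user", "Value": "admin"},
             {"Name": "/demo/pass", "Value": "s3cret"}]
assert {p["Name"]: p["Value"] for p in paramlist} == {
    "/demo/user": "admin", "/demo/pass": "s3cret"}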
+ else: + display.vvv("AWS_ssm name lookup term: %s" % terms) + for term in terms: + ret.append(self.get_parameter_value(client, ssm_dict, term, on_missing.lower(), on_denied.lower())) + display.vvvv("AWS_ssm path lookup returning: %s " % str(ret)) + return ret + + def get_path_parameters(self, client, ssm_dict, term, on_missing, on_denied): + ssm_dict["Path"] = term + paginator = client.get_paginator('get_parameters_by_path') + try: + paramlist = paginator.paginate(**ssm_dict).build_full_result()['Parameters'] + except is_boto3_error_code('AccessDeniedException'): + if on_denied == 'error': + raise AnsibleLookupError("Failed to access SSM parameter path %s (AccessDenied)" % term) + elif on_denied == 'warn': + self._display.warning('Skipping, access denied for SSM parameter path %s' % term) + paramlist = [{}] + elif on_denied == 'skip': + paramlist = [{}] + except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except + raise AnsibleLookupError("SSM lookup exception: {0}".format(to_native(e))) + + if not len(paramlist): + if on_missing == "error": + raise AnsibleLookupError("Failed to find SSM parameter path %s (ResourceNotFound)" % term) + elif on_missing == "warn": + self._display.warning('Skipping, did not find SSM parameter path %s' % term) + + return paramlist + + def get_parameter_value(self, client, ssm_dict, term, on_missing, on_denied): + ssm_dict["Name"] = term + try: + response = client.get_parameter(**ssm_dict) + return response['Parameter']['Value'] + except is_boto3_error_code('ParameterNotFound'): + if on_missing == 'error': + raise AnsibleLookupError("Failed to find SSM parameter %s (ResourceNotFound)" % term) + elif on_missing == 'warn': + self._display.warning('Skipping, did not find SSM parameter %s' % term) + except is_boto3_error_code('AccessDeniedException'): # pylint: disable=duplicate-except + if on_denied == 'error': + raise AnsibleLookupError("Failed to access SSM parameter %s (AccessDenied)" % term) + elif on_denied == 'warn': + self._display.warning('Skipping, access denied for SSM parameter %s' % term) + except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except + raise AnsibleLookupError("SSM lookup exception: {0}".format(to_native(e))) + return None diff --git a/ansible_collections/amazon/aws/plugins/module_utils/_version.py b/ansible_collections/amazon/aws/plugins/module_utils/_version.py new file mode 100644 index 000000000..d91cf3ab4 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/module_utils/_version.py @@ -0,0 +1,344 @@ +# Vendored copy of distutils/version.py from CPython 3.9.5 +# +# Implements multiple version numbering conventions for the +# Python Module Distribution Utilities. +# +# PSF License (see PSF-license.txt or https://opensource.org/licenses/Python-2.0) +# + +"""Provides classes to represent module version numbers (one class for +each style of version numbering). There are currently two such classes +implemented: StrictVersion and LooseVersion. + +Every version number class implements the following interface: + * the 'parse' method takes a string and parses it to some internal + representation; if the string is an invalid version number, + 'parse' raises a ValueError exception + * the class constructor takes an optional string argument which, + if supplied, is passed to 'parse' + * __str__ reconstructs the string that was passed to 'parse' (or + an equivalent string -- ie. 
one that will generate an equivalent + version number instance) + * __repr__ generates Python code to recreate the version number instance + * _cmp compares the current instance with either another instance + of the same class or a string (which will be parsed to an instance + of the same class, thus must follow the same rules) +""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import re + +try: + RE_FLAGS = re.VERBOSE | re.ASCII +except AttributeError: + RE_FLAGS = re.VERBOSE + + +class Version: + """Abstract base class for version numbering classes. Just provides + constructor (__init__) and reproducer (__repr__), because those + seem to be the same for all version numbering classes; and route + rich comparisons to _cmp. + """ + + def __init__(self, vstring=None): + if vstring: + self.parse(vstring) + + def __repr__(self): + return "%s ('%s')" % (self.__class__.__name__, str(self)) + + def __eq__(self, other): + c = self._cmp(other) + if c is NotImplemented: + return c + return c == 0 + + def __lt__(self, other): + c = self._cmp(other) + if c is NotImplemented: + return c + return c < 0 + + def __le__(self, other): + c = self._cmp(other) + if c is NotImplemented: + return c + return c <= 0 + + def __gt__(self, other): + c = self._cmp(other) + if c is NotImplemented: + return c + return c > 0 + + def __ge__(self, other): + c = self._cmp(other) + if c is NotImplemented: + return c + return c >= 0 + + +# Interface for version-number classes -- must be implemented +# by the following classes (the concrete ones -- Version should +# be treated as an abstract class). +# __init__ (string) - create and take same action as 'parse' +# (string parameter is optional) +# parse (string) - convert a string representation to whatever +# internal representation is appropriate for +# this style of version numbering +# __str__ (self) - convert back to a string; should be very similar +# (if not identical to) the string supplied to parse +# __repr__ (self) - generate Python code to recreate +# the instance +# _cmp (self, other) - compare two version numbers ('other' may +# be an unparsed version string, or another +# instance of your version class) + + +class StrictVersion(Version): + """Version numbering for anal retentives and software idealists. + Implements the standard interface for version number classes as + described above. A version number consists of two or three + dot-separated numeric components, with an optional "pre-release" tag + on the end. The pre-release tag consists of the letter 'a' or 'b' + followed by a number. If the numeric components of two version + numbers are equal, then one with a pre-release tag will always + be deemed earlier (lesser) than one without. + + The following are valid version numbers (shown in the order that + would be obtained by sorting according to the supplied cmp function): + + 0.4 0.4.0 (these two are equivalent) + 0.4.1 + 0.5a1 + 0.5b3 + 0.5 + 0.9.6 + 1.0 + 1.0.4a3 + 1.0.4b1 + 1.0.4 + + The following are examples of invalid version numbers: + + 1 + 2.7.2.2 + 1.3.a4 + 1.3pl1 + 1.3c4 + + The rationale for this version numbering system will be explained + in the distutils documentation. + """ + + version_re = re.compile(r"^(\d+) \. (\d+) (\. (\d+))? 
([ab](\d+))?$", RE_FLAGS) + + def parse(self, vstring): + match = self.version_re.match(vstring) + if not match: + raise ValueError("invalid version number '%s'" % vstring) + + (major, minor, patch, prerelease, prerelease_num) = match.group(1, 2, 4, 5, 6) + + if patch: + self.version = tuple(map(int, [major, minor, patch])) + else: + self.version = tuple(map(int, [major, minor])) + (0,) + + if prerelease: + self.prerelease = (prerelease[0], int(prerelease_num)) + else: + self.prerelease = None + + def __str__(self): + if self.version[2] == 0: + vstring = ".".join(map(str, self.version[0:2])) + else: + vstring = ".".join(map(str, self.version)) + + if self.prerelease: + vstring = vstring + self.prerelease[0] + str(self.prerelease[1]) + + return vstring + + def _cmp(self, other): + if isinstance(other, str): + other = StrictVersion(other) + elif not isinstance(other, StrictVersion): + return NotImplemented + + if self.version != other.version: + # numeric versions don't match + # prerelease stuff doesn't matter + if self.version < other.version: + return -1 + else: + return 1 + + # have to compare prerelease + # case 1: neither has prerelease; they're equal + # case 2: self has prerelease, other doesn't; other is greater + # case 3: self doesn't have prerelease, other does: self is greater + # case 4: both have prerelease: must compare them! + + if not self.prerelease and not other.prerelease: + return 0 + elif self.prerelease and not other.prerelease: + return -1 + elif not self.prerelease and other.prerelease: + return 1 + elif self.prerelease and other.prerelease: + if self.prerelease == other.prerelease: + return 0 + elif self.prerelease < other.prerelease: + return -1 + else: + return 1 + else: + raise AssertionError("never get here") + + +# end class StrictVersion + +# The rules according to Greg Stein: +# 1) a version number has 1 or more numbers separated by a period or by +# sequences of letters. If only periods, then these are compared +# left-to-right to determine an ordering. +# 2) sequences of letters are part of the tuple for comparison and are +# compared lexicographically +# 3) recognize the numeric components may have leading zeroes +# +# The LooseVersion class below implements these rules: a version number +# string is split up into a tuple of integer and string components, and +# comparison is a simple tuple comparison. This means that version +# numbers behave in a predictable and obvious way, but a way that might +# not necessarily be how people *want* version numbers to behave. There +# wouldn't be a problem if people could stick to purely numeric version +# numbers: just split on period and compare the numbers as tuples. +# However, people insist on putting letters into their version numbers; +# the most common purpose seems to be: +# - indicating a "pre-release" version +# ('alpha', 'beta', 'a', 'b', 'pre', 'p') +# - indicating a post-release patch ('p', 'pl', 'patch') +# but of course this can't cover all version number schemes, and there's +# no way to know what a programmer means without asking him. +# +# The problem is what to do with letters (and other non-numeric +# characters) in a version number. The current implementation does the +# obvious and predictable thing: keep them as strings and compare +# lexically within a tuple comparison. This has the desired effect if +# an appended letter sequence implies something "post-release": +# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002". 
+# +# However, if letters in a version number imply a pre-release version, +# the "obvious" thing isn't correct. Eg. you would expect that +# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison +# implemented here, this just isn't so. +# +# Two possible solutions come to mind. The first is to tie the +# comparison algorithm to a particular set of semantic rules, as has +# been done in the StrictVersion class above. This works great as long +# as everyone can go along with bondage and discipline. Hopefully a +# (large) subset of Python module programmers will agree that the +# particular flavour of bondage and discipline provided by StrictVersion +# provides enough benefit to be worth using, and will submit their +# version numbering scheme to its domination. The free-thinking +# anarchists in the lot will never give in, though, and something needs +# to be done to accommodate them. +# +# Perhaps a "moderately strict" version class could be implemented that +# lets almost anything slide (syntactically), and makes some heuristic +# assumptions about non-digits in version number strings. This could +# sink into special-case-hell, though; if I was as talented and +# idiosyncratic as Larry Wall, I'd go ahead and implement a class that +# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is +# just as happy dealing with things like "2g6" and "1.13++". I don't +# think I'm smart enough to do it right though. +# +# In any case, I've coded the test suite for this module (see +# ../test/test_version.py) specifically to fail on things like comparing +# "1.2a2" and "1.2". That's not because the *code* is doing anything +# wrong, it's because the simple, obvious design doesn't match my +# complicated, hairy expectations for real-world version numbers. It +# would be a snap to fix the test suite to say, "Yep, LooseVersion does +# the Right Thing" (ie. the code matches the conception). But I'd rather +# have a conception that matches common notions about version numbers. + + +class LooseVersion(Version): + """Version numbering for anarchists and software realists. + Implements the standard interface for version number classes as + described above. A version number consists of a series of numbers, + separated by either periods or strings of letters. When comparing + version numbers, the numeric components will be compared + numerically, and the alphabetic components lexically. The following + are all valid version numbers, in no particular order: + + 1.5.1 + 1.5.2b2 + 161 + 3.10a + 8.02 + 3.4j + 1996.07.12 + 3.2.pl0 + 3.1.1.6 + 2g6 + 11g + 0.960923 + 2.2beta29 + 1.13++ + 5.5.kw + 2.0b1pl0 + + In fact, there is no such thing as an invalid version number under + this scheme; the rules for comparison are simple and predictable, + but may not always give the results you want (for some definition + of "want"). 
+ """ + + component_re = re.compile(r"(\d+ | [a-z]+ | \.)", re.VERBOSE) + + def __init__(self, vstring=None): + if vstring: + self.parse(vstring) + + def parse(self, vstring): + # I've given up on thinking I can reconstruct the version string + # from the parsed tuple -- so I just store the string here for + # use by __str__ + self.vstring = vstring + components = [x for x in self.component_re.split(vstring) if x and x != "."] + for i, obj in enumerate(components): + try: + components[i] = int(obj) + except ValueError: + pass + + self.version = components + + def __str__(self): + return self.vstring + + def __repr__(self): + return "LooseVersion ('%s')" % str(self) + + def _cmp(self, other): + if isinstance(other, str): + other = LooseVersion(other) + elif not isinstance(other, LooseVersion): + return NotImplemented + + if self.version == other.version: + return 0 + if self.version < other.version: + return -1 + if self.version > other.version: + return 1 + + +# end class LooseVersion diff --git a/ansible_collections/amazon/aws/plugins/module_utils/acm.py b/ansible_collections/amazon/aws/plugins/module_utils/acm.py new file mode 100644 index 000000000..81c65507e --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/module_utils/acm.py @@ -0,0 +1,222 @@ +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . 
+# +# Author: +# - Matthew Davis +# on behalf of Telstra Corporation Limited +# +# Common functionality to be used by the modules: +# - acm_certificate +# - acm_certificate_info + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +""" +Common Amazon Certificate Manager facts shared between modules +""" + +try: + from botocore.exceptions import BotoCoreError, ClientError +except ImportError: + pass + +from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from .core import is_boto3_error_code +from .ec2 import AWSRetry +from .ec2 import ansible_dict_to_boto3_tag_list +from .ec2 import boto3_tag_list_to_ansible_dict + + +class ACMServiceManager(object): + """Handles ACM Facts Services""" + + def __init__(self, module): + self.module = module + self.client = module.client('acm') + + @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException']) + def delete_certificate_with_backoff(self, client, arn): + client.delete_certificate(CertificateArn=arn) + + def delete_certificate(self, client, module, arn): + module.debug("Attempting to delete certificate %s" % arn) + try: + self.delete_certificate_with_backoff(client, arn) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Couldn't delete certificate %s" % arn) + module.debug("Successfully deleted certificate %s" % arn) + + @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException']) + def list_certificates_with_backoff(self, client, statuses=None): + paginator = client.get_paginator('list_certificates') + kwargs = dict() + if statuses: + kwargs['CertificateStatuses'] = statuses + return paginator.paginate(**kwargs).build_full_result()['CertificateSummaryList'] + + @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException', 'ResourceNotFoundException']) + def get_certificate_with_backoff(self, client, certificate_arn): + response = client.get_certificate(CertificateArn=certificate_arn) + # strip out response metadata + return {'Certificate': response['Certificate'], + 'CertificateChain': response['CertificateChain']} + + @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException', 'ResourceNotFoundException']) + def describe_certificate_with_backoff(self, client, certificate_arn): + return client.describe_certificate(CertificateArn=certificate_arn)['Certificate'] + + @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException', 'ResourceNotFoundException']) + def list_certificate_tags_with_backoff(self, client, certificate_arn): + return client.list_tags_for_certificate(CertificateArn=certificate_arn)['Tags'] + + # Returns a list of certificates + # if domain_name is specified, returns only certificates with that domain + # if an ARN is specified, returns only that certificate + # only_tags is a dict, e.g. {'key':'value'}. If specified this function will return + # only certificates which contain all those tags (key exists, value matches). 
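# Editor's note, a minimal sketch of the retry pattern every ACM helper above
# follows (not new upstream code): keep each decorated function to a single
# idempotent boto3 call so AWSRetry.jittered_backoff (already imported in this
# file) can safely re-invoke it on throttling or on the listed extra error
# codes. The function name and its arguments are hypothetical.
@AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException'])
def example_describe_certificate(client, arn):
    # a single read-only call per function keeps the retry semantics simple
    return client.describe_certificate(CertificateArn=arn)['Certificate']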
+ def get_certificates(self, client, module, domain_name=None, statuses=None, arn=None, only_tags=None): + try: + all_certificates = self.list_certificates_with_backoff(client=client, statuses=statuses) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Couldn't obtain certificates") + if domain_name: + certificates = [cert for cert in all_certificates + if cert['DomainName'] == domain_name] + else: + certificates = all_certificates + + if arn: + # still return a list, not just one item + certificates = [c for c in certificates if c['CertificateArn'] == arn] + + results = [] + for certificate in certificates: + try: + cert_data = self.describe_certificate_with_backoff(client, certificate['CertificateArn']) + except is_boto3_error_code('ResourceNotFoundException'): + # The certificate was deleted after the call to list_certificates_with_backoff. + continue + except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Couldn't obtain certificate metadata for domain %s" % certificate['DomainName']) + + # in some states, ACM resources do not have a corresponding cert + if cert_data['Status'] not in ['PENDING_VALIDATION', 'VALIDATION_TIMED_OUT', 'FAILED']: + try: + cert_data.update(self.get_certificate_with_backoff(client, certificate['CertificateArn'])) + except is_boto3_error_code('ResourceNotFoundException'): + # The certificate was deleted after the call to list_certificates_with_backoff. + continue + except (BotoCoreError, ClientError, KeyError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Couldn't obtain certificate data for domain %s" % certificate['DomainName']) + cert_data = camel_dict_to_snake_dict(cert_data) + try: + tags = self.list_certificate_tags_with_backoff(client, certificate['CertificateArn']) + except is_boto3_error_code('ResourceNotFoundException'): + # The certificate was deleted after the call to list_certificates_with_backoff. 
+ continue + except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Couldn't obtain tags for domain %s" % certificate['DomainName']) + + cert_data['tags'] = boto3_tag_list_to_ansible_dict(tags) + results.append(cert_data) + + if only_tags: + for tag_key in only_tags: + try: + results = [c for c in results if ('tags' in c) and (tag_key in c['tags']) and (c['tags'][tag_key] == only_tags[tag_key])] + except (TypeError, AttributeError) as e: + for c in results: + if 'tags' not in c: + module.debug("cert is %s" % str(c)) + module.fail_json(msg="ACM tag filtering err", exception=e) + + return results + + # returns the domain name of a certificate (encoded in the public cert) + # for a given ARN + # A cert with that ARN must already exist + def get_domain_of_cert(self, client, module, arn): + if arn is None: + module.fail_json(msg="Internal error with ACM domain fetching, no certificate ARN specified") + try: + cert_data = self.describe_certificate_with_backoff(client=client, certificate_arn=arn) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Couldn't obtain certificate data for arn %s" % arn) + return cert_data['DomainName'] + + @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException']) + def import_certificate_with_backoff(self, client, certificate, private_key, certificate_chain, arn): + if certificate_chain: + if arn: + ret = client.import_certificate(Certificate=to_bytes(certificate), + PrivateKey=to_bytes(private_key), + CertificateChain=to_bytes(certificate_chain), + CertificateArn=arn) + else: + ret = client.import_certificate(Certificate=to_bytes(certificate), + PrivateKey=to_bytes(private_key), + CertificateChain=to_bytes(certificate_chain)) + else: + if arn: + ret = client.import_certificate(Certificate=to_bytes(certificate), + PrivateKey=to_bytes(private_key), + CertificateArn=arn) + else: + ret = client.import_certificate(Certificate=to_bytes(certificate), + PrivateKey=to_bytes(private_key)) + return ret['CertificateArn'] + + # Tags are a normal Ansible style dict + # {'Key':'Value'} + @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException', 'ResourceNotFoundException']) + def tag_certificate_with_backoff(self, client, arn, tags): + aws_tags = ansible_dict_to_boto3_tag_list(tags) + client.add_tags_to_certificate(CertificateArn=arn, Tags=aws_tags) + + def import_certificate(self, client, module, certificate, private_key, arn=None, certificate_chain=None, tags=None): + + original_arn = arn + + # upload cert + try: + arn = self.import_certificate_with_backoff(client, certificate, private_key, certificate_chain, arn) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Couldn't upload new certificate") + + if original_arn and (arn != original_arn): + # I'm not sure whether the API guarantees that the ARN will not change + # I'm failing just in case. + # If I'm wrong, I'll catch it in the integration tests. + module.fail_json(msg="ARN changed with ACM update, from %s to %s" % (original_arn, arn)) + + # tag that cert + try: + self.tag_certificate_with_backoff(client, arn, tags) + except (BotoCoreError, ClientError) as e: + module.debug("Attempting to delete the cert we just created, arn=%s" % arn) + try: + self.delete_certificate_with_backoff(client, arn) + except (BotoCoreError, ClientError): + module.warn("Certificate %s exists, and is not tagged. 
So Ansible will not see it on the next run." % arn) + module.fail_json_aws(e, msg="Couldn't tag certificate %s, couldn't delete it either" % arn) + module.fail_json_aws(e, msg="Couldn't tag certificate %s" % arn) + + return arn diff --git a/ansible_collections/amazon/aws/plugins/module_utils/arn.py b/ansible_collections/amazon/aws/plugins/module_utils/arn.py new file mode 100644 index 000000000..ac8dfc9e0 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/module_utils/arn.py @@ -0,0 +1,69 @@ +# +# Copyright 2017 Michael De La Rue | Ansible +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <https://www.gnu.org/licenses/>. + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re + + +def parse_aws_arn(arn): + """ + The following are the general formats for ARNs. + arn:partition:service:region:account-id:resource-id + arn:partition:service:region:account-id:resource-type/resource-id + arn:partition:service:region:account-id:resource-type:resource-id + The specific formats depend on the resource. + The ARNs for some resources omit the Region, the account ID, or both the Region and the account ID. + """ + m = re.search(r"arn:(aws(-([a-z\-]+))?):([\w-]+):([a-z0-9\-]*):(\d*|aws|aws-managed):(.*)", arn) + if m is None: + return None + result = dict() + result.update(dict(partition=m.group(1))) + result.update(dict(service=m.group(4))) + result.update(dict(region=m.group(5))) + result.update(dict(account_id=m.group(6))) + result.update(dict(resource=m.group(7))) + + return result + + +# An implementation of this was originally used in ec2.py, however Outposts +# aren't specific to the EC2 service +def is_outpost_arn(arn): + """ + Validates that the ARN is for an AWS Outpost + + + API Specification Document: + https://docs.aws.amazon.com/outposts/latest/APIReference/API_Outpost.html + """ + details = parse_aws_arn(arn) + + if not details: + return False + + service = details.get('service') or "" + if service.lower() != 'outposts': + return False + resource = details.get('resource') or "" + if not re.match('^outpost/op-[a-f0-9]{17}$', resource): + return False + + return True diff --git a/ansible_collections/amazon/aws/plugins/module_utils/batch.py b/ansible_collections/amazon/aws/plugins/module_utils/batch.py new file mode 100644 index 000000000..c27214519 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/module_utils/batch.py @@ -0,0 +1,58 @@ +# Copyright (c) 2017 Ansible Project +# +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. 
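# Editor's illustration, not part of the upstream patch: behaviour of
# parse_aws_arn() and is_outpost_arn() from arn.py above, on hypothetical ARNs:
example = "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0"
assert parse_aws_arn(example) == {
    "partition": "aws",
    "service": "outposts",
    "region": "us-east-1",
    "account_id": "123456789012",
    "resource": "outpost/op-1234567890abcdef0",
}
assert is_outpost_arn(example)  # resource matches outpost/op- plus 17 hex chars
assert not is_outpost_arn("arn:aws:iam::123456789012:user/alice")  # wrong service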
+# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +""" +This module adds shared support for Batch modules. +""" + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + + +def cc(key): + """ + Converts a python snake_case key into its camelCase equivalent. For example, 'compute_environment_name' becomes + 'computeEnvironmentName'. + + :param key: + :return: + """ + components = key.split('_') + return components[0] + "".join([token.capitalize() for token in components[1:]]) + + +def set_api_params(module, module_params): + """ + Sets module parameters to those expected by the boto3 API. + :param module: + :param module_params: + :return: + """ + api_params = dict((k, v) for k, v in dict(module.params).items() if k in module_params and v is not None) + return snake_dict_to_camel_dict(api_params) diff --git a/ansible_collections/amazon/aws/plugins/module_utils/botocore.py b/ansible_collections/amazon/aws/plugins/module_utils/botocore.py new file mode 100644 index 000000000..a8a014c20 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/module_utils/botocore.py @@ -0,0 +1,394 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013 +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution.
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +""" +A set of helper functions designed to help with initializing boto3/botocore +connections. +""" + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import os +import traceback + +BOTO3_IMP_ERR = None +try: + import boto3 + import botocore + HAS_BOTO3 = True +except ImportError: + BOTO3_IMP_ERR = traceback.format_exc() + HAS_BOTO3 = False + +from ansible.module_utils._text import to_native +from ansible.module_utils.ansible_release import __version__ +from ansible.module_utils.basic import missing_required_lib +from ansible.module_utils.six import binary_type +from ansible.module_utils.six import text_type + +from .retries import AWSRetry + + +def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None, **params): + """ + Builds a boto3 resource/client connection cleanly wrapping the most common failures. + Handles: + ValueError, + botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError, + botocore.exceptions.NoCredentialsError, botocore.exceptions.ConfigParseError, + botocore.exceptions.NoRegionError + """ + try: + return _boto3_conn(conn_type=conn_type, resource=resource, region=region, endpoint=endpoint, **params) + except ValueError as e: + module.fail_json(msg="Couldn't connect to AWS: %s" % to_native(e)) + except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError, + botocore.exceptions.NoCredentialsError, botocore.exceptions.ConfigParseError) as e: + module.fail_json(msg=to_native(e)) + except botocore.exceptions.NoRegionError: + module.fail_json(msg="The %s module requires a region and none was found in configuration, " + "environment variables or module parameters" % module._name) + + +def _boto3_conn(conn_type=None, resource=None, region=None, endpoint=None, **params): + """ + Builds a boto3 resource/client connection cleanly wrapping the most common failures. + No exceptions are caught/handled. + """ + profile = params.pop('profile_name', None) + + if conn_type not in ['both', 'resource', 'client']: + raise ValueError('There is an issue in the calling code. 
You ' + 'must specify either both, resource, or client to ' + 'the conn_type parameter in the boto3_conn function ' + 'call') + + config = botocore.config.Config( + user_agent_extra='Ansible/{0}'.format(__version__), + ) + + if params.get('config') is not None: + config = config.merge(params.pop('config')) + if params.get('aws_config') is not None: + config = config.merge(params.pop('aws_config')) + + session = boto3.session.Session( + profile_name=profile, + ) + + enable_placebo(session) + + if conn_type == 'resource': + return session.resource(resource, config=config, region_name=region, endpoint_url=endpoint, **params) + elif conn_type == 'client': + return session.client(resource, config=config, region_name=region, endpoint_url=endpoint, **params) + else: + client = session.client(resource, region_name=region, endpoint_url=endpoint, **params) + resource = session.resource(resource, region_name=region, endpoint_url=endpoint, **params) + return client, resource + + +# Inventory plugins don't have access to the same 'module', they need to throw +# an exception rather than calling module.fail_json +boto3_inventory_conn = _boto3_conn + + +def boto_exception(err): + """ + Extracts the error message from a boto exception. + + :param err: Exception from boto + :return: Error message + """ + if hasattr(err, 'error_message'): + error = err.error_message + elif hasattr(err, 'message'): + error = str(err.message) + ' ' + str(err) + ' - ' + str(type(err)) + else: + error = '%s: %s' % (Exception, err) + + return error + + +def get_aws_region(module, boto3=None): + region = module.params.get('region') + + if region: + return region + + if not HAS_BOTO3: + module.fail_json(msg=missing_required_lib('boto3'), exception=BOTO3_IMP_ERR) + + # here we don't need to make an additional call, will default to 'us-east-1' if the below evaluates to None. + try: + profile_name = module.params.get('profile') + return botocore.session.Session(profile=profile_name).get_config_variable('region') + except botocore.exceptions.ProfileNotFound: + return None + + +def get_aws_connection_info(module, boto3=None): + + # Check module args for credentials, then check environment vars + # access_key + + endpoint_url = module.params.get('endpoint_url') + access_key = module.params.get('access_key') + secret_key = module.params.get('secret_key') + session_token = module.params.get('session_token') + region = get_aws_region(module) + profile_name = module.params.get('profile') + validate_certs = module.params.get('validate_certs') + ca_bundle = module.params.get('aws_ca_bundle') + config = module.params.get('aws_config') + + # Only read the profile environment variables if we've *not* been passed + # any credentials as parameters. + if not profile_name and not access_key and not secret_key: + if os.environ.get('AWS_PROFILE'): + profile_name = os.environ.get('AWS_PROFILE') + if os.environ.get('AWS_DEFAULT_PROFILE'): + profile_name = os.environ.get('AWS_DEFAULT_PROFILE') + + if profile_name and (access_key or secret_key or session_token): + module.fail_json(msg="Passing both a profile and access tokens is not supported.") + + # Botocore doesn't like empty strings, make sure we default to None in the case of an empty + # string. 
+ if not access_key: + # AWS_ACCESS_KEY_ID is the one supported by the AWS CLI + # AWS_ACCESS_KEY is to match up with our parameter name + if os.environ.get('AWS_ACCESS_KEY_ID'): + access_key = os.environ['AWS_ACCESS_KEY_ID'] + elif os.environ.get('AWS_ACCESS_KEY'): + access_key = os.environ['AWS_ACCESS_KEY'] + # Deprecated - 'EC2' implies just EC2, but is global + elif os.environ.get('EC2_ACCESS_KEY'): + access_key = os.environ['EC2_ACCESS_KEY'] + else: + # in case access_key came in as empty string + access_key = None + + if not secret_key: + # AWS_SECRET_ACCESS_KEY is the one supported by the AWS CLI + # AWS_SECRET_KEY is to match up with our parameter name + if os.environ.get('AWS_SECRET_ACCESS_KEY'): + secret_key = os.environ['AWS_SECRET_ACCESS_KEY'] + elif os.environ.get('AWS_SECRET_KEY'): + secret_key = os.environ['AWS_SECRET_KEY'] + # Deprecated - 'EC2' implies just EC2, but is global + elif os.environ.get('EC2_SECRET_KEY'): + secret_key = os.environ['EC2_SECRET_KEY'] + else: + # in case secret_key came in as empty string + secret_key = None + + if not session_token: + # AWS_SESSION_TOKEN is supported by the AWS CLI + if os.environ.get('AWS_SESSION_TOKEN'): + session_token = os.environ['AWS_SESSION_TOKEN'] + # Deprecated - boto + elif os.environ.get('AWS_SECURITY_TOKEN'): + session_token = os.environ['AWS_SECURITY_TOKEN'] + # Deprecated - 'EC2' implies just EC2, but is global + elif os.environ.get('EC2_SECURITY_TOKEN'): + session_token = os.environ['EC2_SECURITY_TOKEN'] + else: + # in case session_token came in as empty string + session_token = None + + if profile_name: + boto_params = dict(aws_access_key_id=None, aws_secret_access_key=None, aws_session_token=None) + boto_params['profile_name'] = profile_name + else: + boto_params = dict( + aws_access_key_id=access_key, + aws_secret_access_key=secret_key, + aws_session_token=session_token, + ) + + if validate_certs and ca_bundle: + boto_params['verify'] = ca_bundle + else: + boto_params['verify'] = validate_certs + + if config is not None: + boto_params['aws_config'] = botocore.config.Config(**config) + + for param, value in boto_params.items(): + if isinstance(value, binary_type): + boto_params[param] = text_type(value, 'utf-8', 'strict') + + return region, endpoint_url, boto_params + + +def _paginated_query(client, paginator_name, **params): + paginator = client.get_paginator(paginator_name) + result = paginator.paginate(**params).build_full_result() + return result + + +def paginated_query_with_retries(client, paginator_name, retry_decorator=None, **params): + """ + Performs a boto3 paginated query. + By default, uses AWSRetry.jittered_backoff(retries=10) to retry queries + with temporary failures.
+ + Examples: + tags = paginated_query_with_retries(client, "describe_tags", Filters=[]) + + decorator = AWSRetry.backoff(tries=5, delay=5, backoff=2.0, + catch_extra_error_codes=['RequestInProgressException']) + certificates = paginated_query_with_retries(client, "list_certificates", decorator) + """ + if retry_decorator is None: + retry_decorator = AWSRetry.jittered_backoff(retries=10) + result = retry_decorator(_paginated_query)(client, paginator_name, **params) + return result + + +def gather_sdk_versions(): + """Gather AWS SDK (boto3 and botocore) dependency versions + + Returns {'boto3_version': str, 'botocore_version': str} + Returns {} if either module is not installed + """ + if not HAS_BOTO3: + return {} + import boto3 + import botocore + return dict(boto3_version=boto3.__version__, + botocore_version=botocore.__version__) + + +def is_boto3_error_code(code, e=None): + """Check if the botocore exception is raised by a specific error code. + + Returns ClientError if the error code matches, a dummy exception if it does not have an error code or does not match + + Example: + try: + ec2.describe_instances(InstanceIds=['potato']) + except is_boto3_error_code('InvalidInstanceID.Malformed'): + # handle the error for that code case + except botocore.exceptions.ClientError as e: + # handle the generic error case for all other codes + """ + from botocore.exceptions import ClientError + if e is None: + import sys + dummy, e, dummy = sys.exc_info() + if not isinstance(code, list): + code = [code] + if isinstance(e, ClientError) and e.response['Error']['Code'] in code: + return ClientError + return type('NeverEverRaisedException', (Exception,), {}) + + +def is_boto3_error_message(msg, e=None): + """Check if the botocore exception contains a specific error message. + + Returns ClientError if the error message matches, a dummy exception if it does not have an error message or does not match + + Example: + try: + ec2.describe_vpc_classic_link(VpcIds=[vpc_id]) + except is_boto3_error_message('The functionality you requested is not available in this region.'): + # handle the error for that error message + except botocore.exceptions.ClientError as e: + # handle the generic error case for all other codes + """ + from botocore.exceptions import ClientError + if e is None: + import sys + dummy, e, dummy = sys.exc_info() + if isinstance(e, ClientError) and msg in e.response['Error']['Message']: + return ClientError + return type('NeverEverRaisedException', (Exception,), {}) + + +def get_boto3_client_method_parameters(client, method_name, required=False): + op = client.meta.method_to_api_mapping.get(method_name) + input_shape = client._service_model.operation_model(op).input_shape + if not input_shape: + parameters = [] + elif required: + parameters = list(input_shape.required_members) + else: + parameters = list(input_shape.members.keys()) + return parameters + + +# Used by normalize_boto3_result +def _boto3_handler(obj): + if hasattr(obj, 'isoformat'): + return obj.isoformat() + else: + return obj + + +def normalize_boto3_result(result): + """ + Because Boto3 returns datetime objects where it knows things are supposed to + be dates we need to mass-convert them over to strings which Ansible/Jinja + handle better. This also makes it easier to compare complex objects which + include a mix of dates in string format (from parameters) and dates as + datetime objects. Boto3 is happy to be passed ISO8601 format strings.
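+ + Example (illustrative round-trip): + >>> import datetime + >>> normalize_boto3_result({'CreateTime': datetime.datetime(2020, 1, 1)}) + {'CreateTime': '2020-01-01T00:00:00'}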
+ """ + return json.loads(json.dumps(result, default=_boto3_handler)) + + +def enable_placebo(session): + """ + Helper to record or replay offline modules for testing purpose. + """ + if "_ANSIBLE_PLACEBO_RECORD" in os.environ: + import placebo + existing_entries = os.listdir(os.environ["_ANSIBLE_PLACEBO_RECORD"]) + idx = len(existing_entries) + data_path = f"{os.environ['_ANSIBLE_PLACEBO_RECORD']}/{idx}" + os.mkdir(data_path) + pill = placebo.attach(session, data_path=data_path) + pill.record() + if "_ANSIBLE_PLACEBO_REPLAY" in os.environ: + import shutil + import placebo + existing_entries = sorted([int(i) for i in os.listdir(os.environ["_ANSIBLE_PLACEBO_REPLAY"])]) + idx = str(existing_entries[0]) + data_path = os.environ['_ANSIBLE_PLACEBO_REPLAY'] + "/" + idx + try: + shutil.rmtree("_tmp") + except FileNotFoundError: + pass + shutil.move(data_path, "_tmp") + if len(existing_entries) == 1: + os.rmdir(os.environ["_ANSIBLE_PLACEBO_REPLAY"]) + pill = placebo.attach(session, data_path="_tmp") + pill.playback() diff --git a/ansible_collections/amazon/aws/plugins/module_utils/cloud.py b/ansible_collections/amazon/aws/plugins/module_utils/cloud.py new file mode 100644 index 000000000..e690c0a86 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/module_utils/cloud.py @@ -0,0 +1,213 @@ +# Copyright (c) 2021 Ansible Project +# +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import time +import functools +import random +import ansible.module_utils.common.warnings as ansible_warnings + + +class BackoffIterator: + """iterate sleep value based on the exponential or jitter back-off algorithm. + Args: + delay (int or float): initial delay. + backoff (int or float): backoff multiplier e.g. value of 2 will double the delay each retry. + max_delay (int or None): maximum amount of time to wait between retries. 
+ jitter (bool): if set to true, add jitter to the generated value. + """ + + def __init__(self, delay, backoff, max_delay=None, jitter=False): + self.delay = delay + self.backoff = backoff + self.max_delay = max_delay + self.jitter = jitter + + def __iter__(self): + self.current_delay = self.delay + return self + + def __next__(self): + return_value = self.current_delay if self.max_delay is None else min(self.current_delay, self.max_delay) + if self.jitter: + return_value = random.uniform(0.0, return_value) + self.current_delay *= self.backoff + return return_value + + +def _retry_func(func, sleep_time_generator, retries, catch_extra_error_codes, found_f, status_code_from_except_f, base_class): + counter = 0 + for sleep_time in sleep_time_generator: + try: + return func() + except Exception as exc: # pylint: disable=broad-except + counter += 1 + if counter == retries: + raise + if base_class and not isinstance(exc, base_class): + raise + status_code = status_code_from_except_f(exc) + if found_f(status_code, catch_extra_error_codes): + time.sleep(sleep_time) + else: + raise + + +class CloudRetry: + """ + The base class to be used by other cloud providers to provide a backoff/retry decorator based on status codes. + """ + + base_class = type(None) + + @staticmethod + def status_code_from_exception(error): + """ + Returns the Error 'code' from an exception. + Args: + error: The Exception from which the error code is to be extracted. + error will be an instance of class.base_class. + """ + raise NotImplementedError() + + @staticmethod + def found(response_code, catch_extra_error_codes=None): + def _is_iterable(): + try: + iter(catch_extra_error_codes) + except TypeError: + # not iterable + return False + else: + # iterable + return True + return _is_iterable() and response_code in catch_extra_error_codes + + @classmethod + def base_decorator(cls, retries, found, status_code_from_exception, catch_extra_error_codes, sleep_time_generator): + def retry_decorator(func): + @functools.wraps(func) + def _retry_wrapper(*args, **kwargs): + partial_func = functools.partial(func, *args, **kwargs) + return _retry_func( + func=partial_func, + sleep_time_generator=sleep_time_generator, + retries=retries, + catch_extra_error_codes=catch_extra_error_codes, + found_f=found, + status_code_from_except_f=status_code_from_exception, + base_class=cls.base_class, + ) + return _retry_wrapper + return retry_decorator + + @classmethod + def exponential_backoff(cls, retries=10, delay=3, backoff=2, max_delay=60, catch_extra_error_codes=None): + """Wrap a callable with retry behavior. + Args: + retries (int): Number of times to retry a failed request before giving up + default=10 + delay (int or float): Initial delay between retries in seconds + default=3 + backoff (int or float): backoff multiplier e.g. value of 2 will double the delay each retry + default=2 + max_delay (int or None): maximum amount of time to wait between retries. + default=60 + catch_extra_error_codes: Additional error codes to catch, in addition to those which may be defined by a subclass of CloudRetry + default=None + Returns: + Callable: A decorator that retries the decorated function using an exponential backoff.
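+ Example (illustrative, using the AWSRetry subclass defined in module_utils.retries): + @AWSRetry.exponential_backoff(retries=5, delay=2) + def tag_resource(client, resource_id, tags): + return client.create_tags(Resources=[resource_id], Tags=tags)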
+ """ + sleep_time_generator = BackoffIterator(delay=delay, backoff=backoff, max_delay=max_delay) + return cls.base_decorator( + retries=retries, + found=cls.found, + status_code_from_exception=cls.status_code_from_exception, + catch_extra_error_codes=catch_extra_error_codes, + sleep_time_generator=sleep_time_generator, + ) + + @classmethod + def jittered_backoff(cls, retries=10, delay=3, backoff=2.0, max_delay=60, catch_extra_error_codes=None): + """Wrap a callable with retry behavior. + Args: + retries (int): Number of times to retry a failed request before giving up + default=10 + delay (int or float): Initial delay between retries in seconds + default=3 + backoff (int or float): backoff multiplier e.g. value of 2 will double the delay each retry + default=2.0 + max_delay (int or None): maximum amount of time to wait between retries. + default=60 + catch_extra_error_codes: Additional error messages to catch, in addition to those which may be defined by a subclass of CloudRetry + default=None + Returns: + Callable: A generator that calls the decorated function using using a jittered backoff strategy. + """ + sleep_time_generator = BackoffIterator(delay=delay, backoff=backoff, max_delay=max_delay, jitter=True) + return cls.base_decorator( + retries=retries, + found=cls.found, + status_code_from_exception=cls.status_code_from_exception, + catch_extra_error_codes=catch_extra_error_codes, + sleep_time_generator=sleep_time_generator, + ) + + @classmethod + def backoff(cls, tries=10, delay=3, backoff=1.1, catch_extra_error_codes=None): + """ + Wrap a callable with retry behavior. + Developers should use CloudRetry.exponential_backoff instead. + This method has been deprecated and will be removed in release 6.0.0, consider using exponential_backoff method instead. + Args: + retries (int): Number of times to retry a failed request before giving up + default=10 + delay (int or float): Initial delay between retries in seconds + default=3 + backoff (int or float): backoff multiplier e.g. value of 2 will double the delay each retry + default=1.1 + catch_extra_error_codes: Additional error messages to catch, in addition to those which may be defined by a subclass of CloudRetry + default=None + Returns: + Callable: A generator that calls the decorated function using an exponential backoff. + """ + # This won't emit a warning (we don't have the context available to us), but will trigger + # sanity failures as we prepare for 6.0.0 + ansible_warnings.deprecate( + 'CloudRetry.backoff has been deprecated, please use CloudRetry.exponential_backoff instead', + version='6.0.0', collection_name='amazon.aws') + + return cls.exponential_backoff( + retries=tries, + delay=delay, + backoff=backoff, + max_delay=None, + catch_extra_error_codes=catch_extra_error_codes, + ) diff --git a/ansible_collections/amazon/aws/plugins/module_utils/cloudfront_facts.py b/ansible_collections/amazon/aws/plugins/module_utils/cloudfront_facts.py new file mode 100644 index 000000000..c628bff14 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/module_utils/cloudfront_facts.py @@ -0,0 +1,229 @@ +# -*- coding: utf-8 -*- +# +# Copyright (c) 2017 Willem van Ketwich +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see <https://www.gnu.org/licenses/>. +# +# Author: +# - Willem van Ketwich +# +# Common functionality to be used by the modules: +# - cloudfront_distribution +# - cloudfront_invalidation +# - cloudfront_origin_access_identity +""" +Common CloudFront facts shared between modules +""" + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +try: + import botocore +except ImportError: + pass + +from .ec2 import AWSRetry +from .ec2 import boto3_tag_list_to_ansible_dict + + +class CloudFrontFactsServiceManager(object): + """Handles CloudFront Facts Services""" + + def __init__(self, module): + self.module = module + self.client = module.client('cloudfront', retry_decorator=AWSRetry.jittered_backoff()) + + def get_distribution(self, distribution_id): + try: + return self.client.get_distribution(Id=distribution_id, aws_retry=True) + except botocore.exceptions.ClientError as e: + self.module.fail_json_aws(e, msg="Error describing distribution") + + def get_distribution_config(self, distribution_id): + try: + return self.client.get_distribution_config(Id=distribution_id, aws_retry=True) + except botocore.exceptions.ClientError as e: + self.module.fail_json_aws(e, msg="Error describing distribution configuration") + + def get_origin_access_identity(self, origin_access_identity_id): + try: + return self.client.get_cloud_front_origin_access_identity(Id=origin_access_identity_id, aws_retry=True) + except botocore.exceptions.ClientError as e: + self.module.fail_json_aws(e, msg="Error describing origin access identity") + + def get_origin_access_identity_config(self, origin_access_identity_id): + try: + return self.client.get_cloud_front_origin_access_identity_config(Id=origin_access_identity_id, aws_retry=True) + except botocore.exceptions.ClientError as e: + self.module.fail_json_aws(e, msg="Error describing origin access identity configuration") + + def get_invalidation(self, distribution_id, invalidation_id): + try: + return self.client.get_invalidation(DistributionId=distribution_id, Id=invalidation_id, aws_retry=True) + except botocore.exceptions.ClientError as e: + self.module.fail_json_aws(e, msg="Error describing invalidation") + + def get_streaming_distribution(self, distribution_id): + try: + return self.client.get_streaming_distribution(Id=distribution_id, aws_retry=True) + except botocore.exceptions.ClientError as e: + self.module.fail_json_aws(e, msg="Error describing streaming distribution") + + def get_streaming_distribution_config(self, distribution_id): + try: + return self.client.get_streaming_distribution_config(Id=distribution_id, aws_retry=True) + except botocore.exceptions.ClientError as e: + self.module.fail_json_aws(e, msg="Error describing streaming distribution configuration") + + def list_origin_access_identities(self): + try: + paginator = self.client.get_paginator('list_cloud_front_origin_access_identities') + result = paginator.paginate().build_full_result().get('CloudFrontOriginAccessIdentityList', {}) + return result.get('Items', []) + except botocore.exceptions.ClientError as e: + self.module.fail_json_aws(e, msg="Error listing cloud front origin access identities") + + def list_distributions(self, keyed=True): + try: +
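+ # Use a paginator so distributions beyond the first page of results are included.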
paginator = self.client.get_paginator('list_distributions') + result = paginator.paginate().build_full_result().get('DistributionList', {}) + distribution_list = result.get('Items', []) + if not keyed: + return distribution_list + return self.keyed_list_helper(distribution_list) + except botocore.exceptions.ClientError as e: + self.module.fail_json_aws(e, msg="Error listing distributions") + + def list_distributions_by_web_acl_id(self, web_acl_id): + try: + result = self.client.list_distributions_by_web_acl_id(WebACLId=web_acl_id, aws_retry=True) + distribution_list = result.get('DistributionList', {}).get('Items', []) + return self.keyed_list_helper(distribution_list) + except botocore.exceptions.ClientError as e: + self.module.fail_json_aws(e, msg="Error listing distributions by web acl id") + + def list_invalidations(self, distribution_id): + try: + paginator = self.client.get_paginator('list_invalidations') + result = paginator.paginate(DistributionId=distribution_id).build_full_result() + return result.get('InvalidationList', {}).get('Items', []) + except botocore.exceptions.ClientError as e: + self.module.fail_json_aws(e, msg="Error listing invalidations") + + def list_streaming_distributions(self, keyed=True): + try: + paginator = self.client.get_paginator('list_streaming_distributions') + result = paginator.paginate().build_full_result() + streaming_distribution_list = result.get('StreamingDistributionList', {}).get('Items', []) + if not keyed: + return streaming_distribution_list + return self.keyed_list_helper(streaming_distribution_list) + except botocore.exceptions.ClientError as e: + self.module.fail_json_aws(e, msg="Error listing streaming distributions") + + def summary(self): + summary_dict = {} + summary_dict.update(self.summary_get_distribution_list(False)) + summary_dict.update(self.summary_get_distribution_list(True)) + summary_dict.update(self.summary_get_origin_access_identity_list()) + return summary_dict + + def summary_get_origin_access_identity_list(self): + try: + origin_access_identity_list = {'origin_access_identities': []} + origin_access_identities = self.list_origin_access_identities() + for origin_access_identity in origin_access_identities: + oai_id = origin_access_identity['Id'] + oai_full_response = self.get_origin_access_identity(oai_id) + oai_summary = {'Id': oai_id, 'ETag': oai_full_response['ETag']} + origin_access_identity_list['origin_access_identities'].append(oai_summary) + return origin_access_identity_list + except botocore.exceptions.ClientError as e: + self.module.fail_json_aws(e, msg="Error generating summary of origin access identities") + + def summary_get_distribution_list(self, streaming=False): + try: + list_name = 'streaming_distributions' if streaming else 'distributions' + key_list = ['Id', 'ARN', 'Status', 'LastModifiedTime', 'DomainName', 'Comment', 'PriceClass', 'Enabled'] + distribution_list = {list_name: []} + distributions = self.list_streaming_distributions(False) if streaming else self.list_distributions(False) + for dist in distributions: + temp_distribution = {} + for key_name in key_list: + temp_distribution[key_name] = dist[key_name] + temp_distribution['Aliases'] = list(dist['Aliases'].get('Items', [])) + temp_distribution['ETag'] = self.get_etag_from_distribution_id(dist['Id'], streaming) + if not streaming: + temp_distribution['WebACLId'] = dist['WebACLId'] + invalidation_ids = self.get_list_of_invalidation_ids_from_distribution_id(dist['Id']) + if invalidation_ids: + temp_distribution['Invalidations'] = invalidation_ids
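+ # Tags aren't returned by the list APIs, so fetch them with a separate per-distribution call.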
+ resource_tags = self.client.list_tags_for_resource(Resource=dist['ARN'], aws_retry=True) + temp_distribution['Tags'] = boto3_tag_list_to_ansible_dict(resource_tags['Tags'].get('Items', [])) + distribution_list[list_name].append(temp_distribution) + return distribution_list + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Error generating summary of distributions") + + def get_etag_from_distribution_id(self, distribution_id, streaming): + distribution = {} + if not streaming: + distribution = self.get_distribution(distribution_id) + else: + distribution = self.get_streaming_distribution(distribution_id) + return distribution['ETag'] + + def get_list_of_invalidation_ids_from_distribution_id(self, distribution_id): + try: + invalidation_ids = [] + invalidations = self.list_invalidations(distribution_id) + for invalidation in invalidations: + invalidation_ids.append(invalidation['Id']) + return invalidation_ids + except botocore.exceptions.ClientError as e: + self.module.fail_json_aws(e, msg="Error getting list of invalidation ids") + + def get_distribution_id_from_domain_name(self, domain_name): + try: + distribution_id = "" + distributions = self.list_distributions(False) + distributions += self.list_streaming_distributions(False) + for dist in distributions: + if 'Items' in dist['Aliases']: + for alias in dist['Aliases']['Items']: + if str(alias).lower() == domain_name.lower(): + distribution_id = dist['Id'] + break + return distribution_id + except botocore.exceptions.ClientError as e: + self.module.fail_json_aws(e, msg="Error getting distribution id from domain name") + + def get_aliases_from_distribution_id(self, distribution_id): + try: + distribution = self.get_distribution(distribution_id) + return distribution['DistributionConfig']['Aliases'].get('Items', []) + except botocore.exceptions.ClientError as e: + self.module.fail_json_aws(e, msg="Error getting list of aliases from distribution_id") + + def keyed_list_helper(self, list_to_key): + keyed_list = dict() + for item in list_to_key: + distribution_id = item['Id'] + if 'Items' in item['Aliases']: + aliases = item['Aliases']['Items'] + for alias in aliases: + keyed_list.update({alias: item}) + keyed_list.update({distribution_id: item}) + return keyed_list diff --git a/ansible_collections/amazon/aws/plugins/module_utils/core.py b/ansible_collections/amazon/aws/plugins/module_utils/core.py new file mode 100644 index 000000000..bfd7fe101 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/module_utils/core.py @@ -0,0 +1,77 @@ +# +# Copyright 2017 Michael De La Rue | Ansible +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <https://www.gnu.org/licenses/>. + +"""This module adds shared support for generic Amazon AWS modules + +In order to use this module, include it as part of a custom +module as shown below.
+ + from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule + module = AnsibleAWSModule(argument_spec=dictionary, supports_check_mode=boolean, + mutually_exclusive=list1, required_together=list2) + +The 'AnsibleAWSModule' module provides similar, but more restricted, +interfaces to the normal Ansible module. It also includes additional +methods for connecting to AWS using the standard module arguments + + m.resource('lambda') # - get an AWS connection as a boto3 resource. + +or + + m.client('sts') # - get an AWS connection as a boto3 client. + +To make use of AWSRetry easier, it can now be wrapped around any call from a +module-created client. To add retries to a client, create a client: + + m.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10)) + +Any calls from that client can be made to use the decorator passed at call-time +using the `aws_retry` argument. By default, no retries are used. + + ec2 = m.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10)) + ec2.describe_instances(InstanceIds=['i-123456789'], aws_retry=True) + +The call will be retried the specified number of times, so the calling functions +don't need to be wrapped in the backoff decorator. +""" + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.arn
from .arn import parse_aws_arn # pylint: disable=unused-import + +# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.botocore +from .botocore import HAS_BOTO3 # pylint: disable=unused-import +from .botocore import is_boto3_error_code # pylint: disable=unused-import +from .botocore import is_boto3_error_message # pylint: disable=unused-import +from .botocore import get_boto3_client_method_parameters # pylint: disable=unused-import +from .botocore import normalize_boto3_result # pylint: disable=unused-import + +# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.modules +from .modules import AnsibleAWSModule # pylint: disable=unused-import + +# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.transformation +from .transformation import scrub_none_parameters # pylint: disable=unused-import + +# We will also export HAS_BOTO3 so end user modules can use it. +__all__ = ('AnsibleAWSModule', 'HAS_BOTO3', 'is_boto3_error_code', 'is_boto3_error_message') + + +class AnsibleAWSError(Exception): + pass diff --git a/ansible_collections/amazon/aws/plugins/module_utils/direct_connect.py b/ansible_collections/amazon/aws/plugins/module_utils/direct_connect.py new file mode 100644 index 000000000..abcbcfd23 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/module_utils/direct_connect.py @@ -0,0 +1,89 @@ +# Copyright (c) 2017 Ansible Project +# +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +""" +This module adds shared support for Direct Connect modules. +""" + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import traceback + +try: + import botocore +except ImportError: + pass + +from .ec2 import AWSRetry + + +class DirectConnectError(Exception): + def __init__(self, msg, last_traceback=None, exception=None): + self.msg = msg + self.last_traceback = last_traceback + self.exception = exception + + +def delete_connection(client, connection_id): + try: + AWSRetry.jittered_backoff()(client.delete_connection)(connectionId=connection_id) + except botocore.exceptions.ClientError as e: + raise DirectConnectError(msg="Failed to delete Direct Connect connection {0}.".format(connection_id), + last_traceback=traceback.format_exc(), + exception=e) + + +def associate_connection_and_lag(client, connection_id, lag_id): + try: + AWSRetry.jittered_backoff()(client.associate_connection_with_lag)(connectionId=connection_id, + lagId=lag_id) + except botocore.exceptions.ClientError as e: + raise DirectConnectError(msg="Failed to associate Direct Connect connection {0}" + " with link aggregation group {1}.".format(connection_id, lag_id), + last_traceback=traceback.format_exc(), + exception=e) + + +def disassociate_connection_and_lag(client, connection_id, lag_id): + try: + AWSRetry.jittered_backoff()(client.disassociate_connection_from_lag)(connectionId=connection_id, + lagId=lag_id) + except botocore.exceptions.ClientError as e: + raise DirectConnectError(msg="Failed to disassociate Direct Connect connection {0}" + " from link aggregation group {1}.".format(connection_id, lag_id), + last_traceback=traceback.format_exc(), + exception=e) + + +def delete_virtual_interface(client, virtual_interface): + try: + AWSRetry.jittered_backoff()(client.delete_virtual_interface)(virtualInterfaceId=virtual_interface) + except botocore.exceptions.ClientError as e: + raise DirectConnectError(msg="Could not delete virtual interface {0}".format(virtual_interface), + last_traceback=traceback.format_exc(), + exception=e) diff --git a/ansible_collections/amazon/aws/plugins/module_utils/ec2.py b/ansible_collections/amazon/aws/plugins/module_utils/ec2.py new file mode 100644 index 000000000..817c12298 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/module_utils/ec2.py @@ -0,0 +1,310 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013 +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +""" +This module adds helper functions for various EC2 specific services. + +It also includes a large number of imports for functions which historically +lived here. Most of these functions were not specific to EC2; they ended +up in this module because "that's where the AWS code was" (originally).
+""" + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re + +from ansible.module_utils.ansible_release import __version__ +from ansible.module_utils.six import string_types +from ansible.module_utils.six import integer_types +# Used to live here, moved into ansible.module_utils.common.dict_transformations +from ansible.module_utils.common.dict_transformations import _camel_to_snake # pylint: disable=unused-import +from ansible.module_utils.common.dict_transformations import _snake_to_camel # pylint: disable=unused-import +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict # pylint: disable=unused-import +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict # pylint: disable=unused-import + +# Used to live here, moved into # ansible_collections.amazon.aws.plugins.module_utils.arn +from .arn import is_outpost_arn as is_outposts_arn # pylint: disable=unused-import + +# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.botocore +from .botocore import HAS_BOTO3 # pylint: disable=unused-import +from .botocore import boto3_conn # pylint: disable=unused-import +from .botocore import boto3_inventory_conn # pylint: disable=unused-import +from .botocore import boto_exception # pylint: disable=unused-import +from .botocore import get_aws_region # pylint: disable=unused-import +from .botocore import get_aws_connection_info # pylint: disable=unused-import + +from .botocore import paginated_query_with_retries + +# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.botocore +from .core import AnsibleAWSError # pylint: disable=unused-import + +# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.modules +# The names have been changed in .modules to better reflect their applicability. +from .modules import _aws_common_argument_spec as aws_common_argument_spec # pylint: disable=unused-import +from .modules import aws_argument_spec as ec2_argument_spec # pylint: disable=unused-import + +# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.tagging +from .tagging import ansible_dict_to_boto3_tag_list # pylint: disable=unused-import +from .tagging import boto3_tag_list_to_ansible_dict # pylint: disable=unused-import +from .tagging import compare_aws_tags # pylint: disable=unused-import + +# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.transformation +from .transformation import ansible_dict_to_boto3_filter_list # pylint: disable=unused-import +from .transformation import map_complex_type # pylint: disable=unused-import + +# Used to live here, moved into # ansible_collections.amazon.aws.plugins.module_utils.policy +from .policy import _py3cmp as py3cmp # pylint: disable=unused-import +from .policy import compare_policies # pylint: disable=unused-import +from .policy import sort_json_policy_dict # pylint: disable=unused-import + +# Used to live here, moved into # ansible_collections.amazon.aws.plugins.module_utils.retries +from .retries import AWSRetry # pylint: disable=unused-import + +try: + import botocore +except ImportError: + pass # Handled by HAS_BOTO3 + + +def get_ec2_security_group_ids_from_names(sec_group_list, ec2_connection, vpc_id=None, boto3=None): + + """ Return list of security group IDs from security group names. Note that security group names are not unique + across VPCs. 
If a name exists across multiple VPCs and no VPC ID is supplied, all matching IDs will be returned. This + will probably lead to a boto exception if you attempt to assign both IDs to a resource, so ensure you wrap the call in + a try block. + """ + + def get_sg_name(sg, boto3=None): + return str(sg['GroupName']) + + def get_sg_id(sg, boto3=None): + return str(sg['GroupId']) + + sec_group_id_list = [] + + if isinstance(sec_group_list, string_types): + sec_group_list = [sec_group_list] + + # Get all security groups + if vpc_id: + filters = [ + { + 'Name': 'vpc-id', + 'Values': [ + vpc_id, + ] + } + ] + all_sec_groups = ec2_connection.describe_security_groups(Filters=filters)['SecurityGroups'] + else: + all_sec_groups = ec2_connection.describe_security_groups()['SecurityGroups'] + + unmatched = set(sec_group_list).difference(str(get_sg_name(all_sg, boto3)) for all_sg in all_sec_groups) + sec_group_name_list = list(set(sec_group_list) - set(unmatched)) + + if len(unmatched) > 0: + # If we have unmatched names that look like an ID, assume they are + sec_group_id_list[:] = [sg for sg in unmatched if re.match('sg-[a-fA-F0-9]+$', sg)] + still_unmatched = [sg for sg in unmatched if not re.match('sg-[a-fA-F0-9]+$', sg)] + if len(still_unmatched) > 0: + raise ValueError("The following group names are not valid: %s" % ', '.join(still_unmatched)) + + sec_group_id_list += [get_sg_id(all_sg) for all_sg in all_sec_groups if get_sg_name(all_sg) in sec_group_name_list] + + return sec_group_id_list + + +def add_ec2_tags(client, module, resource_id, tags_to_set, retry_codes=None): + """ + Sets Tags on an EC2 resource. + + :param client: an EC2 boto3 client + :param module: an AnsibleAWSModule object + :param resource_id: the identifier for the resource + :param tags_to_set: A dictionary of key/value pairs to set + :param retry_codes: additional boto3 error codes to trigger retries + """ + + if not tags_to_set: + return False + if module.check_mode: + return True + + if not retry_codes: + retry_codes = [] + + try: + tags_to_add = ansible_dict_to_boto3_tag_list(tags_to_set) + AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=retry_codes)( + client.create_tags + )( + Resources=[resource_id], Tags=tags_to_add + ) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Unable to add tags {0} to {1}".format(tags_to_set, resource_id)) + return True + + +def remove_ec2_tags(client, module, resource_id, tags_to_unset, retry_codes=None): + """ + Removes Tags from an EC2 resource. + + :param client: an EC2 boto3 client + :param module: an AnsibleAWSModule object + :param resource_id: the identifier for the resource + :param tags_to_unset: a list of tag keys to remove + :param retry_codes: additional boto3 error codes to trigger retries + """ + + if not tags_to_unset: + return False + if module.check_mode: + return True + + if not retry_codes: + retry_codes = [] + + tags_to_remove = [dict(Key=tagkey) for tagkey in tags_to_unset] + + try: + AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=retry_codes)( + client.delete_tags + )( + Resources=[resource_id], Tags=tags_to_remove + ) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Unable to delete tags {0} from {1}".format(tags_to_unset, resource_id)) + return True + + +def describe_ec2_tags(client, module, resource_id, resource_type=None, retry_codes=None): + """ + Performs a paginated search of EC2 resource tags.
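+ Example (illustrative resource ID): + current_tags = describe_ec2_tags(client, module, 'i-0123456789abcdef0', resource_type='instance')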
+ + :param client: an EC2 boto3 client + :param module: an AnsibleAWSModule object + :param resource_id: the identifier for the resource + :param resource_type: the type of the resource + :param retry_codes: additional boto3 error codes to trigger retries + """ + filters = {'resource-id': resource_id} + if resource_type: + filters['resource-type'] = resource_type + filters = ansible_dict_to_boto3_filter_list(filters) + + if not retry_codes: + retry_codes = [] + + try: + retry_decorator = AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=retry_codes) + results = paginated_query_with_retries(client, 'describe_tags', retry_decorator=retry_decorator, + Filters=filters) + return boto3_tag_list_to_ansible_dict(results.get('Tags', None)) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Failed to describe tags for EC2 Resource: {0}".format(resource_id)) + + +def ensure_ec2_tags(client, module, resource_id, resource_type=None, tags=None, purge_tags=True, retry_codes=None): + """ + Updates the tags on an EC2 resource. + + To remove all tags the tags parameter must be explicitly set to an empty dictionary. + + :param client: an EC2 boto3 client + :param module: an AnsibleAWSModule object + :param resource_id: the identifier for the resource + :param resource_type: the type of the resource + :param tags: the Tags to apply to the resource + :param purge_tags: whether tags missing from the tag list should be removed + :param retry_codes: additional boto3 error codes to trigger retries + :return: changed: returns True if the tags are changed + """ + + if tags is None: + return False + + if not retry_codes: + retry_codes = [] + + changed = False + current_tags = describe_ec2_tags(client, module, resource_id, resource_type, retry_codes) + + tags_to_set, tags_to_unset = compare_aws_tags(current_tags, tags, purge_tags) + + if purge_tags and not tags: + tags_to_unset = current_tags + + changed |= remove_ec2_tags(client, module, resource_id, tags_to_unset, retry_codes) + changed |= add_ec2_tags(client, module, resource_id, tags_to_set, retry_codes) + + return changed + + +def normalize_ec2_vpc_dhcp_config(option_config): + """ + The boto2 module returned a config dict, but boto3 returns a list of dicts + Make the data we return look like the old way, so we don't break users. + This is also much more user-friendly. 
+ boto3: + 'DhcpConfigurations': [ + {'Key': 'domain-name', 'Values': [{'Value': 'us-west-2.compute.internal'}]}, + {'Key': 'domain-name-servers', 'Values': [{'Value': 'AmazonProvidedDNS'}]}, + {'Key': 'netbios-name-servers', 'Values': [{'Value': '1.2.3.4'}, {'Value': '5.6.7.8'}]}, + {'Key': 'netbios-node-type', 'Values': [1]}, + {'Key': 'ntp-servers', 'Values': [{'Value': '1.2.3.4'}, {'Value': '5.6.7.8'}]} + ], + The module historically returned: + "new_options": { + "domain-name": "ec2.internal", + "domain-name-servers": ["AmazonProvidedDNS"], + "netbios-name-servers": ["10.0.0.1", "10.0.1.1"], + "netbios-node-type": "1", + "ntp-servers": ["10.0.0.2", "10.0.1.2"] + }, + """ + config_data = {} + + if len(option_config) == 0: + # If there is no provided config, return the empty dictionary + return config_data + + for config_item in option_config: + # Handle single value keys + if config_item['Key'] == 'netbios-node-type': + if isinstance(config_item['Values'], integer_types): + config_data['netbios-node-type'] = str(config_item['Values']) + elif isinstance(config_item['Values'], list): + config_data['netbios-node-type'] = str(config_item['Values'][0]['Value']) + # Handle actual lists of values + for option in ['domain-name', 'domain-name-servers', 'ntp-servers', 'netbios-name-servers']: + if config_item['Key'] == option: + config_data[option] = [val['Value'] for val in config_item['Values']] + + return config_data diff --git a/ansible_collections/amazon/aws/plugins/module_utils/elb_utils.py b/ansible_collections/amazon/aws/plugins/module_utils/elb_utils.py new file mode 100644 index 000000000..218052d2f --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/module_utils/elb_utils.py @@ -0,0 +1,109 @@ +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +try: + from botocore.exceptions import BotoCoreError, ClientError +except ImportError: + pass + +from .core import is_boto3_error_code +from .ec2 import AWSRetry + + +def get_elb(connection, module, elb_name): + """ + Get an ELB based on name. If not found, return None. + + :param connection: AWS boto3 elbv2 connection + :param module: Ansible module + :param elb_name: Name of load balancer to get + :return: boto3 ELB dict or None if not found + """ + try: + return _get_elb(connection, module, elb_name) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e) + + +@AWSRetry.jittered_backoff() +def _get_elb(connection, module, elb_name): + """ + Get an ELB based on name using AWSRetry. If not found, return None. + + :param connection: AWS boto3 elbv2 connection + :param module: Ansible module + :param elb_name: Name of load balancer to get + :return: boto3 ELB dict or None if not found + """ + + try: + load_balancer_paginator = connection.get_paginator('describe_load_balancers') + return (load_balancer_paginator.paginate(Names=[elb_name]).build_full_result())['LoadBalancers'][0] + except is_boto3_error_code('LoadBalancerNotFound'): + return None + + +def get_elb_listener(connection, module, elb_arn, listener_port): + """ + Get an ELB listener based on the port provided. If not found, return None.
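+ Example (hypothetical ARN): + listener = get_elb_listener(connection, module, 'arn:aws:elasticloadbalancing:us-east-1:123456789012:loadbalancer/app/my-alb/0123456789abcdef', 443)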
+
+    :param connection: AWS boto3 elbv2 connection
+    :param module: Ansible module
+    :param elb_arn: ARN of the ELB to look at
+    :param listener_port: Port of the listener to look for
+    :return: boto3 ELB listener dict or None if not found
+    """
+
+    try:
+        listener_paginator = connection.get_paginator('describe_listeners')
+        listeners = (AWSRetry.jittered_backoff()(listener_paginator.paginate)(LoadBalancerArn=elb_arn).build_full_result())['Listeners']
+    except (BotoCoreError, ClientError) as e:
+        module.fail_json_aws(e)
+
+    matched_listener = None
+
+    for listener in listeners:
+        if listener['Port'] == listener_port:
+            matched_listener = listener
+            break
+
+    return matched_listener
+
+
+def get_elb_listener_rules(connection, module, listener_arn):
+    """
+    Get rules for a particular ELB listener using the listener ARN.
+
+    :param connection: AWS boto3 elbv2 connection
+    :param module: Ansible module
+    :param listener_arn: ARN of the ELB listener
+    :return: boto3 ELB rules list
+    """
+
+    try:
+        return AWSRetry.jittered_backoff()(connection.describe_rules)(ListenerArn=listener_arn)['Rules']
+    except (BotoCoreError, ClientError) as e:
+        module.fail_json_aws(e)
+
+
+def convert_tg_name_to_arn(connection, module, tg_name):
+    """
+    Get the ARN of a target group using the target group's name
+
+    :param connection: AWS boto3 elbv2 connection
+    :param module: Ansible module
+    :param tg_name: Name of the target group
+    :return: target group ARN string
+    """
+
+    try:
+        response = AWSRetry.jittered_backoff()(connection.describe_target_groups)(Names=[tg_name])
+    except (BotoCoreError, ClientError) as e:
+        module.fail_json_aws(e)
+
+    tg_arn = response['TargetGroups'][0]['TargetGroupArn']
+
+    return tg_arn
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/elbv2.py b/ansible_collections/amazon/aws/plugins/module_utils/elbv2.py
new file mode 100644
index 000000000..04f6114e1
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/elbv2.py
@@ -0,0 +1,1092 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import traceback
+from copy import deepcopy
+
+try:
+    from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+    pass
+
+from .ec2 import AWSRetry
+from .ec2 import ansible_dict_to_boto3_tag_list
+from .ec2 import boto3_tag_list_to_ansible_dict
+from .ec2 import get_ec2_security_group_ids_from_names
+from .elb_utils import convert_tg_name_to_arn
+from .elb_utils import get_elb
+from .elb_utils import get_elb_listener
+from .waiters import get_waiter
+
+
+def _simple_forward_config_arn(config, parent_arn):
+    config = deepcopy(config)
+
+    stickiness = config.pop('TargetGroupStickinessConfig', {'Enabled': False})
+    # Stickiness options set, non-default value
+    if stickiness != {'Enabled': False}:
+        return False
+
+    target_groups = config.pop('TargetGroups', [])
+
+    # non-default config left over, probably invalid
+    if config:
+        return False
+    # Multiple TGs, not simple
+    if len(target_groups) > 1:
+        return False
+
+    if not target_groups:
+        # with no TGs defined, but an ARN set, this is one of the minimum possible configs
+        return parent_arn or False
+
+    target_group = target_groups[0]
+    # We don't care about the weight with a single TG
+    target_group.pop('Weight', None)
+
+    target_group_arn = target_group.pop('TargetGroupArn', None)
+
+    # non-default config left over
+    if target_group:
+        return False
+
+    # We didn't find an ARN
+
if not (target_group_arn or parent_arn):
+        return False
+
+    # Only one
+    if not parent_arn:
+        return target_group_arn
+    if not target_group_arn:
+        return parent_arn
+
+    if parent_arn != target_group_arn:
+        return False
+
+    return target_group_arn
+
+
+# ForwardConfig may be optional if we've got a single TargetGroupArn entry
+def _prune_ForwardConfig(action):
+    """
+    Drops a redundant ForwardConfig where TargetGroupARN has already been set.
+    (So we can perform comparisons)
+    """
+    if action.get('Type', "") != 'forward':
+        return action
+    if "ForwardConfig" not in action:
+        return action
+
+    parent_arn = action.get('TargetGroupArn', None)
+    arn = _simple_forward_config_arn(action["ForwardConfig"], parent_arn)
+    if not arn:
+        return action
+
+    # Remove the redundant ForwardConfig
+    newAction = action.copy()
+    del newAction["ForwardConfig"]
+    newAction["TargetGroupArn"] = arn
+    return newAction
+
+
+# remove the client secret if UseExistingClientSecret, because AWS won't return it;
+# add default values when they are not requested
+def _prune_secret(action):
+    if action['Type'] != 'authenticate-oidc':
+        return action
+
+    if not action['AuthenticateOidcConfig'].get('Scope', False):
+        action['AuthenticateOidcConfig']['Scope'] = 'openid'
+
+    if not action['AuthenticateOidcConfig'].get('SessionTimeout', False):
+        action['AuthenticateOidcConfig']['SessionTimeout'] = 604800
+
+    if action['AuthenticateOidcConfig'].get('UseExistingClientSecret', False):
+        action['AuthenticateOidcConfig'].pop('ClientSecret', None)
+
+    return action
+
+
+# the AWS API also won't return the UseExistingClientSecret key, so it
+# must be added back, because it's requested and compared
+def _append_use_existing_client_secret(action):
+    if action['Type'] != 'authenticate-oidc':
+        return action
+
+    action['AuthenticateOidcConfig']['UseExistingClientSecret'] = True
+
+    return action
+
+
+def _sort_actions(actions):
+    return sorted(actions, key=lambda x: x.get('Order', 0))
+
+
+class ElasticLoadBalancerV2(object):
+
+    def __init__(self, connection, module):
+
+        self.connection = connection
+        self.module = module
+        self.changed = False
+        self.new_load_balancer = False
+        self.scheme = module.params.get("scheme")
+        self.name = module.params.get("name")
+        self.subnet_mappings = module.params.get("subnet_mappings")
+        self.subnets = module.params.get("subnets")
+        self.deletion_protection = module.params.get("deletion_protection")
+        self.elb_ip_addr_type = module.params.get("ip_address_type")
+        self.wait = module.params.get("wait")
+
+        if module.params.get("tags") is not None:
+            self.tags = ansible_dict_to_boto3_tag_list(module.params.get("tags"))
+        else:
+            self.tags = None
+
+        self.purge_tags = module.params.get("purge_tags")
+
+        self.elb = get_elb(connection, module, self.name)
+        if self.elb is not None:
+            self.elb_attributes = self.get_elb_attributes()
+            self.elb_ip_addr_type = self.get_elb_ip_address_type()
+            self.elb['tags'] = self.get_elb_tags()
+        else:
+            self.elb_attributes = None
+
+    def wait_for_ip_type(self, elb_arn, ip_type):
+        """
+        Wait for the load balancer to reach the expected IP address type
+
+        :param elb_arn: The load balancer ARN
+        :return:
+        """
+
+        if not self.wait:
+            return
+
+        waiter_names = {
+            'ipv4': 'load_balancer_ip_address_type_ipv4',
+            'dualstack': 'load_balancer_ip_address_type_dualstack',
+        }
+        if ip_type not in waiter_names:
+            return
+
+        try:
+            waiter = get_waiter(self.connection, waiter_names.get(ip_type))
+            waiter.wait(LoadBalancerArns=[elb_arn])
+        except (BotoCoreError, ClientError) as e:
+            self.module.fail_json_aws(e)
+
+
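+    # NOTE: wait_for_status and wait_for_deletion, like wait_for_ip_type above,
+    # are no-ops unless the module was invoked with wait=true; otherwise they
+    # poll AWS via botocore waiters until the expected state is reached.
+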
def wait_for_status(self, elb_arn):
+        """
+        Wait for load balancer to reach 'active' status
+
+        :param elb_arn: The load balancer ARN
+        :return:
+        """
+
+        if not self.wait:
+            return
+
+        try:
+            waiter = get_waiter(self.connection, 'load_balancer_available')
+            waiter.wait(LoadBalancerArns=[elb_arn])
+        except (BotoCoreError, ClientError) as e:
+            self.module.fail_json_aws(e)
+
+    def wait_for_deletion(self, elb_arn):
+        """
+        Wait for the load balancer to be deleted
+
+        :param elb_arn: The load balancer ARN
+        :return:
+        """
+
+        if not self.wait:
+            return
+
+        try:
+            waiter = get_waiter(self.connection, 'load_balancers_deleted')
+            waiter.wait(LoadBalancerArns=[elb_arn])
+        except (BotoCoreError, ClientError) as e:
+            self.module.fail_json_aws(e)
+
+    def get_elb_attributes(self):
+        """
+        Get load balancer attributes
+
+        :return:
+        """
+
+        try:
+            attr_list = AWSRetry.jittered_backoff()(
+                self.connection.describe_load_balancer_attributes
+            )(LoadBalancerArn=self.elb['LoadBalancerArn'])['Attributes']
+
+            elb_attributes = boto3_tag_list_to_ansible_dict(attr_list)
+        except (BotoCoreError, ClientError) as e:
+            self.module.fail_json_aws(e)
+
+        # Replace '.' with '_' in attribute key names to make them more Ansible-friendly
+        return dict((k.replace('.', '_'), v) for k, v in elb_attributes.items())
+
+    def get_elb_ip_address_type(self):
+        """
+        Retrieve load balancer ip address type using describe_load_balancers
+
+        :return:
+        """
+
+        return self.elb.get('IpAddressType', None)
+
+    def update_elb_attributes(self):
+        """
+        Update the elb_attributes parameter
+        :return:
+        """
+        self.elb_attributes = self.get_elb_attributes()
+
+    def get_elb_tags(self):
+        """
+        Get load balancer tags
+
+        :return:
+        """
+
+        try:
+            return AWSRetry.jittered_backoff()(
+                self.connection.describe_tags
+            )(ResourceArns=[self.elb['LoadBalancerArn']])['TagDescriptions'][0]['Tags']
+        except (BotoCoreError, ClientError) as e:
+            self.module.fail_json_aws(e)
+
+    def delete_tags(self, tags_to_delete):
+        """
+        Delete elb tags
+
+        :return:
+        """
+
+        try:
+            AWSRetry.jittered_backoff()(
+                self.connection.remove_tags
+            )(ResourceArns=[self.elb['LoadBalancerArn']], TagKeys=tags_to_delete)
+        except (BotoCoreError, ClientError) as e:
+            self.module.fail_json_aws(e)
+
+        self.changed = True
+
+    def modify_tags(self):
+        """
+        Modify elb tags
+
+        :return:
+        """
+
+        try:
+            AWSRetry.jittered_backoff()(
+                self.connection.add_tags
+            )(ResourceArns=[self.elb['LoadBalancerArn']], Tags=self.tags)
+        except (BotoCoreError, ClientError) as e:
+            self.module.fail_json_aws(e)
+
+        self.changed = True
+
+    def delete(self):
+        """
+        Delete elb
+        :return:
+        """
+
+        try:
+            AWSRetry.jittered_backoff()(
+                self.connection.delete_load_balancer
+            )(LoadBalancerArn=self.elb['LoadBalancerArn'])
+        except (BotoCoreError, ClientError) as e:
+            self.module.fail_json_aws(e)
+
+        self.wait_for_deletion(self.elb['LoadBalancerArn'])
+
+        self.changed = True
+
+    def compare_subnets(self):
+        """
+        Compare user subnets with current ELB subnets
+
+        :return: bool True if they match otherwise False
+        """
+
+        subnet_mapping_id_list = []
+        subnet_mappings = []
+
+        # Check if we're dealing with subnets or subnet_mappings
+        if self.subnets is not None:
+            # Convert subnets to subnet_mappings format for comparison
+            for subnet in self.subnets:
+                subnet_mappings.append({'SubnetId': subnet})
+
+        if self.subnet_mappings is not None:
+            # Use this directly since we're comparing as a mapping
+            subnet_mappings = self.subnet_mappings
+
+        # Build a subnet_mapping style structure of what's currently
+        # on the load
balancer + for subnet in self.elb['AvailabilityZones']: + this_mapping = {'SubnetId': subnet['SubnetId']} + for address in subnet.get('LoadBalancerAddresses', []): + if 'AllocationId' in address: + this_mapping['AllocationId'] = address['AllocationId'] + break + + subnet_mapping_id_list.append(this_mapping) + + return set(frozenset(mapping.items()) for mapping in subnet_mapping_id_list) == set(frozenset(mapping.items()) for mapping in subnet_mappings) + + def modify_subnets(self): + """ + Modify elb subnets to match module parameters + :return: + """ + + try: + AWSRetry.jittered_backoff()( + self.connection.set_subnets + )(LoadBalancerArn=self.elb['LoadBalancerArn'], Subnets=self.subnets) + except (BotoCoreError, ClientError) as e: + self.module.fail_json_aws(e) + + self.changed = True + + def update(self): + """ + Update the elb from AWS + :return: + """ + + self.elb = get_elb(self.connection, self.module, self.module.params.get("name")) + self.elb['tags'] = self.get_elb_tags() + + def modify_ip_address_type(self, ip_addr_type): + """ + Modify ELB ip address type + :return: + """ + if ip_addr_type is None: + return + if self.elb_ip_addr_type == ip_addr_type: + return + + try: + AWSRetry.jittered_backoff()( + self.connection.set_ip_address_type + )(LoadBalancerArn=self.elb['LoadBalancerArn'], IpAddressType=ip_addr_type) + except (BotoCoreError, ClientError) as e: + self.module.fail_json_aws(e) + + self.changed = True + self.wait_for_ip_type(self.elb['LoadBalancerArn'], ip_addr_type) + + def _elb_create_params(self): + # Required parameters + params = dict() + params['Name'] = self.name + params['Type'] = self.type + + # Other parameters + if self.elb_ip_addr_type is not None: + params['IpAddressType'] = self.elb_ip_addr_type + if self.subnets is not None: + params['Subnets'] = self.subnets + if self.subnet_mappings is not None: + params['SubnetMappings'] = self.subnet_mappings + if self.tags: + params['Tags'] = self.tags + # Scheme isn't supported for GatewayLBs, so we won't add it here, even though we don't + # support them yet. 
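+        # The optional parameters above are only included when explicitly set,
+        # so the AWS defaults apply to anything left unset.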
+ + return params + + def create_elb(self): + """ + Create a load balancer + :return: + """ + + params = self._elb_create_params() + + try: + self.elb = AWSRetry.jittered_backoff()(self.connection.create_load_balancer)(**params)['LoadBalancers'][0] + self.changed = True + self.new_load_balancer = True + except (BotoCoreError, ClientError) as e: + self.module.fail_json_aws(e) + + self.wait_for_status(self.elb['LoadBalancerArn']) + + +class ApplicationLoadBalancer(ElasticLoadBalancerV2): + + def __init__(self, connection, connection_ec2, module): + """ + + :param connection: boto3 connection + :param module: Ansible module + """ + super(ApplicationLoadBalancer, self).__init__(connection, module) + + self.connection_ec2 = connection_ec2 + + # Ansible module parameters specific to ALBs + self.type = 'application' + if module.params.get('security_groups') is not None: + try: + self.security_groups = AWSRetry.jittered_backoff()( + get_ec2_security_group_ids_from_names + )(module.params.get('security_groups'), self.connection_ec2, boto3=True) + except ValueError as e: + self.module.fail_json(msg=str(e), exception=traceback.format_exc()) + except (BotoCoreError, ClientError) as e: + self.module.fail_json_aws(e) + else: + self.security_groups = module.params.get('security_groups') + self.access_logs_enabled = module.params.get("access_logs_enabled") + self.access_logs_s3_bucket = module.params.get("access_logs_s3_bucket") + self.access_logs_s3_prefix = module.params.get("access_logs_s3_prefix") + self.idle_timeout = module.params.get("idle_timeout") + self.http2 = module.params.get("http2") + self.http_desync_mitigation_mode = module.params.get("http_desync_mitigation_mode") + self.http_drop_invalid_header_fields = module.params.get("http_drop_invalid_header_fields") + self.http_x_amzn_tls_version_and_cipher_suite = module.params.get("http_x_amzn_tls_version_and_cipher_suite") + self.http_xff_client_port = module.params.get("http_xff_client_port") + self.waf_fail_open = module.params.get("waf_fail_open") + + if self.elb is not None and self.elb['Type'] != 'application': + self.module.fail_json(msg="The load balancer type you are trying to manage is not application. 
Try elb_network_lb module instead.") + + def _elb_create_params(self): + params = super()._elb_create_params() + + if self.security_groups is not None: + params['SecurityGroups'] = self.security_groups + params['Scheme'] = self.scheme + + return params + + def compare_elb_attributes(self): + """ + Compare user attributes with current ELB attributes + :return: bool True if they match otherwise False + """ + + update_attributes = [] + if self.access_logs_enabled is not None and str(self.access_logs_enabled).lower() != self.elb_attributes['access_logs_s3_enabled']: + update_attributes.append({'Key': 'access_logs.s3.enabled', 'Value': str(self.access_logs_enabled).lower()}) + if self.access_logs_s3_bucket is not None and self.access_logs_s3_bucket != self.elb_attributes['access_logs_s3_bucket']: + update_attributes.append({'Key': 'access_logs.s3.bucket', 'Value': self.access_logs_s3_bucket}) + if self.access_logs_s3_prefix is not None and self.access_logs_s3_prefix != self.elb_attributes['access_logs_s3_prefix']: + update_attributes.append({'Key': 'access_logs.s3.prefix', 'Value': self.access_logs_s3_prefix}) + if self.deletion_protection is not None and str(self.deletion_protection).lower() != self.elb_attributes['deletion_protection_enabled']: + update_attributes.append({'Key': 'deletion_protection.enabled', 'Value': str(self.deletion_protection).lower()}) + if self.idle_timeout is not None and str(self.idle_timeout) != self.elb_attributes['idle_timeout_timeout_seconds']: + update_attributes.append({'Key': 'idle_timeout.timeout_seconds', 'Value': str(self.idle_timeout)}) + if self.http2 is not None and str(self.http2).lower() != self.elb_attributes['routing_http2_enabled']: + update_attributes.append({'Key': 'routing.http2.enabled', 'Value': str(self.http2).lower()}) + if self.http_desync_mitigation_mode is not None and str(self.http_desync_mitigation_mode).lower() != \ + self.elb_attributes['routing_http_desync_mitigation_mode']: + update_attributes.append({'Key': 'routing.http.desync_mitigation_mode', 'Value': str(self.http_desync_mitigation_mode).lower()}) + if self.http_drop_invalid_header_fields is not None and str(self.http_drop_invalid_header_fields).lower() != \ + self.elb_attributes['routing_http_drop_invalid_header_fields_enabled']: + update_attributes.append({'Key': 'routing.http.drop_invalid_header_fields.enabled', 'Value': str(self.http_drop_invalid_header_fields).lower()}) + if self.http_x_amzn_tls_version_and_cipher_suite is not None and str(self.http_x_amzn_tls_version_and_cipher_suite).lower() != \ + self.elb_attributes['routing_http_x_amzn_tls_version_and_cipher_suite_enabled']: + update_attributes.append({'Key': 'routing.http.x_amzn_tls_version_and_cipher_suite.enabled', + 'Value': str(self.http_x_amzn_tls_version_and_cipher_suite).lower()}) + if self.http_xff_client_port is not None and str(self.http_xff_client_port).lower() != \ + self.elb_attributes['routing_http_xff_client_port_enabled']: + update_attributes.append({'Key': 'routing.http.xff_client_port.enabled', 'Value': str(self.http_xff_client_port).lower()}) + if self.waf_fail_open is not None and str(self.waf_fail_open).lower() != \ + self.elb_attributes['waf_fail_open_enabled']: + update_attributes.append({'Key': 'waf.fail_open.enabled', 'Value': str(self.waf_fail_open).lower()}) + + if update_attributes: + return False + else: + return True + + def modify_elb_attributes(self): + """ + Update Application ELB attributes if required + + :return: + """ + + update_attributes = [] + + if self.access_logs_enabled is not 
None and str(self.access_logs_enabled).lower() != self.elb_attributes['access_logs_s3_enabled']: + update_attributes.append({'Key': 'access_logs.s3.enabled', 'Value': str(self.access_logs_enabled).lower()}) + if self.access_logs_s3_bucket is not None and self.access_logs_s3_bucket != self.elb_attributes['access_logs_s3_bucket']: + update_attributes.append({'Key': 'access_logs.s3.bucket', 'Value': self.access_logs_s3_bucket}) + if self.access_logs_s3_prefix is not None and self.access_logs_s3_prefix != self.elb_attributes['access_logs_s3_prefix']: + update_attributes.append({'Key': 'access_logs.s3.prefix', 'Value': self.access_logs_s3_prefix}) + if self.deletion_protection is not None and str(self.deletion_protection).lower() != self.elb_attributes['deletion_protection_enabled']: + update_attributes.append({'Key': 'deletion_protection.enabled', 'Value': str(self.deletion_protection).lower()}) + if self.idle_timeout is not None and str(self.idle_timeout) != self.elb_attributes['idle_timeout_timeout_seconds']: + update_attributes.append({'Key': 'idle_timeout.timeout_seconds', 'Value': str(self.idle_timeout)}) + if self.http2 is not None and str(self.http2).lower() != self.elb_attributes['routing_http2_enabled']: + update_attributes.append({'Key': 'routing.http2.enabled', 'Value': str(self.http2).lower()}) + if self.http_desync_mitigation_mode is not None and str(self.http_desync_mitigation_mode).lower() != \ + self.elb_attributes['routing_http_desync_mitigation_mode']: + update_attributes.append({'Key': 'routing.http.desync_mitigation_mode', 'Value': str(self.http_desync_mitigation_mode).lower()}) + if self.http_drop_invalid_header_fields is not None and str(self.http_drop_invalid_header_fields).lower() != \ + self.elb_attributes['routing_http_drop_invalid_header_fields_enabled']: + update_attributes.append({'Key': 'routing.http.drop_invalid_header_fields.enabled', 'Value': str(self.http_drop_invalid_header_fields).lower()}) + if self.http_x_amzn_tls_version_and_cipher_suite is not None and str(self.http_x_amzn_tls_version_and_cipher_suite).lower() != \ + self.elb_attributes['routing_http_x_amzn_tls_version_and_cipher_suite_enabled']: + update_attributes.append({'Key': 'routing.http.x_amzn_tls_version_and_cipher_suite.enabled', + 'Value': str(self.http_x_amzn_tls_version_and_cipher_suite).lower()}) + if self.http_xff_client_port is not None and str(self.http_xff_client_port).lower() != \ + self.elb_attributes['routing_http_xff_client_port_enabled']: + update_attributes.append({'Key': 'routing.http.xff_client_port.enabled', 'Value': str(self.http_xff_client_port).lower()}) + if self.waf_fail_open is not None and str(self.waf_fail_open).lower() != \ + self.elb_attributes['waf_fail_open_enabled']: + update_attributes.append({'Key': 'waf.fail_open.enabled', 'Value': str(self.waf_fail_open).lower()}) + + if update_attributes: + try: + AWSRetry.jittered_backoff()( + self.connection.modify_load_balancer_attributes + )(LoadBalancerArn=self.elb['LoadBalancerArn'], Attributes=update_attributes) + self.changed = True + except (BotoCoreError, ClientError) as e: + # Something went wrong setting attributes. 
If this ELB was created during this task, delete it to leave a consistent state + if self.new_load_balancer: + AWSRetry.jittered_backoff()(self.connection.delete_load_balancer)(LoadBalancerArn=self.elb['LoadBalancerArn']) + self.module.fail_json_aws(e) + + def compare_security_groups(self): + """ + Compare user security groups with current ELB security groups + + :return: bool True if they match otherwise False + """ + + if set(self.elb['SecurityGroups']) != set(self.security_groups): + return False + else: + return True + + def modify_security_groups(self): + """ + Modify elb security groups to match module parameters + :return: + """ + + try: + AWSRetry.jittered_backoff()( + self.connection.set_security_groups + )(LoadBalancerArn=self.elb['LoadBalancerArn'], SecurityGroups=self.security_groups) + except (BotoCoreError, ClientError) as e: + self.module.fail_json_aws(e) + + self.changed = True + + +class NetworkLoadBalancer(ElasticLoadBalancerV2): + + def __init__(self, connection, connection_ec2, module): + + """ + + :param connection: boto3 connection + :param module: Ansible module + """ + super(NetworkLoadBalancer, self).__init__(connection, module) + + self.connection_ec2 = connection_ec2 + + # Ansible module parameters specific to NLBs + self.type = 'network' + self.cross_zone_load_balancing = module.params.get('cross_zone_load_balancing') + + if self.elb is not None and self.elb['Type'] != 'network': + self.module.fail_json(msg="The load balancer type you are trying to manage is not network. Try elb_application_lb module instead.") + + def _elb_create_params(self): + params = super()._elb_create_params() + + params['Scheme'] = self.scheme + + return params + + def modify_elb_attributes(self): + """ + Update Network ELB attributes if required + + :return: + """ + + update_attributes = [] + + if self.cross_zone_load_balancing is not None and str(self.cross_zone_load_balancing).lower() != \ + self.elb_attributes['load_balancing_cross_zone_enabled']: + update_attributes.append({'Key': 'load_balancing.cross_zone.enabled', 'Value': str(self.cross_zone_load_balancing).lower()}) + if self.deletion_protection is not None and str(self.deletion_protection).lower() != self.elb_attributes['deletion_protection_enabled']: + update_attributes.append({'Key': 'deletion_protection.enabled', 'Value': str(self.deletion_protection).lower()}) + + if update_attributes: + try: + AWSRetry.jittered_backoff()( + self.connection.modify_load_balancer_attributes + )(LoadBalancerArn=self.elb['LoadBalancerArn'], Attributes=update_attributes) + self.changed = True + except (BotoCoreError, ClientError) as e: + # Something went wrong setting attributes. 
If this ELB was created during this task, delete it to leave a consistent state + if self.new_load_balancer: + AWSRetry.jittered_backoff()(self.connection.delete_load_balancer)(LoadBalancerArn=self.elb['LoadBalancerArn']) + self.module.fail_json_aws(e) + + def modify_subnets(self): + """ + Modify elb subnets to match module parameters (unsupported for NLB) + :return: + """ + + self.module.fail_json(msg='Modifying subnets and elastic IPs is not supported for Network Load Balancer') + + +class ELBListeners(object): + + def __init__(self, connection, module, elb_arn): + + self.connection = connection + self.module = module + self.elb_arn = elb_arn + listeners = module.params.get("listeners") + if listeners is not None: + # Remove suboption argspec defaults of None from each listener + listeners = [dict((x, listener_dict[x]) for x in listener_dict if listener_dict[x] is not None) for listener_dict in listeners] + self.listeners = self._ensure_listeners_default_action_has_arn(listeners) + self.current_listeners = self._get_elb_listeners() + self.purge_listeners = module.params.get("purge_listeners") + self.changed = False + + def update(self): + """ + Update the listeners for the ELB + + :return: + """ + self.current_listeners = self._get_elb_listeners() + + def _get_elb_listeners(self): + """ + Get ELB listeners + + :return: + """ + + try: + listener_paginator = self.connection.get_paginator('describe_listeners') + return (AWSRetry.jittered_backoff()(listener_paginator.paginate)(LoadBalancerArn=self.elb_arn).build_full_result())['Listeners'] + except (BotoCoreError, ClientError) as e: + self.module.fail_json_aws(e) + + def _ensure_listeners_default_action_has_arn(self, listeners): + """ + If a listener DefaultAction has been passed with a Target Group Name instead of ARN, lookup the ARN and + replace the name. + + :param listeners: a list of listener dicts + :return: the same list of dicts ensuring that each listener DefaultActions dict has TargetGroupArn key. If a TargetGroupName key exists, it is removed. 
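+
+        For example, a hypothetical action {'Type': 'forward', 'TargetGroupName': 'my-tg'}
+        becomes {'Type': 'forward', 'TargetGroupArn': 'arn:aws:elasticloadbalancing:...'}.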
+ """ + + if not listeners: + listeners = [] + + fixed_listeners = [] + for listener in listeners: + fixed_actions = [] + for action in listener['DefaultActions']: + if 'TargetGroupName' in action: + action['TargetGroupArn'] = convert_tg_name_to_arn(self.connection, + self.module, + action['TargetGroupName']) + del action['TargetGroupName'] + fixed_actions.append(action) + listener['DefaultActions'] = fixed_actions + fixed_listeners.append(listener) + + return fixed_listeners + + def compare_listeners(self): + """ + + :return: + """ + listeners_to_modify = [] + listeners_to_delete = [] + listeners_to_add = deepcopy(self.listeners) + + # Check each current listener port to see if it's been passed to the module + for current_listener in self.current_listeners: + current_listener_passed_to_module = False + for new_listener in self.listeners[:]: + new_listener['Port'] = int(new_listener['Port']) + if current_listener['Port'] == new_listener['Port']: + current_listener_passed_to_module = True + # Remove what we match so that what is left can be marked as 'to be added' + listeners_to_add.remove(new_listener) + modified_listener = self._compare_listener(current_listener, new_listener) + if modified_listener: + modified_listener['Port'] = current_listener['Port'] + modified_listener['ListenerArn'] = current_listener['ListenerArn'] + listeners_to_modify.append(modified_listener) + break + + # If the current listener was not matched against passed listeners and purge is True, mark for removal + if not current_listener_passed_to_module and self.purge_listeners: + listeners_to_delete.append(current_listener['ListenerArn']) + + return listeners_to_add, listeners_to_modify, listeners_to_delete + + def _compare_listener(self, current_listener, new_listener): + """ + Compare two listeners. 
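+        Returns a dict containing only the attributes that differ, or None when the
+        listeners already match.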
+ + :param current_listener: + :param new_listener: + :return: + """ + + modified_listener = {} + + # Port + if current_listener['Port'] != new_listener['Port']: + modified_listener['Port'] = new_listener['Port'] + + # Protocol + if current_listener['Protocol'] != new_listener['Protocol']: + modified_listener['Protocol'] = new_listener['Protocol'] + + # If Protocol is HTTPS, check additional attributes + if current_listener['Protocol'] == 'HTTPS' and new_listener['Protocol'] == 'HTTPS': + # Cert + if current_listener['SslPolicy'] != new_listener['SslPolicy']: + modified_listener['SslPolicy'] = new_listener['SslPolicy'] + if current_listener['Certificates'][0]['CertificateArn'] != new_listener['Certificates'][0]['CertificateArn']: + modified_listener['Certificates'] = [] + modified_listener['Certificates'].append({}) + modified_listener['Certificates'][0]['CertificateArn'] = new_listener['Certificates'][0]['CertificateArn'] + elif current_listener['Protocol'] != 'HTTPS' and new_listener['Protocol'] == 'HTTPS': + modified_listener['SslPolicy'] = new_listener['SslPolicy'] + modified_listener['Certificates'] = [] + modified_listener['Certificates'].append({}) + modified_listener['Certificates'][0]['CertificateArn'] = new_listener['Certificates'][0]['CertificateArn'] + + # Default action + + # If the lengths of the actions are the same, we'll have to verify that the + # contents of those actions are the same + if len(current_listener['DefaultActions']) == len(new_listener['DefaultActions']): + current_actions_sorted = _sort_actions(current_listener['DefaultActions']) + new_actions_sorted = _sort_actions(new_listener['DefaultActions']) + + new_actions_sorted_no_secret = [_prune_secret(i) for i in new_actions_sorted] + + if [_prune_ForwardConfig(i) for i in current_actions_sorted] != [_prune_ForwardConfig(i) for i in new_actions_sorted_no_secret]: + modified_listener['DefaultActions'] = new_listener['DefaultActions'] + # If the action lengths are different, then replace with the new actions + else: + modified_listener['DefaultActions'] = new_listener['DefaultActions'] + + if modified_listener: + return modified_listener + else: + return None + + +class ELBListener(object): + + def __init__(self, connection, module, listener, elb_arn): + """ + + :param connection: + :param module: + :param listener: + :param elb_arn: + """ + + self.connection = connection + self.module = module + self.listener = listener + self.elb_arn = elb_arn + + def add(self): + + try: + # Rules is not a valid parameter for create_listener + if 'Rules' in self.listener: + self.listener.pop('Rules') + AWSRetry.jittered_backoff()(self.connection.create_listener)(LoadBalancerArn=self.elb_arn, **self.listener) + except (BotoCoreError, ClientError) as e: + self.module.fail_json_aws(e) + + def modify(self): + + try: + # Rules is not a valid parameter for modify_listener + if 'Rules' in self.listener: + self.listener.pop('Rules') + AWSRetry.jittered_backoff()(self.connection.modify_listener)(**self.listener) + except (BotoCoreError, ClientError) as e: + self.module.fail_json_aws(e) + + def delete(self): + + try: + AWSRetry.jittered_backoff()(self.connection.delete_listener)(ListenerArn=self.listener) + except (BotoCoreError, ClientError) as e: + self.module.fail_json_aws(e) + + +class ELBListenerRules(object): + + def __init__(self, connection, module, elb_arn, listener_rules, listener_port): + + self.connection = connection + self.module = module + self.elb_arn = elb_arn + self.rules = 
self._ensure_rules_action_has_arn(listener_rules)
+        self.changed = False
+
+        # Get listener based on port so we can use ARN
+        self.current_listener = get_elb_listener(connection, module, elb_arn, listener_port)
+        self.listener_arn = self.current_listener['ListenerArn']
+        self.rules_to_add = deepcopy(self.rules)
+        self.rules_to_modify = []
+        self.rules_to_delete = []
+
+        # If the listener exists (i.e. has an ARN) get rules for the listener
+        if 'ListenerArn' in self.current_listener:
+            self.current_rules = self._get_elb_listener_rules()
+        else:
+            self.current_rules = []
+
+    def _ensure_rules_action_has_arn(self, rules):
+        """
+        If a rule Action has been passed with a Target Group Name instead of ARN, look up the ARN and
+        replace the name.
+
+        :param rules: a list of rule dicts
+        :return: the same list of dicts ensuring that each rule Actions dict has TargetGroupArn key. If a TargetGroupName key exists, it is removed.
+        """
+
+        fixed_rules = []
+        for rule in rules:
+            fixed_actions = []
+            for action in rule['Actions']:
+                if 'TargetGroupName' in action:
+                    action['TargetGroupArn'] = convert_tg_name_to_arn(self.connection, self.module, action['TargetGroupName'])
+                    del action['TargetGroupName']
+                fixed_actions.append(action)
+            rule['Actions'] = fixed_actions
+            fixed_rules.append(rule)
+
+        return fixed_rules
+
+    def _get_elb_listener_rules(self):
+
+        try:
+            return AWSRetry.jittered_backoff()(self.connection.describe_rules)(ListenerArn=self.current_listener['ListenerArn'])['Rules']
+        except (BotoCoreError, ClientError) as e:
+            self.module.fail_json_aws(e)
+
+    def _compare_condition(self, current_conditions, condition):
+        """
+
+        :param current_conditions:
+        :param condition:
+        :return:
+        """
+
+        condition_found = False
+
+        for current_condition in current_conditions:
+            # host-header: current_condition includes both HostHeaderConfig AND Values while
+            # condition can be defined with either HostHeaderConfig OR Values. Only use
+            # HostHeaderConfig['Values'] comparison if both conditions include HostHeaderConfig.
+            if current_condition.get('HostHeaderConfig') and condition.get('HostHeaderConfig'):
+                if (current_condition['Field'] == condition['Field'] and
+                        sorted(current_condition['HostHeaderConfig']['Values']) == sorted(condition['HostHeaderConfig']['Values'])):
+                    condition_found = True
+                    break
+            elif current_condition.get('HttpHeaderConfig'):
+                if (current_condition['Field'] == condition['Field'] and
+                        sorted(current_condition['HttpHeaderConfig']['Values']) == sorted(condition['HttpHeaderConfig']['Values']) and
+                        current_condition['HttpHeaderConfig']['HttpHeaderName'] == condition['HttpHeaderConfig']['HttpHeaderName']):
+                    condition_found = True
+                    break
+            elif current_condition.get('HttpRequestMethodConfig'):
+                if (current_condition['Field'] == condition['Field'] and
+                        sorted(current_condition['HttpRequestMethodConfig']['Values']) == sorted(condition['HttpRequestMethodConfig']['Values'])):
+                    condition_found = True
+                    break
+            # path-pattern: current_condition includes both PathPatternConfig AND Values while
+            # condition can be defined with either PathPatternConfig OR Values. Only use
+            # PathPatternConfig['Values'] comparison if both conditions include PathPatternConfig.
+            elif current_condition.get('PathPatternConfig') and condition.get('PathPatternConfig'):
+                if (current_condition['Field'] == condition['Field'] and
+                        sorted(current_condition['PathPatternConfig']['Values']) == sorted(condition['PathPatternConfig']['Values'])):
+                    condition_found = True
+                    break
+            elif current_condition.get('QueryStringConfig'):
+                # QueryString Values is not sorted as it is the only list of dicts (not strings).
+                if (current_condition['Field'] == condition['Field'] and
+                        current_condition['QueryStringConfig']['Values'] == condition['QueryStringConfig']['Values']):
+                    condition_found = True
+                    break
+            elif current_condition.get('SourceIpConfig'):
+                if (current_condition['Field'] == condition['Field'] and
+                        sorted(current_condition['SourceIpConfig']['Values']) == sorted(condition['SourceIpConfig']['Values'])):
+                    condition_found = True
+                    break
+            # Not all fields are required to have Values list nested within a *Config dict
+            # e.g. fields host-header/path-pattern can directly list Values
+            elif current_condition['Field'] == condition['Field'] and sorted(current_condition['Values']) == sorted(condition['Values']):
+                condition_found = True
+                break
+
+        return condition_found
+
+    def _compare_rule(self, current_rule, new_rule):
+        """
+
+        :return:
+        """
+
+        modified_rule = {}
+
+        # Priority
+        if int(current_rule['Priority']) != int(new_rule['Priority']):
+            modified_rule['Priority'] = new_rule['Priority']
+
+        # Actions
+
+        # If the lengths of the actions are the same, we'll have to verify that the
+        # contents of those actions are the same
+        if len(current_rule['Actions']) == len(new_rule['Actions']):
+            # if actions have just one element, compare the contents and then update if
+            # they're different
+            current_actions_sorted = _sort_actions(current_rule['Actions'])
+            new_actions_sorted = _sort_actions(new_rule['Actions'])
+
+            new_current_actions_sorted = [_append_use_existing_client_secret(i) for i in current_actions_sorted]
+            new_actions_sorted_no_secret = [_prune_secret(i) for i in new_actions_sorted]
+
+            if [_prune_ForwardConfig(i) for i in new_current_actions_sorted] != [_prune_ForwardConfig(i) for i in new_actions_sorted_no_secret]:
+                modified_rule['Actions'] = new_rule['Actions']
+        # If the action lengths are different, then replace with the new actions
+        else:
+            modified_rule['Actions'] = new_rule['Actions']
+
+        # Conditions
+        modified_conditions = []
+        for condition in new_rule['Conditions']:
+            if not self._compare_condition(current_rule['Conditions'], condition):
+                modified_conditions.append(condition)
+
+        if modified_conditions:
+            modified_rule['Conditions'] = modified_conditions
+
+        return modified_rule
+
+    def compare_rules(self):
+        """
+
+        :return:
+        """
+
+        rules_to_modify = []
+        rules_to_delete = []
+        rules_to_add = deepcopy(self.rules)
+
+        for current_rule in self.current_rules:
+            current_rule_passed_to_module = False
+            for new_rule in self.rules[:]:
+                if current_rule['Priority'] == str(new_rule['Priority']):
+                    current_rule_passed_to_module = True
+                    # Remove what we match so that what is left can be marked as 'to be added'
+                    rules_to_add.remove(new_rule)
+                    modified_rule = self._compare_rule(current_rule, new_rule)
+                    if modified_rule:
+                        modified_rule['Priority'] = int(current_rule['Priority'])
+                        modified_rule['RuleArn'] = current_rule['RuleArn']
+                        modified_rule['Actions'] = new_rule['Actions']
+                        modified_rule['Conditions'] = new_rule['Conditions']
+                        rules_to_modify.append(modified_rule)
+                    break
+
+            # If the current rule was not matched against passed rules, mark for
removal
+            if not current_rule_passed_to_module and not current_rule['IsDefault']:
+                rules_to_delete.append(current_rule['RuleArn'])
+
+        return rules_to_add, rules_to_modify, rules_to_delete
+
+
+class ELBListenerRule(object):
+
+    def __init__(self, connection, module, rule, listener_arn):
+
+        self.connection = connection
+        self.module = module
+        self.rule = rule
+        self.listener_arn = listener_arn
+        self.changed = False
+
+    def create(self):
+        """
+        Create a listener rule
+
+        :return:
+        """
+
+        try:
+            self.rule['ListenerArn'] = self.listener_arn
+            self.rule['Priority'] = int(self.rule['Priority'])
+            AWSRetry.jittered_backoff()(self.connection.create_rule)(**self.rule)
+        except (BotoCoreError, ClientError) as e:
+            self.module.fail_json_aws(e)
+
+        self.changed = True
+
+    def modify(self):
+        """
+        Modify a listener rule
+
+        :return:
+        """
+
+        try:
+            del self.rule['Priority']
+            AWSRetry.jittered_backoff()(self.connection.modify_rule)(**self.rule)
+        except (BotoCoreError, ClientError) as e:
+            self.module.fail_json_aws(e)
+
+        self.changed = True
+
+    def delete(self):
+        """
+        Delete a listener rule
+
+        :return:
+        """
+
+        try:
+            AWSRetry.jittered_backoff()(self.connection.delete_rule)(RuleArn=self.rule['RuleArn'])
+        except (BotoCoreError, ClientError) as e:
+            self.module.fail_json_aws(e)
+
+        self.changed = True
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/iam.py b/ansible_collections/amazon/aws/plugins/module_utils/iam.py
new file mode 100644
index 000000000..6ebed23ba
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/iam.py
@@ -0,0 +1,75 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+    import botocore
+except ImportError:
+    pass
+
+from ansible.module_utils._text import to_native
+
+from .ec2 import AWSRetry
+from .core import is_boto3_error_code
+from .core import parse_aws_arn
+
+
+def get_aws_account_id(module):
+    """ Given an AnsibleAWSModule instance, get the active AWS account ID
+    """
+
+    return get_aws_account_info(module)[0]
+
+
+def get_aws_account_info(module):
+    """Given an AnsibleAWSModule instance, return the account information
+    (account id and partition) we are currently working on
+
+    get_account_info tries to find out the account that we are working
+    on. It's not guaranteed that this will be easy so we try in
+    several different ways. Giving either IAM or STS privileges to
+    the account should be enough to permit this.
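+    The lookups below are attempted in order; the first one that succeeds wins.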
+
+    Tries:
+    - sts:GetCallerIdentity
+    - iam:GetUser
+    - sts:DecodeAuthorizationMessage
+    """
+    account_id = None
+    partition = None
+    try:
+        sts_client = module.client('sts', retry_decorator=AWSRetry.jittered_backoff())
+        caller_id = sts_client.get_caller_identity(aws_retry=True)
+        account_id = caller_id.get('Account')
+        partition = caller_id.get('Arn').split(':')[1]
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError):
+        try:
+            iam_client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
+            _arn, partition, _service, _reg, account_id, _resource = iam_client.get_user(aws_retry=True)['User']['Arn'].split(':')
+        except is_boto3_error_code('AccessDenied') as e:
+            try:
+                except_msg = to_native(e.message)
+            except AttributeError:
+                except_msg = to_native(e)
+            result = parse_aws_arn(except_msg)
+            if result is None or result['service'] != 'iam':
+                module.fail_json_aws(
+                    e,
+                    msg="Failed to get AWS account information. Try allowing sts:GetCallerIdentity or iam:GetUser permissions."
+                )
+            account_id = result.get('account_id')
+            partition = result.get('partition')
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
+            module.fail_json_aws(
+                e,
+                msg="Failed to get AWS account information. Try allowing sts:GetCallerIdentity or iam:GetUser permissions."
+            )
+
+    if account_id is None or partition is None:
+        module.fail_json(
+            msg="Failed to get AWS account information. Try allowing sts:GetCallerIdentity or iam:GetUser permissions."
+        )
+
+    return (to_native(account_id), to_native(partition))
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/modules.py b/ansible_collections/amazon/aws/plugins/module_utils/modules.py
new file mode 100644
index 000000000..7d4ba717f
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/modules.py
@@ -0,0 +1,447 @@
+#
+# Copyright 2017 Michael De La Rue | Ansible
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+"""This module adds shared support for generic Amazon AWS modules
+
+In order to use this module, include it as part of a custom
+module as shown below.
+
+    from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+    module = AnsibleAWSModule(argument_spec=dictionary, supports_check_mode=boolean,
+                              mutually_exclusive=list1, required_together=list2)
+
+The 'AnsibleAWSModule' module provides similar, but more restricted,
+interfaces to the normal Ansible module. It also includes the
+additional methods for connecting to AWS using the standard module arguments
+
+    m.resource('lambda') # - get an AWS connection as a boto3 resource.
+
+or
+
+    m.client('sts') # - get an AWS connection as a boto3 client.
+
+To make use of AWSRetry easier, it can now be wrapped around any call from a
+module-created client.
To add retries to a client, create a client:
+
+    m.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+
+Any calls from that client can be made to use the decorator passed at call-time
+using the `aws_retry` argument. By default, no retries are used.
+
+    ec2 = m.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+    ec2.describe_instances(InstanceIds=['i-123456789'], aws_retry=True)
+
+The call will be retried the specified number of times, so the calling functions
+don't need to be wrapped in the backoff decorator.
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from functools import wraps
+import logging
+import os
+import re
+import traceback
+
+
+try:
+    from cStringIO import StringIO
+except ImportError:
+    # Python 3
+    from io import StringIO
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.basic import env_fallback
+from ansible.module_utils.basic import missing_required_lib
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils._text import to_native
+
+from .botocore import HAS_BOTO3
+from .botocore import boto3_conn
+from .botocore import get_aws_connection_info
+from .botocore import get_aws_region
+from .botocore import gather_sdk_versions
+
+from .version import LooseVersion
+
+# Currently only AnsibleAWSModule. However we have a lot of Copy and Paste code
+# for Inventory and Lookup modules which we should refactor
+
+
+class AnsibleAWSModule(object):
+    """An ansible module class for AWS modules
+
+    AnsibleAWSModule provides a class for building modules which
+    connect to Amazon Web Services. The interface is currently more
+    restricted than the basic module class with the aim that later the
+    basic module class can be reduced. If you find that any key
+    feature is missing please contact the author/Ansible AWS team
+    (available on #ansible-aws on IRC) to request the additional
+    features needed.
+    """
+    default_settings = {
+        "default_args": True,
+        "check_boto3": True,
+        "auto_retry": True,
+        "module_class": AnsibleModule
+    }
+
+    def __init__(self, **kwargs):
+        local_settings = {}
+        for key in AnsibleAWSModule.default_settings:
+            try:
+                local_settings[key] = kwargs.pop(key)
+            except KeyError:
+                local_settings[key] = AnsibleAWSModule.default_settings[key]
+        self.settings = local_settings
+
+        if local_settings["default_args"]:
+            argument_spec_full = aws_argument_spec()
+            try:
+                argument_spec_full.update(kwargs["argument_spec"])
+            except (TypeError, NameError):
+                pass
+            kwargs["argument_spec"] = argument_spec_full
+
+        self._module = AnsibleAWSModule.default_settings["module_class"](**kwargs)
+
+        if local_settings["check_boto3"]:
+            if not HAS_BOTO3:
+                self._module.fail_json(
+                    msg=missing_required_lib('botocore and boto3'))
+            if not self.botocore_at_least('1.21.0'):
+                self.warn('botocore < 1.21.0 is not supported or tested.'
+                          ' Some features may not work.')
+            if not self.boto3_at_least("1.18.0"):
+                self.warn('boto3 < 1.18.0 is not supported or tested.'
+                          ' Some features may not work.')
+
+        deprecated_vars = {'EC2_REGION', 'EC2_SECURITY_TOKEN', 'EC2_SECRET_KEY', 'EC2_ACCESS_KEY',
+                           'EC2_URL', 'S3_URL'}
+        if deprecated_vars.intersection(set(os.environ.keys())):
+            self._module.deprecate(
+                "Support for the 'EC2_REGION', 'EC2_ACCESS_KEY', 'EC2_SECRET_KEY', "
+                "'EC2_SECURITY_TOKEN', 'EC2_URL', and 'S3_URL' environment "
+                "variables has been deprecated.
" + "These variables are currently used for all AWS services which can " + "cause confusion. We recomend using the relevant module " + "parameters or alternatively the 'AWS_REGION', 'AWS_ACCESS_KEY_ID', " + "'AWS_SECRET_ACCESS_KEY', 'AWS_SESSION_TOKEN', and 'AWS_URL' " + "environment variables can be used instead.", + date='2024-12-01', collection_name='amazon.aws', + ) + + if 'AWS_SECURITY_TOKEN' in os.environ.keys(): + self._module.deprecate( + "Support for the 'AWS_SECURITY_TOKEN' environment variable " + "has been deprecated. This variable was based on the original " + "boto SDK, support for which has now been dropped. " + "We recommend using the 'session_token' module parameter " + "or alternatively the 'AWS_SESSION_TOKEN' environment variable " + "can be used instead.", + date='2024-12-01', collection_name='amazon.aws', + ) + + self.check_mode = self._module.check_mode + self._diff = self._module._diff + self._name = self._module._name + + self._botocore_endpoint_log_stream = StringIO() + self.logger = None + if self.params.get('debug_botocore_endpoint_logs'): + self.logger = logging.getLogger('botocore.endpoint') + self.logger.setLevel(logging.DEBUG) + self.logger.addHandler(logging.StreamHandler(self._botocore_endpoint_log_stream)) + + @property + def params(self): + return self._module.params + + def _get_resource_action_list(self): + actions = [] + for ln in self._botocore_endpoint_log_stream.getvalue().split('\n'): + ln = ln.strip() + if not ln: + continue + found_operational_request = re.search(r"OperationModel\(name=.*?\)", ln) + if found_operational_request: + operation_request = found_operational_request.group(0)[20:-1] + resource = re.search(r"https://.*?\.", ln).group(0)[8:-1] + actions.append("{0}:{1}".format(resource, operation_request)) + return list(set(actions)) + + def exit_json(self, *args, **kwargs): + if self.params.get('debug_botocore_endpoint_logs'): + kwargs['resource_actions'] = self._get_resource_action_list() + return self._module.exit_json(*args, **kwargs) + + def fail_json(self, *args, **kwargs): + if self.params.get('debug_botocore_endpoint_logs'): + kwargs['resource_actions'] = self._get_resource_action_list() + return self._module.fail_json(*args, **kwargs) + + def debug(self, *args, **kwargs): + return self._module.debug(*args, **kwargs) + + def warn(self, *args, **kwargs): + return self._module.warn(*args, **kwargs) + + def deprecate(self, *args, **kwargs): + return self._module.deprecate(*args, **kwargs) + + def boolean(self, *args, **kwargs): + return self._module.boolean(*args, **kwargs) + + def md5(self, *args, **kwargs): + return self._module.md5(*args, **kwargs) + + def client(self, service, retry_decorator=None): + region, endpoint_url, aws_connect_kwargs = get_aws_connection_info(self, boto3=True) + conn = boto3_conn(self, conn_type='client', resource=service, + region=region, endpoint=endpoint_url, **aws_connect_kwargs) + return conn if retry_decorator is None else _RetryingBotoClientWrapper(conn, retry_decorator) + + def resource(self, service): + region, endpoint_url, aws_connect_kwargs = get_aws_connection_info(self, boto3=True) + return boto3_conn(self, conn_type='resource', resource=service, + region=region, endpoint=endpoint_url, **aws_connect_kwargs) + + @property + def region(self): + return get_aws_region(self, True) + + def fail_json_aws(self, exception, msg=None, **kwargs): + """call fail_json with processed exception + + function for converting exceptions thrown by AWS SDK modules, + botocore, boto3 and boto, into nice error 
messages. + """ + last_traceback = traceback.format_exc() + + # to_native is trusted to handle exceptions that str() could + # convert to text. + try: + except_msg = to_native(exception.message) + except AttributeError: + except_msg = to_native(exception) + + if msg is not None: + message = '{0}: {1}'.format(msg, except_msg) + else: + message = except_msg + + try: + response = exception.response + except AttributeError: + response = None + + failure = dict( + msg=message, + exception=last_traceback, + **self._gather_versions() + ) + + failure.update(kwargs) + + if response is not None: + failure.update(**camel_dict_to_snake_dict(response)) + + self.fail_json(**failure) + + def _gather_versions(self): + """Gather AWS SDK (boto3 and botocore) dependency versions + + Returns {'boto3_version': str, 'botocore_version': str} + Returns {} if either is not installed + """ + return gather_sdk_versions() + + def require_boto3_at_least(self, desired, **kwargs): + """Check if the available boto3 version is greater than or equal to a desired version. + + calls fail_json() when the boto3 version is less than the desired + version + + Usage: + module.require_boto3_at_least("1.2.3", reason="to update tags") + module.require_boto3_at_least("1.1.1") + + :param desired the minimum desired version + :param reason why the version is required (optional) + """ + if not self.boto3_at_least(desired): + self._module.fail_json( + msg=missing_required_lib('boto3>={0}'.format(desired), **kwargs), + **self._gather_versions() + ) + + def boto3_at_least(self, desired): + """Check if the available boto3 version is greater than or equal to a desired version. + + Usage: + if module.params.get('assign_ipv6_address') and not module.boto3_at_least('1.4.4'): + # conditionally fail on old boto3 versions if a specific feature is not supported + module.fail_json(msg="Boto3 can't deal with EC2 IPv6 addresses before version 1.4.4.") + """ + existing = self._gather_versions() + return LooseVersion(existing['boto3_version']) >= LooseVersion(desired) + + def require_botocore_at_least(self, desired, **kwargs): + """Check if the available botocore version is greater than or equal to a desired version. + + calls fail_json() when the botocore version is less than the desired + version + + Usage: + module.require_botocore_at_least("1.2.3", reason="to update tags") + module.require_botocore_at_least("1.1.1") + + :param desired the minimum desired version + :param reason why the version is required (optional) + """ + if not self.botocore_at_least(desired): + self._module.fail_json( + msg=missing_required_lib('botocore>={0}'.format(desired), **kwargs), + **self._gather_versions() + ) + + def botocore_at_least(self, desired): + """Check if the available botocore version is greater than or equal to a desired version. + + Usage: + if not module.botocore_at_least('1.2.3'): + module.fail_json(msg='The Serverless Elastic Load Compute Service is not in botocore before v1.2.3') + if not module.botocore_at_least('1.5.3'): + module.warn('Botocore did not include waiters for Service X before 1.5.3. 
' + 'To wait until Service X resources are fully available, update botocore.') + """ + existing = self._gather_versions() + return LooseVersion(existing['botocore_version']) >= LooseVersion(desired) + + +class _RetryingBotoClientWrapper(object): + __never_wait = ( + 'get_paginator', 'can_paginate', + 'get_waiter', 'generate_presigned_url', + ) + + def __init__(self, client, retry): + self.client = client + self.retry = retry + + def _create_optional_retry_wrapper_function(self, unwrapped): + retrying_wrapper = self.retry(unwrapped) + + @wraps(unwrapped) + def deciding_wrapper(aws_retry=False, *args, **kwargs): + if aws_retry: + return retrying_wrapper(*args, **kwargs) + else: + return unwrapped(*args, **kwargs) + return deciding_wrapper + + def __getattr__(self, name): + unwrapped = getattr(self.client, name) + if name in self.__never_wait: + return unwrapped + elif callable(unwrapped): + wrapped = self._create_optional_retry_wrapper_function(unwrapped) + setattr(self, name, wrapped) + return wrapped + else: + return unwrapped + + +def _aws_common_argument_spec(): + """ + This does not include 'region' as some AWS APIs don't require a + region. However, it's not recommended to do this as it means module_defaults + can't include the region parameter. + """ + return dict( + access_key=dict( + aliases=['aws_access_key_id', 'aws_access_key', 'ec2_access_key'], + deprecated_aliases=[ + dict(name='ec2_access_key', date='2024-12-01', collection_name='amazon.aws'), + ], + no_log=False, + ), + secret_key=dict( + aliases=['aws_secret_access_key', 'aws_secret_key', 'ec2_secret_key'], + deprecated_aliases=[ + dict(name='ec2_secret_key', date='2024-12-01', collection_name='amazon.aws'), + ], + no_log=True, + ), + session_token=dict( + aliases=['aws_session_token', 'security_token', 'access_token', 'aws_security_token'], + deprecated_aliases=[ + dict(name='access_token', date='2024-12-01', collection_name='amazon.aws'), + dict(name='security_token', date='2024-12-01', collection_name='amazon.aws'), + dict(name='aws_security_token', date='2024-12-01', collection_name='amazon.aws'), + ], + no_log=True, + ), + profile=dict( + aliases=['aws_profile'], + ), + + endpoint_url=dict( + aliases=['aws_endpoint_url', 'ec2_url', 's3_url'], + deprecated_aliases=[ + dict(name='ec2_url', date='2024-12-01', collection_name='amazon.aws'), + dict(name='s3_url', date='2024-12-01', collection_name='amazon.aws'), + ], + fallback=(env_fallback, ['AWS_URL', 'EC2_URL', 'S3_URL']), + ), + validate_certs=dict( + type='bool', + default=True, + ), + aws_ca_bundle=dict( + type='path', + fallback=(env_fallback, ['AWS_CA_BUNDLE']), + ), + aws_config=dict( + type='dict', + ), + debug_botocore_endpoint_logs=dict( + type='bool', + default=False, + fallback=(env_fallback, ['ANSIBLE_DEBUG_BOTOCORE_LOGS']), + ), + ) + + +def aws_argument_spec(): + """ + Returns a dictionary containing the argument_spec common to all AWS modules. 
+ """ + region_spec = dict( + region=dict( + aliases=['aws_region', 'ec2_region'], + deprecated_aliases=[ + dict(name='ec2_region', date='2024-12-01', collection_name='amazon.aws'), + ], + fallback=(env_fallback, ['AWS_REGION', 'AWS_DEFAULT_REGION', 'EC2_REGION']), + ), + ) + spec = _aws_common_argument_spec() + spec.update(region_spec) + return spec diff --git a/ansible_collections/amazon/aws/plugins/module_utils/policy.py b/ansible_collections/amazon/aws/plugins/module_utils/policy.py new file mode 100644 index 000000000..4aeabd5f2 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/module_utils/policy.py @@ -0,0 +1,179 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c), Michael DeHaan , 2012-2013 +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from functools import cmp_to_key + +from ansible.module_utils._text import to_text +from ansible.module_utils.six import binary_type +from ansible.module_utils.six import string_types + + +def _hashable_policy(policy, policy_list): + """ + Takes a policy and returns a list, the contents of which are all hashable and sorted. 
+ Example input policy: + {'Version': '2012-10-17', + 'Statement': [{'Action': 's3:PutObjectAcl', + 'Sid': 'AddCannedAcl2', + 'Resource': 'arn:aws:s3:::test_policy/*', + 'Effect': 'Allow', + 'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']} + }]} + Returned value: + [('Statement', ((('Action', ('s3:PutObjectAcl',)), + ('Effect', ('Allow',)), + ('Principal', ('AWS', (('arn:aws:iam::XXXXXXXXXXXX:user/username1',), ('arn:aws:iam::XXXXXXXXXXXX:user/username2',)))), + ('Resource', ('arn:aws:s3:::test_policy/*',)), ('Sid', ('AddCannedAcl2',)))), + ('Version', ('2012-10-17',)))] + + """ + # Amazon will automatically convert bool and int to strings for us + if isinstance(policy, bool): + return tuple([str(policy).lower()]) + elif isinstance(policy, int): + return tuple([str(policy)]) + + if isinstance(policy, list): + for each in policy: + tupleified = _hashable_policy(each, []) + if isinstance(tupleified, list): + tupleified = tuple(tupleified) + policy_list.append(tupleified) + elif isinstance(policy, string_types) or isinstance(policy, binary_type): + policy = to_text(policy) + # convert root account ARNs to just account IDs + if policy.startswith('arn:aws:iam::') and policy.endswith(':root'): + policy = policy.split(':')[4] + return [policy] + elif isinstance(policy, dict): + sorted_keys = list(policy.keys()) + sorted_keys.sort() + for key in sorted_keys: + element = policy[key] + # Special case defined in + # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html + if key in ["NotPrincipal", "Principal"] and policy[key] == "*": + element = {"AWS": "*"} + tupleified = _hashable_policy(element, []) + if isinstance(tupleified, list): + tupleified = tuple(tupleified) + policy_list.append((key, tupleified)) + + # ensure we aren't returning deeply nested structures of length 1 + if len(policy_list) == 1 and isinstance(policy_list[0], tuple): + policy_list = policy_list[0] + if isinstance(policy_list, list): + policy_list.sort(key=cmp_to_key(_py3cmp)) + return policy_list + + +def _py3cmp(a, b): + """ Python 2 can sort lists of mixed types. Strings < tuples. Without this function this fails on Python 3.""" + try: + if a > b: + return 1 + elif a < b: + return -1 + else: + return 0 + except TypeError as e: + # check to see if they're tuple-string + # always say strings are less than tuples (to maintain compatibility with python2) + str_ind = to_text(e).find('str') + tup_ind = to_text(e).find('tuple') + if -1 not in (str_ind, tup_ind): + if str_ind < tup_ind: + return -1 + elif tup_ind < str_ind: + return 1 + raise + + +def compare_policies(current_policy, new_policy, default_version="2008-10-17"): + """ Compares the existing policy and the updated policy + Returns True if there is a difference between policies. 
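+
+    Illustrative usage (a sketch, not part of the upstream docstring; the
+    policy variables here are hypothetical):
+
+        if compare_policies(current_policy, desired_policy):
+            # the documents differ, so the module would apply desired_policy
+            changed = True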
+ """ + # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_version.html + if default_version: + if isinstance(current_policy, dict): + current_policy = current_policy.copy() + current_policy.setdefault("Version", default_version) + if isinstance(new_policy, dict): + new_policy = new_policy.copy() + new_policy.setdefault("Version", default_version) + + return set(_hashable_policy(new_policy, [])) != set(_hashable_policy(current_policy, [])) + + +def sort_json_policy_dict(policy_dict): + + """ Sort any lists in an IAM JSON policy so that comparison of two policies with identical values but + different orders will return true + Args: + policy_dict (dict): Dict representing IAM JSON policy. + Basic Usage: + >>> my_iam_policy = {'Principle': {'AWS':["31","7","14","101"]} + >>> sort_json_policy_dict(my_iam_policy) + Returns: + Dict: Will return a copy of the policy as a Dict but any List will be sorted + { + 'Principle': { + 'AWS': [ '7', '14', '31', '101' ] + } + } + """ + + def value_is_list(my_list): + + checked_list = [] + for item in my_list: + if isinstance(item, dict): + checked_list.append(sort_json_policy_dict(item)) + elif isinstance(item, list): + checked_list.append(value_is_list(item)) + else: + checked_list.append(item) + + # Sort list. If it's a list of dictionaries, sort by tuple of key-value + # pairs, since Python 3 doesn't allow comparisons such as `<` between dictionaries. + checked_list.sort(key=lambda x: sorted(x.items()) if isinstance(x, dict) else x) + return checked_list + + ordered_policy_dict = {} + for key, value in policy_dict.items(): + if isinstance(value, dict): + ordered_policy_dict[key] = sort_json_policy_dict(value) + elif isinstance(value, list): + ordered_policy_dict[key] = value_is_list(value) + else: + ordered_policy_dict[key] = value + + return ordered_policy_dict diff --git a/ansible_collections/amazon/aws/plugins/module_utils/rds.py b/ansible_collections/amazon/aws/plugins/module_utils/rds.py new file mode 100644 index 000000000..8b5bcb67c --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/module_utils/rds.py @@ -0,0 +1,390 @@ +# Copyright: (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from collections import namedtuple +from time import sleep + +try: + from botocore.exceptions import BotoCoreError, ClientError, WaiterError +except ImportError: + pass + +from ansible.module_utils._text import to_text +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from .ec2 import AWSRetry +from .ec2 import ansible_dict_to_boto3_tag_list +from .ec2 import boto3_tag_list_to_ansible_dict +from .ec2 import compare_aws_tags +from .waiters import get_waiter + +Boto3ClientMethod = namedtuple('Boto3ClientMethod', ['name', 'waiter', 'operation_description', 'resource', 'retry_codes']) +# Whitelist boto3 client methods for cluster and instance resources +cluster_method_names = [ + 'create_db_cluster', 'restore_db_cluster_from_snapshot', 'restore_db_cluster_from_s3', + 'restore_db_cluster_to_point_in_time', 'modify_db_cluster', 'delete_db_cluster', 'add_tags_to_resource', + 'remove_tags_from_resource', 'list_tags_for_resource', 'promote_read_replica_db_cluster' +] +instance_method_names = [ + 'create_db_instance', 'restore_db_instance_to_point_in_time', 'restore_db_instance_from_s3', + 'restore_db_instance_from_db_snapshot', 
'create_db_instance_read_replica', 'modify_db_instance', + 'delete_db_instance', 'add_tags_to_resource', 'remove_tags_from_resource', 'list_tags_for_resource', + 'promote_read_replica', 'stop_db_instance', 'start_db_instance', 'reboot_db_instance', 'add_role_to_db_instance', + 'remove_role_from_db_instance' +] + +cluster_snapshot_method_names = [ + 'create_db_cluster_snapshot', 'delete_db_cluster_snapshot', 'add_tags_to_resource', 'remove_tags_from_resource', + 'list_tags_for_resource', 'copy_db_cluster_snapshot' +] + +instance_snapshot_method_names = [ + 'create_db_snapshot', 'delete_db_snapshot', 'add_tags_to_resource', 'remove_tags_from_resource', + 'copy_db_snapshot', 'list_tags_for_resource' +] + + +def get_rds_method_attribute(method_name, module): + ''' + Returns rds attributes of the specified method. + + Parameters: + method_name (str): RDS method to call + module: AnsibleAWSModule + + Returns: + Boto3ClientMethod (dict): + name (str): Name of method + waiter (str): Name of waiter associated with given method + operation_description (str): Description of method + resource (str): Type of resource this method applies to + One of ['instance', 'cluster', 'instance_snapshot', 'cluster_snapshot'] + retry_codes (list): List of extra error codes to retry on + + Raises: + NotImplementedError if wait is True but no waiter can be found for specified method + ''' + waiter = '' + readable_op = method_name.replace('_', ' ').replace('db', 'DB') + resource = '' + retry_codes = [] + if method_name in cluster_method_names and 'new_db_cluster_identifier' in module.params: + resource = 'cluster' + if method_name == 'delete_db_cluster': + waiter = 'cluster_deleted' + else: + waiter = 'cluster_available' + # Handle retry codes + if method_name == 'restore_db_cluster_from_snapshot': + retry_codes = ['InvalidDBClusterSnapshotState'] + else: + retry_codes = ['InvalidDBClusterState'] + elif method_name in instance_method_names and 'new_db_instance_identifier' in module.params: + resource = 'instance' + if method_name == 'delete_db_instance': + waiter = 'db_instance_deleted' + elif method_name == 'stop_db_instance': + waiter = 'db_instance_stopped' + elif method_name == 'add_role_to_db_instance': + waiter = 'role_associated' + elif method_name == 'remove_role_from_db_instance': + waiter = 'role_disassociated' + elif method_name == 'promote_read_replica': + waiter = 'read_replica_promoted' + else: + waiter = 'db_instance_available' + # Handle retry codes + if method_name == 'restore_db_instance_from_db_snapshot': + retry_codes = ['InvalidDBSnapshotState'] + else: + retry_codes = ['InvalidDBInstanceState', 'InvalidDBSecurityGroupState'] + elif method_name in cluster_snapshot_method_names and 'db_cluster_snapshot_identifier' in module.params: + resource = 'cluster_snapshot' + if method_name == 'delete_db_cluster_snapshot': + waiter = 'db_cluster_snapshot_deleted' + retry_codes = ['InvalidDBClusterSnapshotState'] + elif method_name == 'create_db_cluster_snapshot': + waiter = 'db_cluster_snapshot_available' + retry_codes = ['InvalidDBClusterState'] + else: + # Tagging + waiter = 'db_cluster_snapshot_available' + retry_codes = ['InvalidDBClusterSnapshotState'] + elif method_name in instance_snapshot_method_names and 'db_snapshot_identifier' in module.params: + resource = 'instance_snapshot' + if method_name == 'delete_db_snapshot': + waiter = 'db_snapshot_deleted' + retry_codes = ['InvalidDBSnapshotState'] + elif method_name == 'create_db_snapshot': + waiter = 'db_snapshot_available' + retry_codes = 
['InvalidDBInstanceState'] + else: + # Tagging + waiter = 'db_snapshot_available' + retry_codes = ['InvalidDBSnapshotState'] + else: + if module.params.get('wait'): + raise NotImplementedError("method {0} hasn't been added to the list of accepted methods to use a waiter in module_utils/rds.py".format(method_name)) + + return Boto3ClientMethod(name=method_name, waiter=waiter, operation_description=readable_op, + resource=resource, retry_codes=retry_codes) + + +def get_final_identifier(method_name, module): + updated_identifier = None + apply_immediately = module.params.get('apply_immediately') + resource = get_rds_method_attribute(method_name, module).resource + if resource == 'cluster': + identifier = module.params['db_cluster_identifier'] + updated_identifier = module.params['new_db_cluster_identifier'] + elif resource == 'instance': + identifier = module.params['db_instance_identifier'] + updated_identifier = module.params['new_db_instance_identifier'] + elif resource == 'instance_snapshot': + identifier = module.params['db_snapshot_identifier'] + elif resource == 'cluster_snapshot': + identifier = module.params['db_cluster_snapshot_identifier'] + else: + raise NotImplementedError("method {0} hasn't been added to the list of accepted methods in module_utils/rds.py".format(method_name)) + if not module.check_mode and updated_identifier and apply_immediately: + identifier = updated_identifier + return identifier + + +def handle_errors(module, exception, method_name, parameters): + + if not isinstance(exception, ClientError): + module.fail_json_aws(exception, msg="Unexpected failure for method {0} with parameters {1}".format(method_name, parameters)) + + changed = True + error_code = exception.response['Error']['Code'] + if ( + method_name in ('modify_db_instance', 'modify_db_cluster') and + error_code == 'InvalidParameterCombination' + ): + if 'No modifications were requested' in to_text(exception): + changed = False + elif 'ModifyDbCluster API' in to_text(exception): + module.fail_json_aws(exception, msg='It appears you are trying to modify attributes that are managed at the cluster level. 
Please see rds_cluster') + else: + module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description)) + elif method_name == 'promote_read_replica' and error_code == 'InvalidDBInstanceState': + if 'DB Instance is not a read replica' in to_text(exception): + changed = False + else: + module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description)) + elif method_name == 'promote_read_replica_db_cluster' and error_code == 'InvalidDBClusterStateFault': + if 'DB Cluster that is not a read replica' in to_text(exception): + changed = False + else: + module.fail_json_aws( + exception, + msg="Unable to {0}".format(get_rds_method_attribute(method_name, module).operation_description), + ) + elif method_name == "create_db_cluster" and error_code == "InvalidParameterValue": + accepted_engines = ["aurora", "aurora-mysql", "aurora-postgresql", "mysql", "postgres"] + if parameters.get("Engine") not in accepted_engines: + module.fail_json_aws( + exception, msg="DB engine {0} should be one of {1}".format(parameters.get("Engine"), accepted_engines) + ) + else: + module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description)) + else: + module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description)) + + return changed + + +def call_method(client, module, method_name, parameters): + result = {} + changed = True + if not module.check_mode: + wait = module.params.get('wait') + retry_codes = get_rds_method_attribute(method_name, module).retry_codes + method = getattr(client, method_name) + try: + result = AWSRetry.jittered_backoff(catch_extra_error_codes=retry_codes)(method)(**parameters) + except (BotoCoreError, ClientError) as e: + changed = handle_errors(module, e, method_name, parameters) + + if wait and changed: + identifier = get_final_identifier(method_name, module) + wait_for_status(client, module, identifier, method_name) + return result, changed + + +def wait_for_instance_status(client, module, db_instance_id, waiter_name): + def wait(client, db_instance_id, waiter_name): + try: + waiter = client.get_waiter(waiter_name) + except ValueError: + # using a waiter in module_utils/waiters.py + waiter = get_waiter(client, waiter_name) + waiter.wait(WaiterConfig={'Delay': 60, 'MaxAttempts': 60}, DBInstanceIdentifier=db_instance_id) + + waiter_expected_status = { + 'db_instance_deleted': 'deleted', + 'db_instance_stopped': 'stopped', + } + expected_status = waiter_expected_status.get(waiter_name, 'available') + for _wait_attempts in range(0, 10): + try: + wait(client, db_instance_id, waiter_name) + break + except WaiterError as e: + # Instance may be renamed and AWSRetry doesn't handle WaiterError + if e.last_response.get('Error', {}).get('Code') == 'DBInstanceNotFound': + sleep(10) + continue + module.fail_json_aws(e, msg='Error while waiting for DB instance {0} to be {1}'.format(db_instance_id, expected_status)) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg='Unexpected error while waiting for DB instance {0} to be {1}'.format( + db_instance_id, expected_status) + ) + + +def wait_for_cluster_status(client, module, db_cluster_id, waiter_name): + try: + get_waiter(client, waiter_name).wait(DBClusterIdentifier=db_cluster_id) + except WaiterError as e: + if waiter_name == 'cluster_deleted': + msg = "Failed to wait for DB cluster {0} to be 
deleted".format(db_cluster_id) + else: + msg = "Failed to wait for DB cluster {0} to be available".format(db_cluster_id) + module.fail_json_aws(e, msg=msg) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Failed with an unexpected error while waiting for the DB cluster {0}".format(db_cluster_id)) + + +def wait_for_instance_snapshot_status(client, module, db_snapshot_id, waiter_name): + try: + client.get_waiter(waiter_name).wait(DBSnapshotIdentifier=db_snapshot_id) + except WaiterError as e: + if waiter_name == 'db_snapshot_deleted': + msg = "Failed to wait for DB snapshot {0} to be deleted".format(db_snapshot_id) + else: + msg = "Failed to wait for DB snapshot {0} to be available".format(db_snapshot_id) + module.fail_json_aws(e, msg=msg) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Failed with an unexpected error while waiting for the DB snapshot {0}".format(db_snapshot_id)) + + +def wait_for_cluster_snapshot_status(client, module, db_snapshot_id, waiter_name): + try: + client.get_waiter(waiter_name).wait(DBClusterSnapshotIdentifier=db_snapshot_id) + except WaiterError as e: + if waiter_name == 'db_cluster_snapshot_deleted': + msg = "Failed to wait for DB cluster snapshot {0} to be deleted".format(db_snapshot_id) + else: + msg = "Failed to wait for DB cluster snapshot {0} to be available".format(db_snapshot_id) + module.fail_json_aws(e, msg=msg) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Failed with an unexpected error while waiting for the DB cluster snapshot {0}".format(db_snapshot_id)) + + +def wait_for_status(client, module, identifier, method_name): + rds_method_attributes = get_rds_method_attribute(method_name, module) + waiter_name = rds_method_attributes.waiter + resource = rds_method_attributes.resource + + if resource == 'cluster': + wait_for_cluster_status(client, module, identifier, waiter_name) + elif resource == 'instance': + wait_for_instance_status(client, module, identifier, waiter_name) + elif resource == 'instance_snapshot': + wait_for_instance_snapshot_status(client, module, identifier, waiter_name) + elif resource == 'cluster_snapshot': + wait_for_cluster_snapshot_status(client, module, identifier, waiter_name) + + +def get_tags(client, module, resource_arn): + try: + return boto3_tag_list_to_ansible_dict( + client.list_tags_for_resource(ResourceName=resource_arn)['TagList'] + ) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to describe tags") + + +def arg_spec_to_rds_params(options_dict): + tags = options_dict.pop('tags') + has_processor_features = False + if 'processor_features' in options_dict: + has_processor_features = True + processor_features = options_dict.pop('processor_features') + camel_options = snake_dict_to_camel_dict(options_dict, capitalize_first=True) + for key in list(camel_options.keys()): + for old, new in (('Db', 'DB'), ('Iam', 'IAM'), ('Az', 'AZ')): + if old in key: + camel_options[key.replace(old, new)] = camel_options.pop(key) + camel_options['Tags'] = tags + if has_processor_features: + camel_options['ProcessorFeatures'] = processor_features + return camel_options + + +def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags): + if tags is None: + return False + tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, tags, purge_tags) + changed = bool(tags_to_add or tags_to_remove) + if tags_to_add: + call_method( + client, module, method_name='add_tags_to_resource', + parameters={'ResourceName': 
resource_arn, 'Tags': ansible_dict_to_boto3_tag_list(tags_to_add)}
+        )
+    if tags_to_remove:
+        call_method(
+            client, module, method_name='remove_tags_from_resource',
+            parameters={'ResourceName': resource_arn, 'TagKeys': tags_to_remove}
+        )
+    return changed
+
+
+def compare_iam_roles(existing_roles, target_roles, purge_roles):
+    '''
+    Returns differences between target and existing IAM roles
+
+        Parameters:
+            existing_roles (list): Existing IAM roles
+            target_roles (list): Target IAM roles
+            purge_roles (bool): Remove roles not in target_roles if True
+
+        Returns:
+            roles_to_add (list): List of IAM roles to add
+            roles_to_remove (list): List of IAM roles to remove
+    '''
+    existing_roles = [dict((k, v) for k, v in role.items() if k != 'status') for role in existing_roles]
+    roles_to_add = [role for role in target_roles if role not in existing_roles]
+    roles_to_remove = [role for role in existing_roles if role not in target_roles] if purge_roles else []
+    return roles_to_add, roles_to_remove
+
+
+def update_iam_roles(client, module, instance_id, roles_to_add, roles_to_remove):
+    '''
+    Update a DB instance's associated IAM roles
+
+        Parameters:
+            client: RDS client
+            module: AnsibleAWSModule
+            instance_id (str): DB instance identifier
+            roles_to_add (list): List of IAM roles to add
+            roles_to_remove (list): List of IAM roles to remove
+
+        Returns:
+            changed (bool): True if changes were successfully made to the DB instance's IAM roles; False if not
+    '''
+    # Initialise to False so an empty update is correctly reported as unchanged
+    changed = False
+    for role in roles_to_remove:
+        params = {'DBInstanceIdentifier': instance_id,
+                  'RoleArn': role['role_arn'],
+                  'FeatureName': role['feature_name']}
+        _result, changed = call_method(client, module, method_name='remove_role_from_db_instance', parameters=params)
+    for role in roles_to_add:
+        params = {'DBInstanceIdentifier': instance_id,
+                  'RoleArn': role['role_arn'],
+                  'FeatureName': role['feature_name']}
+        _result, changed = call_method(client, module, method_name='add_role_to_db_instance', parameters=params)
+    return changed
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/retries.py b/ansible_collections/amazon/aws/plugins/module_utils/retries.py
new file mode 100644
index 000000000..1bd214b6b
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/retries.py
@@ -0,0 +1,78 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Michael DeHaan , 2012-2013
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+#    * Redistributions of source code must retain the above copyright
+#      notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above copyright notice,
+#      this list of conditions and the following disclaimer in the documentation
+#      and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+    from botocore.exceptions import ClientError
+    HAS_BOTO3 = True
+except ImportError:
+    HAS_BOTO3 = False
+
+from .cloud import CloudRetry
+
+
+def _botocore_exception_maybe():
+    """
+    Allow for boto3 not being installed when using these utils by wrapping
+    botocore.exceptions instead of assigning from it directly.
+    """
+    if HAS_BOTO3:
+        return ClientError
+    return type(None)
+
+
+class AWSRetry(CloudRetry):
+    base_class = _botocore_exception_maybe()
+
+    @staticmethod
+    def status_code_from_exception(error):
+        return error.response['Error']['Code']
+
+    @staticmethod
+    def found(response_code, catch_extra_error_codes=None):
+        # This list of failures is based on this API Reference
+        # http://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html
+        #
+        # TooManyRequestsException comes from inside botocore when it
+        # does retries; unfortunately, however, it does not retry long
+        # enough to allow some services such as API Gateway to
+        # complete configuration. At the time of writing there is a
+        # botocore/boto3 bug open to fix this.
+        #
+        # https://github.com/boto/boto3/issues/876 (and linked PRs etc)
+        retry_on = [
+            'RequestLimitExceeded', 'Unavailable', 'ServiceUnavailable',
+            'InternalFailure', 'InternalError', 'TooManyRequestsException',
+            'Throttling'
+        ]
+        if catch_extra_error_codes:
+            retry_on.extend(catch_extra_error_codes)
+
+        return response_code in retry_on
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/route53.py b/ansible_collections/amazon/aws/plugins/module_utils/route53.py
new file mode 100644
index 000000000..3e2940a53
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/route53.py
@@ -0,0 +1,64 @@
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+try:
+    import botocore
+except ImportError:
+    pass  # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+
+
+def manage_tags(module, client, resource_type, resource_id, new_tags, purge_tags):
+    if new_tags is None:
+        return False
+
+    old_tags = get_tags(module, client, resource_type, resource_id)
+    tags_to_set, tags_to_delete = compare_aws_tags(old_tags, new_tags, purge_tags=purge_tags)
+
+    change_params = dict()
+    if tags_to_set:
+        change_params['AddTags'] = ansible_dict_to_boto3_tag_list(tags_to_set)
+    if tags_to_delete:
+        change_params['RemoveTagKeys'] = tags_to_delete
+
+    if not change_params:
+        return False
+
+    if module.check_mode:
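+        # Editorial note (not upstream): in check mode we report that tags
+        # would change without calling the Route 53 API, so the computed
+        # change_params are simply discarded.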
+ return True + + try: + client.change_tags_for_resource( + ResourceType=resource_type, + ResourceId=resource_id, + **change_params + ) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg='Failed to update tags on {0}'.format(resource_type), + resource_id=resource_id, change_params=change_params) + return True + + +def get_tags(module, client, resource_type, resource_id): + try: + tagset = client.list_tags_for_resource( + ResourceType=resource_type, + ResourceId=resource_id, + ) + except is_boto3_error_code('NoSuchHealthCheck'): + return {} + except is_boto3_error_code('NoSuchHostedZone'): # pylint: disable=duplicate-except + return {} + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg='Failed to fetch tags on {0}'.format(resource_type), + resource_id=resource_id) + + tags = boto3_tag_list_to_ansible_dict(tagset['ResourceTagSet']['Tags']) + return tags diff --git a/ansible_collections/amazon/aws/plugins/module_utils/s3.py b/ansible_collections/amazon/aws/plugins/module_utils/s3.py new file mode 100644 index 000000000..c13c91f25 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/module_utils/s3.py @@ -0,0 +1,102 @@ +# Copyright (c) 2018 Red Hat, Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +try: + from botocore.exceptions import BotoCoreError, ClientError +except ImportError: + pass # Handled by the calling module + +HAS_MD5 = True +try: + from hashlib import md5 +except ImportError: + try: + from md5 import md5 + except ImportError: + HAS_MD5 = False + + +import string + + +def calculate_etag(module, filename, etag, s3, bucket, obj, version=None): + if not HAS_MD5: + return None + + if '-' in etag: + # Multi-part ETag; a hash of the hashes of each part. + parts = int(etag[1:-1].split('-')[1]) + digests = [] + + s3_kwargs = dict( + Bucket=bucket, + Key=obj, + ) + if version: + s3_kwargs['VersionId'] = version + + with open(filename, 'rb') as f: + for part_num in range(1, parts + 1): + s3_kwargs['PartNumber'] = part_num + try: + head = s3.head_object(**s3_kwargs) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Failed to get head object") + digests.append(md5(f.read(int(head['ContentLength'])))) + + digest_squared = md5(b''.join(m.digest() for m in digests)) + return '"{0}-{1}"'.format(digest_squared.hexdigest(), len(digests)) + else: # Compute the MD5 sum normally + return '"{0}"'.format(module.md5(filename)) + + +def calculate_etag_content(module, content, etag, s3, bucket, obj, version=None): + if not HAS_MD5: + return None + + if '-' in etag: + # Multi-part ETag; a hash of the hashes of each part. 
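+        # Editorial note (not upstream): a multipart ETag looks like
+        # "d41d8cd98f00b204e9800998ecf8427e-3" (hypothetical value), i.e. an
+        # MD5-style hex digest, a dash, and the part count; the part count is
+        # parsed out below and each part is re-hashed from `content` using the
+        # per-part ContentLength reported by head_object().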
+        parts = int(etag[1:-1].split('-')[1])
+        digests = []
+        offset = 0
+
+        s3_kwargs = dict(
+            Bucket=bucket,
+            Key=obj,
+        )
+        if version:
+            s3_kwargs['VersionId'] = version
+
+        for part_num in range(1, parts + 1):
+            s3_kwargs['PartNumber'] = part_num
+            try:
+                head = s3.head_object(**s3_kwargs)
+            except (BotoCoreError, ClientError) as e:
+                module.fail_json_aws(e, msg="Failed to get head object")
+            length = int(head['ContentLength'])
+            digests.append(md5(content[offset:offset + length]))
+            offset += length
+
+        digest_squared = md5(b''.join(m.digest() for m in digests))
+        return '"{0}-{1}"'.format(digest_squared.hexdigest(), len(digests))
+    else:  # Compute the MD5 sum normally
+        return '"{0}"'.format(md5(content).hexdigest())
+
+
+def validate_bucket_name(module, name):
+    # See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html
+    if len(name) < 3:
+        module.fail_json(msg='the length of an S3 bucket name must be at least 3 characters')
+    if len(name) > 63:
+        module.fail_json(msg='the length of an S3 bucket name cannot exceed 63 characters')
+
+    legal_characters = string.ascii_lowercase + ".-" + string.digits
+    illegal_characters = [c for c in name if c not in legal_characters]
+    if illegal_characters:
+        module.fail_json(msg='invalid character(s) found in the bucket name')
+    # The error message promises a check of both ends of the name, so check
+    # the first character as well as the last.
+    if name[0] not in string.ascii_lowercase + string.digits or name[-1] not in string.ascii_lowercase + string.digits:
+        module.fail_json(msg='bucket names must begin and end with a letter or number')
+    return True
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/tagging.py b/ansible_collections/amazon/aws/plugins/module_utils/tagging.py
new file mode 100644
index 000000000..1568e4887
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/tagging.py
@@ -0,0 +1,181 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Michael DeHaan , 2012-2013
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+#    * Redistributions of source code must retain the above copyright
+#      notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above copyright notice,
+#      this list of conditions and the following disclaimer in the documentation
+#      and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
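+
+# Illustrative example of the helpers below (an editorial sketch, not part of
+# the upstream file):
+#
+#   compare_aws_tags({'Name': 'db1', 'env': 'dev'}, {'env': 'prod'}, purge_tags=True)
+#   returns ({'env': 'prod'}, ['Name'])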
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils._text import to_text
+from ansible.module_utils.six import string_types
+
+
+def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name=None, tag_value_key_name=None):
+
+    """ Convert a boto3 list of resource tags to a flat dict of key:value pairs
+    Args:
+        tags_list (list): List of dicts representing AWS tags.
+        tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
+        tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value")
+    Basic Usage:
+        >>> tags_list = [{'Key': 'MyTagKey', 'Value': 'MyTagValue'}]
+        >>> boto3_tag_list_to_ansible_dict(tags_list)
+        {
+            'MyTagKey': 'MyTagValue'
+        }
+    Returns:
+        Dict: Dict of key:value pairs representing AWS tags
+        {
+            'MyTagKey': 'MyTagValue',
+        }
+    """
+
+    if tag_name_key_name and tag_value_key_name:
+        tag_candidates = {tag_name_key_name: tag_value_key_name}
+    else:
+        tag_candidates = {'key': 'value', 'Key': 'Value'}
+
+    # minio seems to return [{}] as an empty tags_list
+    if not tags_list or not any(tag for tag in tags_list):
+        return {}
+    for k, v in tag_candidates.items():
+        if k in tags_list[0] and v in tags_list[0]:
+            return dict((tag[k], tag[v]) for tag in tags_list)
+    raise ValueError("Couldn't find tag key (candidates %s) in tag list %s" % (str(tag_candidates), str(tags_list)))
+
+
+def ansible_dict_to_boto3_tag_list(tags_dict, tag_name_key_name='Key', tag_value_key_name='Value'):
+
+    """ Convert a flat dict of key:value pairs representing AWS resource tags to a boto3 list of dicts
+    Args:
+        tags_dict (dict): Dict representing AWS resource tags.
+        tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
+        tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value")
+    Basic Usage:
+        >>> tags_dict = {'MyTagKey': 'MyTagValue'}
+        >>> ansible_dict_to_boto3_tag_list(tags_dict)
+        [
+            {
+                'Key': 'MyTagKey',
+                'Value': 'MyTagValue'
+            }
+        ]
+    Returns:
+        List: List of dicts containing tag keys and values
+        [
+            {
+                'Key': 'MyTagKey',
+                'Value': 'MyTagValue'
+            }
+        ]
+    """
+
+    if not tags_dict:
+        return []
+
+    tags_list = []
+    for k, v in tags_dict.items():
+        tags_list.append({tag_name_key_name: k, tag_value_key_name: to_native(v)})
+
+    return tags_list
+
+
+def boto3_tag_specifications(tags_dict, types=None):
+    """ Converts a list of resource types and a flat dictionary of key:value pairs representing AWS
+    resource tags to a TagSpecification object.
+
+    https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_TagSpecification.html
+
+    Args:
+        tags_dict (dict): Dict representing AWS resource tags.
+        types (list): A list of resource types to be tagged.
+    Basic Usage:
+        >>> tags_dict = {'MyTagKey': 'MyTagValue'}
+        >>> boto3_tag_specifications(tags_dict, ['instance'])
+        [
+            {
+                'ResourceType': 'instance',
+                'Tags': [
+                    {
+                        'Key': 'MyTagKey',
+                        'Value': 'MyTagValue'
+                    }
+                ]
+            }
+        ]
+    Returns:
+        List: List of dictionaries representing an AWS Tag Specification
+    """
+    if not tags_dict:
+        return None
+    specifications = list()
+    tag_list = ansible_dict_to_boto3_tag_list(tags_dict)
+
+    if not types:
+        specifications.append(dict(Tags=tag_list))
+        return specifications
+
+    if isinstance(types, string_types):
+        types = [types]
+
+    for type_name in types:
+        specifications.append(dict(ResourceType=type_name, Tags=tag_list))
+
+    return specifications
+
+
+def compare_aws_tags(current_tags_dict, new_tags_dict, purge_tags=True):
+    """
+    Compare two dicts of AWS tags. The dicts are expected to have been created using the
+    'boto3_tag_list_to_ansible_dict' helper function. Two dicts are returned - the first is
+    tags to be set, the second is any tags to remove. Since the AWS APIs differ, these may
+    not be usable out of the box.
+
+    :param current_tags_dict:
+    :param new_tags_dict:
+    :param purge_tags:
+    :return: tag_key_value_pairs_to_set: a dict of key value pairs that need to be set in AWS. If all tags are identical this dict will be empty
+    :return: tag_keys_to_unset: a list of key names (type str) that need to be unset in AWS. If no tags need to be unset this list will be empty
+    """
+
+    tag_key_value_pairs_to_set = {}
+    tag_keys_to_unset = []
+
+    if purge_tags:
+        for key in current_tags_dict.keys():
+            if key in new_tags_dict:
+                continue
+            # Amazon have reserved 'aws:*' tags, we should avoid purging them as
+            # this probably isn't what people want to do...
+            if key.startswith('aws:'):
+                continue
+            tag_keys_to_unset.append(key)
+
+    for key in set(new_tags_dict.keys()) - set(tag_keys_to_unset):
+        if to_text(new_tags_dict[key]) != current_tags_dict.get(key):
+            tag_key_value_pairs_to_set[key] = new_tags_dict[key]
+
+    return tag_key_value_pairs_to_set, tag_keys_to_unset
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/tower.py b/ansible_collections/amazon/aws/plugins/module_utils/tower.py
new file mode 100644
index 000000000..dd7d9738a
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/tower.py
@@ -0,0 +1,83 @@
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import string
+import textwrap
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six.moves.urllib import parse as urlparse
+
+
+def _windows_callback_script(passwd=None):
+    script_url = 'https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1'
+    if passwd is not None:
+        passwd = passwd.replace("'", "''")
+        script_tpl = """\
+        <powershell>
+        $admin = [adsi]('WinNT://./administrator, user')
+        $admin.PSBase.Invoke('SetPassword', '${PASS}')
+        Invoke-Expression ((New-Object System.Net.Webclient).DownloadString('${SCRIPT}'))
+        </powershell>
+        """
+    else:
+        script_tpl = """\
+        <powershell>
+        $admin = [adsi]('WinNT://./administrator, user')
+        Invoke-Expression ((New-Object System.Net.Webclient).DownloadString('${SCRIPT}'))
+        </powershell>
+        """
+
+    tpl = string.Template(textwrap.dedent(script_tpl))
+    return tpl.safe_substitute(PASS=passwd, SCRIPT=script_url)
+
+
+def _linux_callback_script(tower_address, template_id, host_config_key):
+    template_id = urlparse.quote(template_id)
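+    # Editorial note (not upstream): the values are URL-quoted here (and the
+    # config key's single quotes are shell-escaped just below) because they
+    # are interpolated into a curl command line inside the generated script.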
tower_address = urlparse.quote(tower_address) + host_config_key = host_config_key.replace("'", "'\"'\"'") + + script_tpl = """\ + #!/bin/bash + set -x + + retry_attempts=10 + attempt=0 + while [[ $attempt -lt $retry_attempts ]] + do + status_code=$(curl --max-time 10 -v -k -s -i \ + --data 'host_config_key=${host_config_key}' \ + 'https://${tower_address}/api/v2/job_templates/${template_id}/callback/' \ + | head -n 1 \ + | awk '{print $2}') + if [[ $status_code == 404 ]] + then + status_code=$(curl --max-time 10 -v -k -s -i \ + --data 'host_config_key=${host_config_key}' \ + 'https://${tower_address}/api/v1/job_templates/${template_id}/callback/' \ + | head -n 1 \ + | awk '{print $2}') + # fall back to using V1 API for Tower 3.1 and below, since v2 API will always 404 + fi + if [[ $status_code == 201 ]] + then + exit 0 + fi + attempt=$(( attempt + 1 )) + echo "$${status_code} received... retrying in 1 minute. (Attempt $${attempt})" + sleep 60 + done + exit 1 + """ + tpl = string.Template(textwrap.dedent(script_tpl)) + return tpl.safe_substitute(tower_address=tower_address, + template_id=template_id, + host_config_key=host_config_key) + + +def tower_callback_script(tower_address, job_template_id, host_config_key, windows, passwd): + if windows: + return to_native(_windows_callback_script(passwd=passwd)) + return _linux_callback_script(tower_address, job_template_id, host_config_key) diff --git a/ansible_collections/amazon/aws/plugins/module_utils/transformation.py b/ansible_collections/amazon/aws/plugins/module_utils/transformation.py new file mode 100644 index 000000000..70d38cd8a --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/module_utils/transformation.py @@ -0,0 +1,140 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c), Michael DeHaan , 2012-2013 +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
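+
+# Illustrative example of the helpers below (an editorial sketch, not part of
+# the upstream file):
+#
+#   scrub_none_parameters({'a': None, 'b': {'c': None, 'd': 1}})
+#   returns {'b': {'d': 1}}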
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.six import string_types
+from ansible.module_utils.six import integer_types
+
+
+def ansible_dict_to_boto3_filter_list(filters_dict):
+
+    """ Convert an Ansible dict of filters to a list of dicts that boto3 can use
+    Args:
+        filters_dict (dict): Dict of AWS filters.
+    Basic Usage:
+        >>> filters = {'some-aws-id': 'i-01234567'}
+        >>> ansible_dict_to_boto3_filter_list(filters)
+        [
+            {
+                'Name': 'some-aws-id',
+                'Values': [
+                    'i-01234567',
+                ]
+            }
+        ]
+    Returns:
+        List: List of AWS filters and their values
+        [
+            {
+                'Name': 'some-aws-id',
+                'Values': [
+                    'i-01234567',
+                ]
+            }
+        ]
+    """
+
+    filters_list = []
+    for k, v in filters_dict.items():
+        filter_dict = {'Name': k}
+        if isinstance(v, bool):
+            filter_dict['Values'] = [str(v).lower()]
+        elif isinstance(v, integer_types):
+            filter_dict['Values'] = [str(v)]
+        elif isinstance(v, string_types):
+            filter_dict['Values'] = [v]
+        else:
+            filter_dict['Values'] = v
+
+        filters_list.append(filter_dict)
+
+    return filters_list
+
+
+def map_complex_type(complex_type, type_map):
+    """
+    Allows elements within a dictionary to be cast to a specific type
+    Example of usage:
+
+    DEPLOYMENT_CONFIGURATION_TYPE_MAP = {
+        'maximum_percent': 'int',
+        'minimum_healthy_percent': 'int'
+    }
+
+    deployment_configuration = map_complex_type(module.params['deployment_configuration'],
+                                                DEPLOYMENT_CONFIGURATION_TYPE_MAP)
+
+    This ensures all keys within the root element are cast to valid integers
+    """
+
+    if complex_type is None:
+        return
+    new_type = type(complex_type)()
+    if isinstance(complex_type, dict):
+        for key in complex_type:
+            if key in type_map:
+                if isinstance(type_map[key], list):
+                    new_type[key] = map_complex_type(
+                        complex_type[key],
+                        type_map[key][0])
+                else:
+                    new_type[key] = map_complex_type(
+                        complex_type[key],
+                        type_map[key])
+            else:
+                new_type[key] = complex_type[key]
+    elif isinstance(complex_type, list):
+        for i in range(len(complex_type)):
+            new_type.append(map_complex_type(
+                complex_type[i],
+                type_map))
+    elif type_map:
+        return globals()['__builtins__'][type_map](complex_type)
+    return new_type
+
+
+def scrub_none_parameters(parameters, descend_into_lists=True):
+    """
+    Iterate over a dictionary removing any keys that have a None value
+
+    Reference: https://github.com/ansible-collections/community.aws/issues/251
+    Credit: https://medium.com/better-programming/how-to-remove-null-none-values-from-a-dictionary-in-python-1bedf1aab5e4
+
+    :param descend_into_lists: whether or not to descend into lists to continue to remove None values
+    :param parameters: parameter dict
+    :return: parameter dict with all keys = None removed
+    """
+
+    clean_parameters = {}
+
+    for k, v in parameters.items():
+        if isinstance(v, dict):
+            clean_parameters[k] = scrub_none_parameters(v, descend_into_lists=descend_into_lists)
+        elif descend_into_lists and isinstance(v, list):
+            clean_parameters[k] = [scrub_none_parameters(vv, descend_into_lists=descend_into_lists) if isinstance(vv, dict) else vv for vv in v]
+        elif v is not None:
+            clean_parameters[k] = v
+
+    return clean_parameters
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/urls.py b/ansible_collections/amazon/aws/plugins/module_utils/urls.py
new file mode 100644
index 000000000..8011a1be9
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/urls.py
@@ -0,0 +1,238 @@
+# Copyright: (c) 2018, Aaron Haaf
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
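+#
+# Illustrative example of the helpers below (an editorial sketch, not part of
+# the upstream file; these helpers are deprecated and scheduled for removal
+# in 7.0.0):
+#
+#   hexdigest("")
+#   returns 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'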
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import datetime
+import hashlib
+import hmac
+import operator
+
+try:
+    from boto3 import session
+except ImportError:
+    pass
+
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import open_url
+
+from .ec2 import HAS_BOTO3
+from .ec2 import get_aws_connection_info
+
+import ansible.module_utils.common.warnings as ansible_warnings
+
+
+def hexdigest(s):
+    """
+    Returns the sha256 hexdigest of a string after encoding.
+    """
+
+    ansible_warnings.deprecate(
+        'amazon.aws.module_utils.urls.hexdigest is unused and has been deprecated.',
+        version='7.0.0', collection_name='amazon.aws')
+
+    return hashlib.sha256(s.encode("utf-8")).hexdigest()
+
+
+def format_querystring(params=None):
+    """
+    Returns a properly url-encoded query string from the provided params dict.
+
+    The parameters are sorted by name, as required for canonical requests.
+    """
+
+    ansible_warnings.deprecate(
+        'amazon.aws.module_utils.urls.format_querystring is unused and has been deprecated.',
+        version='7.0.0', collection_name='amazon.aws')
+
+    if not params:
+        return ""
+
+    # Query string values must be URL-encoded (space=%20). The parameters must be sorted by name.
+    return urlencode(sorted(params.items(), key=operator.itemgetter(0)))
+
+
+# Key derivation functions. See:
+# http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-python
+def sign(key, msg):
+    '''
+    Return digest for key applied to msg
+    '''
+
+    ansible_warnings.deprecate(
+        'amazon.aws.module_utils.urls.sign is unused and has been deprecated.',
+        version='7.0.0', collection_name='amazon.aws')
+
+    return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()
+
+
+def get_signature_key(key, dateStamp, regionName, serviceName):
+    '''
+    Returns signature key for AWS resource
+    '''
+
+    ansible_warnings.deprecate(
+        'amazon.aws.module_utils.urls.get_signature_key is unused and has been deprecated.',
+        version='7.0.0', collection_name='amazon.aws')
+
+    kDate = sign(("AWS4" + key).encode("utf-8"), dateStamp)
+    kRegion = sign(kDate, regionName)
+    kService = sign(kRegion, serviceName)
+    kSigning = sign(kService, "aws4_request")
+    return kSigning
+
+
+def get_aws_credentials_object(module):
+    '''
+    Returns aws_access_key_id, aws_secret_access_key, session_token for a module.
+    '''
+
+    ansible_warnings.deprecate(
+        'amazon.aws.module_utils.urls.get_aws_credentials_object is unused and has been deprecated.',
+        version='7.0.0', collection_name='amazon.aws')
+
+    if not HAS_BOTO3:
+        module.fail_json("get_aws_credentials_object requires boto3")
+
+    dummy, dummy, boto_params = get_aws_connection_info(module, boto3=True)
+    s = session.Session(**boto_params)
+
+    return s.get_credentials()
+
+
+# Reference: https://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html
+def signed_request(
+        module=None,
+        method="GET", service=None, host=None, uri=None,
+        query=None, body="", headers=None,
+        session_in_header=True, session_in_query=False
+):
+    """Generate a SigV4 request to an AWS resource for a module
+
+    This is used if you wish to authenticate with AWS credentials to a secure endpoint like an Elasticsearch domain.
+
+    Returns :class:`HTTPResponse` object.
+ + Example: + result = signed_request( + module=this, + service="es", + host="search-recipes1-xxxxxxxxx.us-west-2.es.amazonaws.com", + ) + + :kwarg host: endpoint to talk to + :kwarg service: AWS id of service (like `ec2` or `es`) + :kwarg module: An AnsibleAWSModule to gather connection info from + + :kwarg body: (optional) Payload to send + :kwarg method: (optional) HTTP verb to use + :kwarg query: (optional) dict of query params to handle + :kwarg uri: (optional) Resource path without query parameters + + :kwarg session_in_header: (optional) Add the session token to the headers + :kwarg session_in_query: (optional) Add the session token to the query parameters + + :returns: HTTPResponse + """ + + module.deprecate( + 'amazon.aws.module_utils.urls.signed_request is unused and has been deprecated.', + version='7.0.0', collection_name='amazon.aws') + + if not HAS_BOTO3: + module.fail_json("A sigv4 signed_request requires boto3") + + # "Constants" + + t = datetime.datetime.utcnow() + amz_date = t.strftime("%Y%m%dT%H%M%SZ") + datestamp = t.strftime("%Y%m%d") # Date w/o time, used in credential scope + algorithm = "AWS4-HMAC-SHA256" + + # AWS stuff + + region, dummy, dummy = get_aws_connection_info(module, boto3=True) + credentials = get_aws_credentials_object(module) + access_key = credentials.access_key + secret_key = credentials.secret_key + session_token = credentials.token + + if not access_key: + module.fail_json(msg="aws_access_key_id is missing") + if not secret_key: + module.fail_json(msg="aws_secret_access_key is missing") + + credential_scope = "/".join([datestamp, region, service, "aws4_request"]) + + # Argument Defaults + + uri = uri or "/" + query_string = format_querystring(query) if query else "" + + headers = headers or dict() + query = query or dict() + + headers.update({ + "host": host, + "x-amz-date": amz_date, + }) + + # Handle adding of session_token if present + if session_token: + if session_in_header: + headers["X-Amz-Security-Token"] = session_token + if session_in_query: + query["X-Amz-Security-Token"] = session_token + + if method == "GET": + body = "" + + # Derived data + + body_hash = hexdigest(body) + signed_headers = ";".join(sorted(headers.keys())) + + # Setup Cannonical request to generate auth token + + cannonical_headers = "\n".join([ + key.lower().strip() + ":" + value for key, value in headers.items() + ]) + "\n" # Note additional trailing newline + + cannonical_request = "\n".join([ + method, + uri, + query_string, + cannonical_headers, + signed_headers, + body_hash, + ]) + + string_to_sign = "\n".join([algorithm, amz_date, credential_scope, hexdigest(cannonical_request)]) + + # Sign the Cannonical request + + signing_key = get_signature_key(secret_key, datestamp, region, service) + signature = hmac.new(signing_key, string_to_sign.encode("utf-8"), hashlib.sha256).hexdigest() + + # Make auth header with that info + + authorization_header = "{0} Credential={1}/{2}, SignedHeaders={3}, Signature={4}".format( + algorithm, access_key, credential_scope, signed_headers, signature + ) + + # PERFORM THE REQUEST! + + url = "https://" + host + uri + + if query_string != "": + url = url + "?" 
+ query_string + + final_headers = { + "x-amz-date": amz_date, + "Authorization": authorization_header, + } + + final_headers.update(headers) + + return open_url(url, method=method, data=body, headers=final_headers) diff --git a/ansible_collections/amazon/aws/plugins/module_utils/version.py b/ansible_collections/amazon/aws/plugins/module_utils/version.py new file mode 100644 index 000000000..8f4ca3638 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/module_utils/version.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Felix Fontein +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""Provide version object to compare version numbers.""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +# Once we drop support for Ansible 2.9, ansible-base 2.10, and ansible-core 2.11, we can +# remove the _version.py file, and replace the following import by +# +# from ansible.module_utils.compat.version import LooseVersion + +from ._version import LooseVersion # pylint: disable=unused-import diff --git a/ansible_collections/amazon/aws/plugins/module_utils/waf.py b/ansible_collections/amazon/aws/plugins/module_utils/waf.py new file mode 100644 index 000000000..226dca920 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/module_utils/waf.py @@ -0,0 +1,224 @@ +# Copyright (c) 2017 Will Thames +# +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +""" +This module adds shared support for Web Application Firewall modules +""" + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +try: + import botocore +except ImportError: + pass # caught by imported HAS_BOTO3 + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from .ec2 import AWSRetry +from .waiters import get_waiter + + +MATCH_LOOKUP = { + 'byte': { + 'method': 'byte_match_set', + 'conditionset': 'ByteMatchSet', + 'conditiontuple': 'ByteMatchTuple', + 'type': 'ByteMatch' + }, + 'geo': { + 'method': 'geo_match_set', + 'conditionset': 'GeoMatchSet', + 'conditiontuple': 'GeoMatchConstraint', + 'type': 'GeoMatch' + }, + 'ip': { + 'method': 'ip_set', + 'conditionset': 'IPSet', + 'conditiontuple': 'IPSetDescriptor', + 'type': 'IPMatch' + }, + 'regex': { + 'method': 'regex_match_set', + 'conditionset': 'RegexMatchSet', + 'conditiontuple': 'RegexMatchTuple', + 'type': 'RegexMatch' + }, + 'size': { + 'method': 'size_constraint_set', + 'conditionset': 'SizeConstraintSet', + 'conditiontuple': 'SizeConstraint', + 'type': 'SizeConstraint' + }, + 'sql': { + 'method': 'sql_injection_match_set', + 'conditionset': 'SqlInjectionMatchSet', + 'conditiontuple': 'SqlInjectionMatchTuple', + 'type': 'SqlInjectionMatch', + }, + 'xss': { + 'method': 'xss_match_set', + 'conditionset': 'XssMatchSet', + 'conditiontuple': 'XssMatchTuple', + 'type': 'XssMatch' + }, +} + + +@AWSRetry.jittered_backoff(delay=5) +def get_rule_with_backoff(client, rule_id): + return client.get_rule(RuleId=rule_id)['Rule'] + + +@AWSRetry.jittered_backoff(delay=5) +def get_byte_match_set_with_backoff(client, byte_match_set_id): + return client.get_byte_match_set(ByteMatchSetId=byte_match_set_id)['ByteMatchSet'] + + +@AWSRetry.jittered_backoff(delay=5) +def get_ip_set_with_backoff(client, ip_set_id): + return client.get_ip_set(IPSetId=ip_set_id)['IPSet'] + + +@AWSRetry.jittered_backoff(delay=5) +def get_size_constraint_set_with_backoff(client, size_constraint_set_id): + return client.get_size_constraint_set(SizeConstraintSetId=size_constraint_set_id)['SizeConstraintSet'] + + +@AWSRetry.jittered_backoff(delay=5) +def get_sql_injection_match_set_with_backoff(client, sql_injection_match_set_id): + return client.get_sql_injection_match_set(SqlInjectionMatchSetId=sql_injection_match_set_id)['SqlInjectionMatchSet'] + + +@AWSRetry.jittered_backoff(delay=5) +def get_xss_match_set_with_backoff(client, xss_match_set_id): + return client.get_xss_match_set(XssMatchSetId=xss_match_set_id)['XssMatchSet'] + + +def get_rule(client, module, rule_id): + try: + rule = get_rule_with_backoff(client, rule_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't obtain waf rule") + + match_sets = { + 'ByteMatch': get_byte_match_set_with_backoff, + 'IPMatch': get_ip_set_with_backoff, + 'SizeConstraint': get_size_constraint_set_with_backoff, + 'SqlInjectionMatch': get_sql_injection_match_set_with_backoff, + 'XssMatch': get_xss_match_set_with_backoff + } + if 'Predicates' in rule: + for predicate in rule['Predicates']: + if predicate['Type'] in match_sets: + predicate.update(match_sets[predicate['Type']](client, predicate['DataId'])) + # replaced by Id from the relevant MatchSet + del predicate['DataId'] + return rule + + +@AWSRetry.jittered_backoff(delay=5) +def get_web_acl_with_backoff(client, web_acl_id): + return client.get_web_acl(WebACLId=web_acl_id)['WebACL'] + + +def get_web_acl(client, module, 
web_acl_id): + try: + web_acl = get_web_acl_with_backoff(client, web_acl_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't obtain web acl") + + if web_acl: + try: + for rule in web_acl['Rules']: + rule.update(get_rule(client, module, rule['RuleId'])) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't obtain web acl rule") + return camel_dict_to_snake_dict(web_acl) + + +@AWSRetry.jittered_backoff(delay=5) +def list_rules_with_backoff(client): + paginator = client.get_paginator('list_rules') + return paginator.paginate().build_full_result()['Rules'] + + +@AWSRetry.jittered_backoff(delay=5) +def list_regional_rules_with_backoff(client): + resp = client.list_rules() + rules = [] + while resp: + rules += resp['Rules'] + resp = client.list_rules(NextMarker=resp['NextMarker']) if 'NextMarker' in resp else None + return rules + + +@AWSRetry.jittered_backoff(delay=5) +def list_web_acls_with_backoff(client): + paginator = client.get_paginator('list_web_acls') + return paginator.paginate().build_full_result()['WebACLs'] + + +@AWSRetry.jittered_backoff(delay=5) +def list_regional_web_acls_with_backoff(client): + resp = client.list_web_acls() + acls = [] + while resp: + acls += resp['WebACLs'] + resp = client.list_web_acls(NextMarker=resp['NextMarker']) if 'NextMarker' in resp else None + return acls + + +def list_web_acls(client, module): + try: + if client.__class__.__name__ == 'WAF': + return list_web_acls_with_backoff(client) + elif client.__class__.__name__ == 'WAFRegional': + return list_regional_web_acls_with_backoff(client) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't obtain web acls") + + +def get_change_token(client, module): + try: + token = client.get_change_token() + return token['ChangeToken'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't obtain change token") + + +@AWSRetry.jittered_backoff(backoff=2, catch_extra_error_codes=['WAFStaleDataException']) +def run_func_with_change_token_backoff(client, module, params, func, wait=False): + params['ChangeToken'] = get_change_token(client, module) + result = func(**params) + if wait: + get_waiter( + client, 'change_token_in_sync', + ).wait( + ChangeToken=result['ChangeToken'] + ) + return result diff --git a/ansible_collections/amazon/aws/plugins/module_utils/waiters.py b/ansible_collections/amazon/aws/plugins/module_utils/waiters.py new file mode 100644 index 000000000..2abf390cb --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/module_utils/waiters.py @@ -0,0 +1,1265 @@ +# Copyright: (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import copy + +try: + import botocore.waiter as core_waiter +except ImportError: + pass # caught by HAS_BOTO3 + +from ansible_collections.amazon.aws.plugins.module_utils.modules import _RetryingBotoClientWrapper + + +ec2_data = { + "version": 2, + "waiters": { + "ImageAvailable": { + "operation": "DescribeImages", + "maxAttempts": 80, + "delay": 15, + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "Images[].State", + "expected": "available" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": 
"Images[].State", + "expected": "failed" + } + ] + }, + "InternetGatewayExists": { + "delay": 5, + "maxAttempts": 40, + "operation": "DescribeInternetGateways", + "acceptors": [ + { + "matcher": "path", + "expected": True, + "argument": "length(InternetGateways) > `0`", + "state": "success" + }, + { + "matcher": "error", + "expected": "InvalidInternetGatewayID.NotFound", + "state": "retry" + }, + ] + }, + "InternetGatewayAttached": { + "operation": "DescribeInternetGateways", + "delay": 5, + "maxAttempts": 40, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "InternetGateways[].Attachments[].State" + }, + { + "matcher": "error", + "expected": "InvalidInternetGatewayID.NotFound", + "state": "retry" + }, + ] + }, + "NetworkInterfaceAttached": { + "operation": "DescribeNetworkInterfaces", + "delay": 5, + "maxAttempts": 40, + "acceptors": [ + { + "expected": "attached", + "matcher": "pathAll", + "state": "success", + "argument": "NetworkInterfaces[].Attachment.Status" + }, + { + "expected": "InvalidNetworkInterfaceID.NotFound", + "matcher": "error", + "state": "failure" + }, + ] + }, + "NetworkInterfaceAvailable": { + "operation": "DescribeNetworkInterfaces", + "delay": 5, + "maxAttempts": 40, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "NetworkInterfaces[].Status" + }, + { + "expected": "InvalidNetworkInterfaceID.NotFound", + "matcher": "error", + "state": "retry" + }, + ] + }, + "NetworkInterfaceDeleted": { + "operation": "DescribeNetworkInterfaces", + "delay": 5, + "maxAttempts": 40, + "acceptors": [ + { + "matcher": "path", + "expected": True, + "argument": "length(NetworkInterfaces[]) > `0`", + "state": "retry" + }, + { + "matcher": "path", + "expected": True, + "argument": "length(NetworkInterfaces[]) == `0`", + "state": "success" + }, + { + "expected": "InvalidNetworkInterfaceID.NotFound", + "matcher": "error", + "state": "success" + }, + ] + }, + "NetworkInterfaceDeleteOnTerminate": { + "operation": "DescribeNetworkInterfaces", + "delay": 5, + "maxAttempts": 10, + "acceptors": [ + { + "expected": True, + "matcher": "pathAll", + "state": "success", + "argument": "NetworkInterfaces[].Attachment.DeleteOnTermination" + }, + { + "expected": "InvalidNetworkInterfaceID.NotFound", + "matcher": "error", + "state": "failure" + }, + ] + }, + "NetworkInterfaceNoDeleteOnTerminate": { + "operation": "DescribeNetworkInterfaces", + "delay": 5, + "maxAttempts": 10, + "acceptors": [ + { + "expected": False, + "matcher": "pathAll", + "state": "success", + "argument": "NetworkInterfaces[].Attachment.DeleteOnTermination" + }, + { + "expected": "InvalidNetworkInterfaceID.NotFound", + "matcher": "error", + "state": "failure" + }, + ] + }, + "RouteTableExists": { + "delay": 5, + "maxAttempts": 40, + "operation": "DescribeRouteTables", + "acceptors": [ + { + "matcher": "path", + "expected": True, + "argument": "length(RouteTables[]) > `0`", + "state": "success" + }, + { + "matcher": "error", + "expected": "InvalidRouteTableID.NotFound", + "state": "retry" + }, + ] + }, + "SecurityGroupExists": { + "delay": 5, + "maxAttempts": 40, + "operation": "DescribeSecurityGroups", + "acceptors": [ + { + "matcher": "path", + "expected": True, + "argument": "length(SecurityGroups[]) > `0`", + "state": "success" + }, + { + "matcher": "error", + "expected": "InvalidGroup.NotFound", + "state": "retry" + }, + ] + }, + "SnapshotCompleted": { + "delay": 15, + "operation": "DescribeSnapshots", + 
"maxAttempts": 40, + "acceptors": [ + { + "expected": "completed", + "matcher": "pathAll", + "state": "success", + "argument": "Snapshots[].State" + } + ] + }, + "SubnetAvailable": { + "delay": 15, + "operation": "DescribeSubnets", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "Subnets[].State" + } + ] + }, + "SubnetExists": { + "delay": 5, + "maxAttempts": 40, + "operation": "DescribeSubnets", + "acceptors": [ + { + "matcher": "path", + "expected": True, + "argument": "length(Subnets[]) > `0`", + "state": "success" + }, + { + "matcher": "error", + "expected": "InvalidSubnetID.NotFound", + "state": "retry" + }, + ] + }, + "SubnetHasMapPublic": { + "delay": 5, + "maxAttempts": 40, + "operation": "DescribeSubnets", + "acceptors": [ + { + "matcher": "pathAll", + "expected": True, + "argument": "Subnets[].MapPublicIpOnLaunch", + "state": "success" + }, + ] + }, + "SubnetNoMapPublic": { + "delay": 5, + "maxAttempts": 40, + "operation": "DescribeSubnets", + "acceptors": [ + { + "matcher": "pathAll", + "expected": False, + "argument": "Subnets[].MapPublicIpOnLaunch", + "state": "success" + }, + ] + }, + "SubnetHasAssignIpv6": { + "delay": 5, + "maxAttempts": 40, + "operation": "DescribeSubnets", + "acceptors": [ + { + "matcher": "pathAll", + "expected": True, + "argument": "Subnets[].AssignIpv6AddressOnCreation", + "state": "success" + }, + ] + }, + "SubnetNoAssignIpv6": { + "delay": 5, + "maxAttempts": 40, + "operation": "DescribeSubnets", + "acceptors": [ + { + "matcher": "pathAll", + "expected": False, + "argument": "Subnets[].AssignIpv6AddressOnCreation", + "state": "success" + }, + ] + }, + "SubnetDeleted": { + "delay": 5, + "maxAttempts": 40, + "operation": "DescribeSubnets", + "acceptors": [ + { + "matcher": "path", + "expected": True, + "argument": "length(Subnets[]) > `0`", + "state": "retry" + }, + { + "matcher": "error", + "expected": "InvalidSubnetID.NotFound", + "state": "success" + }, + ] + }, + "VpcAvailable": { + "delay": 15, + "operation": "DescribeVpcs", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "Vpcs[].State" + } + ] + }, + "VpcExists": { + "operation": "DescribeVpcs", + "delay": 1, + "maxAttempts": 5, + "acceptors": [ + { + "matcher": "status", + "expected": 200, + "state": "success" + }, + { + "matcher": "error", + "expected": "InvalidVpcID.NotFound", + "state": "retry" + } + ] + }, + "VpcEndpointExists": { + "delay": 5, + "maxAttempts": 40, + "operation": "DescribeVpcEndpoints", + "acceptors": [ + { + "matcher": "path", + "expected": True, + "argument": "length(VpcEndpoints[]) > `0`", + "state": "success" + }, + { + "matcher": "error", + "expected": "InvalidVpcEndpointId.NotFound", + "state": "retry" + }, + ] + }, + "VpnGatewayExists": { + "delay": 5, + "maxAttempts": 40, + "operation": "DescribeVpnGateways", + "acceptors": [ + { + "matcher": "path", + "expected": True, + "argument": "length(VpnGateways[]) > `0`", + "state": "success" + }, + { + "matcher": "error", + "expected": "InvalidVpnGatewayID.NotFound", + "state": "retry" + }, + ] + }, + "VpnGatewayDetached": { + "delay": 5, + "maxAttempts": 40, + "operation": "DescribeVpnGateways", + "acceptors": [ + { + "matcher": "path", + "expected": True, + "argument": "VpnGateways[0].State == 'available'", + "state": "success" + }, + ] + }, + "NatGatewayDeleted": { + "delay": 5, + "maxAttempts": 40, + "operation": "DescribeNatGateways", + "acceptors": [ + { + 
"state": "success", + "matcher": "pathAll", + "expected": "deleted", + "argument": "NatGateways[].State" + }, + { + "state": "success", + "matcher": "error", + "expected": "NatGatewayNotFound" + } + ] + }, + "NatGatewayAvailable": { + "delay": 5, + "maxAttempts": 40, + "operation": "DescribeNatGateways", + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "expected": "available", + "argument": "NatGateways[].State" + }, + { + "state": "retry", + "matcher": "error", + "expected": "NatGatewayNotFound" + } + ] + }, + } +} + + +waf_data = { + "version": 2, + "waiters": { + "ChangeTokenInSync": { + "delay": 20, + "maxAttempts": 60, + "operation": "GetChangeTokenStatus", + "acceptors": [ + { + "matcher": "path", + "expected": True, + "argument": "ChangeTokenStatus == 'INSYNC'", + "state": "success" + }, + { + "matcher": "error", + "expected": "WAFInternalErrorException", + "state": "retry" + } + ] + } + } +} + +eks_data = { + "version": 2, + "waiters": { + "ClusterActive": { + "delay": 20, + "maxAttempts": 60, + "operation": "DescribeCluster", + "acceptors": [ + { + "state": "success", + "matcher": "path", + "argument": "cluster.status", + "expected": "ACTIVE" + }, + { + "state": "retry", + "matcher": "error", + "expected": "ResourceNotFoundException" + } + ] + }, + "ClusterDeleted": { + "delay": 20, + "maxAttempts": 60, + "operation": "DescribeCluster", + "acceptors": [ + { + "state": "retry", + "matcher": "path", + "argument": "cluster.status != 'DELETED'", + "expected": True + }, + { + "state": "success", + "matcher": "error", + "expected": "ResourceNotFoundException" + } + ] + }, + "FargateProfileActive": { + "delay": 20, + "maxAttempts": 30, + "operation": "DescribeFargateProfile", + "acceptors": [ + { + "state": "success", + "matcher": "path", + "argument": "fargateProfile.status", + "expected": "ACTIVE" + }, + { + "state": "retry", + "matcher": "error", + "expected": "ResourceNotFoundException" + } + ] + }, + "FargateProfileDeleted": { + "delay": 20, + "maxAttempts": 30, + "operation": "DescribeFargateProfile", + "acceptors": [ + { + "state": "retry", + "matcher": "path", + "argument": "fargateProfile.status == 'DELETING'", + "expected": True + }, + { + "state": "success", + "matcher": "error", + "expected": "ResourceNotFoundException" + } + ] + }, + "NodegroupActive": { + "delay": 20, + "maxAttempts": 60, + "operation": "DescribeNodegroup", + "acceptors": [ + { + "state": "success", + "matcher": "path", + "argument": "nodegroup.status", + "expected": "ACTIVE" + }, + { + "state": "retry", + "matcher": "error", + "expected": "ResourceNotFoundException" + } + ] + }, + "NodegroupDeleted": { + "delay": 20, + "maxAttempts": 60, + "operation": "DescribeNodegroup", + "acceptors": [ + { + "state": "retry", + "matcher": "path", + "argument": "nodegroup.status == 'DELETING'", + "expected": True + }, + { + "state": "success", + "matcher": "error", + "expected": "ResourceNotFoundException" + } + ] + } + } +} + + +elb_data = { + "version": 2, + "waiters": { + "AnyInstanceInService": { + "acceptors": [ + { + "argument": "InstanceStates[].State", + "expected": "InService", + "matcher": "pathAny", + "state": "success" + } + ], + "delay": 15, + "maxAttempts": 40, + "operation": "DescribeInstanceHealth" + }, + "InstanceDeregistered": { + "delay": 15, + "operation": "DescribeInstanceHealth", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "OutOfService", + "matcher": "pathAll", + "state": "success", + "argument": "InstanceStates[].State" + }, + { + "matcher": "error", + "expected": 
"InvalidInstance", + "state": "success" + } + ] + }, + "InstanceInService": { + "acceptors": [ + { + "argument": "InstanceStates[].State", + "expected": "InService", + "matcher": "pathAll", + "state": "success" + }, + { + "matcher": "error", + "expected": "InvalidInstance", + "state": "retry" + } + ], + "delay": 15, + "maxAttempts": 40, + "operation": "DescribeInstanceHealth" + }, + "LoadBalancerCreated": { + "delay": 10, + "maxAttempts": 60, + "operation": "DescribeLoadBalancers", + "acceptors": [ + { + "matcher": "path", + "expected": True, + "argument": "length(LoadBalancerDescriptions[]) > `0`", + "state": "success", + }, + { + "matcher": "error", + "expected": "LoadBalancerNotFound", + "state": "retry", + }, + ], + }, + "LoadBalancerDeleted": { + "delay": 10, + "maxAttempts": 60, + "operation": "DescribeLoadBalancers", + "acceptors": [ + { + "matcher": "path", + "expected": True, + "argument": "length(LoadBalancerDescriptions[]) > `0`", + "state": "retry", + }, + { + "matcher": "error", + "expected": "LoadBalancerNotFound", + "state": "success", + }, + ], + }, + } +} + +elbv2_data = { + "version": 2, + "waiters": { + "LoadBalancerAvailable": { + "delay": 15, + "operation": "DescribeLoadBalancers", + "maxAttempts": 40, + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "LoadBalancers[].State.Code", + "expected": "active" + }, + { + "state": "retry", + "matcher": "pathAny", + "argument": "LoadBalancers[].State.Code", + "expected": "provisioning" + }, + { + "state": "retry", + "matcher": "error", + "expected": "LoadBalancerNotFound" + } + ] + }, + "LoadBalancerIpAddressTypeIpv4": { + "delay": 15, + "operation": "DescribeLoadBalancers", + "maxAttempts": 40, + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "LoadBalancers[].IpAddressType", + "expected": "ipv4" + }, + { + "state": "retry", + "matcher": "pathAny", + "argument": "LoadBalancers[].IpAddressType", + "expected": "dualstack" + }, + { + "state": "failure", + "matcher": "error", + "expected": "LoadBalancerNotFound" + } + ] + }, + "LoadBalancerIpAddressTypeDualStack": { + "delay": 15, + "operation": "DescribeLoadBalancers", + "maxAttempts": 40, + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "LoadBalancers[].IpAddressType", + "expected": "dualstack" + }, + { + "state": "retry", + "matcher": "pathAny", + "argument": "LoadBalancers[].IpAddressType", + "expected": "ipv4" + }, + { + "state": "failure", + "matcher": "error", + "expected": "LoadBalancerNotFound" + } + ] + }, + "LoadBalancersDeleted": { + "delay": 15, + "operation": "DescribeLoadBalancers", + "maxAttempts": 40, + "acceptors": [ + { + "state": "retry", + "matcher": "pathAll", + "argument": "LoadBalancers[].State.Code", + "expected": "active" + }, + { + "matcher": "error", + "expected": "LoadBalancerNotFound", + "state": "success" + } + ] + }, + } +} + + +rds_data = { + "version": 2, + "waiters": { + "DBInstanceStopped": { + "delay": 20, + "maxAttempts": 60, + "operation": "DescribeDBInstances", + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "DBInstances[].DBInstanceStatus", + "expected": "stopped" + }, + ] + }, + "DBClusterAvailable": { + "delay": 20, + "maxAttempts": 60, + "operation": "DescribeDBClusters", + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "DBClusters[].Status", + "expected": "available" + }, + { + "state": "retry", + "matcher": "error", + "expected": "DBClusterNotFoundFault" + } + ] + }, + 
"DBClusterDeleted": { + "delay": 20, + "maxAttempts": 60, + "operation": "DescribeDBClusters", + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "DBClusters[].Status", + "expected": "stopped" + }, + { + "state": "success", + "matcher": "error", + "expected": "DBClusterNotFoundFault" + } + ] + }, + "ReadReplicaPromoted": { + "delay": 5, + "maxAttempts": 40, + "operation": "DescribeDBInstances", + "acceptors": [ + { + "state": "success", + "matcher": "path", + "argument": "length(DBInstances[].StatusInfos) == `0`", + "expected": True + }, + { + "state": "retry", + "matcher": "pathAny", + "argument": "DBInstances[].StatusInfos[].Status", + "expected": "replicating" + } + ] + }, + "RoleAssociated": { + "delay": 5, + "maxAttempts": 40, + "operation": "DescribeDBInstances", + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "DBInstances[].AssociatedRoles[].Status", + "expected": "ACTIVE" + }, + { + "state": "retry", + "matcher": "pathAny", + "argument": "DBInstances[].AssociatedRoles[].Status", + "expected": "PENDING" + } + ] + }, + "RoleDisassociated": { + "delay": 5, + "maxAttempts": 40, + "operation": "DescribeDBInstances", + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "DBInstances[].AssociatedRoles[].Status", + "expected": "ACTIVE" + }, + { + "state": "retry", + "matcher": "pathAny", + "argument": "DBInstances[].AssociatedRoles[].Status", + "expected": "PENDING" + }, + { + "state": "success", + "matcher": "path", + "argument": "length(DBInstances[].AssociatedRoles[]) == `0`", + "expected": True + }, + ] + } + } +} + + +route53_data = { + "version": 2, + "waiters": { + "ResourceRecordSetsChanged": { + "delay": 30, + "maxAttempts": 60, + "operation": "GetChange", + "acceptors": [ + { + "matcher": "path", + "expected": "INSYNC", + "argument": "ChangeInfo.Status", + "state": "success" + } + ] + } + } +} + + +def _inject_limit_retries(model): + + extra_retries = [ + 'RequestLimitExceeded', 'Unavailable', 'ServiceUnavailable', + 'InternalFailure', 'InternalError', 'TooManyRequestsException', + 'Throttling'] + + acceptors = [] + for error in extra_retries: + acceptors.append({"state": "success", "matcher": "error", "expected": error}) + + _model = copy.deepcopy(model) + + for waiter in model["waiters"]: + _model["waiters"][waiter]["acceptors"].extend(acceptors) + + return _model + + +def ec2_model(name): + ec2_models = core_waiter.WaiterModel(waiter_config=_inject_limit_retries(ec2_data)) + return ec2_models.get_waiter(name) + + +def waf_model(name): + waf_models = core_waiter.WaiterModel(waiter_config=_inject_limit_retries(waf_data)) + return waf_models.get_waiter(name) + + +def eks_model(name): + eks_models = core_waiter.WaiterModel(waiter_config=_inject_limit_retries(eks_data)) + return eks_models.get_waiter(name) + + +def elbv2_model(name): + elbv2_models = core_waiter.WaiterModel(waiter_config=_inject_limit_retries(elbv2_data)) + return elbv2_models.get_waiter(name) + + +def elb_model(name): + elb_models = core_waiter.WaiterModel(waiter_config=_inject_limit_retries(elb_data)) + return elb_models.get_waiter(name) + + +def rds_model(name): + rds_models = core_waiter.WaiterModel(waiter_config=_inject_limit_retries(rds_data)) + return rds_models.get_waiter(name) + + +def route53_model(name): + route53_models = core_waiter.WaiterModel(waiter_config=_inject_limit_retries(route53_data)) + return route53_models.get_waiter(name) + + +waiters_by_name = { + ('EC2', 'image_available'): lambda ec2: 
core_waiter.Waiter( + 'image_available', + ec2_model('ImageAvailable'), + core_waiter.NormalizedOperationMethod( + ec2.describe_images + )), + ('EC2', 'internet_gateway_exists'): lambda ec2: core_waiter.Waiter( + 'internet_gateway_exists', + ec2_model('InternetGatewayExists'), + core_waiter.NormalizedOperationMethod( + ec2.describe_internet_gateways + )), + ('EC2', 'internet_gateway_attached'): lambda ec2: core_waiter.Waiter( + 'internet_gateway_attached', + ec2_model('InternetGatewayAttached'), + core_waiter.NormalizedOperationMethod( + ec2.describe_internet_gateways + )), + ('EC2', 'network_interface_attached'): lambda ec2: core_waiter.Waiter( + 'network_interface_attached', + ec2_model('NetworkInterfaceAttached'), + core_waiter.NormalizedOperationMethod( + ec2.describe_network_interfaces + )), + ('EC2', 'network_interface_deleted'): lambda ec2: core_waiter.Waiter( + 'network_interface_deleted', + ec2_model('NetworkInterfaceDeleted'), + core_waiter.NormalizedOperationMethod( + ec2.describe_network_interfaces + )), + ('EC2', 'network_interface_available'): lambda ec2: core_waiter.Waiter( + 'network_interface_available', + ec2_model('NetworkInterfaceAvailable'), + core_waiter.NormalizedOperationMethod( + ec2.describe_network_interfaces + )), + ('EC2', 'network_interface_delete_on_terminate'): lambda ec2: core_waiter.Waiter( + 'network_interface_delete_on_terminate', + ec2_model('NetworkInterfaceDeleteOnTerminate'), + core_waiter.NormalizedOperationMethod( + ec2.describe_network_interfaces + )), + ('EC2', 'network_interface_no_delete_on_terminate'): lambda ec2: core_waiter.Waiter( + 'network_interface_no_delete_on_terminate', + ec2_model('NetworkInterfaceNoDeleteOnTerminate'), + core_waiter.NormalizedOperationMethod( + ec2.describe_network_interfaces + )), + ('EC2', 'route_table_exists'): lambda ec2: core_waiter.Waiter( + 'route_table_exists', + ec2_model('RouteTableExists'), + core_waiter.NormalizedOperationMethod( + ec2.describe_route_tables + )), + ('EC2', 'security_group_exists'): lambda ec2: core_waiter.Waiter( + 'security_group_exists', + ec2_model('SecurityGroupExists'), + core_waiter.NormalizedOperationMethod( + ec2.describe_security_groups + )), + ('EC2', 'snapshot_completed'): lambda ec2: core_waiter.Waiter( + 'snapshot_completed', + ec2_model('SnapshotCompleted'), + core_waiter.NormalizedOperationMethod( + ec2.describe_snapshots + )), + ('EC2', 'subnet_available'): lambda ec2: core_waiter.Waiter( + 'subnet_available', + ec2_model('SubnetAvailable'), + core_waiter.NormalizedOperationMethod( + ec2.describe_subnets + )), + ('EC2', 'subnet_exists'): lambda ec2: core_waiter.Waiter( + 'subnet_exists', + ec2_model('SubnetExists'), + core_waiter.NormalizedOperationMethod( + ec2.describe_subnets + )), + ('EC2', 'subnet_has_map_public'): lambda ec2: core_waiter.Waiter( + 'subnet_has_map_public', + ec2_model('SubnetHasMapPublic'), + core_waiter.NormalizedOperationMethod( + ec2.describe_subnets + )), + ('EC2', 'subnet_no_map_public'): lambda ec2: core_waiter.Waiter( + 'subnet_no_map_public', + ec2_model('SubnetNoMapPublic'), + core_waiter.NormalizedOperationMethod( + ec2.describe_subnets + )), + ('EC2', 'subnet_has_assign_ipv6'): lambda ec2: core_waiter.Waiter( + 'subnet_has_assign_ipv6', + ec2_model('SubnetHasAssignIpv6'), + core_waiter.NormalizedOperationMethod( + ec2.describe_subnets + )), + ('EC2', 'subnet_no_assign_ipv6'): lambda ec2: core_waiter.Waiter( + 'subnet_no_assign_ipv6', + ec2_model('SubnetNoAssignIpv6'), + core_waiter.NormalizedOperationMethod( + ec2.describe_subnets + )), + 
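+    # keys are (botocore client class name, waiter name); each value is a factory that binds the waiter model to the client's describe method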
('EC2', 'subnet_deleted'): lambda ec2: core_waiter.Waiter( + 'subnet_deleted', + ec2_model('SubnetDeleted'), + core_waiter.NormalizedOperationMethod( + ec2.describe_subnets + )), + ('EC2', 'vpc_available'): lambda ec2: core_waiter.Waiter( + 'vpc_available', + ec2_model('VpcAvailable'), + core_waiter.NormalizedOperationMethod( + ec2.describe_vpcs + )), + ('EC2', 'vpc_exists'): lambda ec2: core_waiter.Waiter( + 'vpc_exists', + ec2_model('VpcExists'), + core_waiter.NormalizedOperationMethod( + ec2.describe_vpcs + )), + ('EC2', 'vpc_endpoint_exists'): lambda ec2: core_waiter.Waiter( + 'vpc_endpoint_exists', + ec2_model('VpcEndpointExists'), + core_waiter.NormalizedOperationMethod( + ec2.describe_vpc_endpoints + )), + ('EC2', 'vpn_gateway_exists'): lambda ec2: core_waiter.Waiter( + 'vpn_gateway_exists', + ec2_model('VpnGatewayExists'), + core_waiter.NormalizedOperationMethod( + ec2.describe_vpn_gateways + )), + ('EC2', 'vpn_gateway_detached'): lambda ec2: core_waiter.Waiter( + 'vpn_gateway_detached', + ec2_model('VpnGatewayDetached'), + core_waiter.NormalizedOperationMethod( + ec2.describe_vpn_gateways + )), + ('EC2', 'nat_gateway_deleted'): lambda ec2: core_waiter.Waiter( + 'nat_gateway_deleted', + ec2_model('NatGatewayDeleted'), + core_waiter.NormalizedOperationMethod( + ec2.describe_nat_gateways + )), + ('EC2', 'nat_gateway_available'): lambda ec2: core_waiter.Waiter( + 'nat_gateway_available', + ec2_model('NatGatewayAvailable'), + core_waiter.NormalizedOperationMethod( + ec2.describe_nat_gateways + )), + ('WAF', 'change_token_in_sync'): lambda waf: core_waiter.Waiter( + 'change_token_in_sync', + waf_model('ChangeTokenInSync'), + core_waiter.NormalizedOperationMethod( + waf.get_change_token_status + )), + ('WAFRegional', 'change_token_in_sync'): lambda waf: core_waiter.Waiter( + 'change_token_in_sync', + waf_model('ChangeTokenInSync'), + core_waiter.NormalizedOperationMethod( + waf.get_change_token_status + )), + ('EKS', 'cluster_active'): lambda eks: core_waiter.Waiter( + 'cluster_active', + eks_model('ClusterActive'), + core_waiter.NormalizedOperationMethod( + eks.describe_cluster + )), + ('EKS', 'cluster_deleted'): lambda eks: core_waiter.Waiter( + 'cluster_deleted', + eks_model('ClusterDeleted'), + core_waiter.NormalizedOperationMethod( + eks.describe_cluster + )), + ('EKS', 'fargate_profile_active'): lambda eks: core_waiter.Waiter( + 'fargate_profile_active', + eks_model('FargateProfileActive'), + core_waiter.NormalizedOperationMethod( + eks.describe_fargate_profile + )), + ('EKS', 'fargate_profile_deleted'): lambda eks: core_waiter.Waiter( + 'fargate_profile_deleted', + eks_model('FargateProfileDeleted'), + core_waiter.NormalizedOperationMethod( + eks.describe_fargate_profile + )), + ('EKS', 'nodegroup_active'): lambda eks: core_waiter.Waiter( + 'nodegroup_active', + eks_model('NodegroupActive'), + core_waiter.NormalizedOperationMethod( + eks.describe_nodegroup + )), + ('EKS', 'nodegroup_deleted'): lambda eks: core_waiter.Waiter( + 'nodegroup_deleted', + eks_model('NodegroupDeleted'), + core_waiter.NormalizedOperationMethod( + eks.describe_nodegroup + )), + ('ElasticLoadBalancing', 'any_instance_in_service'): lambda elb: core_waiter.Waiter( + 'any_instance_in_service', + elb_model('AnyInstanceInService'), + core_waiter.NormalizedOperationMethod( + elb.describe_instance_health + )), + ('ElasticLoadBalancing', 'instance_deregistered'): lambda elb: core_waiter.Waiter( + 'instance_deregistered', + elb_model('InstanceDeregistered'), + core_waiter.NormalizedOperationMethod( + 
            elb.describe_instance_health
+        )),
+    ('ElasticLoadBalancing', 'instance_in_service'): lambda elb: core_waiter.Waiter(
+        'instance_in_service',
+        elb_model('InstanceInService'),
+        core_waiter.NormalizedOperationMethod(
+            elb.describe_instance_health
+        )),
+    ('ElasticLoadBalancing', 'load_balancer_created'): lambda elb: core_waiter.Waiter(
+        'load_balancer_created',
+        elb_model('LoadBalancerCreated'),
+        core_waiter.NormalizedOperationMethod(
+            elb.describe_load_balancers
+        )),
+    ('ElasticLoadBalancing', 'load_balancer_deleted'): lambda elb: core_waiter.Waiter(
+        'load_balancer_deleted',
+        elb_model('LoadBalancerDeleted'),
+        core_waiter.NormalizedOperationMethod(
+            elb.describe_load_balancers
+        )),
+    ('ElasticLoadBalancingv2', 'load_balancer_available'): lambda elbv2: core_waiter.Waiter(
+        'load_balancer_available',
+        elbv2_model('LoadBalancerAvailable'),
+        core_waiter.NormalizedOperationMethod(
+            elbv2.describe_load_balancers
+        )),
+    ('ElasticLoadBalancingv2', 'load_balancer_ip_address_type_ipv4'): lambda elbv2: core_waiter.Waiter(
+        'load_balancer_ip_address_type_ipv4',
+        elbv2_model('LoadBalancerIpAddressTypeIpv4'),
+        core_waiter.NormalizedOperationMethod(
+            elbv2.describe_load_balancers
+        )),
+    ('ElasticLoadBalancingv2', 'load_balancer_ip_address_type_dualstack'): lambda elbv2: core_waiter.Waiter(
+        'load_balancer_ip_address_type_dualstack',
+        elbv2_model('LoadBalancerIpAddressTypeDualStack'),
+        core_waiter.NormalizedOperationMethod(
+            elbv2.describe_load_balancers
+        )),
+    ('ElasticLoadBalancingv2', 'load_balancers_deleted'): lambda elbv2: core_waiter.Waiter(
+        'load_balancers_deleted',
+        elbv2_model('LoadBalancersDeleted'),
+        core_waiter.NormalizedOperationMethod(
+            elbv2.describe_load_balancers
+        )),
+    ('RDS', 'db_instance_stopped'): lambda rds: core_waiter.Waiter(
+        'db_instance_stopped',
+        rds_model('DBInstanceStopped'),
+        core_waiter.NormalizedOperationMethod(
+            rds.describe_db_instances
+        )),
+    ('RDS', 'cluster_available'): lambda rds: core_waiter.Waiter(
+        'cluster_available',
+        rds_model('DBClusterAvailable'),
+        core_waiter.NormalizedOperationMethod(
+            rds.describe_db_clusters
+        )),
+    ('RDS', 'cluster_deleted'): lambda rds: core_waiter.Waiter(
+        'cluster_deleted',
+        rds_model('DBClusterDeleted'),
+        core_waiter.NormalizedOperationMethod(
+            rds.describe_db_clusters
+        )),
+    ('RDS', 'read_replica_promoted'): lambda rds: core_waiter.Waiter(
+        'read_replica_promoted',
+        rds_model('ReadReplicaPromoted'),
+        core_waiter.NormalizedOperationMethod(
+            rds.describe_db_instances
+        )),
+    ('RDS', 'role_associated'): lambda rds: core_waiter.Waiter(
+        'role_associated',
+        rds_model('RoleAssociated'),
+        core_waiter.NormalizedOperationMethod(
+            rds.describe_db_instances
+        )),
+    ('RDS', 'role_disassociated'): lambda rds: core_waiter.Waiter(
+        'role_disassociated',
+        rds_model('RoleDisassociated'),
+        core_waiter.NormalizedOperationMethod(
+            rds.describe_db_instances
+        )),
+    ('Route53', 'resource_record_sets_changed'): lambda route53: core_waiter.Waiter(
+        'resource_record_sets_changed',
+        route53_model('ResourceRecordSetsChanged'),
+        core_waiter.NormalizedOperationMethod(
+            route53.get_change
+        )),
+}
+
+
+def get_waiter(client, waiter_name):
+    if isinstance(client, _RetryingBotoClientWrapper):
+        return get_waiter(client.client, waiter_name)
+    try:
+        return waiters_by_name[(client.__class__.__name__, waiter_name)](client)
+    except KeyError:
+        raise NotImplementedError("Waiter {0} could not be found for client {1}. Available waiters: {2}".format(
+            waiter_name, type(client), ', '.join(repr(k) for k in waiters_by_name.keys())))
diff --git a/ansible_collections/amazon/aws/plugins/modules/autoscaling_group.py b/ansible_collections/amazon/aws/plugins/modules/autoscaling_group.py
new file mode 100644
index 000000000..aefe46570
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/autoscaling_group.py
@@ -0,0 +1,1966 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: autoscaling_group
+version_added: 5.0.0
+short_description: Create or delete AWS AutoScaling Groups (ASGs)
+description:
+  - Can create or delete AWS AutoScaling Groups.
+  - Can be used with the M(community.aws.autoscaling_launch_config) module to manage Launch Configurations.
+  - Prior to release 5.0.0 this module was called C(community.aws.ec2_asg).
+    The usage did not change.
+  - This module was originally added to C(community.aws) in release 1.0.0.
+author:
+  - "Gareth Rushgrove (@garethr)"
+options:
+  state:
+    description:
+      - Create or delete the group.
+    choices: ['present', 'absent']
+    default: present
+    type: str
+  name:
+    description:
+      - Unique name for group to be created or deleted.
+    required: true
+    type: str
+  load_balancers:
+    description:
+      - List of ELB names to use for the group. Use for classic load balancers.
+    type: list
+    elements: str
+  target_group_arns:
+    description:
+      - List of target group ARNs to use for the group. Use for application load balancers.
+    type: list
+    elements: str
+  availability_zones:
+    description:
+      - List of availability zone names in which to create the group.
+      - Defaults to all the availability zones in the region if I(vpc_zone_identifier) is not set.
+    type: list
+    elements: str
+  launch_config_name:
+    description:
+      - Name of the launch configuration to use for the group. See the M(community.aws.autoscaling_launch_config) module for managing these.
+      - If unspecified then the current group value will be used. One of I(launch_config_name) or I(launch_template) must be provided.
+    type: str
+  launch_template:
+    description:
+      - Dictionary describing the Launch Template to use.
+    suboptions:
+      version:
+        description:
+          - The version number of the launch template to use.
+          - Defaults to latest version if not provided.
+        type: str
+      launch_template_name:
+        description:
+          - The name of the launch template. Only one of I(launch_template_name) or I(launch_template_id) is required.
+        type: str
+      launch_template_id:
+        description:
+          - The ID of the launch template. Only one of I(launch_template_name) or I(launch_template_id) is required.
+        type: str
+    type: dict
+  min_size:
+    description:
+      - Minimum number of instances in group, if unspecified then the current group value will be used.
+    type: int
+  max_size:
+    description:
+      - Maximum number of instances in group, if unspecified then the current group value will be used.
+    type: int
+  max_instance_lifetime:
+    description:
+      - The maximum amount of time, in seconds, that an instance can be in service.
+      - Maximum instance lifetime must be equal to 0, between 604800 and 31536000 seconds (inclusive), or not specified.
+      - A value of 0 removes the lifetime restriction.
+    type: int
+  mixed_instances_policy:
+    description:
+      - A mixed instance policy to use for the ASG.
+ - Only used when the ASG is configured to use a Launch Template (I(launch_template)). + - 'See also U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-mixedinstancespolicy.html)' + required: false + suboptions: + instance_types: + description: + - A list of instance_types. + type: list + elements: str + required: false + instances_distribution: + description: + - >- + Specifies the distribution of On-Demand Instances and Spot Instances, the maximum price + to pay for Spot Instances, and how the Auto Scaling group allocates instance types + to fulfill On-Demand and Spot capacity. + - 'See also U(https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_InstancesDistribution.html)' + required: false + type: dict + version_added: 1.5.0 + version_added_collection: community.aws + suboptions: + on_demand_allocation_strategy: + description: + - Indicates how to allocate instance types to fulfill On-Demand capacity. + type: str + required: false + version_added: 1.5.0 + version_added_collection: community.aws + on_demand_base_capacity: + description: + - >- + The minimum amount of the Auto Scaling group's capacity that must be fulfilled by On-Demand + Instances. This base portion is provisioned first as your group scales. + - >- + Default if not set is 0. If you leave it set to 0, On-Demand Instances are launched as a + percentage of the Auto Scaling group's desired capacity, per the OnDemandPercentageAboveBaseCapacity setting. + type: int + required: false + version_added: 1.5.0 + version_added_collection: community.aws + on_demand_percentage_above_base_capacity: + description: + - Controls the percentages of On-Demand Instances and Spot Instances for your additional capacity beyond OnDemandBaseCapacity. + - Default if not set is 100. If you leave it set to 100, the percentages are 100% for On-Demand Instances and 0% for Spot Instances. + - 'Valid range: 0 to 100' + type: int + required: false + version_added: 1.5.0 + version_added_collection: community.aws + spot_allocation_strategy: + description: + - Indicates how to allocate instances across Spot Instance pools. + type: str + required: false + version_added: 1.5.0 + version_added_collection: community.aws + spot_instance_pools: + description: + - >- + The number of Spot Instance pools across which to allocate your Spot Instances. The Spot pools are determined from + the different instance types in the Overrides array of LaunchTemplate. Default if not set is 2. + - Used only when the Spot allocation strategy is lowest-price. + - 'Valid Range: Minimum value of 1. Maximum value of 20.' + type: int + required: false + version_added: 1.5.0 + version_added_collection: community.aws + spot_max_price: + description: + - The maximum price per unit hour that you are willing to pay for a Spot Instance. + - If you leave the value of this parameter blank (which is the default), the maximum Spot price is set at the On-Demand price. + - To remove a value that you previously set, include the parameter but leave the value blank. + type: str + required: false + version_added: 1.5.0 + version_added_collection: community.aws + type: dict + placement_group: + description: + - Physical location of your cluster placement group created in Amazon EC2. + type: str + desired_capacity: + description: + - Desired number of instances in group, if unspecified then the current group value will be used. 
+    type: int
+  replace_all_instances:
+    description:
+      - In a rolling fashion, replace all instances that used the old launch configuration with instances from the new launch
+        configuration. It increases the ASG size by I(replace_batch_size) and waits for the new instances to be up and running.
+        After that, it terminates a batch of old instances, waits for the replacements, and repeats, until all old instances are replaced.
+        Once that's done, the ASG size is reduced back to the expected size.
+    default: false
+    type: bool
+  replace_batch_size:
+    description:
+      - Number of instances you'd like to replace at a time. Used with I(replace_all_instances).
+    required: false
+    default: 1
+    type: int
+  replace_instances:
+    description:
+      - List of I(instance_ids) belonging to the named AutoScalingGroup that you would like to terminate and be replaced with instances
+        matching the current launch configuration.
+    type: list
+    elements: str
+    default: []
+  detach_instances:
+    description:
+      - Removes one or more instances from the specified AutoScalingGroup.
+      - If the I(decrement_desired_capacity) flag is not set, new instance(s) are launched to replace the detached instance(s).
+      - If a Classic Load Balancer is attached to the AutoScalingGroup, the instances are also deregistered from the load balancer.
+      - If there are target groups attached to the AutoScalingGroup, the instances are also deregistered from the target groups.
+    type: list
+    elements: str
+    default: []
+    version_added: 3.2.0
+    version_added_collection: community.aws
+  decrement_desired_capacity:
+    description:
+      - Indicates whether the AutoScalingGroup decrements the desired capacity value by the number of instances detached.
+    default: false
+    type: bool
+    version_added: 3.2.0
+    version_added_collection: community.aws
+  lc_check:
+    description:
+      - Check to make sure instances that are being replaced with I(replace_instances) do not already have the current I(launch_config).
+    default: true
+    type: bool
+  lt_check:
+    description:
+      - Check to make sure instances that are being replaced with I(replace_instances) do not already have the current
+        I(launch_template) or I(launch_template) I(version).
+    default: true
+    type: bool
+  vpc_zone_identifier:
+    description:
+      - List of VPC subnets to use.
+    type: list
+    elements: str
+  tags:
+    description:
+      - A list of tags to add to the Auto Scaling Group.
+      - Optional key is I(propagate_at_launch), which defaults to true.
+      - When I(propagate_at_launch) is true, the tags will be propagated to the instances created.
+    type: list
+    elements: dict
+    default: []
+  purge_tags:
+    description:
+      - If C(true), existing tags will be purged from the resource to match exactly what is defined by the I(tags) parameter.
+      - If the I(tags) parameter is not set then tags will not be modified.
+    default: false
+    type: bool
+    version_added: 3.2.0
+    version_added_collection: community.aws
+  health_check_period:
+    description:
+      - Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
+    required: false
+    default: 300
+    type: int
+  health_check_type:
+    description:
+      - The service you want the health status from, Amazon EC2 or Elastic Load Balancer.
+    required: false
+    default: EC2
+    choices: ['EC2', 'ELB']
+    type: str
+  default_cooldown:
+    description:
+      - The number of seconds after a scaling activity completes before another can begin.
+    default: 300
+    type: int
+  wait_timeout:
+    description:
+      - How long to wait for instances to become viable when replaced.
If you experience the error "Waited too long for ELB instances to be healthy", + try increasing this value. + default: 300 + type: int + wait_for_instances: + description: + - Wait for the ASG instances to be in a ready state before exiting. If instances are behind an ELB, it will wait until the ELB determines all + instances have a lifecycle_state of "InService" and a health_status of "Healthy". + default: true + type: bool + termination_policies: + description: + - An ordered list of criteria used for selecting instances to be removed from the Auto Scaling group when reducing capacity. + - Using I(termination_policies=Default) when modifying an existing AutoScalingGroup will result in the existing policy being retained + instead of changed to C(Default). + - 'Valid values include: C(Default), C(OldestInstance), C(NewestInstance), C(OldestLaunchConfiguration), C(ClosestToNextInstanceHour)' + - 'Full documentation of valid values can be found in the AWS documentation:' + - 'U(https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-instance-termination.html#custom-termination-policy)' + default: Default + type: list + elements: str + notification_topic: + description: + - A SNS topic ARN to send auto scaling notifications to. + type: str + notification_types: + description: + - A list of auto scaling events to trigger notifications on. + default: + - 'autoscaling:EC2_INSTANCE_LAUNCH' + - 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR' + - 'autoscaling:EC2_INSTANCE_TERMINATE' + - 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR' + required: false + type: list + elements: str + suspend_processes: + description: + - A list of scaling processes to suspend. + - 'Valid values include:' + - C(Launch), C(Terminate), C(HealthCheck), C(ReplaceUnhealthy), C(AZRebalance), C(AlarmNotification), C(ScheduledActions), C(AddToLoadBalancer) + - 'Full documentation of valid values can be found in the AWS documentation:' + - 'U(https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html)' + default: [] + type: list + elements: str + metrics_collection: + description: + - Enable ASG metrics collection. + type: bool + default: false + metrics_granularity: + description: + - When I(metrics_collection=true) this will determine the granularity of metrics collected by CloudWatch. + default: "1Minute" + type: str + metrics_list: + description: + - List of autoscaling metrics to collect when I(metrics_collection=true). + default: + - 'GroupMinSize' + - 'GroupMaxSize' + - 'GroupDesiredCapacity' + - 'GroupInServiceInstances' + - 'GroupPendingInstances' + - 'GroupStandbyInstances' + - 'GroupTerminatingInstances' + - 'GroupTotalInstances' + type: list + elements: str +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.boto3 +''' + +EXAMPLES = r''' +# Basic configuration with Launch Configuration + +- amazon.aws.autoscaling_group: + name: special + load_balancers: [ 'lb1', 'lb2' ] + availability_zones: [ 'eu-west-1a', 'eu-west-1b' ] + launch_config_name: 'lc-1' + min_size: 1 + max_size: 10 + desired_capacity: 5 + vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ] + tags: + - environment: production + propagate_at_launch: false + +# Rolling ASG Updates + +# Below is an example of how to assign a new launch config to an ASG and terminate old instances. +# +# All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in +# a rolling fashion with instances using the current launch configuration, "my_new_lc". 
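+# The batch size used for the rolling replacement is controlled by "replace_batch_size" (default 1).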
+# +# This could also be considered a rolling deploy of a pre-baked AMI. +# +# If this is a newly created group, the instances will not be replaced since all instances +# will have the current launch configuration. + +- name: create launch config + community.aws.autoscaling_launch_config: + name: my_new_lc + image_id: ami-lkajsf + key_name: mykey + region: us-east-1 + security_groups: sg-23423 + instance_type: m1.small + assign_public_ip: true + +- amazon.aws.autoscaling_group: + name: myasg + launch_config_name: my_new_lc + health_check_period: 60 + health_check_type: ELB + replace_all_instances: true + min_size: 5 + max_size: 5 + desired_capacity: 5 + region: us-east-1 + +# To only replace a couple of instances instead of all of them, supply a list +# to "replace_instances": + +- amazon.aws.autoscaling_group: + name: myasg + launch_config_name: my_new_lc + health_check_period: 60 + health_check_type: ELB + replace_instances: + - i-b345231 + - i-24c2931 + min_size: 5 + max_size: 5 + desired_capacity: 5 + region: us-east-1 + +# Basic Configuration with Launch Template + +- amazon.aws.autoscaling_group: + name: special + load_balancers: [ 'lb1', 'lb2' ] + availability_zones: [ 'eu-west-1a', 'eu-west-1b' ] + launch_template: + version: '1' + launch_template_name: 'lt-example' + launch_template_id: 'lt-123456' + min_size: 1 + max_size: 10 + desired_capacity: 5 + vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ] + tags: + - environment: production + propagate_at_launch: false + +# Basic Configuration with Launch Template using mixed instance policy + +- amazon.aws.autoscaling_group: + name: special + load_balancers: [ 'lb1', 'lb2' ] + availability_zones: [ 'eu-west-1a', 'eu-west-1b' ] + launch_template: + version: '1' + launch_template_name: 'lt-example' + launch_template_id: 'lt-123456' + mixed_instances_policy: + instance_types: + - t3a.large + - t3.large + - t2.large + instances_distribution: + on_demand_percentage_above_base_capacity: 0 + spot_allocation_strategy: capacity-optimized + min_size: 1 + max_size: 10 + desired_capacity: 5 + vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ] + tags: + - environment: production + propagate_at_launch: false +''' + +RETURN = r''' +--- +auto_scaling_group_name: + description: The unique name of the auto scaling group + returned: success + type: str + sample: "myasg" +auto_scaling_group_arn: + description: The unique ARN of the autoscaling group + returned: success + type: str + sample: "arn:aws:autoscaling:us-east-1:123456789012:autoScalingGroup:6a09ad6d-eeee-1234-b987-ee123ced01ad:autoScalingGroupName/myasg" +availability_zones: + description: The availability zones for the auto scaling group + returned: success + type: list + sample: [ + "us-east-1d" + ] +created_time: + description: Timestamp of create time of the auto scaling group + returned: success + type: str + sample: "2017-11-08T14:41:48.272000+00:00" +default_cooldown: + description: The default cooldown time in seconds. + returned: success + type: int + sample: 300 +desired_capacity: + description: The number of EC2 instances that should be running in this group. + returned: success + type: int + sample: 3 +healthcheck_period: + description: Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health. + returned: success + type: int + sample: 30 +healthcheck_type: + description: The service you want the health status from, one of "EC2" or "ELB". 
+ returned: success + type: str + sample: "ELB" +healthy_instances: + description: Number of instances in a healthy state + returned: success + type: int + sample: 5 +in_service_instances: + description: Number of instances in service + returned: success + type: int + sample: 3 +instance_facts: + description: Dictionary of EC2 instances and their status as it relates to the ASG. + returned: success + type: dict + sample: { + "i-0123456789012": { + "health_status": "Healthy", + "launch_config_name": "public-webapp-production-1", + "lifecycle_state": "InService" + } + } +instances: + description: list of instance IDs in the ASG + returned: success + type: list + sample: [ + "i-0123456789012" + ] +launch_config_name: + description: > + Name of launch configuration associated with the ASG. Same as launch_configuration_name, + provided for compatibility with M(amazon.aws.autoscaling_group) module. + returned: success + type: str + sample: "public-webapp-production-1" +load_balancers: + description: List of load balancers names attached to the ASG. + returned: success + type: list + sample: ["elb-webapp-prod"] +max_instance_lifetime: + description: The maximum amount of time, in seconds, that an instance can be in service. + returned: success + type: int + sample: 604800 +max_size: + description: Maximum size of group + returned: success + type: int + sample: 3 +min_size: + description: Minimum size of group + returned: success + type: int + sample: 1 +mixed_instances_policy: + description: Returns the list of instance types if a mixed instances policy is set. + returned: success + type: list + sample: ["t3.micro", "t3a.micro"] +mixed_instances_policy_full: + description: Returns the full dictionary representation of the mixed instances policy if a mixed instances policy is set. + returned: success + type: dict + sample: { + "instances_distribution": { + "on_demand_allocation_strategy": "prioritized", + "on_demand_base_capacity": 0, + "on_demand_percentage_above_base_capacity": 0, + "spot_allocation_strategy": "capacity-optimized" + }, + "launch_template": { + "launch_template_specification": { + "launch_template_id": "lt-53c2425cffa544c23", + "launch_template_name": "random-LaunchTemplate", + "version": "2" + }, + "overrides": [ + { + "instance_type": "m5.xlarge" + }, + { + "instance_type": "m5a.xlarge" + }, + ] + } + } +pending_instances: + description: Number of instances in pending state + returned: success + type: int + sample: 1 +tags: + description: List of tags for the ASG, and whether or not each tag propagates to instances at launch. 
+  returned: success
+  type: list
+  sample: [
+    {
+      "key": "Name",
+      "value": "public-webapp-production-1",
+      "resource_id": "public-webapp-production-1",
+      "resource_type": "auto-scaling-group",
+      "propagate_at_launch": "true"
+    },
+    {
+      "key": "env",
+      "value": "production",
+      "resource_id": "public-webapp-production-1",
+      "resource_type": "auto-scaling-group",
+      "propagate_at_launch": "true"
+    }
+  ]
+target_group_arns:
+  description: List of ARNs of the target groups that the ASG populates
+  returned: success
+  type: list
+  sample: [
+    "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-host-hello/1a2b3c4d5e6f1a2b",
+    "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-path-world/abcd1234abcd1234"
+  ]
+target_group_names:
+  description: List of names of the target groups that the ASG populates
+  returned: success
+  type: list
+  sample: [
+    "target-group-host-hello",
+    "target-group-path-world"
+  ]
+termination_policies:
+  description: A list of termination policies for the group.
+  returned: success
+  type: list
+  sample: ["Default"]
+unhealthy_instances:
+  description: Number of instances in an unhealthy state
+  returned: success
+  type: int
+  sample: 0
+viable_instances:
+  description: Number of instances in a viable state
+  returned: success
+  type: int
+  sample: 1
+vpc_zone_identifier:
+  description: VPC zone ID / subnet id for the auto scaling group
+  returned: success
+  type: str
+  sample: "subnet-a31ef45f"
+metrics_collection:
+  description: List of enabled AutoScalingGroup metrics
+  returned: success
+  type: list
+  sample: [
+    {
+      "Granularity": "1Minute",
+      "Metric": "GroupInServiceInstances"
+    }
+  ]
+'''
+
+import time
+
+try:
+    import botocore
+except ImportError:
+    pass  # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+ASG_ATTRIBUTES = ('AvailabilityZones', 'DefaultCooldown', 'DesiredCapacity',
+                  'HealthCheckGracePeriod', 'HealthCheckType', 'LaunchConfigurationName',
+                  'LoadBalancerNames', 'MaxInstanceLifetime', 'MaxSize', 'MinSize',
+                  'AutoScalingGroupName', 'PlacementGroup', 'TerminationPolicies',
+                  'VPCZoneIdentifier')
+
+INSTANCE_ATTRIBUTES = ('instance_id', 'health_status', 'lifecycle_state', 'launch_config_name')
+
+backoff_params = dict(retries=10, delay=3, backoff=1.5)
+
+
+@AWSRetry.jittered_backoff(**backoff_params)
+def describe_autoscaling_groups(connection, group_name):
+    pg = connection.get_paginator('describe_auto_scaling_groups')
+    return pg.paginate(AutoScalingGroupNames=[group_name]).build_full_result().get('AutoScalingGroups', [])
+
+
+@AWSRetry.jittered_backoff(**backoff_params)
+def deregister_lb_instances(connection, lb_name, instance_id):
+    connection.deregister_instances_from_load_balancer(LoadBalancerName=lb_name, Instances=[dict(InstanceId=instance_id)])
+
+
+@AWSRetry.jittered_backoff(**backoff_params)
+def describe_instance_health(connection, lb_name, instances):
+    params = dict(LoadBalancerName=lb_name)
+    if instances:
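+        # only scope the health query to specific instances when a subset was requested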
params.update(Instances=instances) + return connection.describe_instance_health(**params) + + +@AWSRetry.jittered_backoff(**backoff_params) +def describe_target_health(connection, target_group_arn, instances): + return connection.describe_target_health(TargetGroupArn=target_group_arn, Targets=instances) + + +@AWSRetry.jittered_backoff(**backoff_params) +def suspend_asg_processes(connection, asg_name, processes): + connection.suspend_processes(AutoScalingGroupName=asg_name, ScalingProcesses=processes) + + +@AWSRetry.jittered_backoff(**backoff_params) +def resume_asg_processes(connection, asg_name, processes): + connection.resume_processes(AutoScalingGroupName=asg_name, ScalingProcesses=processes) + + +@AWSRetry.jittered_backoff(**backoff_params) +def describe_launch_configurations(connection, launch_config_name): + pg = connection.get_paginator('describe_launch_configurations') + return pg.paginate(LaunchConfigurationNames=[launch_config_name]).build_full_result() + + +@AWSRetry.jittered_backoff(**backoff_params) +def describe_launch_templates(connection, launch_template): + if launch_template['launch_template_id'] is not None: + try: + lt = connection.describe_launch_templates(LaunchTemplateIds=[launch_template['launch_template_id']]) + return lt + except is_boto3_error_code('InvalidLaunchTemplateName.NotFoundException'): + module.fail_json(msg="No launch template found matching: %s" % launch_template) + else: + try: + lt = connection.describe_launch_templates(LaunchTemplateNames=[launch_template['launch_template_name']]) + return lt + except is_boto3_error_code('InvalidLaunchTemplateName.NotFoundException'): + module.fail_json(msg="No launch template found matching: %s" % launch_template) + + +@AWSRetry.jittered_backoff(**backoff_params) +def create_asg(connection, **params): + connection.create_auto_scaling_group(**params) + + +@AWSRetry.jittered_backoff(**backoff_params) +def put_notification_config(connection, asg_name, topic_arn, notification_types): + connection.put_notification_configuration( + AutoScalingGroupName=asg_name, + TopicARN=topic_arn, + NotificationTypes=notification_types + ) + + +@AWSRetry.jittered_backoff(**backoff_params) +def del_notification_config(connection, asg_name, topic_arn): + connection.delete_notification_configuration( + AutoScalingGroupName=asg_name, + TopicARN=topic_arn + ) + + +@AWSRetry.jittered_backoff(**backoff_params) +def attach_load_balancers(connection, asg_name, load_balancers): + connection.attach_load_balancers(AutoScalingGroupName=asg_name, LoadBalancerNames=load_balancers) + + +@AWSRetry.jittered_backoff(**backoff_params) +def detach_load_balancers(connection, asg_name, load_balancers): + connection.detach_load_balancers(AutoScalingGroupName=asg_name, LoadBalancerNames=load_balancers) + + +@AWSRetry.jittered_backoff(**backoff_params) +def attach_lb_target_groups(connection, asg_name, target_group_arns): + connection.attach_load_balancer_target_groups(AutoScalingGroupName=asg_name, TargetGroupARNs=target_group_arns) + + +@AWSRetry.jittered_backoff(**backoff_params) +def detach_lb_target_groups(connection, asg_name, target_group_arns): + connection.detach_load_balancer_target_groups(AutoScalingGroupName=asg_name, TargetGroupARNs=target_group_arns) + + +@AWSRetry.jittered_backoff(**backoff_params) +def update_asg(connection, **params): + connection.update_auto_scaling_group(**params) + + +@AWSRetry.jittered_backoff(catch_extra_error_codes=['ScalingActivityInProgress'], **backoff_params) +def delete_asg(connection, asg_name, force_delete): + 
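+    # Deleting can race with in-flight scaling activity, so on top of the
+    # usual jittered backoff this call also retries on the
+    # 'ScalingActivityInProgress' error code (see catch_extra_error_codes
+    # on the decorator above).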
connection.delete_auto_scaling_group(AutoScalingGroupName=asg_name, ForceDelete=force_delete) + + +@AWSRetry.jittered_backoff(**backoff_params) +def terminate_asg_instance(connection, instance_id, decrement_capacity): + connection.terminate_instance_in_auto_scaling_group(InstanceId=instance_id, + ShouldDecrementDesiredCapacity=decrement_capacity) + + +@AWSRetry.jittered_backoff(**backoff_params) +def detach_asg_instances(connection, instance_ids, as_group_name, decrement_capacity): + connection.detach_instances(InstanceIds=instance_ids, AutoScalingGroupName=as_group_name, + ShouldDecrementDesiredCapacity=decrement_capacity) + + +def enforce_required_arguments_for_create(): + ''' As many arguments are not required for autoscale group deletion + they cannot be mandatory arguments for the module, so we enforce + them here ''' + missing_args = [] + if module.params.get('launch_config_name') is None and module.params.get('launch_template') is None: + module.fail_json(msg="Missing either launch_config_name or launch_template for autoscaling group create") + for arg in ('min_size', 'max_size'): + if module.params[arg] is None: + missing_args.append(arg) + if missing_args: + module.fail_json(msg="Missing required arguments for autoscaling group create: %s" % ",".join(missing_args)) + + +def get_properties(autoscaling_group): + properties = dict( + healthy_instances=0, + in_service_instances=0, + unhealthy_instances=0, + pending_instances=0, + viable_instances=0, + terminating_instances=0 + ) + instance_facts = dict() + autoscaling_group_instances = autoscaling_group.get('Instances') + + if autoscaling_group_instances: + properties['instances'] = [i['InstanceId'] for i in autoscaling_group_instances] + for i in autoscaling_group_instances: + instance_facts[i['InstanceId']] = { + 'health_status': i['HealthStatus'], + 'lifecycle_state': i['LifecycleState'] + } + if 'LaunchConfigurationName' in i: + instance_facts[i['InstanceId']]['launch_config_name'] = i['LaunchConfigurationName'] + elif 'LaunchTemplate' in i: + instance_facts[i['InstanceId']]['launch_template'] = i['LaunchTemplate'] + + if i['HealthStatus'] == 'Healthy' and i['LifecycleState'] == 'InService': + properties['viable_instances'] += 1 + + if i['HealthStatus'] == 'Healthy': + properties['healthy_instances'] += 1 + else: + properties['unhealthy_instances'] += 1 + + if i['LifecycleState'] == 'InService': + properties['in_service_instances'] += 1 + if i['LifecycleState'] == 'Terminating': + properties['terminating_instances'] += 1 + if i['LifecycleState'] == 'Pending': + properties['pending_instances'] += 1 + else: + properties['instances'] = [] + + properties['auto_scaling_group_name'] = autoscaling_group.get('AutoScalingGroupName') + properties['auto_scaling_group_arn'] = autoscaling_group.get('AutoScalingGroupARN') + properties['availability_zones'] = autoscaling_group.get('AvailabilityZones') + properties['created_time'] = autoscaling_group.get('CreatedTime') + properties['instance_facts'] = instance_facts + properties['load_balancers'] = autoscaling_group.get('LoadBalancerNames') + if 'LaunchConfigurationName' in autoscaling_group: + properties['launch_config_name'] = autoscaling_group.get('LaunchConfigurationName') + else: + properties['launch_template'] = autoscaling_group.get('LaunchTemplate') + properties['tags'] = autoscaling_group.get('Tags') + properties['max_instance_lifetime'] = autoscaling_group.get('MaxInstanceLifetime') + properties['min_size'] = autoscaling_group.get('MinSize') + properties['max_size'] = 
autoscaling_group.get('MaxSize')
+    properties['desired_capacity'] = autoscaling_group.get('DesiredCapacity')
+    properties['default_cooldown'] = autoscaling_group.get('DefaultCooldown')
+    properties['healthcheck_grace_period'] = autoscaling_group.get('HealthCheckGracePeriod')
+    properties['healthcheck_type'] = autoscaling_group.get('HealthCheckType')
+    properties['termination_policies'] = autoscaling_group.get('TerminationPolicies')
+    properties['target_group_arns'] = autoscaling_group.get('TargetGroupARNs')
+    properties['vpc_zone_identifier'] = autoscaling_group.get('VPCZoneIdentifier')
+    raw_mixed_instance_object = autoscaling_group.get('MixedInstancesPolicy')
+    if raw_mixed_instance_object:
+        properties['mixed_instances_policy_full'] = camel_dict_to_snake_dict(raw_mixed_instance_object)
+        properties['mixed_instances_policy'] = [x['InstanceType'] for x in raw_mixed_instance_object.get('LaunchTemplate').get('Overrides')]
+
+    metrics = autoscaling_group.get('EnabledMetrics')
+    if metrics:
+        metrics.sort(key=lambda x: x["Metric"])
+        properties['metrics_collection'] = metrics
+
+    if properties["target_group_arns"]:
+        elbv2_connection = module.client("elbv2")
+        tg_paginator = elbv2_connection.get_paginator("describe_target_groups")
+        # Limit of 20 similar to https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeLoadBalancers.html
+        tg_chunk_size = 20
+        properties["target_group_names"] = []
+        tg_chunks = [
+            properties["target_group_arns"][i: i + tg_chunk_size]
+            for i in range(0, len(properties["target_group_arns"]), tg_chunk_size)
+        ]
+        for chunk in tg_chunks:
+            tg_result = tg_paginator.paginate(TargetGroupArns=chunk).build_full_result()
+            properties["target_group_names"].extend([tg["TargetGroupName"] for tg in tg_result["TargetGroups"]])
+    else:
+        properties["target_group_names"] = []
+
+    return properties
+
+
+def get_launch_object(connection, ec2_connection):
+    launch_object = dict()
+    launch_config_name = module.params.get('launch_config_name')
+    launch_template = module.params.get('launch_template')
+    mixed_instances_policy = module.params.get('mixed_instances_policy')
+    if launch_config_name is None and launch_template is None:
+        return launch_object
+    elif launch_config_name:
+        try:
+            launch_configs = describe_launch_configurations(connection, launch_config_name)
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Failed to describe launch configurations")
+        if len(launch_configs['LaunchConfigurations']) == 0:
+            module.fail_json(msg="No launch config found with name %s" % launch_config_name)
+        launch_object = {"LaunchConfigurationName": launch_configs['LaunchConfigurations'][0]['LaunchConfigurationName']}
+        return launch_object
+    elif launch_template:
+        lt = describe_launch_templates(ec2_connection, launch_template)['LaunchTemplates'][0]
+        if launch_template['version'] is not None:
+            launch_object = {"LaunchTemplate": {"LaunchTemplateId": lt['LaunchTemplateId'], "Version": launch_template['version']}}
+        else:
+            launch_object = {"LaunchTemplate": {"LaunchTemplateId": lt['LaunchTemplateId'], "Version": str(lt['LatestVersionNumber'])}}
+
+        if mixed_instances_policy:
+            instance_types = mixed_instances_policy.get('instance_types', [])
+            instances_distribution = mixed_instances_policy.get('instances_distribution', {})
+            policy = {
+                'LaunchTemplate': {
+                    'LaunchTemplateSpecification': launch_object['LaunchTemplate']
+                }
+            }
+            if instance_types:
+                policy['LaunchTemplate']['Overrides'] = []
+                for instance_type in instance_types:
+                    instance_type_dict = {'InstanceType': instance_type}
+                    policy['LaunchTemplate']['Overrides'].append(instance_type_dict)
+            if instances_distribution:
+                instances_distribution_params = scrub_none_parameters(instances_distribution)
+                policy['InstancesDistribution'] = snake_dict_to_camel_dict(instances_distribution_params, capitalize_first=True)
+            launch_object['MixedInstancesPolicy'] = policy
+        return launch_object
+
+
+def elb_dreg(asg_connection, group_name, instance_id):
+    as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
+    wait_timeout = module.params.get('wait_timeout')
+    count = 1
+    if as_group['LoadBalancerNames'] and as_group['HealthCheckType'] == 'ELB':
+        elb_connection = module.client('elb')
+    else:
+        return
+
+    for lb in as_group['LoadBalancerNames']:
+        deregister_lb_instances(elb_connection, lb, instance_id)
+        module.debug("De-registering %s from ELB %s" % (instance_id, lb))
+
+    wait_timeout = time.time() + wait_timeout
+    while wait_timeout > time.time() and count > 0:
+        count = 0
+        for lb in as_group['LoadBalancerNames']:
+            lb_instances = describe_instance_health(elb_connection, lb, [])
+            for i in lb_instances['InstanceStates']:
+                if i['InstanceId'] == instance_id and i['State'] == "InService":
+                    count += 1
+                    module.debug("%s: %s, %s" % (i['InstanceId'], i['State'], i['Description']))
+        time.sleep(10)
+
+    if wait_timeout <= time.time():
+        # waiting took too long
+        module.fail_json(msg="Waited too long for instance to deregister. {0}".format(time.asctime()))
+
+
+def elb_healthy(asg_connection, elb_connection, group_name):
+    healthy_instances = set()
+    as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
+    props = get_properties(as_group)
+    # get healthy, inservice instances from ASG
+    instances = []
+    for instance, settings in props['instance_facts'].items():
+        if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
+            instances.append(dict(InstanceId=instance))
+    module.debug("ASG considers the following instances InService and Healthy: %s" % instances)
+    module.debug("ELB instance status:")
+    lb_instances = list()
+    for lb in as_group.get('LoadBalancerNames'):
+        # we catch a race condition that sometimes happens if the instance exists in the ASG
+        # but has not yet shown up in the ELB
+        try:
+            lb_instances = describe_instance_health(elb_connection, lb, instances)
+        except is_boto3_error_code('InvalidInstance'):
+            return None
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
+            module.fail_json_aws(e, msg="Failed to get load balancer.")
+
+        for i in lb_instances.get('InstanceStates'):
+            if i['State'] == "InService":
+                healthy_instances.add(i['InstanceId'])
+                module.debug("ELB Health State %s: %s" % (i['InstanceId'], i['State']))
+    return len(healthy_instances)
+
+
+def tg_healthy(asg_connection, elbv2_connection, group_name):
+    healthy_instances = set()
+    as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
+    props = get_properties(as_group)
+    # get healthy, inservice instances from ASG
+    instances = []
+    for instance, settings in props['instance_facts'].items():
+        if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
+            instances.append(dict(Id=instance))
+    module.debug("ASG considers the following instances InService and Healthy: %s" % instances)
+    module.debug("Target Group instance status:")
+    tg_instances = list()
+    for tg in as_group.get('TargetGroupARNs'):
+        # we catch a race condition that sometimes happens if the instance exists in the ASG
+        # but has not yet shown up in the ELB
+        try:
+            tg_instances = describe_target_health(elbv2_connection, tg, instances)
+        except is_boto3_error_code('InvalidInstance'):
+            return None
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
+            module.fail_json_aws(e, msg="Failed to get target group.")
+
+        for i in tg_instances.get('TargetHealthDescriptions'):
+            if i['TargetHealth']['State'] == "healthy":
+                healthy_instances.add(i['Target']['Id'])
+                module.debug("Target Group Health State %s: %s" % (i['Target']['Id'], i['TargetHealth']['State']))
+    return len(healthy_instances)
+
+
+def wait_for_elb(asg_connection, group_name):
+    wait_timeout = module.params.get('wait_timeout')
+
+    # if the health_check_type is ELB, we want to query the ELBs directly for instance
+    # status, so as to avoid the health_check_grace_period that is awarded to ASG instances
+    as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
+
+    if as_group.get('LoadBalancerNames') and as_group.get('HealthCheckType') == 'ELB':
+        module.debug("Waiting for ELB to consider instances healthy.")
+        elb_connection = module.client('elb')
+
+        wait_timeout = time.time() + wait_timeout
+        # elb_healthy() returns None when it hits the InvalidInstance race;
+        # treat that as "no healthy instances yet" so the loop keeps polling
+        # instead of raising a TypeError on the comparison below.
+        healthy_instances = elb_healthy(asg_connection, elb_connection, group_name) or 0
+
+        while healthy_instances < as_group.get('MinSize') and wait_timeout > time.time():
+            healthy_instances = elb_healthy(asg_connection, elb_connection, group_name) or 0
+            module.debug("ELB thinks %s instances are healthy." % healthy_instances)
+            time.sleep(10)
+        if wait_timeout <= time.time():
+            # waiting took too long
+            module.fail_json(msg="Waited too long for ELB instances to be healthy. %s" % time.asctime())
+        module.debug("Waiting complete. ELB thinks %s instances are healthy." % healthy_instances)
+
+
+def wait_for_target_group(asg_connection, group_name):
+    wait_timeout = module.params.get('wait_timeout')
+
+    # if the health_check_type is ELB, we want to query the target groups directly for instance
+    # status, so as to avoid the health_check_grace_period that is awarded to ASG instances
+    as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
+
+    if as_group.get('TargetGroupARNs') and as_group.get('HealthCheckType') == 'ELB':
+        module.debug("Waiting for Target Group to consider instances healthy.")
+        elbv2_connection = module.client('elbv2')
+
+        wait_timeout = time.time() + wait_timeout
+        # tg_healthy() returns None on the InvalidInstance race; treat it as zero healthy.
+        healthy_instances = tg_healthy(asg_connection, elbv2_connection, group_name) or 0
+
+        while healthy_instances < as_group.get('MinSize') and wait_timeout > time.time():
+            healthy_instances = tg_healthy(asg_connection, elbv2_connection, group_name) or 0
+            module.debug("Target Group thinks %s instances are healthy." % healthy_instances)
+            time.sleep(10)
+        if wait_timeout <= time.time():
+            # waiting took too long
+            module.fail_json(msg="Waited too long for target group instances to be healthy. %s" % time.asctime())
+        module.debug("Waiting complete. Target Group thinks %s instances are healthy."
% healthy_instances) + + +def suspend_processes(ec2_connection, as_group): + suspend_processes = set(module.params.get('suspend_processes')) + + try: + suspended_processes = set([p['ProcessName'] for p in as_group['SuspendedProcesses']]) + except AttributeError: + # New ASG being created, no suspended_processes defined yet + suspended_processes = set() + + if suspend_processes == suspended_processes: + return False + + resume_processes = list(suspended_processes - suspend_processes) + if resume_processes: + resume_asg_processes(ec2_connection, module.params.get('name'), resume_processes) + + if suspend_processes: + suspend_asg_processes(ec2_connection, module.params.get('name'), list(suspend_processes)) + + return True + + +def create_autoscaling_group(connection): + group_name = module.params.get('name') + load_balancers = module.params['load_balancers'] + target_group_arns = module.params['target_group_arns'] + availability_zones = module.params['availability_zones'] + launch_config_name = module.params.get('launch_config_name') + launch_template = module.params.get('launch_template') + mixed_instances_policy = module.params.get('mixed_instances_policy') + min_size = module.params['min_size'] + max_size = module.params['max_size'] + max_instance_lifetime = module.params.get('max_instance_lifetime') + placement_group = module.params.get('placement_group') + desired_capacity = module.params.get('desired_capacity') + vpc_zone_identifier = module.params.get('vpc_zone_identifier') + set_tags = module.params.get('tags') + purge_tags = module.params.get('purge_tags') + health_check_period = module.params.get('health_check_period') + health_check_type = module.params.get('health_check_type') + default_cooldown = module.params.get('default_cooldown') + wait_for_instances = module.params.get('wait_for_instances') + wait_timeout = module.params.get('wait_timeout') + termination_policies = module.params.get('termination_policies') + notification_topic = module.params.get('notification_topic') + notification_types = module.params.get('notification_types') + metrics_collection = module.params.get('metrics_collection') + metrics_granularity = module.params.get('metrics_granularity') + metrics_list = module.params.get('metrics_list') + + try: + as_groups = describe_autoscaling_groups(connection, group_name) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to describe auto scaling groups.") + + ec2_connection = module.client('ec2') + + if vpc_zone_identifier: + vpc_zone_identifier = ','.join(vpc_zone_identifier) + + asg_tags = [] + for tag in set_tags: + for k, v in tag.items(): + if k != 'propagate_at_launch': + asg_tags.append(dict(Key=k, + Value=to_native(v), + PropagateAtLaunch=bool(tag.get('propagate_at_launch', True)), + ResourceType='auto-scaling-group', + ResourceId=group_name)) + if not as_groups: + if module.check_mode: + module.exit_json(changed=True, msg="Would have created AutoScalingGroup if not in check_mode.") + + if not vpc_zone_identifier and not availability_zones: + availability_zones = module.params['availability_zones'] = [zone['ZoneName'] for + zone in ec2_connection.describe_availability_zones()['AvailabilityZones']] + + enforce_required_arguments_for_create() + + if desired_capacity is None: + desired_capacity = min_size + ag = dict( + AutoScalingGroupName=group_name, + MinSize=min_size, + MaxSize=max_size, + DesiredCapacity=desired_capacity, + Tags=asg_tags, + HealthCheckGracePeriod=health_check_period, + 
HealthCheckType=health_check_type,
+            DefaultCooldown=default_cooldown,
+            TerminationPolicies=termination_policies)
+        if vpc_zone_identifier:
+            ag['VPCZoneIdentifier'] = vpc_zone_identifier
+        if availability_zones:
+            ag['AvailabilityZones'] = availability_zones
+        if placement_group:
+            ag['PlacementGroup'] = placement_group
+        if load_balancers:
+            ag['LoadBalancerNames'] = load_balancers
+        if target_group_arns:
+            ag['TargetGroupARNs'] = target_group_arns
+        if max_instance_lifetime:
+            ag['MaxInstanceLifetime'] = max_instance_lifetime
+
+        launch_object = get_launch_object(connection, ec2_connection)
+        if 'LaunchConfigurationName' in launch_object:
+            ag['LaunchConfigurationName'] = launch_object['LaunchConfigurationName']
+        elif 'LaunchTemplate' in launch_object:
+            if 'MixedInstancesPolicy' in launch_object:
+                ag['MixedInstancesPolicy'] = launch_object['MixedInstancesPolicy']
+            else:
+                ag['LaunchTemplate'] = launch_object['LaunchTemplate']
+        else:
+            module.fail_json(msg="Missing LaunchConfigurationName or LaunchTemplate")
+
+        try:
+            create_asg(connection, **ag)
+            if metrics_collection:
+                connection.enable_metrics_collection(AutoScalingGroupName=group_name, Granularity=metrics_granularity, Metrics=metrics_list)
+
+            all_ag = describe_autoscaling_groups(connection, group_name)
+            if len(all_ag) == 0:
+                module.fail_json(msg="No auto scaling group found with the name %s" % group_name)
+            as_group = all_ag[0]
+            suspend_processes(connection, as_group)
+            if wait_for_instances:
+                wait_for_new_inst(connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
+                if load_balancers:
+                    wait_for_elb(connection, group_name)
+                # Wait for target group health if target group(s) defined
+                if target_group_arns:
+                    wait_for_target_group(connection, group_name)
+            if notification_topic:
+                put_notification_config(connection, group_name, notification_topic, notification_types)
+            as_group = describe_autoscaling_groups(connection, group_name)[0]
+            asg_properties = get_properties(as_group)
+            changed = True
+            return changed, asg_properties
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Failed to create Autoscaling Group.")
+    else:
+        if module.check_mode:
+            module.exit_json(changed=True, msg="Would have modified AutoScalingGroup if required, if not in check_mode.")
+
+        as_group = as_groups[0]
+        initial_asg_properties = get_properties(as_group)
+        changed = False
+
+        if suspend_processes(connection, as_group):
+            changed = True
+
+        # process tag changes
+        have_tags = as_group.get('Tags')
+        want_tags = asg_tags
+        if purge_tags and not want_tags and have_tags:
+            connection.delete_tags(Tags=list(have_tags))
+
+        if len(set_tags) > 0:
+            if have_tags:
+                have_tags.sort(key=lambda x: x["Key"])
+            if want_tags:
+                want_tags.sort(key=lambda x: x["Key"])
+            dead_tags = []
+            have_tag_keyvals = [x['Key'] for x in have_tags]
+            want_tag_keyvals = [x['Key'] for x in want_tags]
+
+            for dead_tag in set(have_tag_keyvals).difference(want_tag_keyvals):
+                changed = True
+                if purge_tags:
+                    dead_tags.append(dict(
+                        ResourceId=as_group['AutoScalingGroupName'], ResourceType='auto-scaling-group', Key=dead_tag))
+                have_tags = [have_tag for have_tag in have_tags if have_tag['Key'] != dead_tag]
+
+            if dead_tags:
+                connection.delete_tags(Tags=dead_tags)
+
+            zipped = zip(have_tags, want_tags)
+            if len(have_tags) != len(want_tags) or not all(x == y for x, y in zipped):
+                changed = True
+                connection.create_or_update_tags(Tags=asg_tags)
+
+        # Handle load balancer attachments/detachments
+        # Attach load balancers if they are specified but none currently exist
+        if load_balancers and not as_group['LoadBalancerNames']:
+            changed = True
+            try:
+                attach_load_balancers(connection, group_name, load_balancers)
+            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                module.fail_json_aws(e, msg="Failed to update Autoscaling Group.")
+
+        # Update load balancers if they are specified and one or more already exists
+        elif as_group['LoadBalancerNames']:
+            change_load_balancers = load_balancers is not None
+            # Get differences
+            if not load_balancers:
+                load_balancers = list()
+            wanted_elbs = set(load_balancers)
+
+            has_elbs = set(as_group['LoadBalancerNames'])
+            # check if all requested are already existing
+            if has_elbs - wanted_elbs and change_load_balancers:
+                # if wanted contains less than existing, then we need to delete some
+                elbs_to_detach = has_elbs.difference(wanted_elbs)
+                if elbs_to_detach:
+                    changed = True
+                    try:
+                        detach_load_balancers(connection, group_name, list(elbs_to_detach))
+                    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                        module.fail_json_aws(e, msg="Failed to detach load balancers {0}".format(elbs_to_detach))
+            if wanted_elbs - has_elbs:
+                # if has contains less than wanted, then we need to add some
+                elbs_to_attach = wanted_elbs.difference(has_elbs)
+                if elbs_to_attach:
+                    changed = True
+                    try:
+                        attach_load_balancers(connection, group_name, list(elbs_to_attach))
+                    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                        module.fail_json_aws(e, msg="Failed to attach load balancers {0}".format(elbs_to_attach))
+
+        # Handle target group attachments/detachments
+        # Attach target groups if they are specified but none currently exist
+        if target_group_arns and not as_group['TargetGroupARNs']:
+            changed = True
+            try:
+                attach_lb_target_groups(connection, group_name, target_group_arns)
+            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                module.fail_json_aws(e, msg="Failed to update Autoscaling Group.")
+        # Update target groups if they are specified and one or more already exists
+        elif target_group_arns is not None and as_group['TargetGroupARNs']:
+            # Get differences
+            wanted_tgs = set(target_group_arns)
+            has_tgs = set(as_group['TargetGroupARNs'])
+
+            tgs_to_detach = has_tgs.difference(wanted_tgs)
+            if tgs_to_detach:
+                changed = True
+                try:
+                    detach_lb_target_groups(connection, group_name, list(tgs_to_detach))
+                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                    module.fail_json_aws(e, msg="Failed to detach load balancer target groups {0}".format(tgs_to_detach))
+
+            tgs_to_attach = wanted_tgs.difference(has_tgs)
+            if tgs_to_attach:
+                changed = True
+                try:
+                    attach_lb_target_groups(connection, group_name, list(tgs_to_attach))
+                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                    module.fail_json_aws(e, msg="Failed to attach load balancer target groups {0}".format(tgs_to_attach))
+
+        # check for attributes that aren't required for updating an existing ASG
+        # check if min_size/max_size/desired capacity have been specified and if not use ASG values
+        if min_size is None:
+            min_size = as_group['MinSize']
+        if max_size is None:
+            max_size = as_group['MaxSize']
+        if desired_capacity is None:
+            desired_capacity = as_group['DesiredCapacity']
+        ag = dict(
+            AutoScalingGroupName=group_name,
+            MinSize=min_size,
+            MaxSize=max_size,
+            DesiredCapacity=desired_capacity,
+            HealthCheckGracePeriod=health_check_period,
+            HealthCheckType=health_check_type,
+            DefaultCooldown=default_cooldown,
+            TerminationPolicies=termination_policies)
+
+        # Get the launch object (config or template) if one is provided in args or use the existing one attached to ASG if not.
+        launch_object = get_launch_object(connection, ec2_connection)
+        if 'LaunchConfigurationName' in launch_object:
+            ag['LaunchConfigurationName'] = launch_object['LaunchConfigurationName']
+        elif 'LaunchTemplate' in launch_object:
+            if 'MixedInstancesPolicy' in launch_object:
+                ag['MixedInstancesPolicy'] = launch_object['MixedInstancesPolicy']
+            else:
+                ag['LaunchTemplate'] = launch_object['LaunchTemplate']
+        else:
+            try:
+                ag['LaunchConfigurationName'] = as_group['LaunchConfigurationName']
+            except KeyError:
+                launch_template = as_group['LaunchTemplate']
+                # Prefer LaunchTemplateId over Name as it's more specific. Only one can be used for update_asg.
+                ag['LaunchTemplate'] = {"LaunchTemplateId": launch_template['LaunchTemplateId'], "Version": launch_template['Version']}
+
+        if availability_zones:
+            ag['AvailabilityZones'] = availability_zones
+        if vpc_zone_identifier:
+            ag['VPCZoneIdentifier'] = vpc_zone_identifier
+        if max_instance_lifetime is not None:
+            ag['MaxInstanceLifetime'] = max_instance_lifetime
+
+        try:
+            update_asg(connection, **ag)
+
+            if metrics_collection:
+                connection.enable_metrics_collection(AutoScalingGroupName=group_name, Granularity=metrics_granularity, Metrics=metrics_list)
+            else:
+                connection.disable_metrics_collection(AutoScalingGroupName=group_name, Metrics=metrics_list)
+
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Failed to update autoscaling group")
+
+        if notification_topic:
+            try:
+                put_notification_config(connection, group_name, notification_topic, notification_types)
+            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                module.fail_json_aws(e, msg="Failed to update Autoscaling Group notifications.")
+        if wait_for_instances:
+            wait_for_new_inst(connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
+            # Wait for ELB health if ELB(s) defined
+            if load_balancers:
+                module.debug('\tWAITING FOR ELB HEALTH')
+                wait_for_elb(connection, group_name)
+            # Wait for target group health if target group(s) defined
+            if target_group_arns:
+                module.debug('\tWAITING FOR TG HEALTH')
+                wait_for_target_group(connection, group_name)
+
+        try:
+            as_group = describe_autoscaling_groups(connection, group_name)[0]
+            asg_properties = get_properties(as_group)
+            if asg_properties != initial_asg_properties:
+                changed = True
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Failed to read existing Autoscaling Groups.")
+        return changed, asg_properties
+
+
+def delete_autoscaling_group(connection):
+    group_name = module.params.get('name')
+    notification_topic = module.params.get('notification_topic')
+    wait_for_instances = module.params.get('wait_for_instances')
+    wait_timeout = module.params.get('wait_timeout')
+
+    if notification_topic:
+        del_notification_config(connection, group_name, notification_topic)
+    groups = describe_autoscaling_groups(connection, group_name)
+    if groups:
+        if module.check_mode:
+            module.exit_json(changed=True, msg="Would have deleted AutoScalingGroup if not in check_mode.")
+        wait_timeout = time.time() + wait_timeout
+        if not wait_for_instances:
+            delete_asg(connection, group_name,
force_delete=True) + else: + updated_params = dict(AutoScalingGroupName=group_name, MinSize=0, MaxSize=0, DesiredCapacity=0) + update_asg(connection, **updated_params) + instances = True + while instances and wait_for_instances and wait_timeout >= time.time(): + tmp_groups = describe_autoscaling_groups(connection, group_name) + if tmp_groups: + tmp_group = tmp_groups[0] + if not tmp_group.get('Instances'): + instances = False + time.sleep(10) + + if wait_timeout <= time.time(): + # waiting took too long + module.fail_json(msg="Waited too long for old instances to terminate. %s" % time.asctime()) + + delete_asg(connection, group_name, force_delete=False) + while describe_autoscaling_groups(connection, group_name) and wait_timeout >= time.time(): + time.sleep(5) + if wait_timeout <= time.time(): + # waiting took too long + module.fail_json(msg="Waited too long for ASG to delete. %s" % time.asctime()) + return True + + return False + + +def get_chunks(l, n): + for i in range(0, len(l), n): + yield l[i:i + n] + + +def update_size(connection, group, max_size, min_size, dc): + module.debug("setting ASG sizes") + module.debug("minimum size: %s, desired_capacity: %s, max size: %s" % (min_size, dc, max_size)) + updated_group = dict() + updated_group['AutoScalingGroupName'] = group['AutoScalingGroupName'] + updated_group['MinSize'] = min_size + updated_group['MaxSize'] = max_size + updated_group['DesiredCapacity'] = dc + update_asg(connection, **updated_group) + + +def replace(connection): + batch_size = module.params.get('replace_batch_size') + wait_timeout = module.params.get('wait_timeout') + wait_for_instances = module.params.get('wait_for_instances') + group_name = module.params.get('name') + max_size = module.params.get('max_size') + min_size = module.params.get('min_size') + desired_capacity = module.params.get('desired_capacity') + launch_config_name = module.params.get('launch_config_name') + + # Required to maintain the default value being set to 'true' + if launch_config_name: + lc_check = module.params.get('lc_check') + else: + lc_check = False + # Mirror above behavior for Launch Templates + launch_template = module.params.get('launch_template') + if launch_template: + lt_check = module.params.get('lt_check') + else: + lt_check = False + replace_instances = module.params.get('replace_instances') + replace_all_instances = module.params.get('replace_all_instances') + + as_group = describe_autoscaling_groups(connection, group_name)[0] + if desired_capacity is None: + desired_capacity = as_group['DesiredCapacity'] + + if wait_for_instances: + wait_for_new_inst(connection, group_name, wait_timeout, as_group['MinSize'], 'viable_instances') + + props = get_properties(as_group) + instances = props['instances'] + if replace_all_instances: + # If replacing all instances, then set replace_instances to current set + # This allows replace_instances and replace_all_instances to behave same + replace_instances = instances + if replace_instances: + instances = replace_instances + + # check to see if instances are replaceable if checking launch configs + if launch_config_name: + new_instances, old_instances = get_instances_by_launch_config(props, lc_check, instances) + elif launch_template: + new_instances, old_instances = get_instances_by_launch_template(props, lt_check, instances) + + num_new_inst_needed = desired_capacity - len(new_instances) + + if lc_check or lt_check: + if num_new_inst_needed == 0 and old_instances: + module.debug("No new instances needed, but old instances are present. 
Removing old instances") + terminate_batch(connection, old_instances, instances, True) + as_group = describe_autoscaling_groups(connection, group_name)[0] + props = get_properties(as_group) + changed = True + return changed, props + + # we don't want to spin up extra instances if not necessary + if num_new_inst_needed < batch_size: + module.debug("Overriding batch size to %s" % num_new_inst_needed) + batch_size = num_new_inst_needed + + if not old_instances: + changed = False + return changed, props + + # check if min_size/max_size/desired capacity have been specified and if not use ASG values + if min_size is None: + min_size = as_group['MinSize'] + if max_size is None: + max_size = as_group['MaxSize'] + + # set temporary settings and wait for them to be reached + # This should get overwritten if the number of instances left is less than the batch size. + + as_group = describe_autoscaling_groups(connection, group_name)[0] + update_size(connection, as_group, max_size + batch_size, min_size + batch_size, desired_capacity + batch_size) + + if wait_for_instances: + wait_for_new_inst(connection, group_name, wait_timeout, as_group['MinSize'] + batch_size, 'viable_instances') + wait_for_elb(connection, group_name) + wait_for_target_group(connection, group_name) + + as_group = describe_autoscaling_groups(connection, group_name)[0] + props = get_properties(as_group) + instances = props['instances'] + if replace_instances: + instances = replace_instances + + module.debug("beginning main loop") + for i in get_chunks(instances, batch_size): + # break out of this loop if we have enough new instances + break_early, desired_size, term_instances = terminate_batch(connection, i, instances, False) + + if wait_for_instances: + wait_for_term_inst(connection, term_instances) + wait_for_new_inst(connection, group_name, wait_timeout, desired_size, 'viable_instances') + wait_for_elb(connection, group_name) + wait_for_target_group(connection, group_name) + + if break_early: + module.debug("breaking loop") + break + + update_size(connection, as_group, max_size, min_size, desired_capacity) + as_group = describe_autoscaling_groups(connection, group_name)[0] + asg_properties = get_properties(as_group) + module.debug("Rolling update complete.") + changed = True + return changed, asg_properties + + +def detach(connection): + group_name = module.params.get('name') + detach_instances = module.params.get('detach_instances') + as_group = describe_autoscaling_groups(connection, group_name)[0] + decrement_desired_capacity = module.params.get('decrement_desired_capacity') + min_size = module.params.get('min_size') + props = get_properties(as_group) + instances = props['instances'] + + # check if provided instance exists in asg, create list of instances to detach which exist in asg + instances_to_detach = [] + for instance_id in detach_instances: + if instance_id in instances: + instances_to_detach.append(instance_id) + + # check if setting decrement_desired_capacity will make desired_capacity smaller + # than the currently set minimum size in ASG configuration + if decrement_desired_capacity: + decremented_desired_capacity = len(instances) - len(instances_to_detach) + if min_size and min_size > decremented_desired_capacity: + module.fail_json( + msg="Detaching instance(s) with 'decrement_desired_capacity' flag set reduces number of instances to {0}\ + which is below current min_size {1}, please update AutoScalingGroup Sizes properly.".format(decremented_desired_capacity, min_size)) + + if instances_to_detach: + try: + 
detach_asg_instances(connection, instances_to_detach, group_name, decrement_desired_capacity)
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Failed to detach instances from AutoScaling Group")
+
+    asg_properties = get_properties(as_group)
+    return True, asg_properties
+
+
+def get_instances_by_launch_config(props, lc_check, initial_instances):
+    new_instances = []
+    old_instances = []
+    # old instances are those that have the old launch config
+    if lc_check:
+        for i in props['instances']:
+            # Check if migrating from launch_template to launch_config first
+            if 'launch_template' in props['instance_facts'][i]:
+                old_instances.append(i)
+            elif props['instance_facts'][i].get('launch_config_name') == props['launch_config_name']:
+                new_instances.append(i)
+            else:
+                old_instances.append(i)
+    else:
+        module.debug("Comparing initial instances with current: %s" % initial_instances)
+        for i in props['instances']:
+            if i not in initial_instances:
+                new_instances.append(i)
+            else:
+                old_instances.append(i)
+
+    module.debug("New instances: %s, %s" % (len(new_instances), new_instances))
+    module.debug("Old instances: %s, %s" % (len(old_instances), old_instances))
+
+    return new_instances, old_instances
+
+
+def get_instances_by_launch_template(props, lt_check, initial_instances):
+    new_instances = []
+    old_instances = []
+    # old instances are those that have the old launch template or version of the same launch template
+    if lt_check:
+        for i in props['instances']:
+            # Check if migrating from launch_config_name to launch_template_name first
+            if 'launch_config_name' in props['instance_facts'][i]:
+                old_instances.append(i)
+            elif props['instance_facts'][i].get('launch_template') == props['launch_template']:
+                new_instances.append(i)
+            else:
+                old_instances.append(i)
+    else:
+        module.debug("Comparing initial instances with current: %s" % initial_instances)
+        for i in props['instances']:
+            if i not in initial_instances:
+                new_instances.append(i)
+            else:
+                old_instances.append(i)
+
+    module.debug("New instances: %s, %s" % (len(new_instances), new_instances))
+    module.debug("Old instances: %s, %s" % (len(old_instances), old_instances))
+
+    return new_instances, old_instances
+
+
+def list_purgeable_instances(props, lc_check, lt_check, replace_instances, initial_instances):
+    instances_to_terminate = []
+    instances = (inst_id for inst_id in replace_instances if inst_id in props['instances'])
+    # check to make sure instances given are actually in the given ASG
+    # and they have a non-current launch config
+    # (test the parameter values, not key membership: both keys always exist in module.params)
+    if module.params.get('launch_config_name'):
+        if lc_check:
+            for i in instances:
+                if (
+                    'launch_template' in props['instance_facts'][i]
+                    or props['instance_facts'][i]['launch_config_name'] != props['launch_config_name']
+                ):
+                    instances_to_terminate.append(i)
+        else:
+            for i in instances:
+                if i in initial_instances:
+                    instances_to_terminate.append(i)
+    elif module.params.get('launch_template'):
+        if lt_check:
+            for i in instances:
+                if (
+                    'launch_config_name' in props['instance_facts'][i]
+                    or props['instance_facts'][i]['launch_template'] != props['launch_template']
+                ):
+                    instances_to_terminate.append(i)
+        else:
+            for i in instances:
+                if i in initial_instances:
+                    instances_to_terminate.append(i)
+
+    return instances_to_terminate
+
+
+def terminate_batch(connection, replace_instances, initial_instances, leftovers=False):
+    batch_size = module.params.get('replace_batch_size')
+    min_size = module.params.get('min_size')
+    desired_capacity = module.params.get('desired_capacity')
+    group_name = module.params.get('name')
+    lc_check = module.params.get('lc_check')
+    lt_check = module.params.get('lt_check')
+    decrement_capacity = False
+    break_loop = False
+
+    as_group = describe_autoscaling_groups(connection, group_name)[0]
+    if desired_capacity is None:
+        desired_capacity = as_group['DesiredCapacity']
+
+    props = get_properties(as_group)
+    desired_size = as_group['MinSize']
+    if module.params.get('launch_config_name'):
+        new_instances, old_instances = get_instances_by_launch_config(props, lc_check, initial_instances)
+    else:
+        new_instances, old_instances = get_instances_by_launch_template(props, lt_check, initial_instances)
+    num_new_inst_needed = desired_capacity - len(new_instances)
+
+    # check to make sure instances given are actually in the given ASG
+    # and they have a non-current launch config
+    instances_to_terminate = list_purgeable_instances(props, lc_check, lt_check, replace_instances, initial_instances)
+
+    module.debug("new instances needed: %s" % num_new_inst_needed)
+    module.debug("new instances: %s" % new_instances)
+    module.debug("old instances: %s" % old_instances)
+    module.debug("batch instances: %s" % ",".join(instances_to_terminate))
+
+    if num_new_inst_needed == 0:
+        decrement_capacity = True
+        if as_group['MinSize'] != min_size:
+            if min_size is None:
+                min_size = as_group['MinSize']
+            updated_params = dict(AutoScalingGroupName=as_group['AutoScalingGroupName'], MinSize=min_size)
+            update_asg(connection, **updated_params)
+            module.debug("Updating minimum size back to original of %s" % min_size)
+        # if there are some leftover old instances, but we are already at capacity with new ones
+        # we don't want to decrement capacity
+        if leftovers:
+            decrement_capacity = False
+        break_loop = True
+        instances_to_terminate = old_instances
+        desired_size = min_size
+        module.debug("No new instances needed")
+
+    if num_new_inst_needed < batch_size and num_new_inst_needed != 0:
+        instances_to_terminate = instances_to_terminate[:num_new_inst_needed]
+        decrement_capacity = False
+        break_loop = False
+        module.debug("%s new instances needed" % num_new_inst_needed)
+
+    module.debug("decrementing capacity: %s" % decrement_capacity)
+
+    for instance_id in instances_to_terminate:
+        elb_dreg(connection, group_name, instance_id)
+        module.debug("terminating instance: %s" % instance_id)
+        terminate_asg_instance(connection, instance_id, decrement_capacity)
+
+    # we wait to make sure the machines we marked as Unhealthy are
+    # no longer in the list
+
+    return break_loop, desired_size, instances_to_terminate
+
+
+def wait_for_term_inst(connection, term_instances):
+    wait_timeout = module.params.get('wait_timeout')
+    group_name = module.params.get('name')
+    as_group = describe_autoscaling_groups(connection, group_name)[0]
+    count = 1
+    wait_timeout = time.time() + wait_timeout
+    while wait_timeout > time.time() and count > 0:
+        module.debug("waiting for instances to terminate")
+        count = 0
+        as_group = describe_autoscaling_groups(connection, group_name)[0]
+        props = get_properties(as_group)
+        instance_facts = props['instance_facts']
+        instances = (i for i in instance_facts if i in term_instances)
+        for i in instances:
+            lifecycle = instance_facts[i]['lifecycle_state']
+            health = instance_facts[i]['health_status']
+            module.debug("Instance %s has state of %s,%s" % (i, lifecycle, health))
+            if lifecycle.startswith('Terminating') or health == 'Unhealthy':
+                count += 1
+        time.sleep(10)
+
+    if wait_timeout <= time.time():
+        # waiting took too long
module.fail_json(msg="Waited too long for old instances to terminate. %s" % time.asctime()) + + +def wait_for_new_inst(connection, group_name, wait_timeout, desired_size, prop): + # make sure we have the latest stats after that last loop. + as_group = describe_autoscaling_groups(connection, group_name)[0] + props = get_properties(as_group) + module.debug("Waiting for %s = %s, currently %s" % (prop, desired_size, props[prop])) + # now we make sure that we have enough instances in a viable state + wait_timeout = time.time() + wait_timeout + while wait_timeout > time.time() and desired_size > props[prop]: + module.debug("Waiting for %s = %s, currently %s" % (prop, desired_size, props[prop])) + time.sleep(10) + as_group = describe_autoscaling_groups(connection, group_name)[0] + props = get_properties(as_group) + if wait_timeout <= time.time(): + # waiting took too long + module.fail_json(msg="Waited too long for new instances to become viable. %s" % time.asctime()) + module.debug("Reached %s: %s" % (prop, desired_size)) + return props + + +def asg_exists(connection): + group_name = module.params.get('name') + as_group = describe_autoscaling_groups(connection, group_name) + return bool(len(as_group)) + + +def main(): + argument_spec = dict( + name=dict(required=True, type='str'), + load_balancers=dict(type='list', elements='str'), + target_group_arns=dict(type='list', elements='str'), + availability_zones=dict(type='list', elements='str'), + launch_config_name=dict(type='str'), + launch_template=dict( + type='dict', + default=None, + options=dict( + version=dict(type='str'), + launch_template_name=dict(type='str'), + launch_template_id=dict(type='str'), + ) + ), + min_size=dict(type='int'), + max_size=dict(type='int'), + max_instance_lifetime=dict(type='int'), + mixed_instances_policy=dict( + type='dict', + default=None, + options=dict( + instance_types=dict( + type='list', + elements='str' + ), + instances_distribution=dict( + type='dict', + default=None, + options=dict( + on_demand_allocation_strategy=dict(type='str'), + on_demand_base_capacity=dict(type='int'), + on_demand_percentage_above_base_capacity=dict(type='int'), + spot_allocation_strategy=dict(type='str'), + spot_instance_pools=dict(type='int'), + spot_max_price=dict(type='str'), + ) + ) + ) + ), + placement_group=dict(type='str'), + desired_capacity=dict(type='int'), + vpc_zone_identifier=dict(type='list', elements='str'), + replace_batch_size=dict(type='int', default=1), + replace_all_instances=dict(type='bool', default=False), + replace_instances=dict(type='list', default=[], elements='str'), + detach_instances=dict(type='list', default=[], elements='str'), + decrement_desired_capacity=dict(type='bool', default=False), + lc_check=dict(type='bool', default=True), + lt_check=dict(type='bool', default=True), + wait_timeout=dict(type='int', default=300), + state=dict(default='present', choices=['present', 'absent']), + tags=dict(type='list', default=[], elements='dict'), + purge_tags=dict(type='bool', default=False), + health_check_period=dict(type='int', default=300), + health_check_type=dict(default='EC2', choices=['EC2', 'ELB']), + default_cooldown=dict(type='int', default=300), + wait_for_instances=dict(type='bool', default=True), + termination_policies=dict(type='list', default='Default', elements='str'), + notification_topic=dict(type='str', default=None), + notification_types=dict( + type='list', + default=[ + 'autoscaling:EC2_INSTANCE_LAUNCH', + 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR', + 'autoscaling:EC2_INSTANCE_TERMINATE', 
+ 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR' + ], + elements='str' + ), + suspend_processes=dict(type='list', default=[], elements='str'), + metrics_collection=dict(type='bool', default=False), + metrics_granularity=dict(type='str', default='1Minute'), + metrics_list=dict( + type='list', + default=[ + 'GroupMinSize', + 'GroupMaxSize', + 'GroupDesiredCapacity', + 'GroupInServiceInstances', + 'GroupPendingInstances', + 'GroupStandbyInstances', + 'GroupTerminatingInstances', + 'GroupTotalInstances' + ], + elements='str' + ) + ) + + global module + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[ + ['replace_all_instances', 'replace_instances'], + ['replace_all_instances', 'detach_instances'], + ['launch_config_name', 'launch_template'], + ] + ) + + state = module.params.get('state') + replace_instances = module.params.get('replace_instances') + replace_all_instances = module.params.get('replace_all_instances') + detach_instances = module.params.get('detach_instances') + + connection = module.client('autoscaling') + changed = create_changed = replace_changed = detach_changed = False + exists = asg_exists(connection) + + if state == 'present': + create_changed, asg_properties = create_autoscaling_group(connection) + elif state == 'absent': + changed = delete_autoscaling_group(connection) + module.exit_json(changed=changed) + + # Only replace instances if asg existed at start of call + if ( + exists + and (replace_all_instances or replace_instances) + and (module.params.get('launch_config_name') or module.params.get('launch_template')) + ): + replace_changed, asg_properties = replace(connection) + + # Only detach instances if asg existed at start of call + if ( + exists + and (detach_instances) + and (module.params.get('launch_config_name') or module.params.get('launch_template')) + ): + detach_changed, asg_properties = detach(connection) + + if create_changed or replace_changed or detach_changed: + changed = True + + module.exit_json(changed=changed, **asg_properties) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/autoscaling_group_info.py b/ansible_collections/amazon/aws/plugins/modules/autoscaling_group_info.py new file mode 100644 index 000000000..c33d0352f --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/autoscaling_group_info.py @@ -0,0 +1,473 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: autoscaling_group_info +version_added: 5.0.0 +short_description: Gather information about EC2 Auto Scaling Groups (ASGs) in AWS +description: + - Gather information about EC2 Auto Scaling Groups (ASGs) in AWS. + - Prior to release 5.0.0 this module was called C(community.aws.ec2_asg_info). + The usage did not change. + - This module was originally added to C(community.aws) in release 1.0.0. +author: + - "Rob White (@wimnat)" +options: + name: + description: + - The prefix or name of the auto scaling group(s) you are searching for. + - "Note: This is a regular expression match with implicit '^' (beginning of string). Append '$' for a complete name match." 
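+      - "For example, I(name=public-webserver-asg$) matches only a group named exactly C(public-webserver-asg)."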
+    type: str
+    required: false
+  tags:
+    description:
+      - >
+        A dictionary/hash of tags in the format { tag1_name: 'tag1_value', tag2_name: 'tag2_value' } to match against the auto scaling
+        group(s) you are searching for.
+    required: false
+    type: dict
+extends_documentation_fragment:
+  - amazon.aws.aws
+  - amazon.aws.ec2
+  - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Find all groups
+  amazon.aws.autoscaling_group_info:
+  register: asgs
+
+- name: Find a group with matching name/prefix
+  amazon.aws.autoscaling_group_info:
+    name: public-webserver-asg
+  register: asgs
+
+- name: Find a group with matching tags
+  amazon.aws.autoscaling_group_info:
+    tags:
+      project: webapp
+      env: production
+  register: asgs
+
+- name: Find a group with matching name/prefix and tags
+  amazon.aws.autoscaling_group_info:
+    name: myproject
+    tags:
+      env: production
+  register: asgs
+
+- name: Fail if no groups are found
+  amazon.aws.autoscaling_group_info:
+    name: public-webserver-asg
+  register: asgs
+  failed_when: asgs.results | length == 0
+
+- name: Fail if more than 1 group is found
+  amazon.aws.autoscaling_group_info:
+    name: public-webserver-asg
+  register: asgs
+  failed_when: asgs.results | length > 1
+'''
+
+RETURN = '''
+---
+auto_scaling_group_arn:
+    description: The Amazon Resource Name of the ASG.
+    returned: success
+    type: str
+    sample: "arn:aws:autoscaling:us-west-2:123456789012:autoScalingGroup:10787c52-0bcb-427d-82ba-c8e4b008ed2e:autoScalingGroupName/public-webapp-production-1"
+auto_scaling_group_name:
+    description: Name of autoscaling group.
+    returned: success
+    type: str
+    sample: "public-webapp-production-1"
+availability_zones:
+    description: List of Availability Zones that are enabled for this ASG.
+    returned: success
+    type: list
+    sample: ["us-west-2a", "us-west-2b", "us-west-2c"]
+created_time:
+    description: The date and time this ASG was created, in ISO 8601 format.
+    returned: success
+    type: str
+    sample: "2015-11-25T00:05:36.309Z"
+default_cooldown:
+    description: The default cooldown time in seconds.
+    returned: success
+    type: int
+    sample: 300
+desired_capacity:
+    description: The number of EC2 instances that should be running in this group.
+    returned: success
+    type: int
+    sample: 3
+health_check_period:
+    description: Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
+    returned: success
+    type: int
+    sample: 30
+health_check_type:
+    description: The service you want the health status from, one of "EC2" or "ELB".
+    returned: success
+    type: str
+    sample: "ELB"
+instances:
+    description: List of EC2 instances and their status as it relates to the ASG.
+    returned: success
+    type: list
+    sample: [
+        {
+            "availability_zone": "us-west-2a",
+            "health_status": "Healthy",
+            "instance_id": "i-es22ad25",
+            "launch_configuration_name": "public-webapp-production-1",
+            "lifecycle_state": "InService",
+            "protected_from_scale_in": "false"
+        }
+    ]
+launch_config_name:
+    description: >
+      Name of launch configuration associated with the ASG. Same as launch_configuration_name,
+      provided for compatibility with M(amazon.aws.autoscaling_group) module.
+    returned: success
+    type: str
+    sample: "public-webapp-production-1"
+launch_configuration_name:
+    description: Name of launch configuration associated with the ASG.
+    returned: success
+    type: str
+    sample: "public-webapp-production-1"
+lifecycle_hooks:
+    description: List of lifecycle hooks for the ASG.
+    returned: success
+    type: list
+    sample: [
+        {
+            "AutoScalingGroupName": "public-webapp-production-1",
+            "DefaultResult": "ABANDON",
+            "GlobalTimeout": 172800,
+            "HeartbeatTimeout": 3600,
+            "LifecycleHookName": "instance-launch",
+            "LifecycleTransition": "autoscaling:EC2_INSTANCE_LAUNCHING"
+        },
+        {
+            "AutoScalingGroupName": "public-webapp-production-1",
+            "DefaultResult": "ABANDON",
+            "GlobalTimeout": 172800,
+            "HeartbeatTimeout": 3600,
+            "LifecycleHookName": "instance-terminate",
+            "LifecycleTransition": "autoscaling:EC2_INSTANCE_TERMINATING"
+        }
+    ]
+load_balancer_names:
+    description: List of load balancer names attached to the ASG.
+    returned: success
+    type: list
+    sample: ["elb-webapp-prod"]
+max_size:
+    description: Maximum size of group.
+    returned: success
+    type: int
+    sample: 3
+min_size:
+    description: Minimum size of group.
+    returned: success
+    type: int
+    sample: 1
+new_instances_protected_from_scale_in:
+    description: Whether or not new instances are protected from automatic scale-in.
+    returned: success
+    type: bool
+    sample: "false"
+placement_group:
+    description: Placement group into which instances are launched, if any.
+    returned: success
+    type: str
+    sample: None
+status:
+    description: The current state of the group when DeleteAutoScalingGroup is in progress.
+    returned: success
+    type: str
+    sample: None
+tags:
+    description: List of tags for the ASG, and whether or not each tag propagates to instances at launch.
+    returned: success
+    type: list
+    sample: [
+        {
+            "key": "Name",
+            "value": "public-webapp-production-1",
+            "resource_id": "public-webapp-production-1",
+            "resource_type": "auto-scaling-group",
+            "propagate_at_launch": "true"
+        },
+        {
+            "key": "env",
+            "value": "production",
+            "resource_id": "public-webapp-production-1",
+            "resource_type": "auto-scaling-group",
+            "propagate_at_launch": "true"
+        }
+    ]
+target_group_arns:
+    description: List of ARNs of the target groups that the ASG populates.
+    returned: success
+    type: list
+    sample: [
+        "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-host-hello/1a2b3c4d5e6f1a2b",
+        "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-path-world/abcd1234abcd1234"
+    ]
+target_group_names:
+    description: List of names of the target groups that the ASG populates.
+    returned: success
+    type: list
+    sample: [
+        "target-group-host-hello",
+        "target-group-path-world"
+    ]
+termination_policies:
+    description: A list of termination policies for the group.
+    returned: success
+    type: list
+    sample: ["Default"]
+'''
+
+import re
+
+try:
+    import botocore
+except ImportError:
+    pass  # caught by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+
+
+def match_asg_tags(tags_to_match, asg):
+    for key, value in tags_to_match.items():
+        for tag in asg['Tags']:
+            if key == tag['Key'] and value == tag['Value']:
+                break
+        else:
+            return False
+    return True
+
+
+def find_asgs(conn, module, name=None, tags=None):
+    """
+    Args:
+        conn (boto3.AutoScaling.Client): Valid Boto3 ASG client.
+        module (AnsibleAWSModule): Initialized Ansible AWS module object, used
+            for error reporting and to build the optional elbv2 client.
+        name (str): Optional name of the ASG you are looking for.
+        tags (dict): Optional dictionary of tags and values to search for.
+
+    Basic Usage:
+        >>> name = 'public-webapp-production'
+        >>> tags = { 'env': 'production' }
+        >>> conn = boto3.client('autoscaling', region_name='us-west-2')
+        >>> results = find_asgs(conn, module, name=name, tags=tags)
+
+    Returns:
+        List of dicts describing the matching ASGs, e.g.
+        [
+            {
+                "auto_scaling_group_arn": (
+                    "arn:aws:autoscaling:us-west-2:123456789012:autoScalingGroup:58abc686-9783-4528-b338-3ad6f1cbbbaf:"
+                    "autoScalingGroupName/public-webapp-production"
+                ),
+                "auto_scaling_group_name": "public-webapp-production",
+                "availability_zones": ["us-west-2c", "us-west-2b", "us-west-2a"],
+                "created_time": "2016-02-02T23:28:42.481000+00:00",
+                "default_cooldown": 300,
+                "desired_capacity": 2,
+                "enabled_metrics": [],
+                "health_check_grace_period": 300,
+                "health_check_type": "ELB",
+                "instances":
+                [
+                    {
+                        "availability_zone": "us-west-2c",
+                        "health_status": "Healthy",
+                        "instance_id": "i-047a12cb",
+                        "launch_configuration_name": "public-webapp-production-1",
+                        "lifecycle_state": "InService",
+                        "protected_from_scale_in": false
+                    },
+                    {
+                        "availability_zone": "us-west-2a",
+                        "health_status": "Healthy",
+                        "instance_id": "i-7a29df2c",
+                        "launch_configuration_name": "public-webapp-production-1",
+                        "lifecycle_state": "InService",
+                        "protected_from_scale_in": false
+                    }
+                ],
+                "launch_config_name": "public-webapp-production-1",
+                "launch_configuration_name": "public-webapp-production-1",
+                "lifecycle_hooks":
+                [
+                    {
+                        "AutoScalingGroupName": "public-webapp-production-1",
+                        "DefaultResult": "ABANDON",
+                        "GlobalTimeout": 172800,
+                        "HeartbeatTimeout": 3600,
+                        "LifecycleHookName": "instance-launch",
+                        "LifecycleTransition": "autoscaling:EC2_INSTANCE_LAUNCHING"
+                    },
+                    {
+                        "AutoScalingGroupName": "public-webapp-production-1",
+                        "DefaultResult": "ABANDON",
+                        "GlobalTimeout": 172800,
+                        "HeartbeatTimeout": 3600,
+                        "LifecycleHookName": "instance-terminate",
+                        "LifecycleTransition": "autoscaling:EC2_INSTANCE_TERMINATING"
+                    }
+                ],
+                "load_balancer_names": ["public-webapp-production-lb"],
+                "max_size": 4,
+                "min_size": 2,
+                "new_instances_protected_from_scale_in": false,
+                "placement_group": None,
+                "status": None,
+                "suspended_processes": [],
+                "tags":
+                [
+                    {
+                        "key": "Name",
+                        "propagate_at_launch": true,
+                        "resource_id": "public-webapp-production",
+                        "resource_type": "auto-scaling-group",
+                        "value": "public-webapp-production"
+                    },
+                    {
+                        "key": "env",
+                        "propagate_at_launch": true,
+                        "resource_id": "public-webapp-production",
+                        "resource_type": "auto-scaling-group",
+                        "value": "production"
+                    }
+                ],
+                "target_group_names": [],
+                "target_group_arns": [],
+                "termination_policies":
+                [
+                    "Default"
+                ],
+                "vpc_zone_identifier":
+                [
+                    "subnet-a1b1c1d1",
+                    "subnet-a2b2c2d2",
+                    "subnet-a3b3c3d3"
+                ]
+            }
+        ]
+    """
+
+    try:
+        asgs_paginator = conn.get_paginator('describe_auto_scaling_groups')
+        asgs = asgs_paginator.paginate().build_full_result()
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg='Failed to describe AutoScalingGroups')
+
+    if not asgs:
+        return asgs
+
+    try:
+        elbv2 = module.client('elbv2')
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):
+        # This is nice to have, not essential
+        elbv2 = None
+    matched_asgs = []
+
+    if name is not None:
+        # limit the search to ASGs whose name begins with the requested name
+        name_prog = re.compile(r'^' + name)
+
+    for asg in asgs['AutoScalingGroups']:
+        if name:
+            matched_name = name_prog.search(asg['AutoScalingGroupName'])
+        else:
+            matched_name = True
+
+        if tags:
+            matched_tags = match_asg_tags(tags, asg)
+        else:
+            matched_tags = True
+
+        if matched_name and 
matched_tags: + asg = camel_dict_to_snake_dict(asg) + # compatibility with autoscaling_group module + if 'launch_configuration_name' in asg: + asg['launch_config_name'] = asg['launch_configuration_name'] + # workaround for https://github.com/ansible/ansible/pull/25015 + if 'target_group_ar_ns' in asg: + asg['target_group_arns'] = asg['target_group_ar_ns'] + del asg['target_group_ar_ns'] + if asg.get('target_group_arns'): + if elbv2: + try: + tg_paginator = elbv2.get_paginator("describe_target_groups") + # Limit of 20 similar to https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeLoadBalancers.html + tg_chunk_size = 20 + asg["target_group_names"] = [] + tg_chunks = [ + asg["target_group_arns"][i: i + tg_chunk_size] + for i in range(0, len(asg["target_group_arns"]), tg_chunk_size) + ] + for chunk in tg_chunks: + tg_result = tg_paginator.paginate(TargetGroupArns=chunk).build_full_result() + asg["target_group_names"].extend( + [tg["TargetGroupName"] for tg in tg_result["TargetGroups"]] + ) + except is_boto3_error_code("TargetGroupNotFound"): + asg["target_group_names"] = [] + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to describe Target Groups") + else: + asg['target_group_names'] = [] + # get asg lifecycle hooks if any + try: + asg_lifecyclehooks = conn.describe_lifecycle_hooks(AutoScalingGroupName=asg['auto_scaling_group_name']) + asg['lifecycle_hooks'] = asg_lifecyclehooks['LifecycleHooks'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to fetch information about ASG lifecycle hooks") + matched_asgs.append(asg) + + return matched_asgs + + +def main(): + + argument_spec = dict( + name=dict(type='str'), + tags=dict(type='dict'), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + asg_name = module.params.get('name') + asg_tags = module.params.get('tags') + + autoscaling = module.client('autoscaling') + + results = find_asgs(autoscaling, module, name=asg_name, tags=asg_tags) + module.exit_json(results=results) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/aws_az_info.py b/ansible_collections/amazon/aws/plugins/modules/aws_az_info.py new file mode 100644 index 000000000..246321b56 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/aws_az_info.py @@ -0,0 +1,186 @@ +#!/usr/bin/python +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = ''' +module: aws_az_info +short_description: Gather information about availability zones in AWS +version_added: 1.0.0 +description: + - Gather information about availability zones in AWS. +author: 'Henrique Rodrigues (@Sodki)' +options: + filters: + description: + - A dict of filters to apply. + - Each dict item consists of a filter key and a filter value. + - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html) for possible filters. + - Filter names and values are case sensitive. + - You can use underscores instead of dashes (-) in the filter keys. + - Filter keys with underscores will take precedence in case of conflict. 
+    required: false
+    default: {}
+    type: dict
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all availability zones
+  amazon.aws.aws_az_info:
+
+- name: Gather information about a single availability zone
+  amazon.aws.aws_az_info:
+    filters:
+      zone-name: eu-west-1a
+'''
+
+RETURN = '''
+availability_zones:
+  returned: on success
+  description: >
+    Availability zones that match the provided filters. Each element consists of a dict with all the information
+    related to that availability zone.
+  type: list
+  elements: dict
+  contains:
+    state:
+      description:
+        - The state of the availability zone.
+        - The value is always C(available).
+      type: str
+      returned: on success
+      sample: 'available'
+    opt_in_status:
+      description:
+        - The opt-in status.
+        - The value is always C(opt-in-not-required) for availability zones.
+      type: str
+      returned: on success
+      sample: 'opt-in-not-required'
+    messages:
+      description: List of messages about the availability zone.
+      type: list
+      elements: dict
+      contains:
+        message:
+          description: The message about the availability zone.
+          type: str
+          returned: on success
+          sample: 'msg'
+      returned: on success
+      sample: [
+        {
+          'message': 'message_one'
+        },
+        {
+          'message': 'message_two'
+        }
+      ]
+    region_name:
+      description: The name of the region.
+      type: str
+      returned: on success
+      sample: 'us-east-1'
+    zone_name:
+      description: The name of the availability zone.
+      type: str
+      returned: on success
+      sample: 'us-east-1e'
+    zone_id:
+      description: The ID of the availability zone.
+      type: str
+      returned: on success
+      sample: 'use1-az5'
+    group_name:
+      description:
+        - The name of the associated group.
+        - For availability zones, this will be the same as I(region_name).
+      type: str
+      returned: on success
+      sample: 'us-east-1'
+    network_border_group:
+      description: The name of the network border group.
+      type: str
+      returned: on success
+      sample: 'us-east-1'
+    zone_type:
+      description: The type of zone.
+ type: str + returned: on success + sample: 'availability-zone' + sample: [ + { + "group_name": "us-east-1", + "messages": [], + "network_border_group": "us-east-1", + "opt_in_status": "opt-in-not-required", + "region_name": "us-east-1", + "state": "available", + "zone_id": "use1-az6", + "zone_name": "us-east-1a", + "zone_type": "availability-zone" + }, + { + "group_name": "us-east-1", + "messages": [], + "network_border_group": "us-east-1", + "opt_in_status": "opt-in-not-required", + "region_name": "us-east-1", + "state": "available", + "zone_id": "use1-az1", + "zone_name": "us-east-1b", + "zone_type": "availability-zone" + } + ] +''' + +try: + from botocore.exceptions import ClientError, BotoCoreError +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list + + +def main(): + argument_spec = dict( + filters=dict(default={}, type='dict') + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + + # Replace filter key underscores with dashes, for compatibility + sanitized_filters = dict(module.params.get('filters')) + for k in module.params.get('filters').keys(): + if "_" in k: + sanitized_filters[k.replace('_', '-')] = sanitized_filters[k] + del sanitized_filters[k] + + try: + availability_zones = connection.describe_availability_zones(aws_retry=True, Filters=ansible_dict_to_boto3_filter_list(sanitized_filters)) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to describe availability zones.") + + # Turn the boto3 result into ansible_friendly_snaked_names + snaked_availability_zones = [camel_dict_to_snake_dict(az) for az in availability_zones['AvailabilityZones']] + + module.exit_json(availability_zones=snaked_availability_zones) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/aws_caller_info.py b/ansible_collections/amazon/aws/plugins/modules/aws_caller_info.py new file mode 100644 index 000000000..3c6691606 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/aws_caller_info.py @@ -0,0 +1,108 @@ +#!/usr/bin/python +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: aws_caller_info +version_added: 1.0.0 +short_description: Get information about the user and account being used to make AWS calls +description: + - This module returns information about the account and user / role from which the AWS access tokens originate. + - The primary use of this is to get the account id for templating into ARNs or similar to avoid needing to specify this information in inventory. + +author: + - Ed Costello (@orthanc) + - Stijn Dubrul (@sdubrul) + +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 +- amazon.aws.boto3 +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
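+
+# The module's primary use (per the description above) is templating the
+# account id into ARNs. A minimal sketch of that pattern; the role name
+# "my-example-role" is a hypothetical placeholder:
+- name: Look up the current account id
+  amazon.aws.aws_caller_info:
+  register: whoami
+
+- name: Template the account id into an ARN
+  ansible.builtin.set_fact:
+    example_role_arn: "arn:aws:iam::{{ whoami.account }}:role/my-example-role"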
+ +- name: Get the current caller identity information + amazon.aws.aws_caller_info: + register: caller_info +''' + +RETURN = ''' +account: + description: The account id the access credentials are associated with. + returned: success + type: str + sample: "123456789012" +account_alias: + description: The account alias the access credentials are associated with. + returned: when caller has the iam:ListAccountAliases permission + type: str + sample: "acme-production" +arn: + description: The arn identifying the user the credentials are associated with. + returned: success + type: str + sample: arn:aws:sts::123456789012:federated-user/my-federated-user-name +user_id: + description: | + The user id the access credentials are associated with. Note that this may not correspond to + anything you can look up in the case of roles or federated identities. + returned: success + type: str + sample: 123456789012:my-federated-user-name +''' + +try: + from botocore.exceptions import BotoCoreError, ClientError +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry + + +def main(): + module = AnsibleAWSModule( + argument_spec={}, + supports_check_mode=True, + ) + + client = module.client('sts', retry_decorator=AWSRetry.jittered_backoff()) + + try: + caller_info = client.get_caller_identity(aws_retry=True) + caller_info.pop('ResponseMetadata', None) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg='Failed to retrieve caller identity') + + iam_client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) + + try: + # Although a list is returned by list_account_aliases AWS supports maximum one alias per account. + # If an alias is defined it will be returned otherwise a blank string is filled in as account_alias. + # see https://docs.aws.amazon.com/cli/latest/reference/iam/list-account-aliases.html#output + response = iam_client.list_account_aliases(aws_retry=True) + if response and response['AccountAliases']: + caller_info['account_alias'] = response['AccountAliases'][0] + else: + caller_info['account_alias'] = '' + except (BotoCoreError, ClientError): + # The iam:ListAccountAliases permission is required for this operation to succeed. + # Lacking this permission is handled gracefully by not returning the account_alias. + pass + + module.exit_json( + changed=False, + **camel_dict_to_snake_dict(caller_info)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudformation.py b/ansible_collections/amazon/aws/plugins/modules/cloudformation.py new file mode 100644 index 000000000..f953a75d2 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/cloudformation.py @@ -0,0 +1,794 @@ +#!/usr/bin/python + +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: cloudformation +version_added: 1.0.0 +short_description: Create or delete an AWS CloudFormation stack +description: + - Launches or updates an AWS CloudFormation stack and waits for it complete. +options: + stack_name: + description: + - Name of the CloudFormation stack. 
+    required: true
+    type: str
+  disable_rollback:
+    description:
+      - If a stack fails to form, rollback will remove the stack.
+    default: false
+    type: bool
+  on_create_failure:
+    description:
+      - Action to take upon failure of stack creation. Incompatible with the I(disable_rollback) option.
+    choices:
+      - DO_NOTHING
+      - ROLLBACK
+      - DELETE
+    type: str
+  create_timeout:
+    description:
+      - The amount of time (in minutes) that can pass before the stack status becomes CREATE_FAILED.
+    type: int
+  template_parameters:
+    description:
+      - A list of hashes of all the template variables for the stack. The value can be a string or a dict.
+      - Dict can be used to set additional template parameter attributes like UsePreviousValue (see example).
+    default: {}
+    type: dict
+  state:
+    description:
+      - If I(state=present), the stack will be created.
+      - If I(state=present) and the stack exists and the template has changed, it will be updated.
+      - If I(state=absent), the stack will be removed.
+    default: present
+    choices: [ present, absent ]
+    type: str
+  template:
+    description:
+      - The local path of the CloudFormation template.
+      - This must be the full path to the file, relative to the working directory. If using roles this may look
+        like C(roles/cloudformation/files/cloudformation-example.json).
+      - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
+        must be specified (but only one of them).
+      - If I(state=present), the stack does exist, and neither I(template),
+        I(template_body) nor I(template_url) are specified, the previous template will be reused.
+    type: path
+  notification_arns:
+    description:
+      - A comma-separated list of Simple Notification Service (SNS) topic ARNs to publish stack related events.
+    type: str
+  stack_policy:
+    description:
+      - The path of the file containing the CloudFormation stack policy. A policy cannot be removed once placed, but it can be modified.
+        For instance, allow all updates: U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html#d0e9051)
+    type: str
+  stack_policy_body:
+    description:
+      - The CloudFormation stack policy in JSON. A policy cannot be removed once placed, but it can be modified.
+        For instance, allow all updates: U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html#d0e9051)
+    type: json
+    version_added: 1.5.0
+  stack_policy_on_update_body:
+    description:
+      - The body of the CloudFormation stack policy applied only during this update.
+    type: json
+    version_added: 1.5.0
+  tags:
+    description:
+      - Dictionary of tags to associate with stack and its resources during stack creation.
+      - Can be updated later; updating tags removes previous entries.
+    type: dict
+  template_url:
+    description:
+      - Location of file containing the template body. The URL must point to a template (max size 307,200 bytes) located in an
+        S3 bucket in the same region as the stack.
+      - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
+        must be specified (but only one of them).
+      - If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url) are specified,
+        the previous template will be reused.
+    type: str
+  create_changeset:
+    description:
+      - "If the stack already exists, create a changeset instead of directly applying changes. See the AWS Change Sets docs
+        U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html)."
+      - "WARNING: if the stack does not exist, it will be created without a changeset. If I(state=absent), the stack will be
+        deleted immediately with no changeset."
+    type: bool
+    default: false
+  changeset_name:
+    description:
+      - Name given to the changeset when it is created.
+      - Only used when I(create_changeset=true).
+      - By default a name prefixed with Ansible-STACKNAME is generated based on input parameters.
+        See the AWS Change Sets docs for more information
+        U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html)
+    type: str
+  role_arn:
+    description:
+      - The role that AWS CloudFormation assumes to create the stack. See the AWS CloudFormation Service Role
+        docs U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-servicerole.html)
+    type: str
+  termination_protection:
+    description:
+      - Enable or disable termination protection on the stack.
+    type: bool
+  template_body:
+    description:
+      - Template body. Use this to pass in the actual body of the CloudFormation template.
+      - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
+        must be specified (but only one of them).
+      - If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url)
+        are specified, the previous template will be reused.
+    type: str
+  events_limit:
+    description:
+      - Maximum number of CloudFormation events to fetch from a stack when creating or updating it.
+    default: 200
+    type: int
+  backoff_delay:
+    description:
+      - Number of seconds to wait for the next retry.
+    default: 3
+    type: int
+    required: False
+  backoff_max_delay:
+    description:
+      - Maximum amount of time to wait between retries.
+    default: 30
+    type: int
+    required: False
+  backoff_retries:
+    description:
+      - Number of times to retry operation.
+      - The AWS API throttling mechanism can fail CloudFormation module calls, so we have to retry a couple of times.
+    default: 10
+    type: int
+    required: False
+  capabilities:
+    description:
+      - Specify capabilities that stack template contains.
+      - Valid values are C(CAPABILITY_IAM), C(CAPABILITY_NAMED_IAM) and C(CAPABILITY_AUTO_EXPAND).
+    type: list
+    elements: str
+    default: [ CAPABILITY_IAM, CAPABILITY_NAMED_IAM ]
+
+author:
+  - "James S. Martin (@jsmartin)"
Martin (@jsmartin)" +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.boto3 +''' + +EXAMPLES = ''' +- name: create a cloudformation stack + amazon.aws.cloudformation: + stack_name: "ansible-cloudformation" + state: "present" + region: "us-east-1" + disable_rollback: true + template: "files/cloudformation-example.json" + template_parameters: + KeyName: "jmartin" + DiskType: "ephemeral" + InstanceType: "m1.small" + ClusterSize: 3 + tags: + Stack: "ansible-cloudformation" + +# Basic role example +- name: create a stack, specify role that cloudformation assumes + amazon.aws.cloudformation: + stack_name: "ansible-cloudformation" + state: "present" + region: "us-east-1" + disable_rollback: true + template: "roles/cloudformation/files/cloudformation-example.json" + role_arn: 'arn:aws:iam::123456789012:role/cloudformation-iam-role' + +- name: delete a stack + amazon.aws.cloudformation: + stack_name: "ansible-cloudformation-old" + state: "absent" + +# Create a stack, pass in template from a URL, disable rollback if stack creation fails, +# pass in some parameters to the template, provide tags for resources created +- name: create a stack, pass in the template via an URL + amazon.aws.cloudformation: + stack_name: "ansible-cloudformation" + state: present + region: us-east-1 + disable_rollback: true + template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template + template_parameters: + KeyName: jmartin + DiskType: ephemeral + InstanceType: m1.small + ClusterSize: 3 + tags: + Stack: ansible-cloudformation + +# Create a stack, passing in template body using lookup of Jinja2 template, disable rollback if stack creation fails, +# pass in some parameters to the template, provide tags for resources created +- name: create a stack, pass in the template body via lookup template + amazon.aws.cloudformation: + stack_name: "ansible-cloudformation" + state: present + region: us-east-1 + disable_rollback: true + template_body: "{{ lookup('template', 'cloudformation.j2') }}" + template_parameters: + KeyName: jmartin + DiskType: ephemeral + InstanceType: m1.small + ClusterSize: 3 + tags: + Stack: ansible-cloudformation + +# Pass a template parameter which uses CloudFormation's UsePreviousValue attribute +# When use_previous_value is set to True, the given value will be ignored and +# CloudFormation will use the value from a previously submitted template. +# If use_previous_value is set to False (default) the given value is used. +- amazon.aws.cloudformation: + stack_name: "ansible-cloudformation" + state: "present" + region: "us-east-1" + template: "files/cloudformation-example.json" + template_parameters: + DBSnapshotIdentifier: + use_previous_value: True + value: arn:aws:rds:es-east-1:123456789012:snapshot:rds:my-db-snapshot + DBName: + use_previous_value: True + tags: + Stack: "ansible-cloudformation" + +# Enable termination protection on a stack. +# If the stack already exists, this will update its termination protection +- name: enable termination protection during stack creation + amazon.aws.cloudformation: + stack_name: my_stack + state: present + template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template + termination_protection: true + +# Configure TimeoutInMinutes before the stack status becomes CREATE_FAILED +# In this case, if disable_rollback is not set or is set to false, the stack will be rolled back. 
+- name: enable termination protection during stack creation + amazon.aws.cloudformation: + stack_name: my_stack + state: present + template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template + create_timeout: 5 + +# Configure rollback behaviour on the unsuccessful creation of a stack allowing +# CloudFormation to clean up, or do nothing in the event of an unsuccessful +# deployment +# In this case, if on_create_failure is set to "DELETE", it will clean up the stack if +# it fails to create +- name: create stack which will delete on creation failure + amazon.aws.cloudformation: + stack_name: my_stack + state: present + template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template + on_create_failure: DELETE +''' + +RETURN = ''' +events: + type: list + description: Most recent events in CloudFormation's event log. This may be from a previous run in some cases. + returned: always + sample: ["StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE", "StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE_CLEANUP_IN_PROGRESS"] +log: + description: Debugging logs. Useful when modifying or finding an error. + returned: always + type: list + sample: ["updating stack"] +change_set_id: + description: The ID of the stack change set if one was created + returned: I(state=present) and I(create_changeset=true) + type: str + sample: "arn:aws:cloudformation:us-east-1:123456789012:changeSet/Ansible-StackName-f4496805bd1b2be824d1e315c6884247ede41eb0" +stack_resources: + description: AWS stack resources and their status. List of dictionaries, one dict per resource. + returned: state == present + type: list + sample: [ + { + "last_updated_time": "2016-10-11T19:40:14.979000+00:00", + "logical_resource_id": "CFTestSg", + "physical_resource_id": "cloudformation2-CFTestSg-16UQ4CYQ57O9F", + "resource_type": "AWS::EC2::SecurityGroup", + "status": "UPDATE_COMPLETE", + "status_reason": null + } + ] +stack_outputs: + type: dict + description: A key:value dictionary of all the stack outputs currently defined. If there are no stack outputs, it is an empty dictionary. + returned: state == present + sample: {"MySg": "AnsibleModuleTestYAML-CFTestSg-C8UVS567B6NS"} +''' # NOQA + +import json +import time +import traceback +import uuid +from hashlib import sha1 + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils._text import to_bytes +from ansible.module_utils._text import to_native + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto_exception + +# Set a default, mostly for our integration tests. This will be overridden in +# the main() loop to match the parameters we're passed +retry_decorator = AWSRetry.jittered_backoff() + + +def get_stack_events(cfn, stack_name, events_limit, token_filter=None): + '''This event data was never correct, it worked as a side effect. 
+    ret = {'events': [], 'log': []}
+
+    try:
+        pg = cfn.get_paginator(
+            'describe_stack_events'
+        ).paginate(
+            StackName=stack_name,
+            PaginationConfig={'MaxItems': events_limit}
+        )
+        if token_filter is not None:
+            events = list(retry_decorator(pg.search)(
+                "StackEvents[?ClientRequestToken == '{0}']".format(token_filter)
+            ))
+        else:
+            events = list(pg.search("StackEvents[*]"))
+    except is_boto3_error_message('does not exist'):
+        ret['log'].append('Stack does not exist.')
+        return ret
+    except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:  # pylint: disable=duplicate-except
+        error_msg = boto_exception(err)
+        ret['log'].append('Unknown error: ' + str(error_msg))
+        return ret
+
+    for e in events:
+        eventline = 'StackEvent {ResourceType} {LogicalResourceId} {ResourceStatus}'.format(**e)
+        ret['events'].append(eventline)
+
+        if e['ResourceStatus'].endswith('FAILED'):
+            failline = '{ResourceType} {LogicalResourceId} {ResourceStatus}: {ResourceStatusReason}'.format(**e)
+            ret['log'].append(failline)
+
+    return ret
+
+
+def create_stack(module, stack_params, cfn, events_limit):
+    if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
+        module.fail_json(msg="Either 'template', 'template_body' or 'template_url' is required when the stack does not exist.")
+
+    # 'DisableRollback', 'TimeoutInMinutes', 'EnableTerminationProtection' and
+    # 'OnFailure' only apply on creation, not update.
+    if module.params.get('on_create_failure') is not None:
+        stack_params['OnFailure'] = module.params['on_create_failure']
+    else:
+        stack_params['DisableRollback'] = module.params['disable_rollback']
+
+    if module.params.get('create_timeout') is not None:
+        stack_params['TimeoutInMinutes'] = module.params['create_timeout']
+    if module.params.get('termination_protection') is not None:
+        stack_params['EnableTerminationProtection'] = bool(module.params.get('termination_protection'))
+
+    try:
+        response = cfn.create_stack(aws_retry=True, **stack_params)
+        # Use stack ID to follow stack state in case of on_create_failure = DELETE
+        result = stack_operation(module, cfn, response['StackId'], 'CREATE', events_limit, stack_params.get('ClientRequestToken', None))
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as err:
+        module.fail_json_aws(err, msg="Failed to create stack {0}".format(stack_params.get('StackName')))
+    if not result:
+        module.fail_json(msg="empty result")
+    return result
+
+
+def list_changesets(cfn, stack_name):
+    res = cfn.list_change_sets(aws_retry=True, StackName=stack_name)
+    return [cs['ChangeSetName'] for cs in res['Summaries']]
+
+
+def create_changeset(module, stack_params, cfn, events_limit):
+    if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
+        module.fail_json(msg="Either 'template', 'template_body' or 'template_url' is required.")
+    if module.params['changeset_name'] is not None:
+        stack_params['ChangeSetName'] = module.params['changeset_name']
+
+    # changesets don't accept ClientRequestToken parameters
+    stack_params.pop('ClientRequestToken', None)
+
+    try:
+        changeset_name = build_changeset_name(stack_params)
+        stack_params['ChangeSetName'] = changeset_name
+
+        # Determine if this changeset already exists
+        pending_changesets = list_changesets(cfn, stack_params['StackName'])
+        if changeset_name in pending_changesets:
+            warning = 'WARNING: %d pending changeset(s) exist(s) for this stack!' % len(pending_changesets)
+            result = dict(changed=False, output='ChangeSet %s already exists.' % changeset_name, warnings=[warning])
+        else:
+            cs = cfn.create_change_set(aws_retry=True, **stack_params)
+            # Make sure we don't enter an infinite loop
+            time_end = time.time() + 600
+            while time.time() < time_end:
+                try:
+                    newcs = cfn.describe_change_set(aws_retry=True, ChangeSetName=cs['Id'])
+                except botocore.exceptions.BotoCoreError as err:
+                    module.fail_json_aws(err)
+                if newcs['Status'] == 'CREATE_PENDING' or newcs['Status'] == 'CREATE_IN_PROGRESS':
+                    time.sleep(1)
+                elif newcs['Status'] == 'FAILED' and ("The submitted information didn't contain changes" in newcs['StatusReason']
+                                                      or "No updates are to be performed" in newcs['StatusReason']):
+                    cfn.delete_change_set(aws_retry=True, ChangeSetName=cs['Id'])
+                    result = dict(changed=False,
+                                  output='The created Change Set did not contain any changes to this stack and was deleted.')
+                    # a failed change set does not trigger any stack events so we just want to
+                    # skip any further processing of result and just return it directly
+                    return result
+                else:
+                    break
+                # Let's not hog the CPU / spam the AWS API
+                time.sleep(1)
+            result = stack_operation(module, cfn, stack_params['StackName'], 'CREATE_CHANGESET', events_limit)
+            result['change_set_id'] = cs['Id']
+            result['warnings'] = ['Created changeset named %s for stack %s' % (changeset_name, stack_params['StackName']),
+                                  'You can execute it using: aws cloudformation execute-change-set --change-set-name %s' % cs['Id'],
+                                  'NOTE that dependencies on this stack might fail due to pending changes!']
+    except is_boto3_error_message('No updates are to be performed.'):
+        result = dict(changed=False, output='Stack is already up-to-date.')
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as err:
+        module.fail_json_aws(err, msg='Failed to create change set')
+
+    if not result:
+        module.fail_json(msg="empty result")
+    return result
+
+
+def update_stack(module, stack_params, cfn, events_limit):
+    if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
+        stack_params['UsePreviousTemplate'] = True
+
+    if module.params['stack_policy_on_update_body'] is not None:
+        stack_params['StackPolicyDuringUpdateBody'] = module.params['stack_policy_on_update_body']
+
+    # if the state is present and the stack already exists, we try to update it.
+    # AWS will tell us if the stack template and parameters are the same and
+    # don't need to be updated.
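+    # That "nothing to do" signal arrives as a client error whose message is
+    # 'No updates are to be performed.', which is why the except clause below
+    # matches on the message text via is_boto3_error_message rather than on an
+    # error code.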
+ try: + cfn.update_stack(aws_retry=True, **stack_params) + result = stack_operation(module, cfn, stack_params['StackName'], 'UPDATE', events_limit, stack_params.get('ClientRequestToken', None)) + except is_boto3_error_message('No updates are to be performed.'): + result = dict(changed=False, output='Stack is already up-to-date.') + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as err: + module.fail_json_aws(err, msg="Failed to update stack {0}".format(stack_params.get('StackName'))) + if not result: + module.fail_json(msg="empty result") + return result + + +def update_termination_protection(module, cfn, stack_name, desired_termination_protection_state): + '''updates termination protection of a stack''' + stack = get_stack_facts(module, cfn, stack_name) + if stack: + if stack['EnableTerminationProtection'] is not desired_termination_protection_state: + try: + cfn.update_termination_protection( + aws_retry=True, + EnableTerminationProtection=desired_termination_protection_state, + StackName=stack_name) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + + +def stack_operation(module, cfn, stack_name, operation, events_limit, op_token=None): + '''gets the status of a stack while it is created/updated/deleted''' + existed = [] + while True: + try: + stack = get_stack_facts(module, cfn, stack_name, raise_errors=True) + existed.append('yes') + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError): + # If the stack previously existed, and now can't be found then it's + # been deleted successfully. + if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways. + ret = get_stack_events(cfn, stack_name, events_limit, op_token) + ret.update({'changed': True, 'output': 'Stack Deleted'}) + return ret + else: + return {'changed': True, 'failed': True, 'output': 'Stack Not Found', 'exception': traceback.format_exc()} + ret = get_stack_events(cfn, stack_name, events_limit, op_token) + if not stack: + if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways. + ret = get_stack_events(cfn, stack_name, events_limit, op_token) + ret.update({'changed': True, 'output': 'Stack Deleted'}) + return ret + else: + ret.update({'changed': False, 'failed': True, 'output': 'Stack not found.'}) + return ret + # it covers ROLLBACK_COMPLETE and UPDATE_ROLLBACK_COMPLETE + # Possible states: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-describing-stacks.html#w1ab2c15c17c21c13 + elif stack['StackStatus'].endswith('ROLLBACK_COMPLETE') and operation != 'CREATE_CHANGESET': + ret.update({'changed': True, 'failed': True, 'output': 'Problem with %s. Rollback complete' % operation}) + return ret + elif stack['StackStatus'] == 'DELETE_COMPLETE' and operation == 'CREATE': + ret.update({'changed': True, 'failed': True, 'output': 'Stack create failed. Delete complete.'}) + return ret + # note the ordering of ROLLBACK_COMPLETE, DELETE_COMPLETE, and COMPLETE, because otherwise COMPLETE will match all cases. + elif stack['StackStatus'].endswith('_COMPLETE'): + ret.update({'changed': True, 'output': 'Stack %s complete' % operation}) + return ret + elif stack['StackStatus'].endswith('_ROLLBACK_FAILED'): + ret.update({'changed': True, 'failed': True, 'output': 'Stack %s rollback failed' % operation}) + return ret + # note the ordering of ROLLBACK_FAILED and FAILED, because otherwise FAILED will match both cases. 
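+        # (for example 'UPDATE_ROLLBACK_FAILED' ends with both suffixes, so the
+        # more specific '_ROLLBACK_FAILED' branch above has to run first)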
+ elif stack['StackStatus'].endswith('_FAILED'): + ret.update({'changed': True, 'failed': True, 'output': 'Stack %s failed' % operation}) + return ret + else: + # this can loop forever :/ + time.sleep(5) + return {'failed': True, 'output': 'Failed for unknown reasons.'} + + +def build_changeset_name(stack_params): + if 'ChangeSetName' in stack_params: + return stack_params['ChangeSetName'] + + json_params = json.dumps(stack_params, sort_keys=True) + + return 'Ansible-{0}-{1}'.format( + stack_params['StackName'], + sha1(to_bytes(json_params, errors='surrogate_or_strict')).hexdigest() + ) + + +def check_mode_changeset(module, stack_params, cfn): + """Create a change set, describe it and delete it before returning check mode outputs.""" + stack_params['ChangeSetName'] = build_changeset_name(stack_params) + # changesets don't accept ClientRequestToken parameters + stack_params.pop('ClientRequestToken', None) + + try: + change_set = cfn.create_change_set(aws_retry=True, **stack_params) + for _i in range(60): # total time 5 min + description = cfn.describe_change_set(aws_retry=True, ChangeSetName=change_set['Id']) + if description['Status'] in ('CREATE_COMPLETE', 'FAILED'): + break + time.sleep(5) + else: + # if the changeset doesn't finish in 5 mins, this `else` will trigger and fail + module.fail_json(msg="Failed to create change set %s" % stack_params['ChangeSetName']) + + cfn.delete_change_set(aws_retry=True, ChangeSetName=change_set['Id']) + + reason = description.get('StatusReason') + + if description['Status'] == 'FAILED' and ("didn't contain changes" in reason or "No updates are to be performed" in reason): + return {'changed': False, 'msg': reason, 'meta': reason} + return {'changed': True, 'msg': reason, 'meta': description['Changes']} + + except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err: + module.fail_json_aws(err) + + +def get_stack_facts(module, cfn, stack_name, raise_errors=False): + try: + stack_response = cfn.describe_stacks(aws_retry=True, StackName=stack_name) + stack_info = stack_response['Stacks'][0] + except is_boto3_error_message('does not exist'): + return None + except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err: # pylint: disable=duplicate-except + if raise_errors: + raise err + module.fail_json_aws(err, msg="Failed to describe stack") + + if stack_response and stack_response.get('Stacks', None): + stacks = stack_response['Stacks'] + if len(stacks): + stack_info = stacks[0] + + return stack_info + + +def main(): + argument_spec = dict( + stack_name=dict(required=True), + template_parameters=dict(required=False, type='dict', default={}), + state=dict(default='present', choices=['present', 'absent']), + template=dict(default=None, required=False, type='path'), + notification_arns=dict(default=None, required=False), + stack_policy=dict(default=None, required=False), + stack_policy_body=dict(default=None, required=False, type='json'), + stack_policy_on_update_body=dict(default=None, required=False, type='json'), + disable_rollback=dict(default=False, type='bool'), + on_create_failure=dict(default=None, required=False, choices=['DO_NOTHING', 'ROLLBACK', 'DELETE']), + create_timeout=dict(default=None, type='int'), + template_url=dict(default=None, required=False), + template_body=dict(default=None, required=False), + create_changeset=dict(default=False, type='bool'), + changeset_name=dict(default=None, required=False), + role_arn=dict(default=None, required=False), + tags=dict(default=None, type='dict'), + 
termination_protection=dict(default=None, type='bool'), + events_limit=dict(default=200, type='int'), + backoff_retries=dict(type='int', default=10, required=False), + backoff_delay=dict(type='int', default=3, required=False), + backoff_max_delay=dict(type='int', default=30, required=False), + capabilities=dict(type='list', elements='str', default=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']) + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + mutually_exclusive=[['template_url', 'template', 'template_body'], + ['disable_rollback', 'on_create_failure']], + supports_check_mode=True + ) + + invalid_capabilities = [] + user_capabilities = module.params.get('capabilities') + for user_cap in user_capabilities: + if user_cap not in ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM', 'CAPABILITY_AUTO_EXPAND']: + invalid_capabilities.append(user_cap) + + if invalid_capabilities: + module.fail_json(msg="Specified capabilities are invalid : %r," + " please check documentation for valid capabilities" % invalid_capabilities) + + # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around. + stack_params = { + 'Capabilities': user_capabilities, + 'ClientRequestToken': to_native(uuid.uuid4()), + } + state = module.params['state'] + stack_params['StackName'] = module.params['stack_name'] + + if module.params['template'] is not None: + with open(module.params['template'], 'r') as template_fh: + stack_params['TemplateBody'] = template_fh.read() + elif module.params['template_body'] is not None: + stack_params['TemplateBody'] = module.params['template_body'] + elif module.params['template_url'] is not None: + stack_params['TemplateURL'] = module.params['template_url'] + + if module.params.get('notification_arns'): + stack_params['NotificationARNs'] = module.params['notification_arns'].split(',') + else: + stack_params['NotificationARNs'] = [] + + # can't check the policy when verifying. 
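+    # (a stack policy is applied immediately, so it is skipped in check mode and
+    # when only creating a changeset, which does not accept a stack policy)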
+ if module.params['stack_policy_body'] is not None and not module.check_mode and not module.params['create_changeset']: + stack_params['StackPolicyBody'] = module.params['stack_policy_body'] + elif module.params['stack_policy'] is not None and not module.check_mode and not module.params['create_changeset']: + with open(module.params['stack_policy'], 'r') as stack_policy_fh: + stack_params['StackPolicyBody'] = stack_policy_fh.read() + + template_parameters = module.params['template_parameters'] + + stack_params['Parameters'] = [] + for k, v in template_parameters.items(): + if isinstance(v, dict): + # set parameter based on a dict to allow additional CFN Parameter Attributes + param = dict(ParameterKey=k) + + if 'value' in v: + param['ParameterValue'] = str(v['value']) + + if 'use_previous_value' in v and bool(v['use_previous_value']): + param['UsePreviousValue'] = True + param.pop('ParameterValue', None) + + stack_params['Parameters'].append(param) + else: + # allow default k/v configuration to set a template parameter + stack_params['Parameters'].append({'ParameterKey': k, 'ParameterValue': str(v)}) + + if isinstance(module.params.get('tags'), dict): + stack_params['Tags'] = ansible_dict_to_boto3_tag_list(module.params['tags']) + + if module.params.get('role_arn'): + stack_params['RoleARN'] = module.params['role_arn'] + + result = {} + + # Wrap the cloudformation client methods that this module uses with + # automatic backoff / retry for throttling error codes + retry_decorator = AWSRetry.jittered_backoff( + retries=module.params.get('backoff_retries'), + delay=module.params.get('backoff_delay'), + max_delay=module.params.get('backoff_max_delay') + ) + cfn = module.client('cloudformation', retry_decorator=retry_decorator) + + stack_info = get_stack_facts(module, cfn, stack_params['StackName']) + + if module.check_mode: + if state == 'absent' and stack_info: + module.exit_json(changed=True, msg='Stack would be deleted', meta=[]) + elif state == 'absent' and not stack_info: + module.exit_json(changed=False, msg='Stack doesn\'t exist', meta=[]) + elif state == 'present' and not stack_info: + module.exit_json(changed=True, msg='New stack would be created', meta=[]) + else: + module.exit_json(**check_mode_changeset(module, stack_params, cfn)) + + if state == 'present': + if not stack_info: + result = create_stack(module, stack_params, cfn, module.params.get('events_limit')) + elif module.params.get('create_changeset'): + result = create_changeset(module, stack_params, cfn, module.params.get('events_limit')) + else: + if module.params.get('termination_protection') is not None: + update_termination_protection(module, cfn, stack_params['StackName'], + bool(module.params.get('termination_protection'))) + result = update_stack(module, stack_params, cfn, module.params.get('events_limit')) + + # format the stack output + + stack = get_stack_facts(module, cfn, stack_params['StackName']) + if stack is not None: + if result.get('stack_outputs') is None: + # always define stack_outputs, but it may be empty + result['stack_outputs'] = {} + for output in stack.get('Outputs', []): + result['stack_outputs'][output['OutputKey']] = output['OutputValue'] + stack_resources = [] + reslist = cfn.list_stack_resources(aws_retry=True, StackName=stack_params['StackName']) + for res in reslist.get('StackResourceSummaries', []): + stack_resources.append({ + "logical_resource_id": res['LogicalResourceId'], + "physical_resource_id": res.get('PhysicalResourceId', ''), + "resource_type": res['ResourceType'], + 
"last_updated_time": res['LastUpdatedTimestamp'], + "status": res['ResourceStatus'], + "status_reason": res.get('ResourceStatusReason') # can be blank, apparently + }) + result['stack_resources'] = stack_resources + + elif state == 'absent': + # absent state is different because of the way delete_stack works. + # problem is it it doesn't give an error if stack isn't found + # so must describe the stack first + + try: + stack = get_stack_facts(module, cfn, stack_params['StackName']) + if not stack: + result = {'changed': False, 'output': 'Stack not found.'} + else: + if stack_params.get('RoleARN') is None: + cfn.delete_stack(aws_retry=True, StackName=stack_params['StackName']) + else: + cfn.delete_stack(aws_retry=True, StackName=stack_params['StackName'], RoleARN=stack_params['RoleARN']) + result = stack_operation(module, cfn, stack_params['StackName'], 'DELETE', module.params.get('events_limit'), + stack_params.get('ClientRequestToken', None)) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as err: + module.fail_json_aws(err) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudformation_info.py b/ansible_collections/amazon/aws/plugins/modules/cloudformation_info.py new file mode 100644 index 000000000..89ba80bf7 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/cloudformation_info.py @@ -0,0 +1,461 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: cloudformation_info +version_added: 1.0.0 +short_description: Obtain information about an AWS CloudFormation stack +description: + - Gets information about an AWS CloudFormation stack. +author: + - Justin Menga (@jmenga) + - Kevin Coming (@waffie1) +options: + stack_name: + description: + - The name or id of the CloudFormation stack. Gathers information on all stacks by default. + type: str + all_facts: + description: + - Get all stack information for the stack. + type: bool + default: false + stack_events: + description: + - Get stack events for the stack. + type: bool + default: false + stack_template: + description: + - Get stack template body for the stack. + type: bool + default: false + stack_resources: + description: + - Get stack resources for the stack. + type: bool + default: false + stack_policy: + description: + - Get stack policy for the stack. + type: bool + default: false + stack_change_sets: + description: + - Get stack change sets for the stack + type: bool + default: false +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 +- amazon.aws.boto3 +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
+
+- name: Get information on all stacks
+  amazon.aws.cloudformation_info:
+  register: all_stacks_output
+
+- name: Get summary information about a stack
+  amazon.aws.cloudformation_info:
+    stack_name: my-cloudformation-stack
+  register: output
+
+- debug:
+    msg: "{{ output['cloudformation']['my-cloudformation-stack'] }}"
+
+# Get stack outputs, when you have the stack name available as a fact
+- set_fact:
+    stack_name: my-awesome-stack
+
+- amazon.aws.cloudformation_info:
+    stack_name: "{{ stack_name }}"
+  register: my_stack
+
+- debug:
+    msg: "{{ my_stack.cloudformation[stack_name].stack_outputs }}"
+
+# Get all stack information about a stack
+- amazon.aws.cloudformation_info:
+    stack_name: my-cloudformation-stack
+    all_facts: true
+
+# Get stack resource and stack policy information about a stack
+- amazon.aws.cloudformation_info:
+    stack_name: my-cloudformation-stack
+    stack_resources: true
+    stack_policy: true
+
+# Fail if the stack doesn't exist
+- name: try to get info about a stack but fail if it doesn't exist
+  amazon.aws.cloudformation_info:
+    stack_name: nonexistent-stack
+    all_facts: true
+  failed_when: cloudformation['nonexistent-stack'] is undefined
+'''
+
+RETURN = '''
+cloudformation:
+  description:
+    - Dictionary of dictionaries containing info of stack(s).
+    - Keys are I(stack_name)s.
+  returned: always
+  type: dict
+  contains:
+    stack_description:
+      description: Summary facts about the stack.
+      returned: if the stack exists
+      type: dict
+      contains:
+        capabilities:
+          description: The capabilities allowed in the stack.
+          returned: always
+          type: list
+          elements: str
+        creation_time:
+          description: The time at which the stack was created.
+          returned: if stack exists
+          type: str
+        deletion_time:
+          description: The time at which the stack was deleted.
+          returned: if stack was deleted
+          type: str
+        description:
+          description: The user-defined description associated with the stack.
+          returned: always
+          type: str
+        disable_rollback:
+          description: Whether or not rollback on stack creation failures is enabled.
+          returned: always
+          type: bool
+        drift_information:
+          description: Information about whether a stack's actual configuration differs, or has drifted, from its expected configuration,
+            as defined in the stack template and any values specified as template parameters.
+          returned: always
+          type: dict
+          contains:
+            stack_drift_status:
+              description: Status of the stack's actual configuration compared to its expected template configuration.
+              returned: always
+              type: str
+            last_check_timestamp:
+              description: Most recent time when a drift detection operation was initiated on the stack,
+                or any of its individual resources that support drift detection.
+              returned: if a drift was detected
+              type: str
+        enable_termination_protection:
+          description: Whether termination protection is enabled for the stack.
+          returned: always
+          type: bool
+        notification_arns:
+          description: Amazon SNS topic ARNs to which stack related events are published.
+          returned: always
+          type: list
+          elements: str
+        outputs:
+          description: A list of output dicts.
+          returned: always
+          type: list
+          elements: dict
+          contains:
+            output_key:
+              description: The key associated with the output.
+              returned: always
+              type: str
+            output_value:
+              description: The value associated with the output.
+              returned: always
+              type: str
+        parameters:
+          description: A list of parameter dicts.
+          returned: always
+          type: list
+          elements: dict
+          contains:
+            parameter_key:
+              description: The key associated with the parameter.
+ returned: always + type: str + parameter_value: + description: The value associated with the parameter. + returned: always + type: str + rollback_configuration: + description: The rollback triggers for CloudFormation to monitor during stack creation and updating operations. + returned: always + type: dict + contains: + rollback_triggers: + description: The triggers to monitor during stack creation or update actions. + returned: when rollback triggers exist + type: list + elements: dict + contains: + arn: + description: The ARN of the rollback trigger. + returned: always + type: str + type: + description: The resource type of the rollback trigger. + returned: always + type: str + stack_id: + description: The unique ID of the stack. + returned: always + type: str + stack_name: + description: The name of the stack. + returned: always + type: str + stack_status: + description: The status of the stack. + returned: always + type: str + tags: + description: A list of tags associated with the stack. + returned: always + type: list + elements: dict + contains: + key: + description: Key of tag. + returned: always + type: str + value: + description: Value of tag. + returned: always + type: str + stack_outputs: + description: Dictionary of stack outputs keyed by the value of each output 'OutputKey' parameter and corresponding value of each + output 'OutputValue' parameter. + returned: if the stack exists + type: dict + sample: { ApplicationDatabaseName: dazvlpr01xj55a.ap-southeast-2.rds.amazonaws.com } + stack_parameters: + description: Dictionary of stack parameters keyed by the value of each parameter 'ParameterKey' parameter and corresponding value of + each parameter 'ParameterValue' parameter. + returned: if the stack exists + type: dict + sample: + { + DatabaseEngine: mysql, + DatabasePassword: "***" + } + stack_events: + description: All stack events for the stack. + returned: only if all_facts or stack_events is true and the stack exists + type: list + stack_policy: + description: Describes the stack policy for the stack. + returned: only if all_facts or stack_policy is true and the stack exists + type: dict + stack_template: + description: Describes the stack template for the stack. + returned: only if all_facts or stack_template is true and the stack exists + type: dict + stack_resource_list: + description: Describes stack resources for the stack. + returned: only if all_facts or stack_resources is true and the stack exists + type: list + stack_resources: + description: Dictionary of stack resources keyed by the value of each resource 'LogicalResourceId' parameter and corresponding value of each + resource 'PhysicalResourceId' parameter. + returned: only if all_facts or stack_resources is true and the stack exists + type: dict + sample: { + "AutoScalingGroup": "dev-someapp-AutoscalingGroup-1SKEXXBCAN0S7", + "AutoScalingSecurityGroup": "sg-abcd1234", + "ApplicationDatabase": "dazvlpr01xj55a" + } + stack_change_sets: + description: A list of stack change sets. Each item in the list represents the details of a specific changeset. + returned: only if all_facts or stack_change_sets is true and the stack exists + type: list + stack_tags: + description: Dictionary of key value pairs of tags. 
+ returned: only if all_facts or stack_resources is true and the stack exists + type: dict + sample: { + 'TagOne': 'ValueOne', + 'TagTwo': 'ValueTwo' + } +''' + +import json + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict + + +class CloudFormationServiceManager: + """Handles CloudFormation Services""" + + def __init__(self, module): + self.module = module + self.client = module.client('cloudformation') + + @AWSRetry.exponential_backoff(retries=5, delay=5) + def describe_stacks_with_backoff(self, **kwargs): + paginator = self.client.get_paginator('describe_stacks') + return paginator.paginate(**kwargs).build_full_result()['Stacks'] + + def describe_stacks(self, stack_name=None): + try: + kwargs = {'StackName': stack_name} if stack_name else {} + response = self.describe_stacks_with_backoff(**kwargs) + if response is not None: + return response + self.module.fail_json(msg="Error describing stack(s) - an empty response was returned") + except is_boto3_error_message('does not exist'): + return {} + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + self.module.fail_json_aws(e, msg="Error describing stack " + stack_name) + + @AWSRetry.exponential_backoff(retries=5, delay=5) + def list_stack_resources_with_backoff(self, stack_name): + paginator = self.client.get_paginator('list_stack_resources') + return paginator.paginate(StackName=stack_name).build_full_result()['StackResourceSummaries'] + + def list_stack_resources(self, stack_name): + try: + return self.list_stack_resources_with_backoff(stack_name) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Error listing stack resources for stack " + stack_name) + + @AWSRetry.exponential_backoff(retries=5, delay=5) + def describe_stack_events_with_backoff(self, stack_name): + paginator = self.client.get_paginator('describe_stack_events') + return paginator.paginate(StackName=stack_name).build_full_result()['StackEvents'] + + def describe_stack_events(self, stack_name): + try: + return self.describe_stack_events_with_backoff(stack_name) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Error listing stack events for stack " + stack_name) + + @AWSRetry.exponential_backoff(retries=5, delay=5) + def list_stack_change_sets_with_backoff(self, stack_name): + paginator = self.client.get_paginator('list_change_sets') + return paginator.paginate(StackName=stack_name).build_full_result()['Summaries'] + + @AWSRetry.exponential_backoff(retries=5, delay=5) + def describe_stack_change_set_with_backoff(self, **kwargs): + paginator = self.client.get_paginator('describe_change_set') + return paginator.paginate(**kwargs).build_full_result() + + def describe_stack_change_sets(self, stack_name): + changes = [] + try: + change_sets = self.list_stack_change_sets_with_backoff(stack_name) + for item in change_sets: + changes.append(self.describe_stack_change_set_with_backoff( + 
StackName=stack_name, + ChangeSetName=item['ChangeSetName'])) + return changes + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Error describing stack change sets for stack " + stack_name) + + @AWSRetry.exponential_backoff(retries=5, delay=5) + def get_stack_policy_with_backoff(self, stack_name): + return self.client.get_stack_policy(StackName=stack_name) + + def get_stack_policy(self, stack_name): + try: + response = self.get_stack_policy_with_backoff(stack_name) + stack_policy = response.get('StackPolicyBody') + if stack_policy: + return json.loads(stack_policy) + return dict() + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Error getting stack policy for stack " + stack_name) + + @AWSRetry.exponential_backoff(retries=5, delay=5) + def get_template_with_backoff(self, stack_name): + return self.client.get_template(StackName=stack_name) + + def get_template(self, stack_name): + try: + response = self.get_template_with_backoff(stack_name) + return response.get('TemplateBody') + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Error getting stack template for stack " + stack_name) + + +def to_dict(items, key, value): + ''' Transforms a list of items to a Key/Value dictionary ''' + if items: + return dict(zip([i.get(key) for i in items], [i.get(value) for i in items])) + else: + return dict() + + +def main(): + argument_spec = dict( + stack_name=dict(), + all_facts=dict(required=False, default=False, type='bool'), + stack_policy=dict(required=False, default=False, type='bool'), + stack_events=dict(required=False, default=False, type='bool'), + stack_resources=dict(required=False, default=False, type='bool'), + stack_template=dict(required=False, default=False, type='bool'), + stack_change_sets=dict(required=False, default=False, type='bool'), + ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + service_mgr = CloudFormationServiceManager(module) + + result = {'cloudformation': {}} + + for stack_description in service_mgr.describe_stacks(module.params.get('stack_name')): + facts = {'stack_description': stack_description} + stack_name = stack_description.get('StackName') + + # Create stack output and stack parameter dictionaries + if facts['stack_description']: + facts['stack_outputs'] = to_dict(facts['stack_description'].get('Outputs'), 'OutputKey', 'OutputValue') + facts['stack_parameters'] = to_dict(facts['stack_description'].get('Parameters'), + 'ParameterKey', 'ParameterValue') + facts['stack_tags'] = boto3_tag_list_to_ansible_dict(facts['stack_description'].get('Tags')) + + # Create optional stack outputs + all_facts = module.params.get('all_facts') + if all_facts or module.params.get('stack_resources'): + facts['stack_resource_list'] = service_mgr.list_stack_resources(stack_name) + facts['stack_resources'] = to_dict(facts.get('stack_resource_list'), + 'LogicalResourceId', 'PhysicalResourceId') + if all_facts or module.params.get('stack_template'): + facts['stack_template'] = service_mgr.get_template(stack_name) + if all_facts or module.params.get('stack_policy'): + facts['stack_policy'] = service_mgr.get_stack_policy(stack_name) + if all_facts or module.params.get('stack_events'): + facts['stack_events'] = service_mgr.describe_stack_events(stack_name) + if all_facts or module.params.get('stack_change_sets'): + facts['stack_change_sets'] = 
service_mgr.describe_stack_change_sets(stack_name)
+
+        result['cloudformation'][stack_name] = camel_dict_to_snake_dict(facts, ignore_list=('stack_outputs',
+                                                                                            'stack_parameters',
+                                                                                            'stack_policy',
+                                                                                            'stack_resources',
+                                                                                            'stack_tags',
+                                                                                            'stack_template'))
+    module.exit_json(changed=False, **result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudtrail.py b/ansible_collections/amazon/aws/plugins/modules/cloudtrail.py
new file mode 100644
index 000000000..af48e7ea8
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/cloudtrail.py
@@ -0,0 +1,642 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cloudtrail
+version_added: 5.0.0
+short_description: manage CloudTrail create, delete, update
+description:
+  - Creates, deletes, or updates CloudTrail configuration. Ensures logging is also enabled.
+  - This module was originally added to C(community.aws) in release 1.0.0.
+author:
+  - Ansible Core Team
+  - Ted Timmons (@tedder)
+  - Daniel Shepherd (@shepdelacreme)
+options:
+  state:
+    description:
+      - Add or remove CloudTrail configuration.
+      - 'The following states have been preserved for backwards compatibility: I(state=enabled) and I(state=disabled).'
+      - I(state=enabled) is equivalent to I(state=present).
+      - I(state=disabled) is equivalent to I(state=absent).
+    type: str
+    choices: ['present', 'absent', 'enabled', 'disabled']
+    default: present
+  name:
+    description:
+      - Name for the CloudTrail.
+      - Names are unique per-region unless the CloudTrail is a multi-region trail, in which case it is unique per-account.
+    type: str
+    default: default
+  enable_logging:
+    description:
+      - Start or stop the CloudTrail logging. If stopped the trail will be paused and will not record events or deliver log files.
+    default: true
+    type: bool
+  s3_bucket_name:
+    description:
+      - An existing S3 bucket where CloudTrail will deliver log files.
+      - This bucket should exist and have the proper policy.
+      - See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_bucket_policy.html).
+      - Required when I(state=present).
+    type: str
+  s3_key_prefix:
+    description:
+      - S3 Key prefix for delivered log files. A trailing slash is not necessary and will be removed.
+    type: str
+  is_multi_region_trail:
+    description:
+      - Specify whether the trail belongs only to one region or exists in all regions.
+    default: false
+    type: bool
+  enable_log_file_validation:
+    description:
+      - Specifies whether log file integrity validation is enabled.
+      - CloudTrail will create a hash for every log file delivered and produce a signed digest file that can be used to ensure log files have not been tampered with.
+    type: bool
+    aliases: [ "log_file_validation_enabled" ]
+  include_global_events:
+    description:
+      - Record API calls from global services such as IAM and STS.
+    default: true
+    type: bool
+    aliases: [ "include_global_service_events" ]
+  sns_topic_name:
+    description:
+      - SNS Topic name to send notifications to when a log file is delivered.
+    type: str
+  cloudwatch_logs_role_arn:
+    description:
+      - Specifies a full ARN for an IAM role that assigns the proper permissions for CloudTrail to create and write to the log group.
+      - See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/send-cloudtrail-events-to-cloudwatch-logs.html).
+      - Required when I(cloudwatch_logs_log_group_arn) is specified.
+    type: str
+  cloudwatch_logs_log_group_arn:
+    description:
+      - A full ARN specifying a valid CloudWatch log group to which CloudTrail logs will be delivered. The log group should already exist.
+      - See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/send-cloudtrail-events-to-cloudwatch-logs.html).
+      - Required when I(cloudwatch_logs_role_arn) is specified.
+    type: str
+  kms_key_id:
+    description:
+      - Specifies the KMS key ID to use to encrypt the logs delivered by CloudTrail. This also has the effect of enabling log file encryption.
+      - The value can be an alias name prefixed by "alias/", a fully specified ARN to an alias, a fully specified ARN to a key, or a globally unique identifier.
+      - Encryption can be disabled by setting I(kms_key_id="").
+      - See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/encrypting-cloudtrail-log-files-with-aws-kms.html).
+    type: str
+notes:
+  - The I(purge_tags) option was added in release 4.0.0.
+
+extends_documentation_fragment:
+  - amazon.aws.aws
+  - amazon.aws.ec2
+  - amazon.aws.tags
+  - amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+- name: create single region cloudtrail
+  amazon.aws.cloudtrail:
+    state: present
+    name: default
+    s3_bucket_name: mylogbucket
+    s3_key_prefix: cloudtrail
+    region: us-east-1
+
+- name: create multi-region trail with validation and tags
+  amazon.aws.cloudtrail:
+    state: present
+    name: default
+    s3_bucket_name: mylogbucket
+    region: us-east-1
+    is_multi_region_trail: true
+    enable_log_file_validation: true
+    cloudwatch_logs_role_arn: "arn:aws:iam::123456789012:role/CloudTrail_CloudWatchLogs_Role"
+    cloudwatch_logs_log_group_arn: "arn:aws:logs:us-east-1:123456789012:log-group:CloudTrail/DefaultLogGroup:*"
+    kms_key_id: "alias/MyAliasName"
+    tags:
+      environment: dev
+      Name: default
+
+- name: show another valid kms_key_id
+  amazon.aws.cloudtrail:
+    state: present
+    name: default
+    s3_bucket_name: mylogbucket
+    kms_key_id: "arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012"
+    # simply "12345678-1234-1234-1234-123456789012" would be valid too.
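+
+# Not in the original examples; an illustrative follow-up based on the
+# kms_key_id documentation above: encryption can be switched off again by
+# passing an empty string.
+- name: disable log file encryption on the trail
+  amazon.aws.cloudtrail:
+    state: present
+    name: default
+    s3_bucket_name: mylogbucket
+    kms_key_id: ""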
+ +- name: pause logging the trail we just created + amazon.aws.cloudtrail: + state: present + name: default + enable_logging: false + s3_bucket_name: mylogbucket + region: us-east-1 + is_multi_region_trail: true + enable_log_file_validation: true + tags: + environment: dev + Name: default + +- name: delete a trail + amazon.aws.cloudtrail: + state: absent + name: default +''' + +RETURN = ''' +exists: + description: whether the resource exists + returned: always + type: bool + sample: true +trail: + description: CloudTrail resource details + returned: always + type: complex + sample: hash/dictionary of values + contains: + trail_arn: + description: Full ARN of the CloudTrail resource + returned: success + type: str + sample: arn:aws:cloudtrail:us-east-1:123456789012:trail/default + name: + description: Name of the CloudTrail resource + returned: success + type: str + sample: default + is_logging: + description: Whether logging is turned on or paused for the Trail + returned: success + type: bool + sample: True + s3_bucket_name: + description: S3 bucket name where log files are delivered + returned: success + type: str + sample: myBucket + s3_key_prefix: + description: Key prefix in bucket where log files are delivered (if any) + returned: success when present + type: str + sample: myKeyPrefix + log_file_validation_enabled: + description: Whether log file validation is enabled on the trail + returned: success + type: bool + sample: true + include_global_service_events: + description: Whether global services (IAM, STS) are logged with this trail + returned: success + type: bool + sample: true + is_multi_region_trail: + description: Whether the trail applies to all regions or just one + returned: success + type: bool + sample: true + has_custom_event_selectors: + description: Whether any custom event selectors are used for this trail. + returned: success + type: bool + sample: False + home_region: + description: The home region where the trail was originally created and must be edited. + returned: success + type: str + sample: us-east-1 + sns_topic_name: + description: The SNS topic name where log delivery notifications are sent. + returned: success when present + type: str + sample: myTopic + sns_topic_arn: + description: Full ARN of the SNS topic where log delivery notifications are sent. + returned: success when present + type: str + sample: arn:aws:sns:us-east-1:123456789012:topic/myTopic + cloud_watch_logs_log_group_arn: + description: Full ARN of the CloudWatch Logs log group where events are delivered. + returned: success when present + type: str + sample: arn:aws:logs:us-east-1:123456789012:log-group:CloudTrail/DefaultLogGroup:* + cloud_watch_logs_role_arn: + description: Full ARN of the IAM role that CloudTrail assumes to deliver events. + returned: success when present + type: str + sample: arn:aws:iam::123456789012:role/CloudTrail_CloudWatchLogs_Role + kms_key_id: + description: Full ARN of the KMS Key used to encrypt log files. 
+ returned: success when present + type: str + sample: arn:aws:kms::123456789012:key/12345678-1234-1234-1234-123456789012 + tags: + description: hash/dictionary of tags applied to this resource + returned: success + type: dict + sample: {'environment': 'dev', 'Name': 'default'} +''' + +try: + from botocore.exceptions import ClientError, BotoCoreError +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + + +def get_kms_key_aliases(module, client, keyId): + """ + get list of key aliases + + module : AnsibleAWSModule object + client : boto3 client connection object for kms + keyId : keyId to get aliases for + """ + try: + key_resp = client.list_aliases(KeyId=keyId) + except (BotoCoreError, ClientError): + # Don't fail here, just return [] to maintain backwards compat + # in case user doesn't have kms:ListAliases permissions + return [] + + return key_resp['Aliases'] + + +def create_trail(module, client, ct_params): + """ + Creates a CloudTrail + + module : AnsibleAWSModule object + client : boto3 client connection object + ct_params : The parameters for the Trail to create + """ + resp = {} + try: + resp = client.create_trail(**ct_params) + except (BotoCoreError, ClientError) as err: + module.fail_json_aws(err, msg="Failed to create Trail") + + return resp + + +def tag_trail(module, client, tags, trail_arn, curr_tags=None, purge_tags=True): + """ + Creates, updates, removes tags on a CloudTrail resource + + module : AnsibleAWSModule object + client : boto3 client connection object + tags : Dict of tags converted from ansible_dict to boto3 list of dicts + trail_arn : The ARN of the CloudTrail to operate on + curr_tags : Dict of the current tags on resource, if any + dry_run : true/false to determine if changes will be made if needed + """ + + if tags is None: + return False + + curr_tags = curr_tags or {} + + tags_to_add, tags_to_remove = compare_aws_tags(curr_tags, tags, purge_tags=purge_tags) + if not tags_to_add and not tags_to_remove: + return False + + if module.check_mode: + return True + + if tags_to_remove: + remove = {k: curr_tags[k] for k in tags_to_remove} + tags_to_remove = ansible_dict_to_boto3_tag_list(remove) + try: + client.remove_tags(ResourceId=trail_arn, TagsList=tags_to_remove) + except (BotoCoreError, ClientError) as err: + module.fail_json_aws(err, msg="Failed to remove tags from Trail") + + if tags_to_add: + tags_to_add = ansible_dict_to_boto3_tag_list(tags_to_add) + try: + client.add_tags(ResourceId=trail_arn, TagsList=tags_to_add) + except (BotoCoreError, ClientError) as err: + module.fail_json_aws(err, msg="Failed to add tags to Trail") + + return True + + +def get_tag_list(keys, tags): + """ + Returns a list of dicts with tags to act on + keys : set of keys to get the values for + tags : the dict of tags to turn into a list + """ + tag_list = [] + for k in keys: + tag_list.append({'Key': k, 'Value': tags[k]}) + + return tag_list + + +def set_logging(module, client, name, action): + """ + Starts or stops logging based on given state + + module : AnsibleAWSModule object + client : boto3 client 
connection object
+    name : The name or ARN of the CloudTrail to operate on
+    action : start or stop
+    """
+    if action == 'start':
+        try:
+            client.start_logging(Name=name)
+            return client.get_trail_status(Name=name)
+        except (BotoCoreError, ClientError) as err:
+            module.fail_json_aws(err, msg="Failed to start logging")
+    elif action == 'stop':
+        try:
+            client.stop_logging(Name=name)
+            return client.get_trail_status(Name=name)
+        except (BotoCoreError, ClientError) as err:
+            module.fail_json_aws(err, msg="Failed to stop logging")
+    else:
+        module.fail_json(msg="Unsupported logging action")
+
+
+def get_trail_facts(module, client, name):
+    """
+    Describes an existing trail in an account
+
+    module : AnsibleAWSModule object
+    client : boto3 client connection object
+    name : Name of the trail
+    """
+    # get Trail info
+    try:
+        trail_resp = client.describe_trails(trailNameList=[name])
+    except (BotoCoreError, ClientError) as err:
+        module.fail_json_aws(err, msg="Failed to describe Trail")
+
+    # Now check to see if our trail exists and get status and tags
+    if len(trail_resp['trailList']):
+        trail = trail_resp['trailList'][0]
+        try:
+            status_resp = client.get_trail_status(Name=trail['Name'])
+            tags_list = client.list_tags(ResourceIdList=[trail['TrailARN']])
+        except (BotoCoreError, ClientError) as err:
+            module.fail_json_aws(err, msg="Failed to describe Trail")
+
+        trail['IsLogging'] = status_resp['IsLogging']
+        trail['tags'] = boto3_tag_list_to_ansible_dict(tags_list['ResourceTagList'][0]['TagsList'])
+        # Check for non-existent values and populate with None
+        optional_vals = set(['S3KeyPrefix', 'SnsTopicName', 'SnsTopicARN', 'CloudWatchLogsLogGroupArn', 'CloudWatchLogsRoleArn', 'KmsKeyId'])
+        for v in optional_vals - set(trail.keys()):
+            trail[v] = None
+        return trail
+
+    else:
+        # trail doesn't exist, return None
+        return None
+
+
+def delete_trail(module, client, trail_arn):
+    """
+    Delete a CloudTrail
+
+    module : AnsibleAWSModule object
+    client : boto3 client connection object
+    trail_arn : Full CloudTrail ARN
+    """
+    try:
+        client.delete_trail(Name=trail_arn)
+    except (BotoCoreError, ClientError) as err:
+        module.fail_json_aws(err, msg="Failed to delete Trail")
+
+
+def update_trail(module, client, ct_params):
+    """
+    Update a CloudTrail
+
+    module : AnsibleAWSModule object
+    client : boto3 client connection object
+    ct_params : The parameters for the Trail to update
+    """
+    try:
+        client.update_trail(**ct_params)
+    except (BotoCoreError, ClientError) as err:
+        module.fail_json_aws(err, msg="Failed to update Trail")
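+
+
+# Illustrative only (not part of the original module): the helpers above all
+# receive a ct_params dict whose keys mirror the boto3 create_trail/update_trail
+# API parameters as assembled in main() below, for example:
+#
+#     ct_params = {
+#         'Name': 'default',                   # option: name
+#         'S3BucketName': 'mylogbucket',       # option: s3_bucket_name
+#         'IncludeGlobalServiceEvents': True,  # option: include_global_events
+#         'IsMultiRegionTrail': False,         # option: is_multi_region_trail
+#         'S3KeyPrefix': 'cloudtrail',         # option: s3_key_prefix
+#     }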
+
+
+def main():
+    argument_spec = dict(
+        state=dict(default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+        name=dict(default='default'),
+        enable_logging=dict(default=True, type='bool'),
+        s3_bucket_name=dict(),
+        s3_key_prefix=dict(no_log=False),
+        sns_topic_name=dict(),
+        is_multi_region_trail=dict(default=False, type='bool'),
+        enable_log_file_validation=dict(type='bool', aliases=['log_file_validation_enabled']),
+        include_global_events=dict(default=True, type='bool', aliases=['include_global_service_events']),
+        cloudwatch_logs_role_arn=dict(),
+        cloudwatch_logs_log_group_arn=dict(),
+        kms_key_id=dict(),
+        tags=dict(type='dict', aliases=['resource_tags']),
+        purge_tags=dict(default=True, type='bool')
+    )
+
+    required_if = [('state', 'present', ['s3_bucket_name']), ('state', 'enabled', ['s3_bucket_name'])]
+    required_together = [('cloudwatch_logs_role_arn', 'cloudwatch_logs_log_group_arn')]
+
+    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True,
+                              required_together=required_together, required_if=required_if)
+
+    # collect parameters
+    if module.params['state'] in ('present', 'enabled'):
+        state = 'present'
+    elif module.params['state'] in ('absent', 'disabled'):
+        state = 'absent'
+    tags = module.params['tags']
+    purge_tags = module.params['purge_tags']
+    enable_logging = module.params['enable_logging']
+    ct_params = dict(
+        Name=module.params['name'],
+        S3BucketName=module.params['s3_bucket_name'],
+        IncludeGlobalServiceEvents=module.params['include_global_events'],
+        IsMultiRegionTrail=module.params['is_multi_region_trail'],
+    )
+
+    if module.params['s3_key_prefix']:
+        ct_params['S3KeyPrefix'] = module.params['s3_key_prefix'].rstrip('/')
+
+    if module.params['sns_topic_name']:
+        ct_params['SnsTopicName'] = module.params['sns_topic_name']
+
+    if module.params['cloudwatch_logs_role_arn']:
+        ct_params['CloudWatchLogsRoleArn'] = module.params['cloudwatch_logs_role_arn']
+
+    if module.params['cloudwatch_logs_log_group_arn']:
+        ct_params['CloudWatchLogsLogGroupArn'] = module.params['cloudwatch_logs_log_group_arn']
+
+    if module.params['enable_log_file_validation'] is not None:
+        ct_params['EnableLogFileValidation'] = module.params['enable_log_file_validation']
+
+    if module.params["kms_key_id"] is not None:
+        ct_params["KmsKeyId"] = module.params["kms_key_id"]
+
+    client = module.client('cloudtrail')
+    region = module.region
+
+    results = dict(
+        changed=False,
+        exists=False
+    )
+
+    # Get existing trail facts
+    trail = get_trail_facts(module, client, ct_params['Name'])
+
+    # If the trail exists set the result exists variable
+    if trail is not None:
+        results['exists'] = True
+        initial_kms_key_id = trail.get('KmsKeyId')
+
+    if state == 'absent' and results['exists']:
+        # If Trail exists go ahead and delete
+        results['changed'] = True
+        results['exists'] = False
+        results['trail'] = dict()
+        if not module.check_mode:
+            delete_trail(module, client, trail['TrailARN'])
+
+    elif state == 'present' and results['exists']:
+        # If Trail exists see if we need to update it
+        do_update = False
+        for key in ct_params:
+            tkey = str(key)
+            # boto3 has inconsistent parameter naming so we handle it here
+            if key == 'EnableLogFileValidation':
+                tkey = 'LogFileValidationEnabled'
+            # We need to make an empty string equal None
+            if ct_params.get(key) == '':
+                val = None
+            else:
+                val = ct_params.get(key)
+            if val != trail.get(tkey):
+                do_update = True
+                if tkey != 'KmsKeyId':
+                    # We'll check if the KmsKeyId causes changes later since the
+                    # user could've provided a key alias, alias arn, or key id
+                    # and trail['KmsKeyId'] is always a key arn
+                    results['changed'] = True
+                # If we are in check mode copy the changed values to the trail facts in result output to show what would change.
+                if module.check_mode:
+                    trail.update({tkey: ct_params.get(key)})
+
+        if not module.check_mode and do_update:
+            update_trail(module, client, ct_params)
+            trail = get_trail_facts(module, client, ct_params['Name'])
+
+        # Determine if KmsKeyId changed
+        if not module.check_mode:
+            if initial_kms_key_id != trail.get('KmsKeyId'):
+                results['changed'] = True
+        else:
+            new_key = ct_params.get('KmsKeyId')
+            if initial_kms_key_id != new_key:
+                # Assume changed for a moment
+                results['changed'] = True
+
+                # However, new_key could be a key id, alias arn, or alias name
+                # that maps back to the key arn in initial_kms_key_id. So check
+                # all aliases for a match.
+                initial_aliases = get_kms_key_aliases(module, module.client('kms'), initial_kms_key_id)
+                for a in initial_aliases:
+                    if a['AliasName'] == new_key or a['AliasArn'] == new_key or a['TargetKeyId'] == new_key:
+                        results['changed'] = False
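+                # Illustration (hypothetical values): with the alias lookup
+                # above, each of the following spellings of kms_key_id resolves
+                # to the same key, so none of them is reported as a change:
+                #   alias/MyAliasName
+                #   arn:aws:kms:us-east-1:123456789012:alias/MyAliasName
+                #   12345678-1234-1234-1234-123456789012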
+
+        # Check if we need to start/stop logging
+        if enable_logging and not trail['IsLogging']:
+            results['changed'] = True
+            trail['IsLogging'] = True
+            if not module.check_mode:
+                set_logging(module, client, name=ct_params['Name'], action='start')
+        if not enable_logging and trail['IsLogging']:
+            results['changed'] = True
+            trail['IsLogging'] = False
+            if not module.check_mode:
+                set_logging(module, client, name=ct_params['Name'], action='stop')
+
+        # Check if we need to update tags on resource
+        tags_changed = tag_trail(module, client, tags=tags, trail_arn=trail['TrailARN'], curr_tags=trail['tags'],
+                                 purge_tags=purge_tags)
+        if tags_changed:
+            updated_tags = dict()
+            if not purge_tags:
+                updated_tags = trail['tags']
+            updated_tags.update(tags)
+            results['changed'] = True
+            trail['tags'] = updated_tags
+
+        # Populate trail facts in output
+        results['trail'] = camel_dict_to_snake_dict(trail, ignore_list=['tags'])
+
+    elif state == 'present' and not results['exists']:
+        # Trail doesn't exist, just go create it
+        results['changed'] = True
+        results['exists'] = True
+        if not module.check_mode:
+            if tags:
+                ct_params["TagsList"] = ansible_dict_to_boto3_tag_list(tags)
+            # If we aren't in check_mode then actually create it
+            created_trail = create_trail(module, client, ct_params)
+            # Get the trail status
+            try:
+                status_resp = client.get_trail_status(Name=created_trail['Name'])
+            except (BotoCoreError, ClientError) as err:
+                module.fail_json_aws(err, msg="Failed to fetch Trail status")
+            # Set the logging state for the trail to desired value
+            if enable_logging and not status_resp['IsLogging']:
+                set_logging(module, client, name=ct_params['Name'], action='start')
+            if not enable_logging and status_resp['IsLogging']:
+                set_logging(module, client, name=ct_params['Name'], action='stop')
+            # Get facts for newly created Trail
+            trail = get_trail_facts(module, client, ct_params['Name'])
+
+        # If we are in check mode create a fake return structure for the newly minted trail
+        if module.check_mode:
+            acct_id = '123456789012'
+            try:
+                sts_client = module.client('sts')
+                acct_id = sts_client.get_caller_identity()['Account']
+            except (BotoCoreError, ClientError):
+                pass
+            trail = dict()
+            trail.update(ct_params)
+            if 'EnableLogFileValidation' not in ct_params:
+                ct_params['EnableLogFileValidation'] = False
+            # Rename the put-style key to the describe_trails-style key so the
+            # snake_cased check-mode output matches a real run
+            trail['LogFileValidationEnabled'] = ct_params['EnableLogFileValidation']
+            trail.pop('EnableLogFileValidation', None)
+            fake_arn = 'arn:aws:cloudtrail:' + region + ':' + acct_id + ':trail/' + ct_params['Name']
+            trail['HasCustomEventSelectors'] = False
+            trail['HomeRegion'] = region
+            trail['TrailARN'] = fake_arn
+            trail['IsLogging'] = enable_logging
+            trail['tags'] = tags
+        # Populate trail facts in output
+        results['trail'] = camel_dict_to_snake_dict(trail, ignore_list=['tags'])
+
+    module.exit_json(**results)
+
+
+if __name__ == '__main__':
+    main()
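+
+# Not part of the original module: a minimal illustrative play that uses the
+# RETURN values documented above (C(exists), C(trail.is_logging)) to verify
+# that a trail is recording:
+#
+#   - amazon.aws.cloudtrail:
+#       state: present
+#       name: default
+#       s3_bucket_name: mylogbucket
+#     register: ct
+#
+#   - ansible.builtin.assert:
+#       that:
+#         - ct.exists
+#         - ct.trail.is_logging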
diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudtrail_info.py b/ansible_collections/amazon/aws/plugins/modules/cloudtrail_info.py
new file mode 100644
index 000000000..0429bb7f0
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/cloudtrail_info.py
@@ -0,0 +1,238 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cloudtrail_info
+version_added: 5.0.0
+short_description: Gather information about trails in AWS CloudTrail
+description:
+  - Gather information about trails in AWS CloudTrail.
+author: "Gomathi Selvi Srinivasan (@GomathiselviS)"
+options:
+  trail_names:
+    type: list
+    elements: str
+    default: []
+    description:
+      - Specifies a list of trail names, trail ARNs, or both, of the trails to describe.
+      - If an empty list is specified, information for the trail in the current region is returned.
+  include_shadow_trails:
+    type: bool
+    default: true
+    description: Specifies whether to include shadow trails in the response.
+extends_documentation_fragment:
+  - amazon.aws.aws
+  - amazon.aws.ec2
+  - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all trails
+- amazon.aws.cloudtrail_info:
+
+# Gather information about a particular trail
+- amazon.aws.cloudtrail_info:
+    trail_names:
+      - arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail
+
+'''
+
+RETURN = '''
+trail_list:
+  description: List of trail objects. Each element consists of a dict with all the information related to that trail.
+  type: list
+  elements: dict
+  returned: always
+  contains:
+    name:
+      description: Name of the trail.
+      type: str
+      sample: "MyTrail"
+    s3_bucket_name:
+      description: Name of the Amazon S3 bucket into which CloudTrail delivers the trail files.
+      type: str
+      sample: "aws-cloudtrail-logs-xxxx"
+    s3_key_prefix:
+      description: Amazon S3 key prefix that comes after the name of the bucket that is designated for log file delivery.
+      type: str
+      sample: "xxxx"
+    sns_topic_arn:
+      description: ARN of the Amazon SNS topic that CloudTrail uses to send notifications when log files are delivered.
+      type: str
+      sample: "arn:aws:sns:us-east-2:123456789012:MyTopic"
+    include_global_service_events:
+      description: If C(true), AWS API calls from AWS global services such as IAM are included.
+      type: bool
+      sample: true
+    is_multi_region_trail:
+      description: Specifies whether the trail exists only in one region or exists in all regions.
+      type: bool
+      sample: true
+    home_region:
+      description: The region in which the trail was created.
+      type: str
+      sample: "us-east-1"
+    trail_arn:
+      description: Specifies the ARN of the trail.
+      type: str
+      sample: "arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail"
+    log_file_validation_enabled:
+      description: Specifies whether log file validation is enabled.
+      type: bool
+      sample: true
+    cloud_watch_logs_log_group_arn:
+      description: Specifies the ARN of the log group to which CloudTrail logs will be delivered.
+      type: str
+      sample: "arn:aws:logs:us-east-2:123456789012:log-group:CloudTrail/DefaultLogGroup:*"
+    cloud_watch_logs_role_arn:
+      description: Specifies the role for the CloudWatch Logs endpoint to assume to write to a user's log group.
+      type: str
+      sample: "arn:aws:iam::123456789012:role/CloudTrail_CloudWatchLogs_Role"
+    kms_key_id:
+      description: Specifies the KMS key ID that encrypts the logs delivered by CloudTrail.
+      type: str
+      sample: "arn:aws:kms:us-east-2:123456789012:key/12345678-1234-1234-1234-123456789012"
+    has_custom_event_selectors:
+      description: Specifies if the trail has custom event selectors.
+ type: bool + sample: true + has_insight_selectors: + description: Specifies whether a trail has insight types specified in an InsightSelector list. + type: bool + sample: true + is_organization_trail: + description: Specifies whether the trail is an organization trail. + type: bool + sample: true + is_logging: + description: Whether the CloudTrail is currently logging AWS API calls. + type: bool + sample: true + latest_delivery_error: + description: Displays any Amazon S3 error that CloudTrail encountered when attempting to deliver log files to the designated bucket. + type: str + latest_notification_error: + description: Displays any Amazon SNS error that CloudTrail encountered when attempting to send a notification. + type: str + latest_delivery_time: + description: Specifies the date and time that CloudTrail last delivered log files to an account's Amazon S3 bucket. + type: str + start_logging_time: + description: Specifies the most recent date and time when CloudTrail started recording API calls for an AWS account. + type: str + stop_logging_time: + description: Specifies the most recent date and time when CloudTrail stopped recording API calls for an AWS account. + type: str + latest_cloud_watch_logs_delivery_error: + description: Displays any CloudWatch Logs error that CloudTrail encountered when attempting to deliver logs to CloudWatch Logs. + type: str + latest_cloud_watch_logs_delivery_time: + description: Displays the most recent date and time when CloudTrail delivered logs to CloudWatch Logs. + type: str + latest_digest_delivery_time: + description: Specifies the date and time that CloudTrail last delivered a digest file to an account's Amazon S3 bucket. + type: str + latest_digest_delivery_error: + description: Displays any Amazon S3 error that CloudTrail encountered when attempting to deliver a digest file to the designated bucket. + type: str + resource_id: + description: Specifies the ARN of the resource. + type: str + tags: + description: Any tags assigned to the cloudtrail. 
+ type: dict + returned: always + sample: "{ 'my_tag_key': 'my_tag_value' }" + +''' + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict + + +def get_trails(connection, module): + all_trails = [] + try: + result = connection.get_paginator('list_trails') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to get the trails.") + for trail in result.paginate(): + all_trails.extend(list_cloud_trails(trail)) + return all_trails + + +def list_cloud_trails(trail_dict): + return [x["TrailARN"] for x in trail_dict["Trails"]] + + +def get_trail_detail(connection, module): + output = {} + trail_name_list = module.params.get("trail_names") + include_shadow_trails = module.params.get("include_shadow_trails") + if not trail_name_list: + trail_name_list = get_trails(connection, module) + try: + result = connection.describe_trails(trailNameList=trail_name_list, includeShadowTrails=include_shadow_trails, aws_retry=True) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to get the trails.") + # Turn the boto3 result in to ansible_friendly_snaked_names + snaked_cloud_trail = [] + for cloud_trail in result['trailList']: + try: + status_dict = connection.get_trail_status(Name=cloud_trail["TrailARN"], aws_retry=True) + cloud_trail.update(status_dict) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to get the trail status") + try: + tag_list = connection.list_tags(ResourceIdList=[cloud_trail["TrailARN"]]) + for tag_dict in tag_list["ResourceTagList"]: + cloud_trail.update(tag_dict) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.warn("Failed to get the trail tags - {0}".format(e)) + snaked_cloud_trail.append(camel_dict_to_snake_dict(cloud_trail)) + + # Turn the boto3 result in to ansible friendly tag dictionary + for tr in snaked_cloud_trail: + if 'tags_list' in tr: + tr['tags'] = boto3_tag_list_to_ansible_dict(tr['tags_list'], 'key', 'value') + del (tr['tags_list']) + if 'response_metadata' in tr: + del (tr['response_metadata']) + output['trail_list'] = snaked_cloud_trail + return output + + +def main(): + argument_spec = dict( + trail_names=dict(type='list', elements='str', default=[]), + include_shadow_trails=dict(type='bool', default=True), + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + try: + connection = module.client('cloudtrail', retry_decorator=AWSRetry.jittered_backoff()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') + result = get_trail_detail(connection, module) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudwatch_metric_alarm.py b/ansible_collections/amazon/aws/plugins/modules/cloudwatch_metric_alarm.py new file mode 100644 index 000000000..af66b39e0 --- /dev/null +++ 
b/ansible_collections/amazon/aws/plugins/modules/cloudwatch_metric_alarm.py @@ -0,0 +1,499 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +module: cloudwatch_metric_alarm +short_description: "Create/update or delete AWS CloudWatch 'metric alarms'" +version_added: 5.0.0 +description: + - Can create or delete AWS CloudWatch metric alarms. + - Metrics you wish to alarm on must already exist. + - Prior to release 5.0.0 this module was called C(community.aws.ec2_metric_alarm). + The usage did not change. + - This module was originally added to C(community.aws) in release 1.0.0. +author: + - "Zacharie Eakin (@Zeekin)" +options: + state: + description: + - Register or deregister the alarm. + choices: ['present', 'absent'] + default: 'present' + type: str + name: + description: + - Unique name for the alarm. + required: true + type: str + metric_name: + description: + - Name of the monitored metric (e.g. C(CPUUtilization)). + - Metric must already exist. + required: false + type: str + aliases: ['metric'] + metrics: + description: + - An array of MetricDataQuery structures that enable + you to create an alarm based on the result of a metric math expression. + type: list + required: false + version_added: "5.5.0" + elements: dict + suboptions: + id: + description: + - A short name used to tie this object to the results in the response. + type: str + required: true + metric_stat: + description: The metric to be returned, along with statistics, period, and units. + type: dict + required: false + suboptions: + metric: + description: The metric to return, including the metric name, namespace, and dimensions. + type: dict + required: false + suboptions: + namespace: + description: The namespace of the metric. + type: str + required: false + metric_name: + description: The name of the metric. + type: str + required: True + dimensions: + description: a name/value pair that is part of the identity of a metric. + type: list + elements: dict + required: false + suboptions: + name: + description: The name of the dimension. + type: str + required: True + value: + description: The value of the dimension. + type: str + required: True + period: + description: The granularity, in seconds, of the returned data points. + type: int + required: True + stat: + description: The statistic to return. It can include any CloudWatch statistic or extended statistic. + type: str + required: True + unit: + description: Unit to use when storing the metric. + type: str + required: false + expression: + description: + - This field can contain either a Metrics Insights query, + or a metric math expression to be performed on the returned data. + type: str + required: false + label: + description: A human-readable label for this metric or expression. 
+ type: str + required: false + return_data: + description: This option indicates whether to return the timestamps and raw data values of this metric. + type: bool + required: false + period: + description: The granularity, in seconds, of the returned data points. + type: int + required: false + account_id: + description: The ID of the account where the metrics are located, if this is a cross-account alarm. + type: str + required: false + namespace: + description: + - Name of the appropriate namespace (C(AWS/EC2), C(System/Linux), etc.), which determines the category it will appear under in CloudWatch. + required: false + type: str + statistic: + description: + - Operation applied to the metric. + - Works in conjunction with I(period) and I(evaluation_periods) to determine the comparison value. + required: false + choices: ['SampleCount','Average','Sum','Minimum','Maximum'] + type: str + extended_statistic: + description: The percentile statistic for the metric specified in the metric name. + type: str + required: false + version_added: "5.5.0" + comparison: + description: + - Determines how the threshold value is compared + required: false + type: str + choices: + - 'GreaterThanOrEqualToThreshold' + - 'GreaterThanThreshold' + - 'LessThanThreshold' + - 'LessThanOrEqualToThreshold' + threshold: + description: + - Sets the min/max bound for triggering the alarm. + required: false + type: float + period: + description: + - The time (in seconds) between metric evaluations. + required: false + type: int + evaluation_periods: + description: + - The number of times in which the metric is evaluated before final calculation. + required: false + type: int + unit: + description: + - The threshold's unit of measurement. + required: false + type: str + choices: + - 'Seconds' + - 'Microseconds' + - 'Milliseconds' + - 'Bytes' + - 'Kilobytes' + - 'Megabytes' + - 'Gigabytes' + - 'Terabytes' + - 'Bits' + - 'Kilobits' + - 'Megabits' + - 'Gigabits' + - 'Terabits' + - 'Percent' + - 'Count' + - 'Bytes/Second' + - 'Kilobytes/Second' + - 'Megabytes/Second' + - 'Gigabytes/Second' + - 'Terabytes/Second' + - 'Bits/Second' + - 'Kilobits/Second' + - 'Megabits/Second' + - 'Gigabits/Second' + - 'Terabits/Second' + - 'Count/Second' + - 'None' + description: + description: + - A longer description of the alarm. + required: false + type: str + dimensions: + description: + - A dictionary describing which metric the alarm is applied to. + - 'For more information see the AWS documentation:' + - U(https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html#Dimension) + required: false + type: dict + default: {} + alarm_actions: + description: + - A list of the names action(s) taken when the alarm is in the C(alarm) status, denoted as Amazon Resource Name(s). + required: false + type: list + elements: str + default: [] + insufficient_data_actions: + description: + - A list of the names of action(s) to take when the alarm is in the C(insufficient_data) status. + required: false + type: list + elements: str + default: [] + ok_actions: + description: + - A list of the names of action(s) to take when the alarm is in the C(ok) status, denoted as Amazon Resource Name(s). + required: false + type: list + elements: str + default: [] + treat_missing_data: + description: + - Sets how the alarm handles missing data points. 
+    required: false
+    type: str
+    choices:
+      - 'breaching'
+      - 'notBreaching'
+      - 'ignore'
+      - 'missing'
+    default: 'missing'
+extends_documentation_fragment:
+  - amazon.aws.aws
+  - amazon.aws.ec2
+  - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+  - name: create alarm
+    amazon.aws.cloudwatch_metric_alarm:
+      state: present
+      region: ap-southeast-2
+      name: "cpu-low"
+      metric_name: "CPUUtilization"
+      namespace: "AWS/EC2"
+      statistic: Average
+      comparison: "LessThanOrEqualToThreshold"
+      threshold: 5.0
+      period: 300
+      evaluation_periods: 3
+      unit: "Percent"
+      description: "This will alarm when an instance's CPU usage average is lower than 5% for 15 minutes"
+      dimensions: {'InstanceId': 'i-XXX'}
+      alarm_actions: ["action1", "action2"]
+
+  - name: create alarm with metrics
+    amazon.aws.cloudwatch_metric_alarm:
+      state: present
+      region: ap-southeast-2
+      name: "cpu-low"
+      metrics:
+        - id: 'CPU'
+          metric_stat:
+            metric:
+              dimensions:
+                name: "InstanceId"
+                value: "i-xx"
+              metric_name: "CPUUtilization"
+              namespace: "AWS/EC2"
+            period: "300"
+            stat: "Average"
+            unit: "Percent"
+          return_data: False
+      alarm_actions: ["action1", "action2"]
+
+  - name: Create an alarm to recover a failed instance
+    amazon.aws.cloudwatch_metric_alarm:
+      state: present
+      region: us-west-1
+      name: "recover-instance"
+      metric: "StatusCheckFailed_System"
+      namespace: "AWS/EC2"
+      statistic: "Minimum"
+      comparison: "GreaterThanOrEqualToThreshold"
+      threshold: 1.0
+      period: 60
+      evaluation_periods: 2
+      unit: "Count"
+      description: "This will recover an instance when it fails"
+      dimensions: {"InstanceId": 'i-XXX'}
+      alarm_actions: ["arn:aws:automate:us-west-1:ec2:recover"]
+'''
+
+try:
+    from botocore.exceptions import ClientError
+except ImportError:
+    pass  # protected by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+
+def create_metric_alarm(connection, module, params):
+    alarms = connection.describe_alarms(AlarmNames=[params['AlarmName']])
+    if params.get('Dimensions'):
+        if not isinstance(params['Dimensions'], list):
+            fixed_dimensions = []
+            for key, value in params['Dimensions'].items():
+                fixed_dimensions.append({'Name': key, 'Value': value})
+            params['Dimensions'] = fixed_dimensions
+
+    if not alarms['MetricAlarms']:
+        try:
+            if not module.check_mode:
+                connection.put_metric_alarm(**params)
+            changed = True
+        except ClientError as e:
+            module.fail_json_aws(e)
+
+    else:
+        changed = False
+        alarm = alarms['MetricAlarms'][0]
+
+        # Workaround for alarms created before TreatMissingData was introduced
+        if 'TreatMissingData' not in alarm.keys():
+            alarm['TreatMissingData'] = 'missing'
+
+        # Exclude certain props from change detection
+        for key in ['ActionsEnabled', 'StateValue', 'StateReason',
+                    'StateReasonData', 'StateUpdatedTimestamp',
+                    'StateTransitionedTimestamp',
+                    'AlarmArn', 'AlarmConfigurationUpdatedTimestamp', 'Metrics']:
+            alarm.pop(key, None)
+        if alarm != params:
+            changed = True
+            alarm = params
+
+        try:
+            if changed:
+                if not module.check_mode:
+                    connection.put_metric_alarm(**alarm)
+        except ClientError as e:
+            module.fail_json_aws(e)
+
+    try:
+        alarms = connection.describe_alarms(AlarmNames=[params['AlarmName']])
+    except ClientError as e:
+        module.fail_json_aws(e)
+
+    result = {}
+    if alarms['MetricAlarms']:
+        if alarms['MetricAlarms'][0].get('Metrics'):
+            metric_list = []
+            for
metric_element in alarms['MetricAlarms'][0]['Metrics']: + metric_list.append(camel_dict_to_snake_dict(metric_element)) + alarms['MetricAlarms'][0]['Metrics'] = metric_list + result = alarms['MetricAlarms'][0] + + module.exit_json(changed=changed, + name=result.get('AlarmName'), + actions_enabled=result.get('ActionsEnabled'), + alarm_actions=result.get('AlarmActions'), + alarm_arn=result.get('AlarmArn'), + comparison=result.get('ComparisonOperator'), + description=result.get('AlarmDescription'), + dimensions=result.get('Dimensions'), + evaluation_periods=result.get('EvaluationPeriods'), + insufficient_data_actions=result.get('InsufficientDataActions'), + last_updated=result.get('AlarmConfigurationUpdatedTimestamp'), + metric=result.get('MetricName'), + metric_name=result.get('MetricName'), + metrics=result.get('Metrics'), + namespace=result.get('Namespace'), + ok_actions=result.get('OKActions'), + period=result.get('Period'), + state_reason=result.get('StateReason'), + state_value=result.get('StateValue'), + statistic=result.get('Statistic'), + threshold=result.get('Threshold'), + treat_missing_data=result.get('TreatMissingData'), + unit=result.get('Unit')) + + +def delete_metric_alarm(connection, module, params): + alarms = connection.describe_alarms(AlarmNames=[params['AlarmName']]) + + if alarms['MetricAlarms']: + try: + if not module.check_mode: + connection.delete_alarms(AlarmNames=[params['AlarmName']]) + module.exit_json(changed=True) + except (ClientError) as e: + module.fail_json_aws(e) + else: + module.exit_json(changed=False) + + +def main(): + argument_spec = dict( + name=dict(required=True, type='str'), + metric_name=dict(type='str', aliases=['metric']), + namespace=dict(type='str'), + statistic=dict(type='str', choices=['SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum']), + comparison=dict(type='str', choices=['LessThanOrEqualToThreshold', 'LessThanThreshold', 'GreaterThanThreshold', + 'GreaterThanOrEqualToThreshold']), + threshold=dict(type='float'), + period=dict(type='int'), + unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', + 'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', + 'Bytes/Second', 'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second', + 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second', 'Gigabits/Second', + 'Terabits/Second', 'Count/Second', 'None']), + evaluation_periods=dict(type='int'), + extended_statistic=dict(type='str'), + description=dict(type='str'), + dimensions=dict(type='dict'), + alarm_actions=dict(type='list', default=[], elements='str'), + insufficient_data_actions=dict(type='list', default=[], elements='str'), + ok_actions=dict(type='list', default=[], elements='str'), + treat_missing_data=dict(type='str', choices=['breaching', 'notBreaching', 'ignore', 'missing'], default='missing'), + state=dict(default='present', choices=['present', 'absent']), + metrics=dict(type='list', elements='dict', default=[]), + ) + + mutually_exclusive = [ + ['metric_name', 'metrics'], + ['dimensions', 'metrics'], + ['period', 'metrics'], + ['namespace', 'metrics'], + ['statistic', 'metrics'], + ['extended_statistic', 'metrics'], + ['unit', 'metrics'], + ['statistic', 'extended_statistic'], + ] + + module = AnsibleAWSModule( + argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True, + ) + + state = module.params.get('state') + + params = dict() + params['AlarmName'] = 
module.params.get('name') + params['MetricName'] = module.params.get('metric_name') + params['Namespace'] = module.params.get('namespace') + params['Statistic'] = module.params.get('statistic') + params['ComparisonOperator'] = module.params.get('comparison') + params['Threshold'] = module.params.get('threshold') + params['Period'] = module.params.get('period') + params['EvaluationPeriods'] = module.params.get('evaluation_periods') + if module.params.get('unit'): + params['Unit'] = module.params.get('unit') + params['AlarmDescription'] = module.params.get('description') + params['Dimensions'] = module.params.get('dimensions') + params['AlarmActions'] = module.params.get('alarm_actions', []) + params['InsufficientDataActions'] = module.params.get('insufficient_data_actions', []) + params['OKActions'] = module.params.get('ok_actions', []) + params['TreatMissingData'] = module.params.get('treat_missing_data') + if module.params.get('metrics'): + params['Metrics'] = snake_dict_to_camel_dict(module.params['metrics'], capitalize_first=True) + if module.params.get('extended_statistic'): + params['ExtendedStatistic'] = module.params.get('extended_statistic') + + for key, value in list(params.items()): + if value is None: + del params[key] + + connection = module.client('cloudwatch') + + if state == 'present': + create_metric_alarm(connection, module, params) + elif state == 'absent': + delete_metric_alarm(connection, module, params) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudwatch_metric_alarm_info.py b/ansible_collections/amazon/aws/plugins/modules/cloudwatch_metric_alarm_info.py new file mode 100644 index 000000000..24678b054 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/cloudwatch_metric_alarm_info.py @@ -0,0 +1,323 @@ +#!/usr/bin/python +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://wwww.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: cloudwatch_metric_alarm_info +version_added: 5.0.0 +short_description: Gather information about the alarms for the specified metric +description: + - Retrieves the alarms for the specified metric. +author: + - Mandar Vijay Kulkarni (@mandar242) +options: + alarm_names: + description: + - The name of the metric. + required: false + type: list + elements: str + alarm_name_prefix: + description: + - An alarm name prefix to retrieve information about alarms that have names that start with this prefix. + - Can not be used with I(alarm_names). + required: false + type: str + alarm_type: + description: + - Specify this to return metric alarms or composite alarms. + - Module is defaulted to return metric alarms but can return composite alarms if I(alarm_type=CompositeAlarm). + required: false + type: str + default: MetricAlarm + choices: ['CompositeAlarm', 'MetricAlarm'] + children_of_alarm_name: + description: + - If specified returns information about the "children" alarms of the alarm name specified. + required: false + type: str + parents_of_alarm_name: + description: + - If specified returns information about the "parent" alarms of the alarm name specified. + required: false + type: str + state_value: + description: + - If specified returns information only about alarms that are currently in the particular state. 
+ required: false + type: str + choices: ['OK', 'ALARM', 'INSUFFICIENT_DATA'] + action_prefix: + description: + - This parameter can be used to filter the results of the operation to only those alarms that use a certain alarm action. + required: false + type: str + +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 +- amazon.aws.boto3 + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: describe the metric alarm based on alarm names + amazon.aws.cloudwatch_metric_alarm_info: + alarm_names: + - my-test-alarm-1 + - my-test-alarm-2 + +- name: describe the metric alarm based alarm names and state value + amazon.aws.cloudwatch_metric_alarm_info: + alarm_names: + - my-test-alarm-1 + - my-test-alarm-2 + state_value: OK + +- name: describe the metric alarm based alarm names prefix + amazon.aws.cloudwatch_metric_alarm_info: + alarm_name_prefix: my-test- + +''' + +RETURN = ''' +metric_alarms: + description: The gathered information about specified metric alarms. + returned: when success + type: list + elements: dict + contains: + alarm_name: + description: Unique name for the alarm. + returned: always + type: str + alarm_arn: + description: The Amazon Resource Name (ARN) of the alarm. + returned: always + type: str + alarm_description: + description: The description of the alarm. + returned: always + type: str + alarm_configuration_updated_timestamp: + description: The time stamp of the last update to the alarm configuration. + returned: always + type: str + actions_enabled: + description: Indicates whether actions should be executed during any changes to the alarm state. + returned: always + type: bool + ok_actions: + description: The actions to execute when this alarm transitions to an OK state from any other state. + returned: always + type: list + elements: str + alarm_actions: + description: The actions to execute when this alarm transitions to an ALARM state from any other state. + returned: always + type: list + elements: str + insufficient_data_actions: + description: The actions to execute when this alarm transitions to an INSUFFICIENT_DATA state from any other state. + returned: always + type: list + elements: str + state_value: + description: The state value for the alarm. + returned: always + type: str + state_reason: + description: An explanation for the alarm state, in text format. + returned: always + type: str + state_reason_data: + description: An explanation for the alarm state, in JSON format. + returned: always + type: str + state_updated_timestamp: + description: The time stamp of the last update to the alarm state. + returned: always + type: str + metric_name: + description: Name of the monitored metric (e.g. C(CPUUtilization)). + returned: always + type: str + namespace: + description: + - Name of the appropriate namespace (C(AWS/EC2), C(System/Linux), etc.). + - Determines the category it will appear under in CloudWatch. + returned: always + type: str + statistic: + description: The statistic for the metric associated with the alarm, other than percentile. + returned: always + type: str + extended_statistic: + description: The percentile statistic for the metric associated with the alarm. + returned: always + type: str + dimensions: + description: The dimensions for the metric. + returned: always + type: list + elements: dict + contains: + name: + description: The name of the dimension. + returned: always + type: str + value: + description: The value of the dimension. 
+          returned: always
+          type: str
+    period:
+      description:
+        - The length, in seconds, used each time the metric specified in MetricName is evaluated.
+        - Valid values are 10, 30, and any multiple of 60.
+      returned: always
+      type: int
+    unit:
+      description: Unit used when storing the metric.
+      returned: always
+      type: str
+    evaluation_periods:
+      description: The number of periods over which data is compared to the specified threshold.
+      returned: always
+      type: int
+    datapoints_to_alarm:
+      description: The number of data points that must be breaching to trigger the alarm.
+      returned: always
+      type: int
+    threshold:
+      description: The value to compare with the specified statistic.
+      returned: always
+      type: float
+    comparison_operator:
+      description: The arithmetic operation to use when comparing the specified statistic and threshold.
+      returned: always
+      type: str
+    treat_missing_data:
+      description: Sets how the alarm handles missing data points.
+      returned: always
+      type: str
+    evaluate_low_sample_count_percentile:
+      description:
+        - Used only for alarms based on percentiles.
+        - If I(ignore), the alarm state does not change during periods with too few data points to be statistically significant.
+        - If I(evaluate) or this parameter is not used, the alarm is always evaluated and possibly changes state.
+      returned: always
+      type: str
+    metrics:
+      description: An array of MetricDataQuery structures, used in an alarm based on a metric math expression.
+      returned: always
+      type: list
+      elements: dict
+    threshold_metric_id:
+      description: This is the ID of the ANOMALY_DETECTION_BAND function used as the threshold for the alarm.
+      returned: always
+      type: str
+
+'''
+
+
+try:
+    import botocore
+except ImportError:
+    pass  # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+
+@AWSRetry.jittered_backoff(retries=10)
+def _describe_alarms(connection, **params):
+    paginator = connection.get_paginator('describe_alarms')
+    return paginator.paginate(**params).build_full_result()
+
+
+def describe_metric_alarms_info(connection, module):
+
+    params = build_params(module)
+
+    alarm_type_to_return = module.params.get('alarm_type')
+
+    try:
+        describe_metric_alarms_info_response = _describe_alarms(connection, **params)
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg='Failed to describe cloudwatch metric alarm')
+
+    result = []
+
+    if alarm_type_to_return == 'CompositeAlarm':
+        for response_list_item in describe_metric_alarms_info_response['CompositeAlarms']:
+            result.append(camel_dict_to_snake_dict(response_list_item))
+        module.exit_json(composite_alarms=result)
+
+    for response_list_item in describe_metric_alarms_info_response['MetricAlarms']:
+        result.append(camel_dict_to_snake_dict(response_list_item))
+
+    module.exit_json(metric_alarms=result)
+
+
+def build_params(module):
+
+    params = {}
+
+    if module.params.get('alarm_names'):
+        params['AlarmNames'] = module.params.get('alarm_names')
+
+    if module.params.get('alarm_name_prefix'):
+        params['AlarmNamePrefix'] = module.params.get('alarm_name_prefix')
+
+    if module.params.get('children_of_alarm_name'):
+        params['ChildrenOfAlarmName'] =
module.params.get('children_of_alarm_name') + + if module.params.get('parents_of_alarm_name'): + params['ParentsOfAlarmName'] = module.params.get('parents_of_alarm_name') + + if module.params.get('state_value'): + params['StateValue'] = module.params.get('state_value') + + if module.params.get('action_prefix'): + params['ActionPrefix'] = module.params.get('action_prefix') + + return params + + +def main(): + + argument_spec = dict( + alarm_names=dict(type='list', elements='str', required=False), + alarm_name_prefix=dict(type='str', required=False), + alarm_type=dict(type='str', choices=['CompositeAlarm', 'MetricAlarm'], default='MetricAlarm', required=False), + children_of_alarm_name=dict(type='str', required=False), + parents_of_alarm_name=dict(type='str', required=False), + state_value=dict(type='str', choices=['OK', 'ALARM', 'INSUFFICIENT_DATA'], required=False), + action_prefix=dict(type='str', required=False), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + mutually_exclusive=[['alarm_names', 'alarm_name_prefix']], + supports_check_mode=True + ) + + try: + connection = module.client('cloudwatch', retry_decorator=AWSRetry.jittered_backoff()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') + + describe_metric_alarms_info(connection, module) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudwatchevent_rule.py b/ansible_collections/amazon/aws/plugins/modules/cloudwatchevent_rule.py new file mode 100644 index 000000000..3368ba69a --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/cloudwatchevent_rule.py @@ -0,0 +1,517 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: cloudwatchevent_rule +version_added: 5.0.0 +short_description: Manage CloudWatch Event rules and targets +description: + - This module creates and manages CloudWatch event rules and targets. + - This module was originally added to C(community.aws) in release 1.0.0. +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.boto3 + +author: + - "Jim Dalton (@jsdalton) " +notes: + - A rule must contain at least an I(event_pattern) or I(schedule_expression). A + rule can have both an I(event_pattern) and a I(schedule_expression), in which + case the rule will trigger on matching events as well as on a schedule. + - When specifying targets, I(input), I(input_path), I(input_paths_map) and I(input_template) + are mutually-exclusive and optional parameters. +options: + name: + description: + - The name of the rule you are creating, updating or deleting. No spaces + or special characters allowed (i.e. must match C([\.\-_A-Za-z0-9]+)). + required: true + type: str + schedule_expression: + description: + - A cron or rate expression that defines the schedule the rule will + trigger on. For example, C(cron(0 20 * * ? *)), C(rate(5 minutes)). + required: false + type: str + event_pattern: + description: + - A string pattern that is used to match against incoming events to determine if the rule + should be triggered. + required: false + type: json + state: + description: + - Whether the rule is present (and enabled), disabled, or absent. 
+ choices: ["present", "disabled", "absent"] + default: present + required: false + type: str + description: + description: + - A description of the rule. + required: false + type: str + role_arn: + description: + - The Amazon Resource Name (ARN) of the IAM role associated with the rule. + required: false + type: str + targets: + type: list + elements: dict + default: [] + description: + - A list of targets to add to or update for the rule. + suboptions: + id: + type: str + required: true + description: The unique target assignment ID. + arn: + type: str + required: true + description: The ARN associated with the target. + role_arn: + type: str + description: The ARN of the IAM role to be used for this target when the rule is triggered. + input: + type: json + description: + - A JSON object that will override the event data passed to the target. + - If neither I(input) nor I(input_path) nor I(input_transformer) + is specified, then the entire event is passed to the target in JSON form. + input_path: + type: str + description: + - A JSONPath string (e.g. C($.detail)) that specifies the part of the event data to be + passed to the target. + - If neither I(input) nor I(input_path) nor I(input_transformer) + is specified, then the entire event is passed to the target in JSON form. + input_transformer: + type: dict + description: + - Settings to support providing custom input to a target based on certain event data. + version_added: 4.1.0 + version_added_collection: community.aws + suboptions: + input_paths_map: + type: dict + description: + - A dict that specifies the transformation of the event data to + custom input parameters. + input_template: + type: json + description: + - A string that templates the values input_paths_map extracted from the event data. + It is used to produce the output you want to be sent to the target. + ecs_parameters: + type: dict + description: + - Contains the ECS task definition and task count to be used, if the event target is an ECS task. + suboptions: + task_definition_arn: + type: str + description: The full ARN of the task definition. + required: true + task_count: + type: int + description: The number of tasks to create based on I(task_definition). + required: false +''' + +EXAMPLES = r''' +- amazon.aws.cloudwatchevent_rule: + name: MyCronTask + schedule_expression: "cron(0 20 * * ? *)" + description: Run my scheduled task + targets: + - id: MyTargetId + arn: arn:aws:lambda:us-east-1:123456789012:function:MyFunction + +- amazon.aws.cloudwatchevent_rule: + name: MyDisabledCronTask + schedule_expression: "rate(5 minutes)" + description: Run my disabled scheduled task + state: disabled + targets: + - id: MyOtherTargetId + arn: arn:aws:lambda:us-east-1:123456789012:function:MyFunction + input: '{"foo": "bar"}' + +- amazon.aws.cloudwatchevent_rule: + name: MyInstanceLaunchEvent + description: "Rule for EC2 instance launch" + state: present + event_pattern: '{"source":["aws.ec2"],"detail-type":["EC2 Instance State-change Notification"],"detail":{"state":["pending"]}}' + targets: + - id: MyTargetSnsTopic + arn: arn:aws:sns:us-east-1:123456789012:MySNSTopic + input_transformer: + input_paths_map: + instance: "$.detail.instance-id" + state: "$.detail.state" + input_template: " is in state " + +- amazon.aws.cloudwatchevent_rule: + name: MyCronTask + state: absent +''' + +RETURN = r''' +rule: + description: CloudWatch Event rule data. 
+ returned: success + type: dict + sample: + arn: 'arn:aws:events:us-east-1:123456789012:rule/MyCronTask' + description: 'Run my scheduled task' + name: 'MyCronTask' + schedule_expression: 'cron(0 20 * * ? *)' + state: 'ENABLED' +targets: + description: CloudWatch Event target(s) assigned to the rule. + returned: success + type: list + sample: "[{ 'arn': 'arn:aws:lambda:us-east-1:123456789012:function:MyFunction', 'id': 'MyTargetId' }]" +''' + +import json + +try: + import botocore +except ImportError: + pass # handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters + + +def _format_json(json_string): + # When passed a simple string, Ansible doesn't quote it to ensure it's a *quoted* string + try: + json.loads(json_string) + return json_string + except json.decoder.JSONDecodeError: + return str(json.dumps(json_string)) + + +class CloudWatchEventRule(object): + def __init__(self, module, name, client, schedule_expression=None, + event_pattern=None, description=None, role_arn=None): + self.name = name + self.client = client + self.changed = False + self.schedule_expression = schedule_expression + self.event_pattern = event_pattern + self.description = description + self.role_arn = role_arn + self.module = module + + def describe(self): + """Returns the existing details of the rule in AWS""" + try: + rule_info = self.client.describe_rule(Name=self.name) + except is_boto3_error_code('ResourceNotFoundException'): + return {} + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + self.module.fail_json_aws(e, msg="Could not describe rule %s" % self.name) + return self._snakify(rule_info) + + def put(self, enabled=True): + """Creates or updates the rule in AWS""" + request = { + 'Name': self.name, + 'State': "ENABLED" if enabled else "DISABLED", + } + if self.schedule_expression: + request['ScheduleExpression'] = self.schedule_expression + if self.event_pattern: + request['EventPattern'] = self.event_pattern + if self.description: + request['Description'] = self.description + if self.role_arn: + request['RoleArn'] = self.role_arn + try: + response = self.client.put_rule(**request) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Could not create/update rule %s" % self.name) + self.changed = True + return response + + def delete(self): + """Deletes the rule in AWS""" + self.remove_all_targets() + + try: + response = self.client.delete_rule(Name=self.name) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Could not delete rule %s" % self.name) + self.changed = True + return response + + def enable(self): + """Enables the rule in AWS""" + try: + response = self.client.enable_rule(Name=self.name) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Could not enable rule %s" % self.name) + self.changed = True + return response + + def disable(self): + """Disables the rule in AWS""" + try: + response = 
self.client.disable_rule(Name=self.name) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Could not disable rule %s" % self.name) + self.changed = True + return response + + def list_targets(self): + """Lists the existing targets for the rule in AWS""" + try: + targets = self.client.list_targets_by_rule(Rule=self.name) + except is_boto3_error_code('ResourceNotFoundException'): + return [] + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + self.module.fail_json_aws(e, msg="Could not find target for rule %s" % self.name) + return self._snakify(targets)['targets'] + + def put_targets(self, targets): + """Creates or updates the provided targets on the rule in AWS""" + if not targets: + return + request = { + 'Rule': self.name, + 'Targets': self._targets_request(targets), + } + try: + response = self.client.put_targets(**request) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Could not create/update rule targets for rule %s" % self.name) + self.changed = True + return response + + def remove_targets(self, target_ids): + """Removes the provided targets from the rule in AWS""" + if not target_ids: + return + request = { + 'Rule': self.name, + 'Ids': target_ids + } + try: + response = self.client.remove_targets(**request) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Could not remove rule targets from rule %s" % self.name) + self.changed = True + return response + + def remove_all_targets(self): + """Removes all targets on rule""" + targets = self.list_targets() + return self.remove_targets([t['id'] for t in targets]) + + def _targets_request(self, targets): + """Formats each target for the request""" + targets_request = [] + for target in targets: + target_request = scrub_none_parameters(snake_dict_to_camel_dict(target, True)) + if target_request.get('Input', None): + target_request['Input'] = _format_json(target_request['Input']) + if target_request.get('InputTransformer', None): + if target_request.get('InputTransformer').get('InputTemplate', None): + target_request['InputTransformer']['InputTemplate'] = _format_json(target_request['InputTransformer']['InputTemplate']) + if target_request.get('InputTransformer').get('InputPathsMap', None): + target_request['InputTransformer']['InputPathsMap'] = target['input_transformer']['input_paths_map'] + targets_request.append(target_request) + return targets_request + + def _snakify(self, dict): + """Converts camel case to snake case""" + return camel_dict_to_snake_dict(dict) + + +class CloudWatchEventRuleManager(object): + RULE_FIELDS = ['name', 'event_pattern', 'schedule_expression', 'description', 'role_arn'] + + def __init__(self, rule, targets): + self.rule = rule + self.targets = targets + + def ensure_present(self, enabled=True): + """Ensures the rule and targets are present and synced""" + rule_description = self.rule.describe() + if rule_description: + # Rule exists so update rule, targets and state + self._sync_rule(enabled) + self._sync_targets() + self._sync_state(enabled) + else: + # Rule does not exist, so create new rule and targets + self._create(enabled) + + def ensure_disabled(self): + """Ensures the rule and targets are present, but disabled, and synced""" + self.ensure_present(enabled=False) + + def ensure_absent(self): + """Ensures the rule and targets 
are absent""" + rule_description = self.rule.describe() + if not rule_description: + # Rule doesn't exist so don't need to delete + return + self.rule.delete() + + def fetch_aws_state(self): + """Retrieves rule and target state from AWS""" + aws_state = { + 'rule': {}, + 'targets': [], + 'changed': self.rule.changed + } + rule_description = self.rule.describe() + if not rule_description: + return aws_state + + # Don't need to include response metadata noise in response + del rule_description['response_metadata'] + + aws_state['rule'] = rule_description + aws_state['targets'].extend(self.rule.list_targets()) + return aws_state + + def _sync_rule(self, enabled=True): + """Syncs local rule state with AWS""" + if not self._rule_matches_aws(): + self.rule.put(enabled) + + def _sync_targets(self): + """Syncs local targets with AWS""" + # Identify and remove extraneous targets on AWS + target_ids_to_remove = self._remote_target_ids_to_remove() + if target_ids_to_remove: + self.rule.remove_targets(target_ids_to_remove) + + # Identify targets that need to be added or updated on AWS + targets_to_put = self._targets_to_put() + if targets_to_put: + self.rule.put_targets(targets_to_put) + + def _sync_state(self, enabled=True): + """Syncs local rule state with AWS""" + remote_state = self._remote_state() + if enabled and remote_state != 'ENABLED': + self.rule.enable() + elif not enabled and remote_state != 'DISABLED': + self.rule.disable() + + def _create(self, enabled=True): + """Creates rule and targets on AWS""" + self.rule.put(enabled) + self.rule.put_targets(self.targets) + + def _rule_matches_aws(self): + """Checks if the local rule data matches AWS""" + aws_rule_data = self.rule.describe() + + # The rule matches AWS only if all rule data fields are equal + # to their corresponding local value defined in the task + return all( + getattr(self.rule, field) == aws_rule_data.get(field, None) + for field in self.RULE_FIELDS + ) + + def _targets_to_put(self): + """Returns a list of targets that need to be updated or added remotely""" + remote_targets = self.rule.list_targets() + return [t for t in self.targets if t not in remote_targets] + + def _remote_target_ids_to_remove(self): + """Returns a list of targets that need to be removed remotely""" + target_ids = [t['id'] for t in self.targets] + remote_targets = self.rule.list_targets() + return [ + rt['id'] for rt in remote_targets if rt['id'] not in target_ids + ] + + def _remote_state(self): + """Returns the remote state from AWS""" + description = self.rule.describe() + if not description: + return + return description['state'] + + +def main(): + target_args = dict( + type='list', elements='dict', default=[], + options=dict( + id=dict(type='str', required=True), + arn=dict(type='str', required=True), + role_arn=dict(type='str'), + input=dict(type='json'), + input_path=dict(type='str'), + input_transformer=dict( + type='dict', + options=dict( + input_paths_map=dict(type='dict'), + input_template=dict(type='json'), + ), + ), + ecs_parameters=dict( + type='dict', + options=dict( + task_definition_arn=dict(type='str', required=True), + task_count=dict(type='int'), + ), + ), + ), + ) + argument_spec = dict( + name=dict(required=True), + schedule_expression=dict(), + event_pattern=dict(type='json'), + state=dict(choices=['present', 'disabled', 'absent'], + default='present'), + description=dict(), + role_arn=dict(), + targets=target_args, + ) + module = AnsibleAWSModule(argument_spec=argument_spec) + + rule_data = dict( + [(rf, module.params.get(rf)) for 
rf in CloudWatchEventRuleManager.RULE_FIELDS]
+    )
+    targets = module.params.get('targets')
+    state = module.params.get('state')
+    client = module.client('events')
+
+    cwe_rule = CloudWatchEventRule(module, client=client, **rule_data)
+    cwe_rule_manager = CloudWatchEventRuleManager(cwe_rule, targets)
+
+    if state == 'present':
+        cwe_rule_manager.ensure_present()
+    elif state == 'disabled':
+        cwe_rule_manager.ensure_disabled()
+    elif state == 'absent':
+        cwe_rule_manager.ensure_absent()
+    else:
+        module.fail_json(msg="Invalid state '{0}' provided".format(state))
+
+    module.exit_json(**cwe_rule_manager.fetch_aws_state())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group.py b/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group.py
new file mode 100644
index 000000000..ee6df826e
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group.py
@@ -0,0 +1,351 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cloudwatchlogs_log_group
+version_added: 5.0.0
+short_description: Create or delete log_group in CloudWatchLogs
+description:
+  - Create or delete log_group in CloudWatchLogs.
+  - This module was originally added to C(community.aws) in release 1.0.0.
+notes:
+  - For details of the parameters and returns see U(https://boto3.readthedocs.io/en/latest/reference/services/logs.html).
+  - Support for I(purge_tags) was added in release 4.0.0.
+author:
+  - Willian Ricardo (@willricardo)
+options:
+  state:
+    description:
+      - Whether the log group is present or absent.
+    choices: ["present", "absent"]
+    default: present
+    required: false
+    type: str
+  log_group_name:
+    description:
+      - The name of the log group.
+    required: true
+    type: str
+  kms_key_id:
+    description:
+      - The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.
+    required: false
+    type: str
+  retention:
+    description:
+      - The number of days to retain the log events in the specified log group.
+      - "Valid values are: [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]"
+      - Mutually exclusive with I(purge_retention_policy).
+    required: false
+    type: int
+  purge_retention_policy:
+    description:
+      - "Whether to purge the retention policy or not."
+      - "Mutually exclusive with I(retention) and I(overwrite)."
+    default: false
+    required: false
+    type: bool
+  overwrite:
+    description:
+      - Whether an existing log group should be overwritten on create.
+      - Mutually exclusive with I(purge_retention_policy).
+    default: false
+    required: false
+    type: bool
+extends_documentation_fragment:
+  - amazon.aws.aws
+  - amazon.aws.ec2
+  - amazon.aws.tags
+  - amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
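+
+# A minimal additional sketch showing a retention policy set at creation time;
+# the group name and the 14-day value below are illustrative, not defaults.
+- amazon.aws.cloudwatchlogs_log_group:
+    state: present
+    log_group_name: test-log-group
+    retention: 14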
+ +- amazon.aws.cloudwatchlogs_log_group: + log_group_name: test-log-group + +- amazon.aws.cloudwatchlogs_log_group: + state: present + log_group_name: test-log-group + tags: { "Name": "test-log-group", "Env" : "QA" } + +- amazon.aws.cloudwatchlogs_log_group: + state: present + log_group_name: test-log-group + tags: { "Name": "test-log-group", "Env" : "QA" } + kms_key_id: arn:aws:kms:region:account-id:key/key-id + +- amazon.aws.cloudwatchlogs_log_group: + state: absent + log_group_name: test-log-group + +''' + +RETURN = ''' +log_groups: + description: Return the list of complex objects representing log groups + returned: success + type: complex + version_added: 4.0.0 + version_added_collection: community.aws + contains: + log_group_name: + description: The name of the log group. + returned: always + type: str + creation_time: + description: The creation time of the log group. + returned: always + type: int + retention_in_days: + description: The number of days to retain the log events in the specified log group. + returned: always + type: int + metric_filter_count: + description: The number of metric filters. + returned: always + type: int + arn: + description: The Amazon Resource Name (ARN) of the log group. + returned: always + type: str + stored_bytes: + description: The number of bytes stored. + returned: always + type: str + kms_key_id: + description: The Amazon Resource Name (ARN) of the CMK to use when encrypting log data. + returned: always + type: str + tags: + description: A dictionary representing the tags on the log group. + returned: always + type: dict +''' + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + + +def create_log_group(client, log_group_name, kms_key_id, tags, retention, module): + request = {'logGroupName': log_group_name} + if kms_key_id: + request['kmsKeyId'] = kms_key_id + if tags: + request['tags'] = tags + + if module.check_mode: + module.exit_json(changed=True, msg="Would have created log group if not in check_mode.") + + try: + client.create_log_group(**request) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to create log group") + + if retention: + input_retention_policy(client=client, + log_group_name=log_group_name, + retention=retention, module=module) + + found_log_group = describe_log_group(client=client, log_group_name=log_group_name, module=module) + + if not found_log_group: + module.fail_json(msg="The aws CloudWatchLogs log group was not created. \n please try again!") + return found_log_group + + +def input_retention_policy(client, log_group_name, retention, module): + try: + permited_values = [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653] + + if retention in permited_values: + response = client.put_retention_policy(logGroupName=log_group_name, + retentionInDays=retention) + else: + delete_log_group(client=client, log_group_name=log_group_name, module=module) + module.fail_json(msg="Invalid retention value. 
Valid values are: [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]") + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to put retention policy for log group {0}".format(log_group_name)) + + +def delete_retention_policy(client, log_group_name, module): + if module.check_mode: + return True + + try: + client.delete_retention_policy(logGroupName=log_group_name) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to delete retention policy for log group {0}".format(log_group_name)) + + +def delete_log_group(client, log_group_name, module): + if module.check_mode: + module.exit_json(changed=True, msg="Would have deleted log group if not in check_mode.") + + try: + client.delete_log_group(logGroupName=log_group_name) + except is_boto3_error_code('ResourceNotFoundException'): + return {} + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Unable to delete log group {0}".format(log_group_name)) + + +def describe_log_group(client, log_group_name, module): + try: + desc_log_group = client.describe_log_groups(logGroupNamePrefix=log_group_name) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to describe log group {0}".format(log_group_name)) + + matching_logs = [log for log in desc_log_group.get('logGroups', []) if log['logGroupName'] == log_group_name] + + if not matching_logs: + return {} + + found_log_group = matching_logs[0] + + try: + tags = client.list_tags_log_group(logGroupName=log_group_name) + except is_boto3_error_code('AccessDeniedException'): + tags = {} + module.warn('Permission denied listing tags for log group {0}'.format(log_group_name)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Unable to describe tags for log group {0}".format(log_group_name)) + + found_log_group['tags'] = tags.get('tags', {}) + return found_log_group + + +def format_result(found_log_group): + # Prior to 4.0.0 we documented returning log_groups=[log_group], but returned **log_group + # Return both to avoid a breaking change. 
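+    # e.g. the snake_cased fields of the group appear both at the top level of
+    # the result and inside result['log_groups'][0], so older callers keep working.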
+ log_group = camel_dict_to_snake_dict(found_log_group, ignore_list=['tags']) + return dict(log_groups=[log_group], **log_group) + + +def ensure_tags(client, found_log_group, desired_tags, purge_tags, module): + if desired_tags is None: + return False + + group_name = module.params.get('log_group_name') + current_tags = found_log_group.get('tags', {}) + tags_to_add, tags_to_remove = compare_aws_tags(current_tags, desired_tags, purge_tags) + + if not tags_to_add and not tags_to_remove: + return False + if module.check_mode: + return True + + try: + if tags_to_remove: + client.untag_log_group(logGroupName=group_name, tags=tags_to_remove) + if tags_to_add: + client.tag_log_group(logGroupName=group_name, tags=tags_to_add) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to update tags') + + return True + + +def main(): + argument_spec = dict( + log_group_name=dict(required=True, type='str'), + state=dict(choices=['present', 'absent'], + default='present'), + kms_key_id=dict(required=False, type='str'), + tags=dict(required=False, type='dict', aliases=['resource_tags']), + purge_tags=dict(required=False, type='bool', default=True), + retention=dict(required=False, type='int'), + purge_retention_policy=dict(required=False, type='bool', default=False), + overwrite=dict(required=False, type='bool', default=False), + ) + + mutually_exclusive = [['retention', 'purge_retention_policy'], ['purge_retention_policy', 'overwrite']] + module = AnsibleAWSModule(supports_check_mode=True, argument_spec=argument_spec, mutually_exclusive=mutually_exclusive) + + try: + logs = module.client('logs') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') + + state = module.params.get('state') + changed = False + + # Determine if the log group exists + found_log_group = describe_log_group(client=logs, log_group_name=module.params['log_group_name'], module=module) + + if state == 'present': + if found_log_group: + if module.params['overwrite'] is True: + changed = True + delete_log_group(client=logs, log_group_name=module.params['log_group_name'], module=module) + found_log_group = create_log_group(client=logs, + log_group_name=module.params['log_group_name'], + kms_key_id=module.params['kms_key_id'], + tags=module.params['tags'], + retention=module.params['retention'], + module=module) + else: + changed |= ensure_tags(client=logs, + found_log_group=found_log_group, + desired_tags=module.params['tags'], + purge_tags=module.params['purge_tags'], + module=module) + if module.params['purge_retention_policy']: + if found_log_group.get('retentionInDays'): + changed = True + delete_retention_policy(client=logs, + log_group_name=module.params['log_group_name'], + module=module) + elif module.params['retention'] != found_log_group.get('retentionInDays'): + if module.params['retention'] is not None: + changed = True + input_retention_policy(client=logs, + log_group_name=module.params['log_group_name'], + retention=module.params['retention'], + module=module) + if changed: + found_log_group = describe_log_group(client=logs, + log_group_name=module.params['log_group_name'], + module=module) + + elif not found_log_group: + changed = True + found_log_group = create_log_group(client=logs, + log_group_name=module.params['log_group_name'], + kms_key_id=module.params['kms_key_id'], + tags=module.params['tags'], + retention=module.params['retention'], + module=module) + + 
result = format_result(found_log_group) + module.exit_json(changed=changed, **result) + + elif state == 'absent': + if found_log_group: + changed = True + delete_log_group(client=logs, + log_group_name=module.params['log_group_name'], + module=module) + + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group_info.py b/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group_info.py new file mode 100644 index 000000000..cb4c3808a --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group_info.py @@ -0,0 +1,139 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: cloudwatchlogs_log_group_info +version_added: 5.0.0 +short_description: Get information about log_group in CloudWatchLogs +description: + - Lists the specified log groups. You can list all your log groups or filter the results by prefix. + - This module was originally added to C(community.aws) in release 1.0.0. +author: + - Willian Ricardo (@willricardo) +options: + log_group_name: + description: + - The name or prefix of the log group to filter by. + type: str +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.boto3 +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. +- amazon.aws.cloudwatchlogs_log_group_info: + log_group_name: test-log-group +''' + +RETURN = ''' +log_groups: + description: Return the list of complex objects representing log groups + returned: success + type: complex + contains: + log_group_name: + description: The name of the log group. + returned: always + type: str + creation_time: + description: The creation time of the log group. + returned: always + type: int + retention_in_days: + description: The number of days to retain the log events in the specified log group. + returned: always + type: int + metric_filter_count: + description: The number of metric filters. + returned: always + type: int + arn: + description: The Amazon Resource Name (ARN) of the log group. + returned: always + type: str + stored_bytes: + description: The number of bytes stored. + returned: always + type: str + kms_key_id: + description: The Amazon Resource Name (ARN) of the CMK to use when encrypting log data. + returned: always + type: str + tags: + description: A dictionary representing the tags on the log group. 
+      returned: always
+      type: dict
+      version_added: 4.0.0
+      version_added_collection: community.aws
+'''
+
+try:
+    import botocore
+except ImportError:
+    pass  # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+
+
+def describe_log_group(client, log_group_name, module):
+    params = {}
+    if log_group_name:
+        params['logGroupNamePrefix'] = log_group_name
+    try:
+        paginator = client.get_paginator('describe_log_groups')
+        desc_log_group = paginator.paginate(**params).build_full_result()
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Unable to describe log group {0}".format(log_group_name))
+
+    for log_group in desc_log_group['logGroups']:
+        log_group_name = log_group['logGroupName']
+        try:
+            tags = client.list_tags_log_group(logGroupName=log_group_name)
+        except is_boto3_error_code('AccessDeniedException'):
+            tags = {}
+            module.warn('Permission denied listing tags for log group {0}'.format(log_group_name))
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
+            module.fail_json_aws(e, msg="Unable to describe tags for log group {0}".format(log_group_name))
+        log_group['tags'] = tags.get('tags', {})
+
+    return desc_log_group
+
+
+def main():
+    argument_spec = dict(
+        log_group_name=dict(),
+    )
+
+    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+    try:
+        logs = module.client('logs')
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+    desc_log_group = describe_log_group(client=logs,
+                                        log_group_name=module.params['log_group_name'],
+                                        module=module)
+    final_log_group_snake = []
+
+    for log_group in desc_log_group['logGroups']:
+        final_log_group_snake.append(camel_dict_to_snake_dict(log_group, ignore_list=['tags']))
+
+    desc_log_group_result = dict(changed=False, log_groups=final_log_group_snake)
+    module.exit_json(**desc_log_group_result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group_metric_filter.py b/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group_metric_filter.py
new file mode 100644
index 000000000..82435f4cb
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group_metric_filter.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cloudwatchlogs_log_group_metric_filter
+version_added: 5.0.0
+author:
+  - "Markus Bergholz (@markuman)"
+short_description: Manage CloudWatch log group metric filter
+description:
+  - Create, modify and delete CloudWatch log group metric filter.
+  - CloudWatch log group metric filter can be used with M(community.aws.ec2_metric_alarm).
+  - This module was originally added to C(community.aws) in release 1.0.0.
+options:
+  state:
+    description:
+      - Whether the metric filter is present or absent.
+
+ choices: ["present", "absent"] + required: true + type: str + log_group_name: + description: + - The name of the log group where the metric filter is applied on. + required: true + type: str + filter_name: + description: + - A name for the metric filter you create. + required: true + type: str + filter_pattern: + description: + - A filter pattern for extracting metric data out of ingested log events. Required when I(state=present). + type: str + metric_transformation: + description: + - A collection of information that defines how metric data gets emitted. Required when I(state=present). + type: dict + suboptions: + metric_name: + description: + - The name of the cloudWatch metric. + type: str + metric_namespace: + description: + - The namespace of the cloudWatch metric. + type: str + metric_value: + description: + - The value to publish to the cloudWatch metric when a filter pattern matches a log event. + type: str + default_value: + description: + - The value to emit when a filter pattern does not match a log event. + type: float +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.boto3 + +''' + +EXAMPLES = ''' +- name: set metric filter on log group /fluentd/testcase + amazon.aws.cloudwatchlogs_log_group_metric_filter: + log_group_name: /fluentd/testcase + filter_name: BoxFreeStorage + filter_pattern: '{($.value = *) && ($.hostname = "box")}' + state: present + metric_transformation: + metric_name: box_free_space + metric_namespace: fluentd_metrics + metric_value: "$.value" + +- name: delete metric filter on log group /fluentd/testcase + amazon.aws.cloudwatchlogs_log_group_metric_filter: + log_group_name: /fluentd/testcase + filter_name: BoxFreeStorage + state: absent +''' + +RETURN = """ +metric_filters: + description: Return the origin response value + returned: success + type: list + sample: [ + { + "default_value": 3.1415, + "metric_name": "box_free_space", + "metric_namespace": "made_with_ansible", + "metric_value": "$.value" + } + ] + +""" + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule + + +def metricTransformationHandler(metricTransformations, originMetricTransformations=None): + + if originMetricTransformations: + change = False + originMetricTransformations = camel_dict_to_snake_dict( + originMetricTransformations) + for item in ["default_value", "metric_name", "metric_namespace", "metric_value"]: + if metricTransformations.get(item) != originMetricTransformations.get(item): + change = True + else: + change = True + + defaultValue = metricTransformations.get("default_value") + if isinstance(defaultValue, int) or isinstance(defaultValue, float): + retval = [ + { + 'metricName': metricTransformations.get("metric_name"), + 'metricNamespace': metricTransformations.get("metric_namespace"), + 'metricValue': metricTransformations.get("metric_value"), + 'defaultValue': defaultValue + } + ] + else: + retval = [ + { + 'metricName': metricTransformations.get("metric_name"), + 'metricNamespace': metricTransformations.get("metric_namespace"), + 'metricValue': metricTransformations.get("metric_value"), + } + ] + + return retval, change + + +def main(): + + arg_spec = dict( + state=dict(type='str', required=True, choices=['present', 'absent']), + log_group_name=dict(type='str', required=True), + filter_name=dict(type='str', required=True), + filter_pattern=dict(type='str'), + metric_transformation=dict(type='dict', options=dict( 
+ metric_name=dict(type='str'), + metric_namespace=dict(type='str'), + metric_value=dict(type='str'), + default_value=dict(type='float') + )), + ) + + module = AnsibleAWSModule( + argument_spec=arg_spec, + supports_check_mode=True, + required_if=[('state', 'present', ['metric_transformation', 'filter_pattern'])] + ) + + log_group_name = module.params.get("log_group_name") + filter_name = module.params.get("filter_name") + filter_pattern = module.params.get("filter_pattern") + metric_transformation = module.params.get("metric_transformation") + state = module.params.get("state") + + cwl = module.client('logs') + + # check if metric filter exists + response = cwl.describe_metric_filters( + logGroupName=log_group_name, + filterNamePrefix=filter_name + ) + + if len(response.get("metricFilters")) == 1: + originMetricTransformations = response.get( + "metricFilters")[0].get("metricTransformations")[0] + originFilterPattern = response.get("metricFilters")[ + 0].get("filterPattern") + else: + originMetricTransformations = None + originFilterPattern = None + change = False + metricTransformation = None + + if state == "absent" and originMetricTransformations: + if not module.check_mode: + response = cwl.delete_metric_filter( + logGroupName=log_group_name, + filterName=filter_name + ) + change = True + metricTransformation = [camel_dict_to_snake_dict(item) for item in [originMetricTransformations]] + + elif state == "present": + metricTransformation, change = metricTransformationHandler( + metricTransformations=metric_transformation, originMetricTransformations=originMetricTransformations) + + change = change or filter_pattern != originFilterPattern + + if change: + if not module.check_mode: + response = cwl.put_metric_filter( + logGroupName=log_group_name, + filterName=filter_name, + filterPattern=filter_pattern, + metricTransformations=metricTransformation + ) + + metricTransformation = [camel_dict_to_snake_dict(item) for item in metricTransformation] + + module.exit_json(changed=change, metric_filters=metricTransformation) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_ami.py b/ansible_collections/amazon/aws/plugins/modules/ec2_ami.py new file mode 100644 index 000000000..537277c34 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_ami.py @@ -0,0 +1,820 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_ami +version_added: 1.0.0 +short_description: Create or destroy an image (AMI) in EC2 +description: + - Registers or deregisters EC2 images. +options: + instance_id: + description: + - Instance ID to create the AMI from. + type: str + name: + description: + - The name of the new AMI. + type: str + architecture: + description: + - The target architecture of the image to register. + default: "x86_64" + type: str + kernel_id: + description: + - The target kernel id of the image to register. + type: str + virtualization_type: + description: + - The virtualization type of the image to register. + default: "hvm" + type: str + root_device_name: + description: + - The root device name of the image to register. + type: str + wait: + description: + - Wait for the AMI to be in state 'available' before returning. 
+ default: false + type: bool + wait_timeout: + description: + - How long before wait gives up, in seconds. + default: 1200 + type: int + state: + description: + - Register or deregister an AMI. + default: 'present' + choices: [ "absent", "present" ] + type: str + description: + description: + - Human-readable string describing the contents and purpose of the AMI. + type: str + default: '' + no_reboot: + description: + - Flag indicating that the bundling process should not attempt to shutdown the instance before bundling. If this flag is True, the + responsibility of maintaining file system integrity is left to the owner of the instance. + default: false + type: bool + image_id: + description: + - Image ID to be deregistered. + type: str + device_mapping: + description: + - List of device hashes/dictionaries with custom configurations (same block-device-mapping parameters). + type: list + elements: dict + suboptions: + device_name: + type: str + description: + - The device name. For example C(/dev/sda). + required: true + virtual_name: + type: str + description: + - The virtual name for the device. + - See the AWS documentation for more detail U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html). + no_device: + type: bool + description: + - Suppresses the specified device included in the block device mapping of the AMI. + volume_type: + type: str + description: The volume type. Defaults to C(gp2) when not set. + delete_on_termination: + type: bool + description: Whether the device should be automatically deleted when the Instance is terminated. + snapshot_id: + type: str + description: The ID of the Snapshot. + iops: + type: int + description: When using an C(io1) I(volume_type) this sets the number of IOPS provisioned for the volume. + encrypted: + type: bool + description: Whether the volume should be encrypted. + volume_size: + aliases: ['size'] + type: int + description: The size of the volume (in GiB). + delete_snapshot: + description: + - Delete snapshots when deregistering the AMI. + default: false + type: bool + launch_permissions: + description: + - Users and groups that should be able to launch the AMI. + - Expects dictionary with a key of C(user_ids) and/or C(group_names). + - C(user_ids) should be a list of account IDs. + - C(group_name) should be a list of groups, C(all) is the only acceptable value currently. + - You must pass all desired launch permissions if you wish to modify existing launch permissions (passing just groups will remove all users). + type: dict + image_location: + description: + - The S3 location of an image to use for the AMI. + type: str + enhanced_networking: + description: + - A boolean representing whether enhanced networking with ENA is enabled or not. + type: bool + billing_products: + description: + - A list of valid billing codes. To be used with valid accounts by AWS Marketplace vendors. + type: list + elements: str + ramdisk_id: + description: + - The ID of the RAM disk. + type: str + sriov_net_support: + description: + - Set to simple to enable enhanced networking with the Intel 82599 Virtual Function interface for the AMI and any instances that you launch from the AMI. + type: str + boot_mode: + description: + - The boot mode of the AMI. + - See the AWS documentation for more detail U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-boot.html). 
+ type: str + choices: ['legacy-bios', 'uefi'] + version_added: 5.5.0 + tpm_support: + description: + - Set to v2.0 to enable Trusted Platform Module (TPM) support. + - If the image is configured for NitroTPM support, the value is v2.0 . + - Requires I(boot_mode) to be set to 'uefi'. + - Requires an instance type that is compatible with Nitro. + - Requires minimum botocore version 1.26.0. + - See the AWS documentation for more detail U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html). + type: str + version_added: 5.5.0 + uefi_data: + description: + - Base64 representation of the non-volatile UEFI variable store. + - Requires minimum botocore version 1.26.0. + - See the AWS documentation for more detail U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/uefi-secure-boot.html). + type: str + version_added: 5.5.0 +author: + - "Evan Duffield (@scicoin-project) " + - "Constantin Bugneac (@Constantin07) " + - "Ross Williams (@gunzy83) " + - "Willem van Ketwich (@wilvk) " +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags + - amazon.aws.boto3 +''' + +# Thank you to iAcquire for sponsoring development of this module. + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: Basic AMI Creation + amazon.aws.ec2_ami: + instance_id: i-xxxxxx + wait: true + name: newtest + tags: + Name: newtest + Service: TestService + +- name: Basic AMI Creation, without waiting + amazon.aws.ec2_ami: + instance_id: i-xxxxxx + wait: no + name: newtest + +- name: AMI Registration from EBS Snapshot + amazon.aws.ec2_ami: + name: newtest + state: present + architecture: x86_64 + virtualization_type: hvm + root_device_name: /dev/xvda + device_mapping: + - device_name: /dev/xvda + volume_size: 8 + snapshot_id: snap-xxxxxxxx + delete_on_termination: true + volume_type: gp2 + +- name: AMI Creation, with a custom root-device size and another EBS attached + amazon.aws.ec2_ami: + instance_id: i-xxxxxx + name: newtest + device_mapping: + - device_name: /dev/sda1 + size: XXX + delete_on_termination: true + volume_type: gp2 + - device_name: /dev/sdb + size: YYY + delete_on_termination: false + volume_type: gp2 + +- name: AMI Creation, excluding a volume attached at /dev/sdb + amazon.aws.ec2_ami: + instance_id: i-xxxxxx + name: newtest + device_mapping: + - device_name: /dev/sda1 + size: XXX + delete_on_termination: true + volume_type: gp2 + - device_name: /dev/sdb + no_device: true + +- name: AMI Creation with boot_mode and tpm_support + amazon.aws.ec2_ami: + name: newtest + state: present + architecture: x86_64 + virtualization_type: hvm + root_device_name: /dev/sda1 + device_mapping: + - device_name: /dev/sda1 + snapshot_id: "{{ snapshot_id }}" + wait: yes + region: us-east-1 + boot_mode: uefi + uefi_data: data_file.bin + tpm_support: v2.0 + +- name: Deregister/Delete AMI (keep associated snapshots) + amazon.aws.ec2_ami: + image_id: "{{ instance.image_id }}" + delete_snapshot: False + state: absent + +- name: Deregister AMI (delete associated snapshots too) + amazon.aws.ec2_ami: + image_id: "{{ instance.image_id }}" + delete_snapshot: True + state: absent + +- name: Update AMI Launch Permissions, making it public + amazon.aws.ec2_ami: + image_id: "{{ instance.image_id }}" + state: present + launch_permissions: + group_names: ['all'] + +- name: Allow AMI to be launched by another account + amazon.aws.ec2_ami: + image_id: "{{ instance.image_id }}" + state: present + launch_permissions: + user_ids: 
['123456789012'] +''' + +RETURN = ''' +architecture: + description: Architecture of image. + returned: when AMI is created or already exists + type: str + sample: "x86_64" +block_device_mapping: + description: Block device mapping associated with image. + returned: when AMI is created or already exists + type: dict + sample: { + "/dev/sda1": { + "delete_on_termination": true, + "encrypted": false, + "size": 10, + "snapshot_id": "snap-1a03b80e7", + "volume_type": "standard" + } + } +creationDate: + description: Creation date of image. + returned: when AMI is created or already exists + type: str + sample: "2015-10-15T22:43:44.000Z" +description: + description: Description of image. + returned: when AMI is created or already exists + type: str + sample: "nat-server" +hypervisor: + description: Type of hypervisor. + returned: when AMI is created or already exists + type: str + sample: "xen" +image_id: + description: ID of the image. + returned: when AMI is created or already exists + type: str + sample: "ami-1234abcd" +is_public: + description: Whether image is public. + returned: when AMI is created or already exists + type: bool + sample: false +launch_permission: + description: Permissions allowing other accounts to access the AMI. + returned: when AMI is created or already exists + type: list + sample: + - group: "all" +location: + description: Location of image. + returned: when AMI is created or already exists + type: str + sample: "123456789012/nat-server" +name: + description: AMI name of image. + returned: when AMI is created or already exists + type: str + sample: "nat-server" +ownerId: + description: Owner of image. + returned: when AMI is created or already exists + type: str + sample: "123456789012" +platform: + description: Platform of image. + returned: when AMI is created or already exists + type: str + sample: null +root_device_name: + description: Root device name of image. + returned: when AMI is created or already exists + type: str + sample: "/dev/sda1" +root_device_type: + description: Root device type of image. + returned: when AMI is created or already exists + type: str + sample: "ebs" +state: + description: State of image. + returned: when AMI is created or already exists + type: str + sample: "available" +tags: + description: A dictionary of tags assigned to image. + returned: when AMI is created or already exists + type: dict + sample: { + "Env": "devel", + "Name": "nat-server" + } +virtualization_type: + description: Image virtualization type. + returned: when AMI is created or already exists + type: str + sample: "hvm" +snapshots_deleted: + description: A list of snapshot ids deleted after deregistering image. 
+ returned: after AMI is deregistered, if I(delete_snapshot=true) + type: list + sample: [ + "snap-fbcccb8f", + "snap-cfe7cdb4" + ] +''' + +import time + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import add_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications +from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter + + +def get_block_device_mapping(image): + bdm_dict = dict() + if image is not None and image.get('block_device_mappings') is not None: + bdm = image.get('block_device_mappings') + for device in bdm: + device_name = device.get('device_name') + if 'ebs' in device: + ebs = device.get("ebs") + bdm_dict_item = { + 'size': ebs.get("volume_size"), + 'snapshot_id': ebs.get("snapshot_id"), + 'volume_type': ebs.get("volume_type"), + 'encrypted': ebs.get("encrypted"), + 'delete_on_termination': ebs.get("delete_on_termination") + } + elif 'virtual_name' in device: + bdm_dict_item = dict(virtual_name=device['virtual_name']) + bdm_dict[device_name] = bdm_dict_item + return bdm_dict + + +def get_ami_info(camel_image): + image = camel_dict_to_snake_dict(camel_image) + return dict( + image_id=image.get("image_id"), + state=image.get("state"), + architecture=image.get("architecture"), + block_device_mapping=get_block_device_mapping(image), + creationDate=image.get("creation_date"), + description=image.get("description"), + hypervisor=image.get("hypervisor"), + is_public=image.get("public"), + location=image.get("image_location"), + ownerId=image.get("owner_id"), + root_device_name=image.get("root_device_name"), + root_device_type=image.get("root_device_type"), + virtualization_type=image.get("virtualization_type"), + name=image.get("name"), + tags=boto3_tag_list_to_ansible_dict(image.get('tags')), + platform=image.get("platform"), + enhanced_networking=image.get("ena_support"), + image_owner_alias=image.get("image_owner_alias"), + image_type=image.get("image_type"), + kernel_id=image.get("kernel_id"), + product_codes=image.get("product_codes"), + ramdisk_id=image.get("ramdisk_id"), + sriov_net_support=image.get("sriov_net_support"), + state_reason=image.get("state_reason"), + launch_permissions=image.get('launch_permissions') + ) + + +def create_image(module, connection): + instance_id = module.params.get('instance_id') + name = module.params.get('name') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + description = module.params.get('description') + architecture = module.params.get('architecture') + kernel_id = module.params.get('kernel_id') + root_device_name = module.params.get('root_device_name') + virtualization_type = module.params.get('virtualization_type') + no_reboot = module.params.get('no_reboot') + device_mapping = module.params.get('device_mapping') + tags = module.params.get('tags') + launch_permissions = 
module.params.get('launch_permissions') + image_location = module.params.get('image_location') + enhanced_networking = module.params.get('enhanced_networking') + billing_products = module.params.get('billing_products') + ramdisk_id = module.params.get('ramdisk_id') + sriov_net_support = module.params.get('sriov_net_support') + boot_mode = module.params.get('boot_mode') + tpm_support = module.params.get('tpm_support') + uefi_data = module.params.get('uefi_data') + + if tpm_support and boot_mode != 'uefi': + module.fail_json(msg="To specify 'tpm_support', 'boot_mode' must be 'uefi'.") + + if module.check_mode: + image = connection.describe_images(Filters=[{'Name': 'name', 'Values': [str(name)]}]) + if not image['Images']: + module.exit_json(changed=True, msg='Would have created an AMI if not in check mode.') + else: + module.exit_json(changed=False, msg='Error registering image: AMI name is already in use by another AMI') + + try: + params = { + 'Name': name, + 'Description': description + } + + block_device_mapping = None + # Remove empty values injected by using options + if device_mapping: + block_device_mapping = [] + for device in device_mapping: + device = dict((k, v) for k, v in device.items() if v is not None) + device['Ebs'] = {} + device = rename_item_if_exists(device, 'device_name', 'DeviceName') + device = rename_item_if_exists(device, 'virtual_name', 'VirtualName') + device = rename_item_if_exists(device, 'no_device', 'NoDevice') + device = rename_item_if_exists(device, 'volume_type', 'VolumeType', 'Ebs') + device = rename_item_if_exists(device, 'snapshot_id', 'SnapshotId', 'Ebs') + device = rename_item_if_exists(device, 'delete_on_termination', 'DeleteOnTermination', 'Ebs') + device = rename_item_if_exists(device, 'size', 'VolumeSize', 'Ebs', attribute_type=int) + device = rename_item_if_exists(device, 'volume_size', 'VolumeSize', 'Ebs', attribute_type=int) + device = rename_item_if_exists(device, 'iops', 'Iops', 'Ebs') + device = rename_item_if_exists(device, 'encrypted', 'Encrypted', 'Ebs') + + # The NoDevice parameter in Boto3 is a string.
Empty string omits the device from block device mapping + # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.create_image + if 'NoDevice' in device: + if device['NoDevice'] is True: + device['NoDevice'] = "" + else: + del device['NoDevice'] + block_device_mapping.append(device) + if block_device_mapping: + params['BlockDeviceMappings'] = block_device_mapping + if instance_id: + params['InstanceId'] = instance_id + params['NoReboot'] = no_reboot + tag_spec = boto3_tag_specifications(tags, types=['image', 'snapshot']) + if tag_spec: + params['TagSpecifications'] = tag_spec + image_id = connection.create_image(aws_retry=True, **params).get('ImageId') + else: + if architecture: + params['Architecture'] = architecture + if virtualization_type: + params['VirtualizationType'] = virtualization_type + if image_location: + params['ImageLocation'] = image_location + if enhanced_networking: + params['EnaSupport'] = enhanced_networking + if billing_products: + params['BillingProducts'] = billing_products + if ramdisk_id: + params['RamdiskId'] = ramdisk_id + if sriov_net_support: + params['SriovNetSupport'] = sriov_net_support + if kernel_id: + params['KernelId'] = kernel_id + if root_device_name: + params['RootDeviceName'] = root_device_name + if boot_mode: + params['BootMode'] = boot_mode + if tpm_support: + params['TpmSupport'] = tpm_support + if uefi_data: + params['UefiData'] = uefi_data + image_id = connection.register_image(aws_retry=True, **params).get('ImageId') + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Error registering image") + + if wait: + delay = 15 + max_attempts = wait_timeout // delay + waiter = get_waiter(connection, 'image_available') + waiter.wait(ImageIds=[image_id], WaiterConfig=dict(Delay=delay, MaxAttempts=max_attempts)) + + if tags and 'TagSpecifications' not in params: + image_info = get_image_by_id(module, connection, image_id) + add_ec2_tags(connection, module, image_id, tags) + if image_info and image_info.get('BlockDeviceMappings'): + for mapping in image_info.get('BlockDeviceMappings'): + # We can only tag Ebs volumes + if 'Ebs' not in mapping: + continue + add_ec2_tags(connection, module, mapping.get('Ebs').get('SnapshotId'), tags) + + if launch_permissions: + try: + params = dict(Attribute='LaunchPermission', ImageId=image_id, LaunchPermission=dict(Add=list())) + for group_name in launch_permissions.get('group_names', []): + params['LaunchPermission']['Add'].append(dict(Group=group_name)) + for user_id in launch_permissions.get('user_ids', []): + params['LaunchPermission']['Add'].append(dict(UserId=str(user_id))) + if params['LaunchPermission']['Add']: + connection.modify_image_attribute(aws_retry=True, **params) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Error setting launch permissions for image %s" % image_id) + + module.exit_json(msg="AMI creation operation complete.", changed=True, + **get_ami_info(get_image_by_id(module, connection, image_id))) + + +def deregister_image(module, connection): + image_id = module.params.get('image_id') + delete_snapshot = module.params.get('delete_snapshot') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + image = get_image_by_id(module, connection, image_id) + + if image is None: + module.exit_json(changed=False) + + # Get all associated snapshot ids before deregistering image otherwise this information 
becomes unavailable. + snapshots = [] + if 'BlockDeviceMappings' in image: + for mapping in image.get('BlockDeviceMappings'): + snapshot_id = mapping.get('Ebs', {}).get('SnapshotId') + if snapshot_id is not None: + snapshots.append(snapshot_id) + + # When trying to re-deregister an already deregistered image it doesn't raise an exception, it just returns an object without image attributes. + if 'ImageId' in image: + if module.check_mode: + module.exit_json(changed=True, msg='Would have deregistered AMI if not in check mode.') + try: + connection.deregister_image(aws_retry=True, ImageId=image_id) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Error deregistering image") + else: + module.exit_json(msg="Image %s has already been deregistered." % image_id, changed=False) + + image = get_image_by_id(module, connection, image_id) + wait_timeout = time.time() + wait_timeout + + while wait and wait_timeout > time.time() and image is not None: + image = get_image_by_id(module, connection, image_id) + time.sleep(3) + + if wait and wait_timeout <= time.time(): + module.fail_json(msg="Timed out waiting for image to be deregistered.") + + exit_params = {'msg': "AMI deregister operation complete.", 'changed': True} + + if delete_snapshot: + for snapshot_id in snapshots: + try: + connection.delete_snapshot(aws_retry=True, SnapshotId=snapshot_id) + # Don't error out if root volume snapshot was already deregistered as part of deregister_image + except is_boto3_error_code('InvalidSnapshot.NotFound'): + pass + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg='Failed to delete snapshot.') + exit_params['snapshots_deleted'] = snapshots + + module.exit_json(**exit_params) + + +def update_image(module, connection, image_id): + launch_permissions = module.params.get('launch_permissions') + image = get_image_by_id(module, connection, image_id) + if image is None: + module.fail_json(msg="Image %s does not exist" % image_id, changed=False) + changed = False + + if launch_permissions is not None: + current_permissions = image['LaunchPermissions'] + + current_users = set(permission['UserId'] for permission in current_permissions if 'UserId' in permission) + desired_users = set(str(user_id) for user_id in launch_permissions.get('user_ids', [])) + current_groups = set(permission['Group'] for permission in current_permissions if 'Group' in permission) + desired_groups = set(launch_permissions.get('group_names', [])) + + to_add_users = desired_users - current_users + to_remove_users = current_users - desired_users + to_add_groups = desired_groups - current_groups + to_remove_groups = current_groups - desired_groups + + to_add = [dict(Group=group) for group in to_add_groups] + [dict(UserId=user_id) for user_id in to_add_users] + to_remove = [dict(Group=group) for group in to_remove_groups] + [dict(UserId=user_id) for user_id in to_remove_users] + + if to_add or to_remove: + try: + if not module.check_mode: + connection.modify_image_attribute(aws_retry=True, + ImageId=image_id, Attribute='launchPermission', + LaunchPermission=dict(Add=to_add, Remove=to_remove)) + changed = True + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Error updating launch permissions of image %s" % image_id) + + desired_tags = module.params.get('tags') + if desired_tags is not None: + changed |= 
ensure_ec2_tags(connection, module, image_id, tags=desired_tags, purge_tags=module.params.get('purge_tags')) + + description = module.params.get('description') + if description and description != image['Description']: + try: + if not module.check_mode: + connection.modify_image_attribute(aws_retry=True, Attribute='Description', ImageId=image_id, Description=dict(Value=description)) + changed = True + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Error setting description for image %s" % image_id) + + if changed: + if module.check_mode: + module.exit_json(changed=True, msg='Would have updated AMI if not in check mode.') + module.exit_json(msg="AMI updated.", changed=True, + **get_ami_info(get_image_by_id(module, connection, image_id))) + else: + module.exit_json(msg="AMI not updated.", changed=False, + **get_ami_info(get_image_by_id(module, connection, image_id))) + + +def get_image_by_id(module, connection, image_id): + try: + try: + images_response = connection.describe_images(aws_retry=True, ImageIds=[image_id]) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Error retrieving image %s" % image_id) + images = images_response.get('Images') + no_images = len(images) + if no_images == 0: + return None + if no_images == 1: + result = images[0] + try: + result['LaunchPermissions'] = connection.describe_image_attribute(aws_retry=True, Attribute='launchPermission', + ImageId=image_id)['LaunchPermissions'] + result['ProductCodes'] = connection.describe_image_attribute(aws_retry=True, Attribute='productCodes', + ImageId=image_id)['ProductCodes'] + except is_boto3_error_code('InvalidAMIID.Unavailable'): + pass + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Error retrieving image attributes for image %s" % image_id) + return result + module.fail_json(msg="Invalid number of images (%s) found for image_id: %s."
% (str(len(images)), image_id)) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Error retrieving image by image_id") + + +def rename_item_if_exists(dict_object, attribute, new_attribute, child_node=None, attribute_type=None): + new_item = dict_object.get(attribute) + if new_item is not None: + if attribute_type is not None: + new_item = attribute_type(new_item) + if child_node is None: + dict_object[new_attribute] = new_item + else: + dict_object[child_node][new_attribute] = new_item + dict_object.pop(attribute) + return dict_object + + +def main(): + mapping_options = dict( + device_name=dict(type='str', required=True), + virtual_name=dict(type='str'), + no_device=dict(type='bool'), + volume_type=dict(type='str'), + delete_on_termination=dict(type='bool'), + snapshot_id=dict(type='str'), + iops=dict(type='int'), + encrypted=dict(type='bool'), + volume_size=dict(type='int', aliases=['size']), + ) + argument_spec = dict( + instance_id=dict(), + image_id=dict(), + architecture=dict(default='x86_64'), + kernel_id=dict(), + virtualization_type=dict(default='hvm'), + root_device_name=dict(), + delete_snapshot=dict(default=False, type='bool'), + name=dict(), + wait=dict(type='bool', default=False), + wait_timeout=dict(default=1200, type='int'), + description=dict(default=''), + no_reboot=dict(default=False, type='bool'), + state=dict(default='present', choices=['present', 'absent']), + device_mapping=dict(type='list', elements='dict', options=mapping_options), + launch_permissions=dict(type='dict'), + image_location=dict(), + enhanced_networking=dict(type='bool'), + billing_products=dict(type='list', elements='str',), + ramdisk_id=dict(), + sriov_net_support=dict(), + tags=dict(type='dict', aliases=['resource_tags']), + purge_tags=dict(type='bool', default=True), + boot_mode=dict(type='str', choices=['legacy-bios', 'uefi']), + tpm_support=dict(type='str'), + uefi_data=dict(type='str'), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=[ + ['state', 'absent', ['image_id']], + ], + supports_check_mode=True, + ) + + # Using a required_one_of=[['name', 'image_id']] overrides the message that should be provided by + # the required_if for state=absent, so check manually instead + if not any([module.params['image_id'], module.params['name']]): + module.fail_json(msg="one of the following is required: name, image_id") + + if any([module.params['tpm_support'], module.params['uefi_data']]): + module.require_botocore_at_least('1.26.0', reason='required for ec2.register_image with tpm_support or uefi_data') + + connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + + if module.params.get('state') == 'absent': + deregister_image(module, connection) + elif module.params.get('state') == 'present': + if module.params.get('image_id'): + update_image(module, connection, module.params.get('image_id')) + if not module.params.get('instance_id') and not module.params.get('device_mapping'): + module.fail_json(msg="The parameters instance_id or device_mapping (register from EBS snapshot) are required for a new image.") + create_image(module, connection) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_ami_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_ami_info.py new file mode 100644 index 000000000..3d67e89de --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_ami_info.py @@ -0,0 +1,287 @@ +#!/usr/bin/python 
+# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_ami_info +version_added: 1.0.0 +short_description: Gather information about ec2 AMIs +description: + - Gather information about ec2 AMIs +author: + - Prasad Katti (@prasadkatti) +options: + image_ids: + description: One or more image IDs. + aliases: [image_id] + type: list + elements: str + default: [] + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. + - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html) for possible filters. + - Filter names and values are case sensitive. + type: dict + default: {} + owners: + description: + - Filter the images by the owner. Valid options are an AWS account ID, self, + or an AWS owner alias ( amazon | aws-marketplace | microsoft ). + aliases: [owner] + type: list + elements: str + default: [] + executable_users: + description: + - Filter images by users with explicit launch permissions. Valid options are an AWS account ID, self, or all (public AMIs). + aliases: [executable_user] + type: list + elements: str + default: [] + describe_image_attributes: + description: + - Describe attributes (like launchPermission) of the images found. + default: false + type: bool + +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 +- amazon.aws.boto3 +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: gather information about an AMI using ami-id + amazon.aws.ec2_ami_info: + image_ids: ami-5b488823 + +- name: gather information about all AMIs with tag key Name and value webapp + amazon.aws.ec2_ami_info: + filters: + "tag:Name": webapp + +- name: gather information about an AMI with 'AMI Name' equal to foobar + amazon.aws.ec2_ami_info: + filters: + name: foobar + +- name: gather information about Ubuntu 17.04 AMIs published by Canonical (099720109477) + amazon.aws.ec2_ami_info: + owners: 099720109477 + filters: + name: "ubuntu/images/ubuntu-zesty-17.04-*" +''' + +RETURN = ''' +images: + description: A list of images. + returned: always + type: list + elements: dict + contains: + architecture: + description: The architecture of the image. + returned: always + type: str + sample: x86_64 + block_device_mappings: + description: Any block device mapping entries. + returned: always + type: list + elements: dict + contains: + device_name: + description: The device name exposed to the instance. + returned: always + type: str + sample: /dev/sda1 + ebs: + description: EBS volumes + returned: always + type: complex + creation_date: + description: The date and time the image was created. + returned: always + type: str + sample: '2017-10-16T19:22:13.000Z' + description: + description: The description of the AMI. + returned: always + type: str + sample: '' + ena_support: + description: Whether enhanced networking with ENA is enabled. + returned: always + type: bool + sample: true + hypervisor: + description: The hypervisor type of the image. + returned: always + type: str + sample: xen + image_id: + description: The ID of the AMI. + returned: always + type: str + sample: ami-5b466623 + image_location: + description: The location of the AMI. 
+ returned: always + type: str + sample: 123456789012/Webapp + image_type: + description: The type of image. + returned: always + type: str + sample: machine + launch_permissions: + description: A list of AWS accounts that may launch the AMI. + returned: When the image is owned by the calling account and I(describe_image_attributes=true). + type: list + elements: dict + contains: + group: + description: A value of 'all' means the AMI is public. + type: str + user_id: + description: An AWS account ID with permissions to launch the AMI. + type: str + sample: [{"group": "all"}, {"user_id": "123456789012"}] + name: + description: The name of the AMI that was provided during image creation. + returned: always + type: str + sample: Webapp + owner_id: + description: The AWS account ID of the image owner. + returned: always + type: str + sample: '123456789012' + public: + description: Whether the image has public launch permissions. + returned: always + type: bool + sample: true + root_device_name: + description: The device name of the root device. + returned: always + type: str + sample: /dev/sda1 + root_device_type: + description: The type of root device used by the AMI. + returned: always + type: str + sample: ebs + sriov_net_support: + description: Whether enhanced networking is enabled. + returned: always + type: str + sample: simple + state: + description: The current state of the AMI. + returned: always + type: str + sample: available + tags: + description: Any tags assigned to the image. + returned: always + type: dict + virtualization_type: + description: The type of virtualization of the AMI. + returned: always + type: str + sample: hvm +''' + +try: + from botocore.exceptions import ClientError, BotoCoreError +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict + + +def list_ec2_images(ec2_client, module): + + image_ids = module.params.get("image_ids") + owners = module.params.get("owners") + executable_users = module.params.get("executable_users") + filters = module.params.get("filters") + owner_param = [] + + # describe_images is *very* slow if you pass the `Owners` + # param (unless it's self), for some reason. + # Converting the owners to filters and removing from the + # owners param greatly speeds things up.
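+ # For example, owners=['123456789012', 'self', 'amazon'] (illustrative values) ends up as + # Owners=['self'] plus the filters owner-id=['123456789012'] and owner-alias=['amazon'], + # built by the loop below.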
+ # Implementation based on aioue's suggestion in #24886 + for owner in owners: + if owner.isdigit(): + if 'owner-id' not in filters: + filters['owner-id'] = list() + filters['owner-id'].append(owner) + elif owner == 'self': + # self not a valid owner-alias filter (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html) + owner_param.append(owner) + else: + if 'owner-alias' not in filters: + filters['owner-alias'] = list() + filters['owner-alias'].append(owner) + + filters = ansible_dict_to_boto3_filter_list(filters) + + try: + images = ec2_client.describe_images(aws_retry=True, ImageIds=image_ids, Filters=filters, Owners=owner_param, + ExecutableUsers=executable_users) + images = [camel_dict_to_snake_dict(image) for image in images["Images"]] + except (ClientError, BotoCoreError) as err: + module.fail_json_aws(err, msg="error describing images") + for image in images: + try: + image['tags'] = boto3_tag_list_to_ansible_dict(image.get('tags', [])) + if module.params.get("describe_image_attributes"): + launch_permissions = ec2_client.describe_image_attribute(aws_retry=True, Attribute='launchPermission', + ImageId=image['image_id'])['LaunchPermissions'] + image['launch_permissions'] = [camel_dict_to_snake_dict(perm) for perm in launch_permissions] + except is_boto3_error_code('AuthFailure'): + # describing launch permissions of images owned by others is not permitted, but shouldn't cause failures + pass + except (ClientError, BotoCoreError) as err: # pylint: disable=duplicate-except + module.fail_json_aws(err, 'Failed to describe AMI') + + images.sort(key=lambda e: e.get('creation_date', '')) # it may be possible that creation_date does not always exist + module.exit_json(images=images) + + +def main(): + + argument_spec = dict( + image_ids=dict(default=[], type='list', elements='str', aliases=['image_id']), + filters=dict(default={}, type='dict'), + owners=dict(default=[], type='list', elements='str', aliases=['owner']), + executable_users=dict(default=[], type='list', elements='str', aliases=['executable_user']), + describe_image_attributes=dict(default=False, type='bool') + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + ec2_client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + + list_ec2_images(ec2_client, module) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_eip.py b/ansible_collections/amazon/aws/plugins/modules/ec2_eip.py new file mode 100644 index 000000000..4c3094b98 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_eip.py @@ -0,0 +1,666 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_eip +version_added: 5.0.0 +short_description: manages EC2 elastic IP (EIP) addresses. +description: + - This module can allocate or release an EIP. + - This module can associate/disassociate an EIP with instances or network interfaces. + - This module was originally added to C(community.aws) in release 1.0.0. +options: + device_id: + description: + - The id of the device for the EIP. Can be an EC2 Instance id or Elastic Network Interface (ENI) id. + - The I(instance_id) alias has been deprecated and will be removed after 2022-12-01. 
+ required: false + aliases: [ instance_id ] + type: str + public_ip: + description: + - The IP address of a previously allocated EIP. + - When I(state=present) and device is specified, the EIP is associated with the device. + - When I(state=absent) and device is specified, the EIP is disassociated from the device. + aliases: [ ip ] + type: str + state: + description: + - When C(state=present), allocate an EIP or associate an existing EIP with a device. + - When C(state=absent), disassociate the EIP from the device and optionally release it. + choices: ['present', 'absent'] + default: present + type: str + in_vpc: + description: + - Allocate an EIP inside a VPC or not. + - Required if specifying an ENI with I(device_id). + default: false + type: bool + reuse_existing_ip_allowed: + description: + - Reuse an EIP that is not associated to a device (when available), instead of allocating a new one. + default: false + type: bool + release_on_disassociation: + description: + - Whether or not to automatically release the EIP when it is disassociated. + default: false + type: bool + private_ip_address: + description: + - The primary or secondary private IP address to associate with the Elastic IP address. + type: str + allow_reassociation: + description: + - Specify this option to allow an Elastic IP address that is already associated with another + network interface or instance to be re-associated with the specified instance or interface. + default: false + type: bool + tag_name: + description: + - When I(reuse_existing_ip_allowed=true), supplement with this option to only reuse + an Elastic IP if it is tagged with I(tag_name). + type: str + tag_value: + description: + - Supplements I(tag_name) but also checks that the value of the tag provided in I(tag_name) matches I(tag_value). + type: str + public_ipv4_pool: + description: + - Allocates the new Elastic IP from the provided public IPv4 pool (BYOIP) + only applies to newly allocated Elastic IPs, isn't validated when I(reuse_existing_ip_allowed=true). + type: str +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags + - amazon.aws.boto3 + +author: + - "Rick Mendes (@rickmendes) " +notes: + - There may be a delay between the time the EIP is assigned and when + the cloud instance is reachable via the new address. Use wait_for and + pause to delay further playbook execution until the instance is reachable, + if necessary. + - This module returns multiple changed statuses on disassociation or release. + It returns an overall status based on any changes occurring. It also returns + individual changed statuses for disassociation and release. + - Support for I(tags) and I(purge_tags) was added in release 2.1.0. +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
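+ +# An illustrative combination, not one of the module's original examples: the placeholder +# instance ID and address below are reused from the examples that follow. With +# release_on_disassociation, a single task disassociates the EIP and releases the allocation. +- name: disassociate an elastic IP from an instance and release it + amazon.aws.ec2_eip: + device_id: i-1212f003 + ip: 93.184.216.119 + state: absent + release_on_disassociation: true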
+ +- name: associate an elastic IP with an instance + amazon.aws.ec2_eip: + device_id: i-1212f003 + ip: 93.184.216.119 + +- name: associate an elastic IP with a device + amazon.aws.ec2_eip: + device_id: eni-c8ad70f3 + ip: 93.184.216.119 + +- name: associate an elastic IP with a device and allow reassociation + amazon.aws.ec2_eip: + device_id: eni-c8ad70f3 + public_ip: 93.184.216.119 + allow_reassociation: true + +- name: disassociate an elastic IP from an instance + amazon.aws.ec2_eip: + device_id: i-1212f003 + ip: 93.184.216.119 + state: absent + +- name: disassociate an elastic IP with a device + amazon.aws.ec2_eip: + device_id: eni-c8ad70f3 + ip: 93.184.216.119 + state: absent + +- name: allocate a new elastic IP and associate it with an instance + amazon.aws.ec2_eip: + device_id: i-1212f003 + +- name: allocate a new elastic IP without associating it to anything + amazon.aws.ec2_eip: + state: present + register: eip + +- name: output the IP + ansible.builtin.debug: + msg: "Allocated IP is {{ eip.public_ip }}" + +- name: provision new instances with ec2 + amazon.aws.ec2: + keypair: mykey + instance_type: c1.medium + image: ami-40603AD1 + wait: true + group: webserver + count: 3 + register: ec2 + +- name: associate new elastic IPs with each of the instances + amazon.aws.ec2_eip: + device_id: "{{ item }}" + loop: "{{ ec2.instance_ids }}" + +- name: allocate a new elastic IP inside a VPC in us-west-2 + amazon.aws.ec2_eip: + region: us-west-2 + in_vpc: true + register: eip + +- name: output the IP + ansible.builtin.debug: + msg: "Allocated IP inside a VPC is {{ eip.public_ip }}" + +- name: allocate eip - reuse unallocated ips (if found) with FREE tag + amazon.aws.ec2_eip: + region: us-east-1 + in_vpc: true + reuse_existing_ip_allowed: true + tag_name: FREE + +- name: allocate eip - reuse unallocated ips if tag reserved is nope + amazon.aws.ec2_eip: + region: us-east-1 + in_vpc: true + reuse_existing_ip_allowed: true + tag_name: reserved + tag_value: nope + +- name: allocate new eip - from servers given ipv4 pool + amazon.aws.ec2_eip: + region: us-east-1 + in_vpc: true + public_ipv4_pool: ipv4pool-ec2-0588c9b75a25d1a02 + +- name: allocate eip - from a given pool (if no free addresses where dev-servers tag is dynamic) + amazon.aws.ec2_eip: + region: us-east-1 + in_vpc: true + reuse_existing_ip_allowed: true + tag_name: dev-servers + public_ipv4_pool: ipv4pool-ec2-0588c9b75a25d1a02 + +- name: allocate eip from pool - check if tag reserved_for exists and value is our hostname + amazon.aws.ec2_eip: + region: us-east-1 + in_vpc: true + reuse_existing_ip_allowed: true + tag_name: reserved_for + tag_value: "{{ inventory_hostname }}" + public_ipv4_pool: ipv4pool-ec2-0588c9b75a25d1a02 +''' + +RETURN = ''' +allocation_id: + description: allocation_id of the elastic ip + returned: on success + type: str + sample: eipalloc-51aa3a6c +public_ip: + description: an elastic ip address + returned: on success + type: str + sample: 52.88.159.209 +''' + +try: + import botocore.exceptions +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags + + +def 
associate_ip_and_device(ec2, module, address, private_ip_address, device_id, allow_reassociation, check_mode, is_instance=True): + if address_is_associated_with_device(ec2, module, address, device_id, is_instance): + return {'changed': False} + + # If we're in check mode, nothing else to do + if not check_mode: + if is_instance: + try: + params = dict( + InstanceId=device_id, + AllowReassociation=allow_reassociation, + ) + if private_ip_address: + params['PrivateIpAddress'] = private_ip_address + if address['Domain'] == 'vpc': + params['AllocationId'] = address['AllocationId'] + else: + params['PublicIp'] = address['PublicIp'] + res = ec2.associate_address(aws_retry=True, **params) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + msg = "Couldn't associate Elastic IP address with instance '{0}'".format(device_id) + module.fail_json_aws(e, msg=msg) + else: + params = dict( + NetworkInterfaceId=device_id, + AllocationId=address['AllocationId'], + AllowReassociation=allow_reassociation, + ) + + if private_ip_address: + params['PrivateIpAddress'] = private_ip_address + + try: + res = ec2.associate_address(aws_retry=True, **params) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + msg = "Couldn't associate Elastic IP address with network interface '{0}'".format(device_id) + module.fail_json_aws(e, msg=msg) + if not res: + module.fail_json(msg='Association failed.') + + return {'changed': True} + + +def disassociate_ip_and_device(ec2, module, address, device_id, check_mode, is_instance=True): + if not address_is_associated_with_device(ec2, module, address, device_id, is_instance): + return {'changed': False} + + # If we're in check mode, nothing else to do + if not check_mode: + try: + if address['Domain'] == 'vpc': + res = ec2.disassociate_address( + AssociationId=address['AssociationId'], aws_retry=True + ) + else: + res = ec2.disassociate_address( + PublicIp=address['PublicIp'], aws_retry=True + ) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Disassociation of Elastic IP failed") + + return {'changed': True} + + +@AWSRetry.jittered_backoff() +def find_address(ec2, module, public_ip, device_id, is_instance=True): + """ Find an existing Elastic IP address """ + filters = [] + kwargs = {} + + if public_ip: + kwargs["PublicIps"] = [public_ip] + elif device_id: + if is_instance: + filters.append({"Name": 'instance-id', "Values": [device_id]}) + else: + filters.append({'Name': 'network-interface-id', "Values": [device_id]}) + + if len(filters) > 0: + kwargs["Filters"] = filters + elif len(filters) == 0 and public_ip is None: + return None + + try: + addresses = ec2.describe_addresses(**kwargs) + except is_boto3_error_code('InvalidAddress.NotFound') as e: + # If we're releasing and we can't find it, it's already gone...
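+ # For state=present a missing PublicIp is a hard failure, reported below.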
+ if module.params.get('state') == 'absent': + module.exit_json(changed=False, disassociated=False, released=False) + module.fail_json_aws(e, msg="Couldn't obtain list of existing Elastic IP addresses") + + addresses = addresses["Addresses"] + if len(addresses) == 1: + return addresses[0] + elif len(addresses) > 1: + msg = "Found more than one address using args {0}. ".format(kwargs) + msg += "Addresses found: {0}".format(addresses) + module.fail_json(msg=msg) + + +def address_is_associated_with_device(ec2, module, address, device_id, is_instance=True): + """ Check if the elastic IP is currently associated with the device """ + address = find_address(ec2, module, address["PublicIp"], device_id, is_instance) + if address: + if is_instance: + if "InstanceId" in address and address["InstanceId"] == device_id: + return address + else: + if "NetworkInterfaceId" in address and address["NetworkInterfaceId"] == device_id: + return address + return False + + +def allocate_address(ec2, module, domain, reuse_existing_ip_allowed, check_mode, tag_dict=None, public_ipv4_pool=None): + """ Allocate a new elastic IP address (when needed) and return it """ + if not domain: + domain = 'standard' + + if reuse_existing_ip_allowed: + filters = [] + filters.append({'Name': 'domain', "Values": [domain]}) + + if tag_dict is not None: + filters += ansible_dict_to_boto3_filter_list(tag_dict) + + try: + all_addresses = ec2.describe_addresses(Filters=filters, aws_retry=True) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Couldn't obtain list of existing Elastic IP addresses") + + all_addresses = all_addresses["Addresses"] + + if domain == 'vpc': + unassociated_addresses = [a for a in all_addresses + if not a.get('AssociationId', None)] + else: + unassociated_addresses = [a for a in all_addresses + if not a.get('InstanceId')] + if unassociated_addresses: + return unassociated_addresses[0], False + + if public_ipv4_pool: + return allocate_address_from_pool(ec2, module, domain, check_mode, public_ipv4_pool), True + + try: + if check_mode: + return None, True + result = ec2.allocate_address(Domain=domain, aws_retry=True), True + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Couldn't allocate Elastic IP address") + return result + + +def release_address(ec2, module, address, check_mode): + """ Release a previously allocated elastic IP address """ + + # If we're in check mode, nothing else to do + if not check_mode: + try: + result = ec2.release_address(AllocationId=address['AllocationId'], aws_retry=True) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Couldn't release Elastic IP address") + + return {'changed': True} + + +@AWSRetry.jittered_backoff() +def describe_eni_with_backoff(ec2, module, device_id): + try: + return ec2.describe_network_interfaces(NetworkInterfaceIds=[device_id]) + except is_boto3_error_code('InvalidNetworkInterfaceID.NotFound') as e: + module.fail_json_aws(e, msg="Couldn't get list of network interfaces.") + + +def find_device(ec2, module, device_id, is_instance=True): + """ Attempt to find the EC2 instance or network interface and return it """ + + if is_instance: + try: + paginator = ec2.get_paginator('describe_instances') + reservations = list(paginator.paginate(InstanceIds=[device_id]).search('Reservations[]')) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) 
as e: + module.fail_json_aws(e, msg="Couldn't get list of instances") + + if len(reservations) == 1: + instances = reservations[0]['Instances'] + if len(instances) == 1: + return instances[0] + else: + try: + # describe_network_interfaces returns the full response; pull out the interface list + interfaces = describe_eni_with_backoff(ec2, module, device_id).get('NetworkInterfaces', []) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Couldn't get list of network interfaces.") + if len(interfaces) == 1: + return interfaces[0] + + +def ensure_present(ec2, module, domain, address, private_ip_address, device_id, + reuse_existing_ip_allowed, allow_reassociation, check_mode, is_instance=True): + changed = False + + # Allocate an EIP if we were not given one and none was found + if not address: + if check_mode: + return {'changed': True} + + address, changed = allocate_address(ec2, module, domain, reuse_existing_ip_allowed, check_mode) + + if device_id: + # Look up the device before associating the address with it + if is_instance: + instance = find_device(ec2, module, device_id) + if reuse_existing_ip_allowed: + if instance['VpcId'] and len(instance['VpcId']) > 0 and domain is None: + msg = "You must set 'in_vpc' to true to associate an instance with an existing IP in a VPC" + module.fail_json(msg=msg) + + # Associate address object (provided or allocated) with instance + assoc_result = associate_ip_and_device( + ec2, module, address, private_ip_address, device_id, allow_reassociation, + check_mode + ) + else: + instance = find_device(ec2, module, device_id, is_instance=False) + # Associate address object (provided or allocated) with the network interface + assoc_result = associate_ip_and_device( + ec2, module, address, private_ip_address, device_id, allow_reassociation, + check_mode, is_instance=False + ) + + changed = changed or assoc_result['changed'] + + return {'changed': changed, 'public_ip': address['PublicIp'], 'allocation_id': address['AllocationId']} + + +def ensure_absent(ec2, module, address, device_id, check_mode, is_instance=True): + if not address: + return {'changed': False} + + # disassociating address from instance + if device_id: + if is_instance: + return disassociate_ip_and_device( + ec2, module, address, device_id, check_mode + ) + else: + return disassociate_ip_and_device( + ec2, module, address, device_id, check_mode, is_instance=False + ) + # releasing address + else: + return release_address(ec2, module, address, check_mode) + + +def allocate_address_from_pool(ec2, module, domain, check_mode, public_ipv4_pool): + # type: (EC2Connection, AnsibleAWSModule, str, bool, str) -> Address + """ Wraps allocate_address to support BYOIP (allocation from a public IPv4 pool) """ + if check_mode: + return None + + params = {} + + if domain is not None: + params['Domain'] = domain + + if public_ipv4_pool is not None: + params['PublicIpv4Pool'] = public_ipv4_pool + + try: + result = ec2.allocate_address(aws_retry=True, **params) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Couldn't allocate Elastic IP address") + return result + + +def generate_tag_dict(module, tag_name, tag_value): + # type: (AnsibleAWSModule, str, str) -> Optional[Dict] + """ Generates a dictionary to be passed as a filter to Amazon """ + if tag_name and not tag_value: + if tag_name.startswith('tag:'): + tag_name = tag_name[len('tag:'):] # str.strip() would remove characters, not the prefix + return {'tag-key': tag_name} + + elif tag_name and tag_value: + if not tag_name.startswith('tag:'): + tag_name = 'tag:' + tag_name + return {tag_name: 
tag_value} + + elif tag_value and not tag_name: + module.fail_json(msg="parameters are required together: ('tag_name', 'tag_value')") + + +def main(): + argument_spec = dict( + device_id=dict(required=False, aliases=['instance_id'], + deprecated_aliases=[dict(name='instance_id', + date='2022-12-01', + collection_name='amazon.aws')]), + public_ip=dict(required=False, aliases=['ip']), + state=dict(required=False, default='present', + choices=['present', 'absent']), + in_vpc=dict(required=False, type='bool', default=False), + reuse_existing_ip_allowed=dict(required=False, type='bool', + default=False), + release_on_disassociation=dict(required=False, type='bool', default=False), + allow_reassociation=dict(type='bool', default=False), + private_ip_address=dict(), + tags=dict(required=False, type='dict', aliases=['resource_tags']), + purge_tags=dict(required=False, type='bool', default=True), + tag_name=dict(), + tag_value=dict(), + public_ipv4_pool=dict() + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_by={ + 'private_ip_address': ['device_id'], + }, + ) + + ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + + device_id = module.params.get('device_id') + instance_id = module.params.get('instance_id') + public_ip = module.params.get('public_ip') + private_ip_address = module.params.get('private_ip_address') + state = module.params.get('state') + in_vpc = module.params.get('in_vpc') + domain = 'vpc' if in_vpc else None + reuse_existing_ip_allowed = module.params.get('reuse_existing_ip_allowed') + release_on_disassociation = module.params.get('release_on_disassociation') + allow_reassociation = module.params.get('allow_reassociation') + tag_name = module.params.get('tag_name') + tag_value = module.params.get('tag_value') + public_ipv4_pool = module.params.get('public_ipv4_pool') + tags = module.params.get('tags') + purge_tags = module.params.get('purge_tags') + + if instance_id: + is_instance = True + device_id = instance_id + else: + if device_id and device_id.startswith('i-'): + is_instance = True + elif device_id: + if device_id.startswith('eni-') and not in_vpc: + module.fail_json(msg="If you are specifying an ENI, in_vpc must be true") + is_instance = False + + # Tags for *searching* for an EIP. 
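+ # For example (illustrative values): tag_name='reserved_for' with tag_value='web01' yields + # {'tag:reserved_for': 'web01'}, while tag_name alone yields {'tag-key': 'reserved_for'}.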
+ tag_dict = generate_tag_dict(module, tag_name, tag_value) + + try: + if device_id: + address = find_address(ec2, module, public_ip, device_id, is_instance=is_instance) + else: + address = find_address(ec2, module, public_ip, None) + + if state == 'present': + if device_id: + result = ensure_present( + ec2, module, domain, address, private_ip_address, device_id, + reuse_existing_ip_allowed, allow_reassociation, + module.check_mode, is_instance=is_instance + ) + if 'allocation_id' not in result: + # Don't check tags on check_mode here - no EIP to pass through + module.exit_json(**result) + else: + if address: + result = { + 'changed': False, + 'public_ip': address['PublicIp'], + 'allocation_id': address['AllocationId'] + } + else: + address, changed = allocate_address( + ec2, module, domain, reuse_existing_ip_allowed, + module.check_mode, tag_dict, public_ipv4_pool + ) + if address: + result = { + 'changed': changed, + 'public_ip': address['PublicIp'], + 'allocation_id': address['AllocationId'] + } + else: + # Don't check tags on check_mode here - no EIP to pass through + result = { + 'changed': changed + } + module.exit_json(**result) + + result['changed'] |= ensure_ec2_tags( + ec2, module, result['allocation_id'], + resource_type='elastic-ip', tags=tags, purge_tags=purge_tags) + else: + if device_id: + disassociated = ensure_absent( + ec2, module, address, device_id, module.check_mode, is_instance=is_instance + ) + + if release_on_disassociation and disassociated['changed']: + released = release_address(ec2, module, address, module.check_mode) + result = { + 'changed': True, + 'disassociated': disassociated['changed'], + 'released': released['changed'] + } + else: + result = { + 'changed': disassociated['changed'], + 'disassociated': disassociated['changed'], + 'released': False + } + else: + released = release_address(ec2, module, address, module.check_mode) + result = { + 'changed': released['changed'], + 'disassociated': False, + 'released': released['changed'] + } + + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_eip_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_eip_info.py new file mode 100644 index 000000000..c94f164f5 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_eip_info.py @@ -0,0 +1,147 @@ +#!/usr/bin/python +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_eip_info +version_added: 5.0.0 +short_description: List EC2 EIP details +description: + - List details of EC2 Elastic IP addresses. + - This module was originally added to C(community.aws) in release 1.0.0. +author: + - "Brad Macpherson (@iiibrad)" +options: + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and filter + value. See U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-addresses.html#options) + for possible filters. Filter names and values are case sensitive.
+ required: false + default: {} + type: dict +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.boto3 + +''' + +EXAMPLES = r''' +# Note: These examples do not set authentication details or the AWS region, +# see the AWS Guide for details. + +- name: List all EIP addresses in the current region. + amazon.aws.ec2_eip_info: + register: regional_eip_addresses + +- name: List all EIP addresses for a VM. + amazon.aws.ec2_eip_info: + filters: + instance-id: i-123456789 + register: my_vm_eips + +- ansible.builtin.debug: + msg: "{{ my_vm_eips.addresses | selectattr('private_ip_address', 'equalto', '10.0.0.5') }}" + +- name: List all EIP addresses for several VMs. + amazon.aws.ec2_eip_info: + filters: + instance-id: + - i-123456789 + - i-987654321 + register: my_vms_eips + +- name: List all EIP addresses using the 'Name' tag as a filter. + amazon.aws.ec2_eip_info: + filters: + tag:Name: www.example.com + register: my_vms_eips + +- name: List all EIP addresses using the Allocation-id as a filter + amazon.aws.ec2_eip_info: + filters: + allocation-id: eipalloc-64de1b01 + register: my_vms_eips + +# Set the variable eip_alloc to the value of the first allocation_id +# and set the variable my_pub_ip to the value of the first public_ip +- ansible.builtin.set_fact: + eip_alloc: my_vms_eips.addresses[0].allocation_id + my_pub_ip: my_vms_eips.addresses[0].public_ip + +''' + + +RETURN = ''' +addresses: + description: Properties of all Elastic IP addresses matching the provided filters. Each element is a dict with all the information related to an EIP. + returned: on success + type: list + sample: [{ + "allocation_id": "eipalloc-64de1b01", + "association_id": "eipassoc-0fe9ce90d6e983e97", + "domain": "vpc", + "instance_id": "i-01020cfeb25b0c84f", + "network_interface_id": "eni-02fdeadfd4beef9323b", + "network_interface_owner_id": "0123456789", + "private_ip_address": "10.0.0.1", + "public_ip": "54.81.104.1", + "tags": { + "Name": "test-vm-54.81.104.1" + } + }] + +''' + +try: + from botocore.exceptions import (BotoCoreError, ClientError) +except ImportError: + pass # caught by imported AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict + + +def get_eips_details(module): + connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + filters = module.params.get("filters") + try: + response = connection.describe_addresses( + aws_retry=True, + Filters=ansible_dict_to_boto3_filter_list(filters) + ) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws( + e, + msg="Error retrieving EIPs") + + addresses = camel_dict_to_snake_dict(response)['addresses'] + for address in addresses: + if 'tags' in address: + address['tags'] = boto3_tag_list_to_ansible_dict(address['tags']) + return addresses + + +def main(): + module = AnsibleAWSModule( + argument_spec=dict( + filters=dict(type='dict', default={}) + ), + supports_check_mode=True + ) + + module.exit_json(changed=False, addresses=get_eips_details(module)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_eni.py 
b/ansible_collections/amazon/aws/plugins/modules/ec2_eni.py new file mode 100644 index 000000000..46c90d542 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_eni.py @@ -0,0 +1,876 @@ +#!/usr/bin/python +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_eni +version_added: 1.0.0 +short_description: Create and optionally attach an Elastic Network Interface (ENI) to an instance +description: + - Create and optionally attach an Elastic Network Interface (ENI) to an instance. + - If I(eni_id) or I(private_ip) is provided, the existing ENI (if any) will be modified. + - The I(attached) parameter controls the attachment status of the network interface. +author: + - "Rob White (@wimnat)" + - "Mike Healey (@healem)" +options: + eni_id: + description: + - The ID of the ENI (to modify). + - If I(eni_id=None) and I(state=present), a new ENI will be created. + type: str + instance_id: + description: + - Instance ID that you wish to attach ENI to. + type: str + private_ip_address: + description: + - Private IP address. + type: str + subnet_id: + description: + - ID of subnet in which to create the ENI. + type: str + description: + description: + - Optional description of the ENI. + type: str + security_groups: + description: + - List of security groups associated with the interface. + - Ignored when I(state=absent). + type: list + elements: str + default: [] + state: + description: + - Create or delete ENI. + default: present + choices: [ 'present', 'absent' ] + type: str + device_index: + description: + - The index of the device for the network interface attachment on the instance. + default: 0 + type: int + attached: + description: + - Specifies if network interface should be attached or detached from instance. If omitted, attachment status + won't change + type: bool + force_detach: + description: + - Force detachment of the interface. This applies either when explicitly detaching the interface by setting I(instance_id=None) + or when deleting an interface with I(state=absent). + default: false + type: bool + delete_on_termination: + description: + - Delete the interface when the instance it is attached to is terminated. You can only specify this flag when the + interface is being modified, not on creation. + required: false + type: bool + source_dest_check: + description: + - By default, interfaces perform source/destination checks. NAT instances however need this check to be disabled. + You can only specify this flag when the interface is being modified, not on creation. + required: false + type: bool + secondary_private_ip_addresses: + description: + - A list of IP addresses to assign as secondary IP addresses to the network interface. + - This option is mutually exclusive of I(secondary_private_ip_address_count). + required: false + type: list + elements: str + purge_secondary_private_ip_addresses: + description: + - To be used with I(secondary_private_ip_addresses) to determine whether or not to remove any secondary IP addresses other than those specified. + - Set I(secondary_private_ip_addresses=[]) to purge all secondary addresses. + default: false + type: bool + secondary_private_ip_address_count: + description: + - The number of secondary IP addresses to assign to the network interface. + - This option is mutually exclusive of I(secondary_private_ip_addresses). 
+ required: false + type: int + allow_reassignment: + description: + - Indicates whether to allow an IP address that is already assigned to another network interface or instance + to be reassigned to the specified network interface. + required: false + default: false + type: bool + name: + description: + - Name for the ENI. This will create a tag with the key C(Name) and the value assigned here. + - This can be used in conjunction with I(subnet_id) as another means of identifying a network interface. + - AWS does not enforce unique C(Name) tags, so duplicate names are possible if you configure it that way. + If that is the case, you will need to provide other identifying information such as I(private_ip_address) or I(eni_id). + required: false + type: str +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags + - amazon.aws.boto3 +notes: + - This module identifies an ENI based on either the I(eni_id), a combination of I(private_ip_address) and I(subnet_id), + or a combination of I(instance_id) and I(device_index). Any of these options will let you specify a particular ENI. + - Support for I(tags) and I(purge_tags) was added in release 1.3.0. +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Create an ENI. As no security group is defined, ENI will be created in default security group +- amazon.aws.ec2_eni: + private_ip_address: 172.31.0.20 + subnet_id: subnet-xxxxxxxx + state: present + +# Create an ENI and attach it to an instance +- amazon.aws.ec2_eni: + instance_id: i-xxxxxxx + device_index: 1 + private_ip_address: 172.31.0.20 + subnet_id: subnet-xxxxxxxx + state: present + +# Create an ENI with two secondary addresses +- amazon.aws.ec2_eni: + subnet_id: subnet-xxxxxxxx + state: present + secondary_private_ip_address_count: 2 + +# Assign a secondary IP address to an existing ENI +# This will purge any existing IPs +- amazon.aws.ec2_eni: + subnet_id: subnet-xxxxxxxx + eni_id: eni-yyyyyyyy + state: present + secondary_private_ip_addresses: + - 172.16.1.1 + +# Remove any secondary IP addresses from an existing ENI +- amazon.aws.ec2_eni: + subnet_id: subnet-xxxxxxxx + eni_id: eni-yyyyyyyy + state: present + secondary_private_ip_address_count: 0 + +# Destroy an ENI, detaching it from any instance if necessary +- amazon.aws.ec2_eni: + eni_id: eni-xxxxxxx + force_detach: true + state: absent + +# Update an ENI +- amazon.aws.ec2_eni: + eni_id: eni-xxxxxxx + description: "My new description" + state: present + +# Update an ENI using name and subnet_id +- amazon.aws.ec2_eni: + name: eni-20 + subnet_id: subnet-xxxxxxx + description: "My new description" + state: present + +# Update an ENI identifying it by private_ip_address and subnet_id +- amazon.aws.ec2_eni: + subnet_id: subnet-xxxxxxx + private_ip_address: 172.16.1.1 + description: "My new description" + +# Detach an ENI from an instance +- amazon.aws.ec2_eni: + eni_id: eni-xxxxxxx + instance_id: None + state: present + +### Delete an interface on termination +# First create the interface +- amazon.aws.ec2_eni: + instance_id: i-xxxxxxx + device_index: 1 + private_ip_address: 172.31.0.20 + subnet_id: subnet-xxxxxxxx + state: present + register: eni + +# Modify the interface to enable the delete_on_termination flag +- amazon.aws.ec2_eni: + eni_id: "{{ eni.interface.id }}" + delete_on_termination: true + +''' + + +RETURN = ''' +interface: + description: Network interface attributes + returned: when state != absent + type: complex + contains: + 
description: + description: interface description + type: str + sample: Firewall network interface + groups: + description: list of security groups + type: list + elements: dict + sample: [ { "sg-f8a8a9da": "default" } ] + id: + description: network interface id + type: str + sample: "eni-1d889198" + mac_address: + description: interface's physical address + type: str + sample: "00:00:5E:00:53:23" + name: + description: The name of the ENI + type: str + sample: "my-eni-20" + owner_id: + description: aws account id + type: str + sample: 812381371 + private_ip_address: + description: primary ip address of this interface + type: str + sample: 10.20.30.40 + private_ip_addresses: + description: list of all private ip addresses associated to this interface + type: list + elements: dict + sample: [ { "primary_address": true, "private_ip_address": "10.20.30.40" } ] + source_dest_check: + description: value of source/dest check flag + type: bool + sample: True + status: + description: network interface status + type: str + sample: "pending" + subnet_id: + description: ID of the VPC subnet the interface is bound to + type: str + sample: subnet-b0a0393c + tags: + description: The dictionary of tags associated with the ENI + type: dict + sample: { "Name": "my-eni", "group": "Finance" } + vpc_id: + description: ID of the VPC this network interface is bound to + type: str + sample: vpc-9a9a9da + +''' + +import time +from ipaddress import ip_address +from ipaddress import ip_network + +try: + import botocore.exceptions +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter + + +def get_eni_info(interface): + + # Private addresses + private_addresses = [] + if "PrivateIpAddresses" in interface: + for ip in interface["PrivateIpAddresses"]: + private_addresses.append({'private_ip_address': ip["PrivateIpAddress"], 'primary_address': ip["Primary"]}) + + groups = {} + if "Groups" in interface: + for group in interface["Groups"]: + groups[group["GroupId"]] = group["GroupName"] + + interface_info = {'id': interface.get("NetworkInterfaceId"), + 'subnet_id': interface.get("SubnetId"), + 'vpc_id': interface.get("VpcId"), + 'description': interface.get("Description"), + 'owner_id': interface.get("OwnerId"), + 'status': interface.get("Status"), + 'mac_address': interface.get("MacAddress"), + 'private_ip_address': interface.get("PrivateIpAddress"), + 'source_dest_check': interface.get("SourceDestCheck"), + 'groups': groups, + 'private_ip_addresses': private_addresses + } + + if "TagSet" in interface: + tags = boto3_tag_list_to_ansible_dict(interface["TagSet"]) + if "Name" in tags: + interface_info["name"] = tags["Name"] + interface_info["tags"] = tags + + if "Attachment" in interface: + interface_info['attachment'] = { + 'attachment_id': interface["Attachment"].get("AttachmentId"), + 'instance_id': 
interface["Attachment"].get("InstanceId"), + 'device_index': interface["Attachment"].get("DeviceIndex"), + 'status': interface["Attachment"].get("Status"), + 'attach_time': interface["Attachment"].get("AttachTime"), + 'delete_on_termination': interface["Attachment"].get("DeleteOnTermination"), + } + + return interface_info + + +def correct_ips(connection, ip_list, module, eni_id): + eni = describe_eni(connection, module, eni_id) + private_addresses = set() + if "PrivateIpAddresses" in eni: + for ip in eni["PrivateIpAddresses"]: + private_addresses.add(ip["PrivateIpAddress"]) + + ip_set = set(ip_list) + + return ip_set.issubset(private_addresses) + + +def absent_ips(connection, ip_list, module, eni_id): + eni = describe_eni(connection, module, eni_id) + private_addresses = set() + if "PrivateIpAddresses" in eni: + for ip in eni["PrivateIpAddresses"]: + private_addresses.add(ip["PrivateIpAddress"]) + + ip_set = set(ip_list) + + return not ip_set.union(private_addresses) + + +def correct_ip_count(connection, ip_count, module, eni_id): + eni = describe_eni(connection, module, eni_id) + private_addresses = set() + if "PrivateIpAddresses" in eni: + for ip in eni["PrivateIpAddresses"]: + private_addresses.add(ip["PrivateIpAddress"]) + + if len(private_addresses) == ip_count: + return True + else: + return False + + +def wait_for(function_pointer, *args): + max_wait = 30 + interval_time = 3 + current_wait = 0 + while current_wait < max_wait: + time.sleep(interval_time) + current_wait += interval_time + if function_pointer(*args): + break + + +def create_eni(connection, vpc_id, module): + + instance_id = module.params.get("instance_id") + attached = module.params.get("attached") + if instance_id == 'None': + instance_id = None + device_index = module.params.get("device_index") + subnet_id = module.params.get('subnet_id') + private_ip_address = module.params.get('private_ip_address') + description = module.params.get('description') + security_groups = get_ec2_security_group_ids_from_names( + module.params.get('security_groups'), + connection, + vpc_id=vpc_id, + boto3=True + ) + secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses") + secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count") + changed = False + + tags = module.params.get("tags") or dict() + name = module.params.get("name") + # Make sure that the 'name' parameter sets the Name tag + if name: + tags['Name'] = name + + try: + args = {"SubnetId": subnet_id} + if private_ip_address: + args["PrivateIpAddress"] = private_ip_address + if description: + args["Description"] = description + if len(security_groups) > 0: + args["Groups"] = security_groups + if tags: + args["TagSpecifications"] = boto3_tag_specifications(tags, types='network-interface') + + # check if provided private_ip_address is within the subnet's address range + if private_ip_address: + cidr_block = connection.describe_subnets(SubnetIds=[str(subnet_id)])['Subnets'][0]['CidrBlock'] + valid_private_ip = ip_address(private_ip_address) in ip_network(cidr_block) + if not valid_private_ip: + module.fail_json(changed=False, msg="Error: cannot create ENI - Address does not fall within the subnet's address range.") + if module.check_mode: + module.exit_json(changed=True, msg="Would have created ENI if not in check mode.") + + eni_dict = connection.create_network_interface(aws_retry=True, **args) + eni = eni_dict["NetworkInterface"] + # Once we have an ID make sure we're always modifying the same object + eni_id = 
eni["NetworkInterfaceId"] + get_waiter(connection, 'network_interface_available').wait(NetworkInterfaceIds=[eni_id]) + + if attached and instance_id is not None: + try: + connection.attach_network_interface( + aws_retry=True, + InstanceId=instance_id, + DeviceIndex=device_index, + NetworkInterfaceId=eni["NetworkInterfaceId"] + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError): + connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id) + raise + get_waiter(connection, 'network_interface_attached').wait(NetworkInterfaceIds=[eni_id]) + + if secondary_private_ip_address_count is not None: + try: + connection.assign_private_ip_addresses( + aws_retry=True, + NetworkInterfaceId=eni["NetworkInterfaceId"], + SecondaryPrivateIpAddressCount=secondary_private_ip_address_count + ) + wait_for(correct_ip_count, connection, secondary_private_ip_address_count, module, eni_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError): + connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id) + raise + + if secondary_private_ip_addresses is not None: + try: + connection.assign_private_ip_addresses( + NetworkInterfaceId=eni["NetworkInterfaceId"], + PrivateIpAddresses=secondary_private_ip_addresses + ) + wait_for(correct_ips, connection, secondary_private_ip_addresses, module, eni_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError): + connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id) + raise + + # Refresh the eni data + eni = describe_eni(connection, module, eni_id) + changed = True + + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws( + e, + "Failed to create eni {0} for {1} in {2} with {3}".format(name, subnet_id, vpc_id, private_ip_address) + ) + + module.exit_json(changed=changed, interface=get_eni_info(eni)) + + +def modify_eni(connection, module, eni): + + instance_id = module.params.get("instance_id") + attached = module.params.get("attached") + device_index = module.params.get("device_index") + description = module.params.get('description') + security_groups = module.params.get('security_groups') + source_dest_check = module.params.get("source_dest_check") + delete_on_termination = module.params.get("delete_on_termination") + secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses") + purge_secondary_private_ip_addresses = module.params.get("purge_secondary_private_ip_addresses") + secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count") + allow_reassignment = module.params.get("allow_reassignment") + changed = False + tags = module.params.get("tags") + name = module.params.get("name") + purge_tags = module.params.get("purge_tags") + + eni = uniquely_find_eni(connection, module, eni) + eni_id = eni["NetworkInterfaceId"] + + try: + if description is not None: + if "Description" not in eni or eni["Description"] != description: + if not module.check_mode: + connection.modify_network_interface_attribute( + aws_retry=True, + NetworkInterfaceId=eni_id, + Description={'Value': description} + ) + changed = True + if len(security_groups) > 0: + groups = get_ec2_security_group_ids_from_names(security_groups, connection, vpc_id=eni["VpcId"], boto3=True) + if sorted(get_sec_group_list(eni["Groups"])) != sorted(groups): + if not module.check_mode: + connection.modify_network_interface_attribute( + aws_retry=True, + NetworkInterfaceId=eni_id, + 
Groups=groups + ) + changed = True + if source_dest_check is not None: + if "SourceDestCheck" not in eni or eni["SourceDestCheck"] != source_dest_check: + if not module.check_mode: + connection.modify_network_interface_attribute( + aws_retry=True, + NetworkInterfaceId=eni_id, + SourceDestCheck={'Value': source_dest_check} + ) + changed = True + if delete_on_termination is not None and "Attachment" in eni: + if eni["Attachment"]["DeleteOnTermination"] is not delete_on_termination: + if not module.check_mode: + connection.modify_network_interface_attribute( + aws_retry=True, + NetworkInterfaceId=eni_id, + Attachment={'AttachmentId': eni["Attachment"]["AttachmentId"], + 'DeleteOnTermination': delete_on_termination} + ) + if delete_on_termination: + waiter = "network_interface_delete_on_terminate" + else: + waiter = "network_interface_no_delete_on_terminate" + get_waiter(connection, waiter).wait(NetworkInterfaceIds=[eni_id]) + changed = True + + current_secondary_addresses = [] + if "PrivateIpAddresses" in eni: + current_secondary_addresses = [i["PrivateIpAddress"] for i in eni["PrivateIpAddresses"] if not i["Primary"]] + + if secondary_private_ip_addresses is not None: + secondary_addresses_to_remove = list(set(current_secondary_addresses) - set(secondary_private_ip_addresses)) + if secondary_addresses_to_remove and purge_secondary_private_ip_addresses: + if not module.check_mode: + connection.unassign_private_ip_addresses( + aws_retry=True, + NetworkInterfaceId=eni_id, + PrivateIpAddresses=secondary_addresses_to_remove, + ) + wait_for(absent_ips, connection, secondary_addresses_to_remove, module, eni_id) + changed = True + secondary_addresses_to_add = list(set(secondary_private_ip_addresses) - set(current_secondary_addresses)) + if secondary_addresses_to_add: + if not module.check_mode: + connection.assign_private_ip_addresses( + aws_retry=True, + NetworkInterfaceId=eni_id, + PrivateIpAddresses=secondary_addresses_to_add, + AllowReassignment=allow_reassignment + ) + wait_for(correct_ips, connection, secondary_addresses_to_add, module, eni_id) + changed = True + + if secondary_private_ip_address_count is not None: + current_secondary_address_count = len(current_secondary_addresses) + if secondary_private_ip_address_count > current_secondary_address_count: + if not module.check_mode: + connection.assign_private_ip_addresses( + aws_retry=True, + NetworkInterfaceId=eni_id, + SecondaryPrivateIpAddressCount=(secondary_private_ip_address_count - current_secondary_address_count), + AllowReassignment=allow_reassignment + ) + wait_for(correct_ip_count, connection, secondary_private_ip_address_count, module, eni_id) + changed = True + elif secondary_private_ip_address_count < current_secondary_address_count: + # How many of these addresses do we want to remove + if not module.check_mode: + secondary_addresses_to_remove_count = current_secondary_address_count - secondary_private_ip_address_count + connection.unassign_private_ip_addresses( + aws_retry=True, + NetworkInterfaceId=eni_id, + PrivateIpAddresses=current_secondary_addresses[:secondary_addresses_to_remove_count] + ) + wait_for(correct_ip_count, connection, secondary_private_ip_address_count, module, eni_id) + changed = True + + if attached is True: + if "Attachment" in eni and eni["Attachment"]["InstanceId"] != instance_id: + if not module.check_mode: + detach_eni(connection, eni, module) + connection.attach_network_interface( + aws_retry=True, + InstanceId=instance_id, + DeviceIndex=device_index, + 
NetworkInterfaceId=eni_id, + ) + get_waiter(connection, 'network_interface_attached').wait(NetworkInterfaceIds=[eni_id]) + changed = True + if "Attachment" not in eni: + if not module.check_mode: + connection.attach_network_interface( + aws_retry=True, + InstanceId=instance_id, + DeviceIndex=device_index, + NetworkInterfaceId=eni_id, + ) + get_waiter(connection, 'network_interface_attached').wait(NetworkInterfaceIds=[eni_id]) + changed = True + + elif attached is False: + changed |= detach_eni(connection, eni, module) + get_waiter(connection, 'network_interface_available').wait(NetworkInterfaceIds=[eni_id]) + + changed |= manage_tags(connection, module, eni, name, tags, purge_tags) + + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, "Failed to modify eni {0}".format(eni_id)) + + eni = describe_eni(connection, module, eni_id) + if module.check_mode and changed: + module.exit_json(changed=changed, msg="Would have modified ENI: {0} if not in check mode".format(eni['NetworkInterfaceId'])) + module.exit_json(changed=changed, interface=get_eni_info(eni)) + + +def _wait_for_detach(connection, module, eni_id): + try: + get_waiter(connection, 'network_interface_available').wait( + NetworkInterfaceIds=[eni_id], + WaiterConfig={'Delay': 5, 'MaxAttempts': 80}, + ) + except botocore.exceptions.WaiterError as e: + module.fail_json_aws(e, "Timeout waiting for ENI {0} to detach".format(eni_id)) + + +def delete_eni(connection, module): + + eni = uniquely_find_eni(connection, module) + if not eni: + module.exit_json(changed=False) + + if module.check_mode: + module.exit_json(changed=True, msg="Would have deleted ENI if not in check mode.") + + eni_id = eni["NetworkInterfaceId"] + force_detach = module.params.get("force_detach") + + try: + if force_detach is True: + if "Attachment" in eni: + connection.detach_network_interface( + aws_retry=True, + AttachmentId=eni["Attachment"]["AttachmentId"], + Force=True, + ) + _wait_for_detach(connection, module, eni_id) + connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id) + changed = True + else: + connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id) + changed = True + + module.exit_json(changed=changed) + except is_boto3_error_code('InvalidNetworkInterfaceID.NotFound'): + module.exit_json(changed=False) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, "Failure during delete of {0}".format(eni_id)) + + +def detach_eni(connection, eni, module): + + eni_id = eni["NetworkInterfaceId"] + + force_detach = module.params.get("force_detach") + if "Attachment" in eni: + # Only report a change in check mode when there is actually an attachment to remove + if module.check_mode: + module.exit_json(changed=True, msg="Would have detached ENI if not in check mode.") + connection.detach_network_interface( + aws_retry=True, + AttachmentId=eni["Attachment"]["AttachmentId"], + Force=force_detach, + ) + _wait_for_detach(connection, module, eni_id) + return True + + return False + + +def describe_eni(connection, module, eni_id): + try: + eni_result = connection.describe_network_interfaces(aws_retry=True, NetworkInterfaceIds=[eni_id]) + if eni_result["NetworkInterfaces"]: + return eni_result["NetworkInterfaces"][0] + else: + return None + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, "Failed to describe eni with id: {0}".format(eni_id)) + + +def uniquely_find_eni(connection, module, eni=None): + + if eni: + 
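# Clarifying note: callers that already hold an eni dict (e.g. modify_eni) pass it in here;
# re-resolving it by its ID keeps all subsequent modifications pinned to the same object.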
# In the case of create, eni_id will not be a param but we can still get the eni_id after creation + if "NetworkInterfaceId" in eni: + eni_id = eni["NetworkInterfaceId"] + else: + eni_id = None + else: + eni_id = module.params.get("eni_id") + + private_ip_address = module.params.get('private_ip_address') + subnet_id = module.params.get('subnet_id') + instance_id = module.params.get('instance_id') + device_index = module.params.get('device_index') + attached = module.params.get('attached') + name = module.params.get("name") + + filters = [] + + # proceed only if we're unequivocally specifying an ENI + if eni_id is None and private_ip_address is None and (instance_id is None and device_index is None): + return None + + if eni_id: + filters.append({'Name': 'network-interface-id', + 'Values': [eni_id]}) + + if private_ip_address and subnet_id and not filters: + filters.append({'Name': 'private-ip-address', + 'Values': [private_ip_address]}) + filters.append({'Name': 'subnet-id', + 'Values': [subnet_id]}) + + if not attached and instance_id and device_index and not filters: + filters.append({'Name': 'attachment.instance-id', + 'Values': [instance_id]}) + filters.append({'Name': 'attachment.device-index', + 'Values': [str(device_index)]}) + + if name and subnet_id and not filters: + filters.append({'Name': 'tag:Name', + 'Values': [name]}) + filters.append({'Name': 'subnet-id', + 'Values': [subnet_id]}) + + if not filters: + return None + + try: + eni_result = connection.describe_network_interfaces(aws_retry=True, Filters=filters)["NetworkInterfaces"] + if len(eni_result) == 1: + return eni_result[0] + else: + return None + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, "Failed to find unique eni with filters: {0}".format(filters)) + + return None + + +def get_sec_group_list(groups): + + # Build list of remote security groups + remote_security_groups = [] + for group in groups: + remote_security_groups.append(group["GroupId"]) + + return remote_security_groups + + +def _get_vpc_id(connection, module, subnet_id): + + try: + subnets = connection.describe_subnets(aws_retry=True, SubnetIds=[subnet_id]) + return subnets["Subnets"][0]["VpcId"] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, "Failed to get vpc_id for {0}".format(subnet_id)) + + +def manage_tags(connection, module, eni, name, tags, purge_tags): + # Only purge tags when a tags dict was actually supplied + if tags is None: + purge_tags = False + tags = {} + + if name: + tags['Name'] = name + + eni_id = eni['NetworkInterfaceId'] + + changed = ensure_ec2_tags(connection, module, eni_id, tags=tags, purge_tags=purge_tags) + return changed + + +def main(): + argument_spec = dict( + eni_id=dict(default=None, type='str'), + instance_id=dict(default=None, type='str'), + private_ip_address=dict(type='str'), + subnet_id=dict(type='str'), + description=dict(type='str'), + security_groups=dict(default=[], type='list', elements='str'), + device_index=dict(default=0, type='int'), + state=dict(default='present', choices=['present', 'absent']), + force_detach=dict(default=False, type='bool'), + source_dest_check=dict(default=None, type='bool'), + delete_on_termination=dict(default=None, type='bool'), + secondary_private_ip_addresses=dict(default=None, type='list', elements='str'), + purge_secondary_private_ip_addresses=dict(default=False, type='bool'), + secondary_private_ip_address_count=dict(default=None, type='int'), + 
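# Clarifying note: 'attached' below only expresses the desired attachment state; pairing
# attached=True with an instance_id is enforced by the required_if rule declared further down.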
allow_reassignment=dict(default=False, type='bool'), + attached=dict(default=None, type='bool'), + name=dict(default=None, type='str'), + tags=dict(type='dict', aliases=['resource_tags']), + purge_tags=dict(default=True, type='bool'), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['secondary_private_ip_addresses', 'secondary_private_ip_address_count'] + ], + required_if=([ + ('attached', True, ['instance_id']), + ('purge_secondary_private_ip_addresses', True, ['secondary_private_ip_addresses']) + ]), + supports_check_mode=True, + ) + + retry_decorator = AWSRetry.jittered_backoff( + catch_extra_error_codes=['IncorrectState'], + ) + connection = module.client('ec2', retry_decorator=retry_decorator) + state = module.params.get("state") + + if state == 'present': + eni = uniquely_find_eni(connection, module) + if eni is None: + subnet_id = module.params.get("subnet_id") + if subnet_id is None: + module.fail_json(msg='subnet_id is required when creating a new ENI') + + vpc_id = _get_vpc_id(connection, module, subnet_id) + create_eni(connection, vpc_id, module) + else: + modify_eni(connection, module, eni) + + elif state == 'absent': + delete_eni(connection, module) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_eni_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_eni_info.py new file mode 100644 index 000000000..6eb24c22f --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_eni_info.py @@ -0,0 +1,299 @@ +#!/usr/bin/python +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_eni_info +version_added: 1.0.0 +short_description: Gather information about EC2 ENI interfaces in AWS +description: + - Gather information about EC2 ENI interfaces in AWS. +author: + - "Rob White (@wimnat)" +options: + eni_id: + description: + - The ID of the ENI. + - This option is mutually exclusive of I(filters). + type: str + version_added: 1.3.0 + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. + See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNetworkInterfaces.html) for possible filters. + - This option is mutually exclusive of I(eni_id). + type: dict +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.boto3 +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Gather information about all ENIs +- amazon.aws.ec2_eni_info: + +# Gather information about a particular ENI +- amazon.aws.ec2_eni_info: + filters: + network-interface-id: eni-xxxxxxx + +''' + +RETURN = ''' +network_interfaces: + description: List of matching elastic network interfaces. + returned: always + type: complex + contains: + association: + description: Info of associated elastic IP (EIP). + returned: When an ENI is associated with an EIP + type: dict + sample: { + allocation_id: "eipalloc-5sdf123", + association_id: "eipassoc-8sdf123", + ip_owner_id: "123456789012", + public_dns_name: "ec2-52-1-0-63.compute-1.amazonaws.com", + public_ip: "52.1.0.63" + } + attachment: + description: Info about attached ec2 instance. 
+ returned: When an ENI is attached to an ec2 instance + type: dict + sample: { + attach_time: "2017-08-05T15:25:47+00:00", + attachment_id: "eni-attach-149d21234", + delete_on_termination: false, + device_index: 1, + instance_id: "i-15b8d3cadbafa1234", + instance_owner_id: "123456789012", + status: "attached" + } + availability_zone: + description: Availability zone of ENI. + returned: always + type: str + sample: "us-east-1b" + description: + description: Description text for ENI. + returned: always + type: str + sample: "My favourite network interface" + groups: + description: List of attached security groups. + returned: always + type: list + sample: [ + { + group_id: "sg-26d0f1234", + group_name: "my_ec2_security_group" + } + ] + id: + description: The id of the ENI (alias for network_interface_id). + returned: always + type: str + sample: "eni-392fsdf" + interface_type: + description: Type of the network interface. + returned: always + type: str + sample: "interface" + ipv6_addresses: + description: List of IPv6 addresses for this interface. + returned: always + type: list + sample: [] + mac_address: + description: MAC address of the network interface. + returned: always + type: str + sample: "0a:f8:10:2f:ab:a1" + name: + description: The Name tag of the ENI, often displayed in the AWS UIs as Name. + returned: When a Name tag has been set + type: str + version_added: 1.3.0 + network_interface_id: + description: The id of the ENI. + returned: always + type: str + sample: "eni-392fsdf" + owner_id: + description: AWS account id of the owner of the ENI. + returned: always + type: str + sample: "123456789012" + private_dns_name: + description: Private DNS name for the ENI. + returned: always + type: str + sample: "ip-172-16-1-180.ec2.internal" + private_ip_address: + description: Private IP address for the ENI. + returned: always + type: str + sample: "172.16.1.180" + private_ip_addresses: + description: List of private IP addresses attached to the ENI. + returned: always + type: list + sample: [] + requester_id: + description: The ID of the entity that launched the ENI. + returned: always + type: str + sample: "AIDA12345EXAMPLE54321" + requester_managed: + description: Indicates whether the network interface is being managed by an AWS service. + returned: always + type: bool + sample: false + source_dest_check: + description: Indicates whether the network interface performs source/destination checking. + returned: always + type: bool + sample: false + status: + description: Indicates if the network interface is attached to an instance or not. + returned: always + type: str + sample: "in-use" + subnet_id: + description: Subnet ID the ENI is in. + returned: always + type: str + sample: "subnet-7bbf01234" + tags: + description: Dictionary of tags added to the ENI. + returned: always + type: dict + sample: {} + version_added: 1.3.0 + tag_set: + description: Dictionary of tags added to the ENI. + returned: always + type: dict + sample: {} + vpc_id: + description: ID of the VPC the network interface is part of. 
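To make the tags, tag_set, and name fields above concrete, here is a minimal sketch (hypothetical values) of how they are derived from the boto3 TagSet list; list_eni below does the equivalent via boto3_tag_list_to_ansible_dict:

tag_set = [{"Key": "Name", "Value": "my-eni"}, {"Key": "env", "Value": "prod"}]
tags = {t["Key"]: t["Value"] for t in tag_set}   # -> {"Name": "my-eni", "env": "prod"}
name = tags.get("Name")                          # surfaced as the top-level name field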
+ returned: always + type: str + sample: "vpc-b3f1f123" +''' + +try: + from botocore.exceptions import ClientError + from botocore.exceptions import NoCredentialsError +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict + + +def list_eni(connection, module): + + params = {} + # Options are mutually exclusive + if module.params.get("eni_id"): + params['NetworkInterfaceIds'] = [module.params.get("eni_id")] + elif module.params.get("filters"): + params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + else: + params['Filters'] = [] + + try: + network_interfaces_result = connection.describe_network_interfaces(aws_retry=True, **params)['NetworkInterfaces'] + except is_boto3_error_code('InvalidNetworkInterfaceID.NotFound'): + module.exit_json(network_interfaces=[]) + except (ClientError, NoCredentialsError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e) + + # Modify boto3 tags list to be ansible friendly dict and then camel_case + camel_network_interfaces = [] + for network_interface in network_interfaces_result: + network_interface['TagSet'] = boto3_tag_list_to_ansible_dict(network_interface['TagSet']) + network_interface['Tags'] = network_interface['TagSet'] + if 'Name' in network_interface['Tags']: + network_interface['Name'] = network_interface['Tags']['Name'] + # Added id to interface info to be compatible with return values of ec2_eni module: + network_interface['Id'] = network_interface['NetworkInterfaceId'] + camel_network_interfaces.append(camel_dict_to_snake_dict(network_interface, ignore_list=['Tags', 'TagSet'])) + + module.exit_json(network_interfaces=camel_network_interfaces) + + +# NOTE: legacy helper retained from the boto2-era module; nothing in this file calls it, +# and it relies on boto2-style attribute access rather than boto3 dictionaries. +def get_eni_info(interface): + + # Private addresses + private_addresses = [] + for ip in interface.private_ip_addresses: + private_addresses.append({'private_ip_address': ip.private_ip_address, 'primary_address': ip.primary}) + + interface_info = {'id': interface.id, + 'subnet_id': interface.subnet_id, + 'vpc_id': interface.vpc_id, + 'description': interface.description, + 'owner_id': interface.owner_id, + 'status': interface.status, + 'mac_address': interface.mac_address, + 'private_ip_address': interface.private_ip_address, + 'source_dest_check': interface.source_dest_check, + 'groups': dict((group.id, group.name) for group in interface.groups), + 'private_ip_addresses': private_addresses + } + + if hasattr(interface, 'publicDnsName'): + interface_info['association'] = {'public_ip_address': interface.publicIp, + 'public_dns_name': interface.publicDnsName, + 'ip_owner_id': interface.ipOwnerId + } + + if interface.attachment is not None: + interface_info['attachment'] = {'attachment_id': interface.attachment.id, + 'instance_id': interface.attachment.instance_id, + 'device_index': interface.attachment.device_index, + 'status': interface.attachment.status, + 'attach_time': interface.attachment.attach_time, + 'delete_on_termination': interface.attachment.delete_on_termination, + } + + return interface_info + + +def 
main(): + argument_spec = dict( + eni_id=dict(type='str'), + filters=dict(default=None, type='dict') + ) + mutually_exclusive = [ + ['eni_id', 'filters'] + ] + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive, + ) + + connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + + list_eni(connection, module) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_instance.py b/ansible_collections/amazon/aws/plugins/modules/ec2_instance.py new file mode 100644 index 000000000..1cf5a5ddb --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_instance.py @@ -0,0 +1,2170 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: ec2_instance +version_added: 1.0.0 +short_description: Create & manage EC2 instances +description: + - Create and manage AWS EC2 instances. + - This module does not support creating + L(EC2 Spot instances,https://aws.amazon.com/ec2/spot/). + - The M(amazon.aws.ec2_spot_instance) module can create and manage spot instances. +author: + - Ryan Scott Brown (@ryansb) +options: + instance_ids: + description: + - If you specify one or more instance IDs, only instances that have the specified IDs are returned. + - Mutually exclusive with I(exact_count). + type: list + elements: str + default: [] + state: + description: + - Goal state for the instances. + - "I(state=present): ensures instances exist, but does not guarantee any state (e.g. running). Newly-launched instances will be run by EC2." + - "I(state=running): I(state=present) + ensures the instances are running" + - "I(state=started): I(state=running) + waits for EC2 status checks to report OK if I(wait=true)" + - "I(state=stopped): ensures an existing instance is stopped." + - "I(state=rebooted): convenience alias for I(state=stopped) immediately followed by I(state=running)" + - "I(state=restarted): convenience alias for I(state=stopped) immediately followed by I(state=started)" + - "I(state=terminated): ensures an existing instance is terminated." + - "I(state=absent): alias for I(state=terminated)" + choices: [present, terminated, running, started, stopped, restarted, rebooted, absent] + default: present + type: str + wait: + description: + - Whether or not to wait for the desired I(state) (use I(wait_timeout) to customize this). + default: true + type: bool + wait_timeout: + description: + - How long to wait (in seconds) for the instance to finish booting/terminating. + default: 600 + type: int + instance_type: + description: + - Instance type to use for the instance, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). + - Only required when instance is not already present. + - If not specified, C(t2.micro) will be used. + - In a release after 2023-01-01 the default will be removed and either I(instance_type) or + I(launch_template) must be specified when launching an instance. + type: str + count: + description: + - Number of instances to launch. + - Setting this value will result in always launching new instances. + - Mutually exclusive with I(exact_count). 
+ type: int + version_added: 2.2.0 + exact_count: + description: + - An integer value which indicates how many instances that match the I(filters) parameter should be running. + - Instances are either created or terminated based on this value. + - If termination takes place, least recently created instances will be terminated based on Launch Time. + - Mutually exclusive with I(count), I(instance_ids). + type: int + version_added: 2.2.0 + user_data: + description: + - Opaque blob of data which is made available to the EC2 instance. + type: str + aap_callback: + description: + - Preconfigured user-data to enable an instance to perform an Ansible Automation Platform + callback (Linux only). + - For Windows instances, to enable remote access via Ansible set I(windows) to C(true), and + optionally set an admin password. + - If using I(windows) and I(set_password), the callback to Ansible Automation Platform will not + be performed but the instance will be ready to receive winrm connections from Ansible. + - Mutually exclusive with I(user_data). + type: dict + aliases: ['tower_callback'] + suboptions: + windows: + description: + - Set I(windows=True) to use powershell instead of bash for the callback script. + type: bool + default: False + set_password: + description: + - Optional admin password to use if I(windows=True). + type: str + tower_address: + description: + - IP address or DNS name of Tower server. Must be accessible via this address from the + VPC that this instance will be launched in. + - Required if I(windows=False). + type: str + job_template_id: + description: + - Either the integer ID of the Tower Job Template, or the name. + Using a name for the job template is not supported by Ansible Tower prior to version + 3.2. + - Required if I(windows=False). + type: str + host_config_key: + description: + - Host configuration secret key generated by the Tower job template. + - Required if I(windows=False). + type: str + image: + description: + - An image to use for the instance. The M(amazon.aws.ec2_ami_info) module may be used to retrieve images. + One of I(image) or I(image_id) is required when instance is not already present. + type: dict + suboptions: + id: + description: + - The AMI ID. + type: str + ramdisk: + description: + - Overrides the AMI's default ramdisk ID. + type: str + kernel: + description: + - A string AKI to override the AMI kernel. + image_id: + description: + - I(ami) ID to use for the instance. One of I(image) or I(image_id) is required when instance is not already present. + - This is an alias for I(image.id). + type: str + security_groups: + description: + - A list of security group IDs or names (strings). + - Mutually exclusive with I(security_group). + type: list + elements: str + default: [] + security_group: + description: + - A security group ID or name. + - Mutually exclusive with I(security_groups). + type: str + name: + description: + - The Name tag for the instance. + type: str + vpc_subnet_id: + description: + - The subnet ID in which to launch the instance (VPC). + - If none is provided, M(amazon.aws.ec2_instance) will choose the default zone of the default VPC. + aliases: ['subnet_id'] + type: str + network: + description: + - Either a dictionary containing the key C(interfaces) corresponding to a list of network interface IDs or + containing specifications for a single network interface. + - Use the M(amazon.aws.ec2_eni) module to create ENIs with special settings. 
+ type: dict + suboptions: + interfaces: + description: + - A list of ENI IDs (strings) or a list of objects containing the key I(id). + type: list + elements: str + assign_public_ip: + description: + - When C(true) assigns a public IP address to the interface. + type: bool + private_ip_address: + description: + - An IPv4 address to assign to the interface. + type: str + ipv6_addresses: + description: + - A list of IPv6 addresses to assign to the network interface. + type: list + elements: str + source_dest_check: + description: + - Controls whether source/destination checking is enabled on the interface. + type: bool + description: + description: + - A description for the network interface. + type: str + private_ip_addresses: + description: + - A list of IPv4 addresses to assign to the network interface. + type: list + elements: str + subnet_id: + description: + - The subnet to connect the network interface to. + type: str + delete_on_termination: + description: + - Delete the interface when the instance it is attached to is + terminated. + type: bool + device_index: + description: + - The index of the interface to modify. + type: int + groups: + description: + - A list of security group IDs to attach to the interface. + type: list + elements: str + volumes: + description: + - A list of block device mappings, by default this will always use the AMI root device so the volumes option is primarily for adding more storage. + - A mapping contains the (optional) keys C(device_name), C(virtual_name), C(ebs.volume_type), C(ebs.volume_size), C(ebs.kms_key_id), + C(ebs.snapshot_id), C(ebs.iops), and C(ebs.delete_on_termination). + - For more information about each parameter, see U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html). + type: list + elements: dict + launch_template: + description: + - The EC2 launch template to base instance configuration on. + type: dict + suboptions: + id: + description: + - The ID of the launch template (optional if name is specified). + type: str + name: + description: + - The pretty name of the launch template (optional if id is specified). + type: str + version: + description: + - The specific version of the launch template to use. If unspecified, the template default is chosen. + key_name: + description: + - Name of the SSH access key to assign to the instance - must exist in the region the instance is created. + - Use M(amazon.aws.ec2_key) to manage SSH keys. + type: str + availability_zone: + description: + - Specify an availability zone to use the default subnet in it. Useful if not specifying the I(vpc_subnet_id) parameter. + - If no subnet, ENI, or availability zone is provided, the default subnet in the default VPC will be used in the first AZ (alphabetically sorted). + type: str + instance_initiated_shutdown_behavior: + description: + - Whether to stop or terminate an instance upon shutdown. + choices: ['stop', 'terminate'] + type: str + tenancy: + description: + - What type of tenancy to allow an instance to use. Default is shared tenancy. Dedicated tenancy will incur additional charges. + choices: ['dedicated', 'default'] + type: str + termination_protection: + description: + - Whether to enable termination protection. + - This module will not terminate an instance with termination protection active, it must be turned off first. + type: bool + hibernation_options: + description: + - Indicates whether an instance is enabled for hibernation. 
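A rough sketch of how a volumes entry documented above maps onto the BlockDeviceMapping shape the EC2 API expects (hypothetical values; the gp3 defaults follow build_volume_spec later in this module):

volume = {"device_name": "/dev/sda1", "ebs": {"volume_type": "gp3", "volume_size": 16}}
# After snake_case-to-camelCase conversion this becomes approximately:
mapping = {
    "DeviceName": "/dev/sda1",
    "Ebs": {"VolumeType": "gp3", "VolumeSize": 16, "Iops": 3000, "Throughput": 125},  # gp3 defaults filled in when unset
}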
+ Refer to U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/hibernating-prerequisites.html) + for hibernation prerequisites. + type: bool + default: False + version_added: 5.0.0 + cpu_credit_specification: + description: + - For T series instances, choose whether to allow increased charges to buy CPU credits if the default pool is depleted. + - Choose C(unlimited) to enable buying additional CPU credits. + choices: ['unlimited', 'standard'] + type: str + cpu_options: + description: + - Reduce the number of vCPU exposed to the instance. + - These parameters can only be set at instance launch. The two suboptions threads_per_core and core_count are mandatory. + - See U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) for combinations available. + type: dict + suboptions: + threads_per_core: + description: + - Select the number of threads per core to enable. Disable or Enable Intel HT. + choices: [1, 2] + required: true + type: int + core_count: + description: + - Set the number of cores to enable. + required: true + type: int + detailed_monitoring: + description: + - Whether to allow detailed CloudWatch metrics to be collected, enabling more detailed alerting. + type: bool + ebs_optimized: + description: + - Whether the instance should use optimized EBS volumes, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html). + type: bool + filters: + description: + - A dict of filters to apply when deciding whether existing instances match and should be altered. Each dict item + consists of a filter key and a filter value. See + U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html) + for possible filters. Filter names and values are case sensitive. + - By default, instances are filtered for counting by their "Name" tag, base AMI, state (running, by default), and + subnet ID. Any queryable filter can be used. Good candidates are specific tags, SSH keys, or security groups. + type: dict + iam_instance_profile: + description: + - The ARN or name of an EC2-enabled IAM instance profile to be used. + - If a name is not provided in ARN format then the ListInstanceProfiles permission must also be granted. + U(https://docs.aws.amazon.com/IAM/latest/APIReference/API_ListInstanceProfiles.html) + - If no full ARN is provided, the role with a matching name will be used from the active AWS account. + type: str + aliases: ['instance_role'] + placement_group: + description: + - The placement group that needs to be assigned to the instance. + type: str + metadata_options: + description: + - Modify the metadata options for the instance. + - See U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) for more information. + - The two suboptions I(http_endpoint) and I(http_tokens) are supported. + type: dict + version_added: 2.0.0 + suboptions: + http_endpoint: + description: + - Enables or disables the HTTP metadata endpoint on instances. + - If set to C(disabled), metadata of the instance will not be accessible. + choices: [enabled, disabled] + default: enabled + type: str + http_tokens: + description: + - Set the state of token usage for instance metadata requests. + - If the state is optional (v1 and v2), instance metadata can be retrieved with or without a signed token header on request. + - If the state is required (v2), a signed token header must be sent with any instance metadata retrieval requests. 
+ choices: [optional, required] + default: optional + type: str + http_put_response_hop_limit: + version_added: 4.0.0 + type: int + description: + - The desired HTTP PUT response hop limit for instance metadata requests. + - The larger the number, the further instance metadata requests can travel. + default: 1 + http_protocol_ipv6: + version_added: 4.0.0 + type: str + description: + - Whether the instance metadata endpoint is available via IPv6 (C(enabled)) or not (C(disabled)). + - Requires botocore >= 1.21.29 + choices: [enabled, disabled] + default: 'disabled' + instance_metadata_tags: + version_added: 4.0.0 + type: str + description: + - Whether the instance tags are available (C(enabled)) via the metadata endpoint or not (C(disabled)). + - Requires botocore >= 1.23.30 + choices: [enabled, disabled] + default: 'disabled' + +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: Terminate every running instance in a region. Use with EXTREME caution. + amazon.aws.ec2_instance: + state: absent + filters: + instance-state-name: running + +- name: restart a particular instance by its ID + amazon.aws.ec2_instance: + state: restarted + instance_ids: + - i-12345678 + +- name: start an instance with a public IP address + amazon.aws.ec2_instance: + name: "public-compute-instance" + key_name: "prod-ssh-key" + vpc_subnet_id: subnet-5ca1ab1e + instance_type: c5.large + security_group: default + network: + assign_public_ip: true + image_id: ami-123456 + tags: + Environment: Testing + +- name: start an instance and Add EBS + amazon.aws.ec2_instance: + name: "public-withebs-instance" + vpc_subnet_id: subnet-5ca1ab1e + instance_type: t2.micro + key_name: "prod-ssh-key" + security_group: default + volumes: + - device_name: /dev/sda1 + ebs: + volume_size: 16 + delete_on_termination: true + +- name: start an instance and Add EBS volume from a snapshot + amazon.aws.ec2_instance: + name: "public-withebs-instance" + instance_type: t2.micro + image_id: ami-1234567890 + vpc_subnet_id: subnet-5ca1ab1e + volumes: + - device_name: /dev/sda2 + ebs: + snapshot_id: snap-1234567890 + +- name: start an instance with a cpu_options + amazon.aws.ec2_instance: + name: "public-cpuoption-instance" + vpc_subnet_id: subnet-5ca1ab1e + tags: + Environment: Testing + instance_type: c4.large + volumes: + - device_name: /dev/sda1 + ebs: + delete_on_termination: true + cpu_options: + core_count: 1 + threads_per_core: 1 + +- name: start an instance and have it begin a Tower callback on boot + amazon.aws.ec2_instance: + name: "tower-callback-test" + key_name: "prod-ssh-key" + vpc_subnet_id: subnet-5ca1ab1e + security_group: default + tower_callback: + # IP or hostname of tower server + tower_address: 1.2.3.4 + job_template_id: 876 + host_config_key: '[secret config key goes here]' + network: + assign_public_ip: true + image_id: ami-123456 + cpu_credit_specification: unlimited + tags: + SomeThing: "A value" + +- name: start an instance with ENI (An existing ENI ID is required) + amazon.aws.ec2_instance: + name: "public-eni-instance" + key_name: "prod-ssh-key" + vpc_subnet_id: subnet-5ca1ab1e + network: + interfaces: + - id: "eni-12345" + tags: + Env: "eni_on" + volumes: + - device_name: /dev/sda1 + ebs: + delete_on_termination: true + instance_type: t2.micro + image_id: ami-123456 + +- name: add second ENI interface + amazon.aws.ec2_instance: + name: 
"public-eni-instance" + network: + interfaces: + - id: "eni-12345" + - id: "eni-67890" + image_id: ami-123456 + tags: + Env: "eni_on" + instance_type: t2.micro + +- name: start an instance with metadata options + amazon.aws.ec2_instance: + name: "public-metadataoptions-instance" + vpc_subnet_id: subnet-5calable + instance_type: t3.small + image_id: ami-123456 + tags: + Environment: Testing + metadata_options: + http_endpoint: enabled + http_tokens: optional + +# ensure number of instances running with a tag matches exact_count +- name: start multiple instances + amazon.aws.ec2_instance: + instance_type: t3.small + image_id: ami-123456 + exact_count: 5 + region: us-east-2 + vpc_subnet_id: subnet-0123456 + network: + assign_public_ip: true + security_group: default + tags: + foo: bar + +# launches multiple instances - specific number of instances +- name: start specific number of multiple instances + amazon.aws.ec2_instance: + instance_type: t3.small + image_id: ami-123456 + count: 3 + region: us-east-2 + network: + assign_public_ip: true + security_group: default + vpc_subnet_id: subnet-0123456 + state: present + tags: + foo: bar +""" + +RETURN = r""" +instance_ids: + description: a list of ec2 instance IDs matching the provided specification and filters + returned: always + type: list + sample: ["i-0123456789abcdef0", "i-0123456789abcdef1"] + version_added: 5.3.0 +changed_ids: + description: a list of the set of ec2 instance IDs changed by the module action + returned: when instances that must be present are launched + type: list + sample: ["i-0123456789abcdef0"] + version_added: 5.3.0 +terminated_ids: + description: a list of the set of ec2 instance IDs terminated by the module action + returned: when instances that must be absent are terminated + type: list + sample: ["i-0123456789abcdef1"] + version_added: 5.3.0 +instances: + description: a list of ec2 instances + returned: when wait == true or when matching instances already exist + type: complex + contains: + ami_launch_index: + description: The AMI launch index, which can be used to find this instance in the launch group. + returned: always + type: int + sample: 0 + architecture: + description: The architecture of the image + returned: always + type: str + sample: x86_64 + block_device_mappings: + description: Any block device mapping entries for the instance. + returned: always + type: complex + contains: + device_name: + description: The device name exposed to the instance (for example, /dev/sdh or xvdh). + returned: always + type: str + sample: /dev/sdh + ebs: + description: Parameters used to automatically set up EBS volumes when the instance is launched. + returned: always + type: complex + contains: + attach_time: + description: The time stamp when the attachment initiated. + returned: always + type: str + sample: "2017-03-23T22:51:24+00:00" + delete_on_termination: + description: Indicates whether the volume is deleted on instance termination. + returned: always + type: bool + sample: true + status: + description: The attachment state. + returned: always + type: str + sample: attached + volume_id: + description: The ID of the EBS volume + returned: always + type: str + sample: vol-12345678 + client_token: + description: The idempotency token you provided when you launched the instance, if applicable. + returned: always + type: str + sample: mytoken + ebs_optimized: + description: Indicates whether the instance is optimized for EBS I/O. 
+ returned: always + type: bool + sample: false + hypervisor: + description: The hypervisor type of the instance. + returned: always + type: str + sample: xen + iam_instance_profile: + description: The IAM instance profile associated with the instance, if applicable. + returned: always + type: complex + contains: + arn: + description: The Amazon Resource Name (ARN) of the instance profile. + returned: always + type: str + sample: "arn:aws:iam::123456789012:instance-profile/myprofile" + id: + description: The ID of the instance profile + returned: always + type: str + sample: JFJ397FDG400FG9FD1N + image_id: + description: The ID of the AMI used to launch the instance. + returned: always + type: str + sample: ami-0011223344 + instance_id: + description: The ID of the instance. + returned: always + type: str + sample: i-012345678 + instance_type: + description: The instance type size of the running instance. + returned: always + type: str + sample: t2.micro + key_name: + description: The name of the key pair, if this instance was launched with an associated key pair. + returned: always + type: str + sample: my-key + launch_time: + description: The time the instance was launched. + returned: always + type: str + sample: "2017-03-23T22:51:24+00:00" + monitoring: + description: The monitoring for the instance. + returned: always + type: complex + contains: + state: + description: Indicates whether detailed monitoring is enabled. Otherwise, basic monitoring is enabled. + returned: always + type: str + sample: disabled + network_interfaces: + description: One or more network interfaces for the instance. + returned: always + type: complex + contains: + association: + description: The association information for an Elastic IPv4 associated with the network interface. + returned: always + type: complex + contains: + ip_owner_id: + description: The ID of the owner of the Elastic IP address. + returned: always + type: str + sample: amazon + public_dns_name: + description: The public DNS name. + returned: always + type: str + sample: "" + public_ip: + description: The public IP address or Elastic IP address bound to the network interface. + returned: always + type: str + sample: 1.2.3.4 + attachment: + description: The network interface attachment. + returned: always + type: complex + contains: + attach_time: + description: The time stamp when the attachment initiated. + returned: always + type: str + sample: "2017-03-23T22:51:24+00:00" + attachment_id: + description: The ID of the network interface attachment. + returned: always + type: str + sample: eni-attach-3aff3f + delete_on_termination: + description: Indicates whether the network interface is deleted when the instance is terminated. + returned: always + type: bool + sample: true + device_index: + description: The index of the device on the instance for the network interface attachment. + returned: always + type: int + sample: 0 + status: + description: The attachment state. + returned: always + type: str + sample: attached + description: + description: The description. + returned: always + type: str + sample: My interface + groups: + description: One or more security groups. + returned: always + type: list + elements: dict + contains: + group_id: + description: The ID of the security group. + returned: always + type: str + sample: sg-abcdef12 + group_name: + description: The name of the security group. 
+ returned: always + type: str + sample: mygroup + ipv6_addresses: + description: One or more IPv6 addresses associated with the network interface. + returned: always + type: list + elements: dict + contains: + ipv6_address: + description: The IPv6 address. + returned: always + type: str + sample: "2001:0db8:85a3:0000:0000:8a2e:0370:7334" + mac_address: + description: The MAC address. + returned: always + type: str + sample: "00:11:22:33:44:55" + network_interface_id: + description: The ID of the network interface. + returned: always + type: str + sample: eni-01234567 + owner_id: + description: The AWS account ID of the owner of the network interface. + returned: always + type: str + sample: 01234567890 + private_ip_address: + description: The IPv4 address of the network interface within the subnet. + returned: always + type: str + sample: 10.0.0.1 + private_ip_addresses: + description: The private IPv4 addresses associated with the network interface. + returned: always + type: list + elements: dict + contains: + association: + description: The association information for an Elastic IP address (IPv4) associated with the network interface. + returned: always + type: complex + contains: + ip_owner_id: + description: The ID of the owner of the Elastic IP address. + returned: always + type: str + sample: amazon + public_dns_name: + description: The public DNS name. + returned: always + type: str + sample: "" + public_ip: + description: The public IP address or Elastic IP address bound to the network interface. + returned: always + type: str + sample: 1.2.3.4 + primary: + description: Indicates whether this IPv4 address is the primary private IP address of the network interface. + returned: always + type: bool + sample: true + private_ip_address: + description: The private IPv4 address of the network interface. + returned: always + type: str + sample: 10.0.0.1 + source_dest_check: + description: Indicates whether source/destination checking is enabled. + returned: always + type: bool + sample: true + status: + description: The status of the network interface. + returned: always + type: str + sample: in-use + subnet_id: + description: The ID of the subnet for the network interface. + returned: always + type: str + sample: subnet-0123456 + vpc_id: + description: The ID of the VPC for the network interface. + returned: always + type: str + sample: vpc-0123456 + placement: + description: The location where the instance launched, if applicable. + returned: always + type: complex + contains: + availability_zone: + description: The Availability Zone of the instance. + returned: always + type: str + sample: ap-southeast-2a + group_name: + description: The name of the placement group the instance is in (for cluster compute instances). + returned: always + type: str + sample: "" + tenancy: + description: The tenancy of the instance (if the instance is running in a VPC). + returned: always + type: str + sample: default + private_dns_name: + description: The private DNS name. + returned: always + type: str + sample: ip-10-0-0-1.ap-southeast-2.compute.internal + private_ip_address: + description: The IPv4 address of the network interface within the subnet. + returned: always + type: str + sample: 10.0.0.1 + product_codes: + description: One or more product codes. + returned: always + type: list + elements: dict + contains: + product_code_id: + description: The product code. + returned: always + type: str + sample: aw0evgkw8ef3n2498gndfgasdfsd5cce + product_code_type: + description: The type of product code. 
+                    returned: always
+                    type: str
+                    sample: marketplace
+        public_dns_name:
+            description: The public DNS name assigned to the instance.
+            returned: always
+            type: str
+            sample:
+        public_ip_address:
+            description: The public IPv4 address assigned to the instance.
+            returned: always
+            type: str
+            sample: 52.0.0.1
+        root_device_name:
+            description: The device name of the root device.
+            returned: always
+            type: str
+            sample: /dev/sda1
+        root_device_type:
+            description: The type of root device used by the AMI.
+            returned: always
+            type: str
+            sample: ebs
+        security_groups:
+            description: One or more security groups for the instance.
+            returned: always
+            type: list
+            elements: dict
+            contains:
+                group_id:
+                    description: The ID of the security group.
+                    returned: always
+                    type: str
+                    sample: sg-0123456
+                group_name:
+                    description: The name of the security group.
+                    returned: always
+                    type: str
+                    sample: my-security-group
+        source_dest_check:
+            description: Indicates whether source/destination checking is enabled.
+            returned: always
+            type: bool
+            sample: true
+        state:
+            description: The current state of the instance.
+            returned: always
+            type: complex
+            contains:
+                code:
+                    description: The low byte represents the state.
+                    returned: always
+                    type: int
+                    sample: 16
+                name:
+                    description: The name of the state.
+                    returned: always
+                    type: str
+                    sample: running
+        state_transition_reason:
+            description: The reason for the most recent state transition.
+            returned: always
+            type: str
+            sample:
+        subnet_id:
+            description: The ID of the subnet in which the instance is running.
+            returned: always
+            type: str
+            sample: subnet-00abcdef
+        tags:
+            description: Any tags assigned to the instance.
+            returned: always
+            type: dict
+            sample:
+        virtualization_type:
+            description: The type of virtualization of the AMI.
+            returned: always
+            type: str
+            sample: hvm
+        vpc_id:
+            description: The ID of the VPC the instance is in.
+            returned: always
+            type: str
+            sample: vpc-0011223344
+"""
+
+from collections import namedtuple
+import time
+import uuid
+
+try:
+    import botocore
+except ImportError:
+    pass  # caught by AnsibleAWSModule
+
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+from ansible.module_utils.six import string_types
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message
+from ansible_collections.amazon.aws.plugins.module_utils.core import parse_aws_arn
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications
+from ansible_collections.amazon.aws.plugins.module_utils.tower import tower_callback_script
+
+module = None
+
+
+def build_volume_spec(params):
+    volumes = params.get('volumes') or []
+    for volume in volumes:
+        if 'ebs' in volume:
+            for int_value in ['volume_size', 'iops']:
+                if int_value in volume['ebs']:
+                    volume['ebs'][int_value] = int(volume['ebs'][int_value])
+            if 'volume_type' in volume['ebs'] and volume['ebs']['volume_type'] == 'gp3':
+                if not volume['ebs'].get('iops'):
+                    volume['ebs']['iops'] = 3000
+                if 'throughput' in volume['ebs']:
+                    volume['ebs']['throughput'] = int(volume['ebs']['throughput'])
+                else:
+                    volume['ebs']['throughput'] = 125
+
+    return [snake_dict_to_camel_dict(v, capitalize_first=True) for v in volumes]
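+
+# Illustration (hypothetical values, not part of the upstream module): given a
+# user volume such as
+#     {'device_name': '/dev/sda1', 'ebs': {'volume_size': 100, 'volume_type': 'gp3'}}
+# build_volume_spec() emits the boto3-style mapping
+#     {'DeviceName': '/dev/sda1', 'Ebs': {'VolumeSize': 100, 'VolumeType': 'gp3', 'Iops': 3000, 'Throughput': 125}}
+# with the gp3 Iops/Throughput defaults (3000 and 125) filled in as coded above.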
+
+
+def add_or_update_instance_profile(instance, desired_profile_name):
+    instance_profile_setting = instance.get('IamInstanceProfile')
+    if instance_profile_setting and desired_profile_name:
+        if desired_profile_name in (instance_profile_setting.get('Name'), instance_profile_setting.get('Arn')):
+            # great, the profile we asked for is what's there
+            return False
+        else:
+            desired_arn = determine_iam_role(desired_profile_name)
+            if instance_profile_setting.get('Arn') == desired_arn:
+                return False
+
+        # update association
+        try:
+            association = client.describe_iam_instance_profile_associations(
+                aws_retry=True,
+                Filters=[{'Name': 'instance-id', 'Values': [instance['InstanceId']]}])
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            # check for InvalidAssociationID.NotFound
+            module.fail_json_aws(e, "Could not find instance profile association")
+        try:
+            client.replace_iam_instance_profile_association(
+                aws_retry=True,
+                AssociationId=association['IamInstanceProfileAssociations'][0]['AssociationId'],
+                IamInstanceProfile={'Arn': determine_iam_role(desired_profile_name)}
+            )
+            return True
+        except botocore.exceptions.ClientError as e:
+            module.fail_json_aws(e, "Could not associate instance profile")
+
+    if not instance_profile_setting and desired_profile_name:
+        # create association
+        try:
+            client.associate_iam_instance_profile(
+                aws_retry=True,
+                IamInstanceProfile={'Arn': determine_iam_role(desired_profile_name)},
+                InstanceId=instance['InstanceId']
+            )
+            return True
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            module.fail_json_aws(e, "Could not associate new instance profile")
+
+    return False
+
+
+def build_network_spec(params):
+    """
+    Returns list of interfaces [complex]
+    Interface type: {
+        'AssociatePublicIpAddress': True|False,
+        'DeleteOnTermination': True|False,
+        'Description': 'string',
+        'DeviceIndex': 123,
+        'Groups': [
+            'string',
+        ],
+        'Ipv6AddressCount': 123,
+        'Ipv6Addresses': [
+            {
+                'Ipv6Address': 'string'
+            },
+        ],
+        'NetworkInterfaceId': 'string',
+        'PrivateIpAddress': 'string',
+        'PrivateIpAddresses': [
+            {
+                'Primary': True|False,
+                'PrivateIpAddress': 'string'
+            },
+        ],
+        'SecondaryPrivateIpAddressCount': 123,
+        'SubnetId': 'string'
+    },
+    """
+
+    interfaces = []
+    network = params.get('network') or {}
+    if not network.get('interfaces'):
+        # they only specified one interface
+        spec = {
+            'DeviceIndex': 0,
+        }
+        if network.get('assign_public_ip') is not None:
+            spec['AssociatePublicIpAddress'] = network['assign_public_ip']
+
+        if params.get('vpc_subnet_id'):
+            spec['SubnetId'] = params['vpc_subnet_id']
+        else:
+            default_vpc = get_default_vpc()
+            if default_vpc is None:
+                module.fail_json(
+                    msg="No default subnet could be found - you must include a VPC subnet ID (vpc_subnet_id parameter) to create an instance")
+            else:
+                sub = get_default_subnet(default_vpc, availability_zone=module.params.get('availability_zone'))
+                spec['SubnetId'] = sub['SubnetId']
+
+        if network.get('private_ip_address'):
+            spec['PrivateIpAddress'] = network['private_ip_address']
+
+        if params.get('security_group') or params.get('security_groups'):
+            groups = discover_security_groups(
+                group=params.get('security_group'),
+                groups=params.get('security_groups'),
+                subnet_id=spec['SubnetId'],
+            )
+            spec['Groups'] = groups
+        if network.get('description') is not None:
+            spec['Description'] = network['description']
+        # TODO more special snowflake network things
+
+        return [spec]
+
+    # handle list of `network.interfaces` options
+    for idx, interface_params in enumerate(network.get('interfaces', [])):
+        spec = {
+            'DeviceIndex': idx,
+        }
+
+        if isinstance(interface_params, string_types):
+            # naive case where user gave
+            # network_interfaces: [eni-1234, eni-4567, ....]
+            # put into normal data structure so we don't dupe code
+            interface_params = {'id': interface_params}
+
+        if interface_params.get('id') is not None:
+            # if an ID is provided, we don't want to set any other parameters.
+            spec['NetworkInterfaceId'] = interface_params['id']
+            interfaces.append(spec)
+            continue
+
+        spec['DeleteOnTermination'] = interface_params.get('delete_on_termination', True)
+
+        if interface_params.get('ipv6_addresses'):
+            spec['Ipv6Addresses'] = [{'Ipv6Address': a} for a in interface_params.get('ipv6_addresses', [])]
+
+        if interface_params.get('private_ip_address'):
+            spec['PrivateIpAddress'] = interface_params.get('private_ip_address')
+
+        if interface_params.get('description'):
+            spec['Description'] = interface_params.get('description')
+
+        if interface_params.get('subnet_id', params.get('vpc_subnet_id')):
+            spec['SubnetId'] = interface_params.get('subnet_id', params.get('vpc_subnet_id'))
+        elif not spec.get('SubnetId') and not interface_params.get('id'):
+            # TODO grab a subnet from default VPC
+            raise ValueError('Failed to assign subnet to interface {0}'.format(interface_params))
+
+        interfaces.append(spec)
+    return interfaces
+
+
+def warn_if_public_ip_assignment_changed(instance):
+    # This is a non-modifiable attribute.
+    assign_public_ip = (module.params.get('network') or {}).get('assign_public_ip')
+    if assign_public_ip is None:
+        return
+
+    # Check that public ip assignment is the same and warn if not
+    public_dns_name = instance.get('PublicDnsName')
+    if (public_dns_name and not assign_public_ip) or (assign_public_ip and not public_dns_name):
+        module.warn(
+            "Unable to modify public ip assignment to {0} for instance {1}. "
+            "Whether or not to assign a public IP is determined during instance creation.".format(
+                assign_public_ip, instance['InstanceId']))
+
+
+def warn_if_cpu_options_changed(instance):
+    # This is a non-modifiable attribute.
+    cpu_options = module.params.get('cpu_options')
+    if cpu_options is None:
+        return
+
+    # Check that the CpuOptions set are the same and warn if not
+    core_count_curr = instance['CpuOptions'].get('CoreCount')
+    core_count = cpu_options.get('core_count')
+    threads_per_core_curr = instance['CpuOptions'].get('ThreadsPerCore')
+    threads_per_core = cpu_options.get('threads_per_core')
+    if core_count_curr != core_count:
+        module.warn(
+            "Unable to modify core_count from {0} to {1}. "
+            "The number of cores is determined during instance creation.".format(
+                core_count_curr, core_count))
+
+    if threads_per_core_curr != threads_per_core:
+        module.warn(
+            "Unable to modify threads_per_core from {0} to {1}. "
+            "The number of threads per core is determined during instance creation.".format(
+                threads_per_core_curr, threads_per_core))
+
+
+def discover_security_groups(group, groups, parent_vpc_id=None, subnet_id=None):
+
+    if subnet_id is not None:
+        try:
+            sub = client.describe_subnets(aws_retry=True, SubnetIds=[subnet_id])
+        except is_boto3_error_code('InvalidSubnetID.NotFound'):
+            module.fail_json(
+                msg="Could not find subnet {0} to associate security groups. "
+                    "Please check the vpc_subnet_id and security_groups parameters.".format(subnet_id))
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
+            module.fail_json_aws(e, msg="Error while searching for subnet {0} parent VPC.".format(subnet_id))
+        parent_vpc_id = sub['Subnets'][0]['VpcId']
+
+    if group:
+        return get_ec2_security_group_ids_from_names(group, client, vpc_id=parent_vpc_id)
+    if groups:
+        return get_ec2_security_group_ids_from_names(groups, client, vpc_id=parent_vpc_id)
+    return []
+
+
+def build_userdata(params):
+    if params.get('user_data') is not None:
+        return {'UserData': to_native(params.get('user_data'))}
+    if params.get('aap_callback'):
+        userdata = tower_callback_script(
+            tower_address=params.get("aap_callback").get("tower_address"),
+            job_template_id=params.get("aap_callback").get("job_template_id"),
+            host_config_key=params.get("aap_callback").get("host_config_key"),
+            windows=params.get("aap_callback").get("windows"),
+            passwd=params.get("aap_callback").get("set_password"),
+        )
+        return {'UserData': userdata}
+    return {}
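+
+# NOTE: user_data and aap_callback are declared mutually exclusive in main(), so
+# build_userdata() returns at most one UserData entry; an explicit user_data
+# value wins by virtue of the early return above.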
+
+
+def build_top_level_options(params):
+    spec = {}
+    if params.get('image_id'):
+        spec['ImageId'] = params['image_id']
+    elif isinstance(params.get('image'), dict):
+        image = params.get('image', {})
+        spec['ImageId'] = image.get('id')
+        if 'ramdisk' in image:
+            spec['RamdiskId'] = image['ramdisk']
+        if 'kernel' in image:
+            spec['KernelId'] = image['kernel']
+    if not spec.get('ImageId') and not params.get('launch_template'):
+        module.fail_json(msg="You must include an image_id or image.id parameter to create an instance, or use a launch_template.")
+
+    if params.get('key_name') is not None:
+        spec['KeyName'] = params.get('key_name')
+
+    spec.update(build_userdata(params))
+
+    if params.get('launch_template') is not None:
+        spec['LaunchTemplate'] = {}
+        if not params.get('launch_template').get('id') and not params.get('launch_template').get('name'):
+            module.fail_json(msg="Could not create instance with launch template. Either launch_template.name or launch_template.id is required.")
+
+        if params.get('launch_template').get('id') is not None:
+            spec['LaunchTemplate']['LaunchTemplateId'] = params.get('launch_template').get('id')
+        if params.get('launch_template').get('name') is not None:
+            spec['LaunchTemplate']['LaunchTemplateName'] = params.get('launch_template').get('name')
+        if params.get('launch_template').get('version') is not None:
+            spec['LaunchTemplate']['Version'] = to_native(params.get('launch_template').get('version'))
+
+    if params.get('detailed_monitoring', False):
+        spec['Monitoring'] = {'Enabled': True}
+    if params.get('cpu_credit_specification') is not None:
+        spec['CreditSpecification'] = {'CpuCredits': params.get('cpu_credit_specification')}
+    if params.get('tenancy') is not None:
+        spec['Placement'] = {'Tenancy': params.get('tenancy')}
+    if params.get('placement_group'):
+        if 'Placement' in spec:
+            spec['Placement']['GroupName'] = str(params.get('placement_group'))
+        else:
+            spec.setdefault('Placement', {'GroupName': str(params.get('placement_group'))})
+    if params.get('ebs_optimized') is not None:
+        spec['EbsOptimized'] = params.get('ebs_optimized')
+    if params.get('instance_initiated_shutdown_behavior'):
+        spec['InstanceInitiatedShutdownBehavior'] = params.get('instance_initiated_shutdown_behavior')
+    if params.get('termination_protection') is not None:
+        spec['DisableApiTermination'] = params.get('termination_protection')
+    if params.get('hibernation_options') and params.get('volumes'):
+        for vol in params['volumes']:
+            if vol.get('ebs') and vol['ebs'].get('encrypted'):
+                spec['HibernationOptions'] = {'Configured': True}
+            else:
+                module.fail_json(
+                    msg="Hibernation prerequisites not satisfied. Refer to {0}".format(
+                        "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/hibernating-prerequisites.html")
+                )
+    if params.get('cpu_options') is not None:
+        spec['CpuOptions'] = {}
+        spec['CpuOptions']['ThreadsPerCore'] = params.get('cpu_options').get('threads_per_core')
+        spec['CpuOptions']['CoreCount'] = params.get('cpu_options').get('core_count')
+    if params.get('metadata_options'):
+        spec['MetadataOptions'] = {}
+        spec['MetadataOptions']['HttpEndpoint'] = params.get(
+            'metadata_options').get('http_endpoint')
+        spec['MetadataOptions']['HttpTokens'] = params.get(
+            'metadata_options').get('http_tokens')
+        spec['MetadataOptions']['HttpPutResponseHopLimit'] = params.get(
+            'metadata_options').get('http_put_response_hop_limit')
+
+        if not module.botocore_at_least('1.23.30'):
+            # fail only if enabled is requested
+            if params.get('metadata_options').get('instance_metadata_tags') == 'enabled':
+                module.require_botocore_at_least('1.23.30', reason='to set instance_metadata_tags')
+        else:
+            spec['MetadataOptions']['InstanceMetadataTags'] = params.get(
+                'metadata_options').get('instance_metadata_tags')
+
+        if not module.botocore_at_least('1.21.29'):
+            # fail only if enabled is requested
+            if params.get('metadata_options').get('http_protocol_ipv6') == 'enabled':
+                module.require_botocore_at_least('1.21.29', reason='to set http_protocol_ipv6')
+        else:
+            spec['MetadataOptions']['HttpProtocolIpv6'] = params.get(
+                'metadata_options').get('http_protocol_ipv6')
+
+    return spec
+
+
+def build_instance_tags(params, propagate_tags_to_volumes=True):
+    tags = params.get('tags') or {}
+    if params.get('name') is not None:
+        tags['Name'] = params.get('name')
+    specs = boto3_tag_specifications(tags, ['volume', 'instance'])
+    return specs
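+
+# Rough illustration (hypothetical values, not part of the upstream module):
+# with name='web' and tags={'env': 'dev'}, boto3_tag_specifications() yields a
+# TagSpecifications list along the lines of
+#     [{'ResourceType': 'volume', 'Tags': [{'Key': 'Name', 'Value': 'web'}, {'Key': 'env', 'Value': 'dev'}]},
+#      {'ResourceType': 'instance', 'Tags': [...]}]
+# so both the instance and any volumes created with it are tagged at launch.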
+
+
+def build_run_instance_spec(params):
+
+    spec = dict(
+        ClientToken=uuid.uuid4().hex,
+        MaxCount=1,
+        MinCount=1,
+    )
+    spec.update(**build_top_level_options(params))
+
+    spec['NetworkInterfaces'] = build_network_spec(params)
+    spec['BlockDeviceMappings'] = build_volume_spec(params)
+
+    tag_spec = build_instance_tags(params)
+    if tag_spec is not None:
+        spec['TagSpecifications'] = tag_spec
+
+    # IAM profile
+    if params.get('iam_instance_profile'):
+        spec['IamInstanceProfile'] = dict(Arn=determine_iam_role(params.get('iam_instance_profile')))
+
+    if params.get('exact_count'):
+        spec['MaxCount'] = params.get('to_launch')
+        spec['MinCount'] = params.get('to_launch')
+
+    if params.get('count'):
+        spec['MaxCount'] = params.get('count')
+        spec['MinCount'] = params.get('count')
+
+    if not params.get('launch_template'):
+        spec['InstanceType'] = params['instance_type'] if params.get('instance_type') else 't2.micro'
+
+    if params.get('launch_template') and params.get('instance_type'):
+        spec['InstanceType'] = params['instance_type']
+
+    return spec
+
+
+def await_instances(ids, desired_module_state='present', force_wait=False):
+    if not module.params.get('wait', True) and not force_wait:
+        # the user asked not to wait for anything
+        return
+
+    if module.check_mode:
+        # In check mode, there is no change even if you wait.
+        return
+
+    # Map ansible state to boto3 waiter type
+    state_to_boto3_waiter = {
+        'present': 'instance_exists',
+        'started': 'instance_status_ok',
+        'running': 'instance_running',
+        'stopped': 'instance_stopped',
+        'restarted': 'instance_status_ok',
+        'rebooted': 'instance_running',
+        'terminated': 'instance_terminated',
+        'absent': 'instance_terminated',
+    }
+    if desired_module_state not in state_to_boto3_waiter:
+        module.fail_json(msg="Cannot wait for state {0}, invalid state".format(desired_module_state))
+    boto3_waiter_type = state_to_boto3_waiter[desired_module_state]
+    waiter = client.get_waiter(boto3_waiter_type)
+    try:
+        waiter.wait(
+            InstanceIds=ids,
+            WaiterConfig={
+                'Delay': 15,
+                'MaxAttempts': module.params.get('wait_timeout', 600) // 15,
+            }
+        )
+    except botocore.exceptions.WaiterConfigError as e:
+        module.fail_json(msg="{0}. Error waiting for instances {1} to reach state {2}".format(
+            to_native(e), ', '.join(ids), boto3_waiter_type))
+    except botocore.exceptions.WaiterError as e:
+        module.warn("Instances {0} took too long to reach state {1}. {2}".format(
+            ', '.join(ids), boto3_waiter_type, to_native(e)))
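+
+# The waiter above polls every 15 seconds, so wait_timeout is converted into a
+# number of attempts by integer division: the default 600s allows 40 polls.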
{2}".format( + ', '.join(ids), boto3_waiter_type, to_native(e))) + + +def diff_instance_and_params(instance, params, skip=None): + """boto3 instance obj, module params""" + + if skip is None: + skip = [] + + changes_to_apply = [] + id_ = instance['InstanceId'] + + ParamMapper = namedtuple('ParamMapper', ['param_key', 'instance_key', 'attribute_name', 'add_value']) + + def value_wrapper(v): + return {'Value': v} + + param_mappings = [ + ParamMapper('ebs_optimized', 'EbsOptimized', 'ebsOptimized', value_wrapper), + ParamMapper('termination_protection', 'DisableApiTermination', 'disableApiTermination', value_wrapper), + # user data is an immutable property + # ParamMapper('user_data', 'UserData', 'userData', value_wrapper), + ] + + for mapping in param_mappings: + if params.get(mapping.param_key) is None: + continue + if mapping.instance_key in skip: + continue + + try: + value = client.describe_instance_attribute(aws_retry=True, Attribute=mapping.attribute_name, InstanceId=id_) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Could not describe attribute {0} for instance {1}".format(mapping.attribute_name, id_)) + if value[mapping.instance_key]['Value'] != params.get(mapping.param_key): + arguments = dict( + InstanceId=instance['InstanceId'], + # Attribute=mapping.attribute_name, + ) + arguments[mapping.instance_key] = mapping.add_value(params.get(mapping.param_key)) + changes_to_apply.append(arguments) + + if params.get('security_group') or params.get('security_groups'): + try: + value = client.describe_instance_attribute(aws_retry=True, Attribute="groupSet", InstanceId=id_) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Could not describe attribute groupSet for instance {0}".format(id_)) + # managing security groups + if params.get('vpc_subnet_id'): + subnet_id = params.get('vpc_subnet_id') + else: + default_vpc = get_default_vpc() + if default_vpc is None: + module.fail_json( + msg="No default subnet could be found - you must include a VPC subnet ID (vpc_subnet_id parameter) to modify security groups.") + else: + sub = get_default_subnet(default_vpc) + subnet_id = sub['SubnetId'] + + groups = discover_security_groups( + group=params.get('security_group'), + groups=params.get('security_groups'), + subnet_id=subnet_id, + ) + expected_groups = groups + instance_groups = [g['GroupId'] for g in value['Groups']] + if set(instance_groups) != set(expected_groups): + changes_to_apply.append(dict( + Groups=expected_groups, + InstanceId=instance['InstanceId'] + )) + + if (params.get('network') or {}).get('source_dest_check') is not None: + # network.source_dest_check is nested, so needs to be treated separately + check = bool(params.get('network').get('source_dest_check')) + if instance['SourceDestCheck'] != check: + changes_to_apply.append(dict( + InstanceId=instance['InstanceId'], + SourceDestCheck={'Value': check}, + )) + + return changes_to_apply + + +def change_network_attachments(instance, params): + if (params.get('network') or {}).get('interfaces') is not None: + new_ids = [] + for inty in params.get('network').get('interfaces'): + if isinstance(inty, dict) and 'id' in inty: + new_ids.append(inty['id']) + elif isinstance(inty, string_types): + new_ids.append(inty) + # network.interfaces can create the need to attach new interfaces + old_ids = [inty['NetworkInterfaceId'] for inty in instance['NetworkInterfaces']] + to_attach = set(new_ids) - set(old_ids) + if 
not module.check_mode: + for eni_id in to_attach: + try: + client.attach_network_interface( + aws_retry=True, + DeviceIndex=new_ids.index(eni_id), + InstanceId=instance["InstanceId"], + NetworkInterfaceId=eni_id, + ) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws( + e, msg=f"Could not attach interface {eni_id} to instance {instance['InstanceId']}" + ) + return bool(len(to_attach)) + return False + + +def find_instances(ids=None, filters=None): + sanitized_filters = dict() + + if ids: + params = dict(InstanceIds=ids) + elif filters is None: + module.fail_json(msg="No filters provided when they were required") + else: + for key in list(filters.keys()): + if not key.startswith("tag:"): + sanitized_filters[key.replace("_", "-")] = filters[key] + else: + sanitized_filters[key] = filters[key] + params = dict(Filters=ansible_dict_to_boto3_filter_list(sanitized_filters)) + + try: + results = _describe_instances(**params) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Could not describe instances") + + retval = list(results) + return retval + + +@AWSRetry.jittered_backoff() +def _describe_instances(**params): + paginator = client.get_paginator('describe_instances') + return paginator.paginate(**params).search('Reservations[].Instances[]') + + +def get_default_vpc(): + try: + vpcs = client.describe_vpcs( + aws_retry=True, + Filters=ansible_dict_to_boto3_filter_list({'isDefault': 'true'})) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Could not describe default VPC") + if len(vpcs.get('Vpcs', [])): + return vpcs.get('Vpcs')[0] + return None + + +def get_default_subnet(vpc, availability_zone=None): + try: + subnets = client.describe_subnets( + aws_retry=True, + Filters=ansible_dict_to_boto3_filter_list({ + 'vpc-id': vpc['VpcId'], + 'state': 'available', + 'default-for-az': 'true', + }) + ) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Could not describe default subnets for VPC {0}".format(vpc['VpcId'])) + if len(subnets.get('Subnets', [])): + if availability_zone is not None: + subs_by_az = dict((subnet['AvailabilityZone'], subnet) for subnet in subnets.get('Subnets')) + if availability_zone in subs_by_az: + return subs_by_az[availability_zone] + + # to have a deterministic sorting order, we sort by AZ so we'll always pick the `a` subnet first + # there can only be one default-for-az subnet per AZ, so the AZ key is always unique in this list + by_az = sorted(subnets.get('Subnets'), key=lambda s: s['AvailabilityZone']) + return by_az[0] + return None + + +def ensure_instance_state(desired_module_state): + """ + Sets return keys depending on the desired instance state + """ + results = dict() + changed = False + if desired_module_state in ('running', 'started'): + _changed, failed, instances, failure_reason = change_instance_state( + filters=module.params.get('filters'), desired_module_state=desired_module_state) + changed |= bool(len(_changed)) + + if failed: + module.fail_json( + msg="Unable to start instances: {0}".format(failure_reason), + reboot_success=list(_changed), + reboot_failed=failed) + + results = dict( + msg='Instances started', + start_success=list(_changed), + start_failed=[], + # Avoid breaking things 'reboot' is wrong but used to be returned + reboot_success=list(_changed), + reboot_failed=[], + changed=changed, + 
instances=[pretty_instance(i) for i in instances], + ) + elif desired_module_state in ('restarted', 'rebooted'): + # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-reboot.html + # The Ansible behaviour of issuing a stop/start has a minor impact on user billing + # This will need to be changelogged if we ever change to client.reboot_instance + _changed, failed, instances, failure_reason = change_instance_state( + filters=module.params.get('filters'), + desired_module_state='stopped', + ) + + if failed: + module.fail_json( + msg="Unable to stop instances: {0}".format(failure_reason), + stop_success=list(_changed), + stop_failed=failed) + + changed |= bool(len(_changed)) + _changed, failed, instances, failure_reason = change_instance_state( + filters=module.params.get('filters'), + desired_module_state=desired_module_state, + ) + changed |= bool(len(_changed)) + + if failed: + module.fail_json( + msg="Unable to restart instances: {0}".format(failure_reason), + reboot_success=list(_changed), + reboot_failed=failed) + + results = dict( + msg='Instances restarted', + reboot_success=list(_changed), + changed=changed, + reboot_failed=[], + instances=[pretty_instance(i) for i in instances], + ) + elif desired_module_state in ('stopped',): + _changed, failed, instances, failure_reason = change_instance_state( + filters=module.params.get('filters'), + desired_module_state=desired_module_state, + ) + changed |= bool(len(_changed)) + + if failed: + module.fail_json( + msg="Unable to stop instances: {0}".format(failure_reason), + stop_success=list(_changed), + stop_failed=failed) + + results = dict( + msg='Instances stopped', + stop_success=list(_changed), + changed=changed, + stop_failed=[], + instances=[pretty_instance(i) for i in instances], + ) + elif desired_module_state in ('absent', 'terminated'): + terminated, terminate_failed, instances, failure_reason = change_instance_state( + filters=module.params.get('filters'), + desired_module_state=desired_module_state, + ) + + if terminate_failed: + module.fail_json( + msg="Unable to terminate instances: {0}".format(failure_reason), + terminate_success=list(terminated), + terminate_failed=terminate_failed) + results = dict( + msg='Instances terminated', + terminate_success=list(terminated), + changed=bool(len(terminated)), + terminate_failed=[], + instances=[pretty_instance(i) for i in instances], + ) + return results + + +def change_instance_state(filters, desired_module_state): + + # Map ansible state to ec2 state + ec2_instance_states = { + 'present': 'running', + 'started': 'running', + 'running': 'running', + 'stopped': 'stopped', + 'restarted': 'running', + 'rebooted': 'running', + 'terminated': 'terminated', + 'absent': 'terminated', + } + desired_ec2_state = ec2_instance_states[desired_module_state] + changed = set() + instances = find_instances(filters=filters) + to_change = set(i['InstanceId'] for i in instances if i['State']['Name'] != desired_ec2_state) + unchanged = set() + failure_reason = "" + + for inst in instances: + try: + if desired_ec2_state == 'terminated': + # Before terminating an instance we need for them to leave + # 'pending' or 'stopping' (if they're in those states) + if inst['State']['Name'] == 'stopping': + await_instances([inst['InstanceId']], desired_module_state='stopped', force_wait=True) + elif inst['State']['Name'] == 'pending': + await_instances([inst['InstanceId']], desired_module_state='running', force_wait=True) + + if module.check_mode: + changed.add(inst['InstanceId']) + continue + + # TODO 
use a client-token to prevent double-sends of these start/stop/terminate commands
+                # https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html
+                resp = client.terminate_instances(aws_retry=True, InstanceIds=[inst['InstanceId']])
+                for i in resp['TerminatingInstances']:
+                    changed.add(i['InstanceId'])
+            if desired_ec2_state == 'stopped':
+                # Before stopping an instance we need for them to leave
+                # 'pending'
+                if inst['State']['Name'] == 'pending':
+                    await_instances([inst['InstanceId']], desired_module_state='running', force_wait=True)
+                # Already moving to the relevant state
+                elif inst['State']['Name'] in ('stopping', 'stopped'):
+                    unchanged.add(inst['InstanceId'])
+                    continue
+
+                if module.check_mode:
+                    changed.add(inst['InstanceId'])
+                    continue
+                resp = client.stop_instances(aws_retry=True, InstanceIds=[inst['InstanceId']])
+                for i in resp['StoppingInstances']:
+                    changed.add(i['InstanceId'])
+            if desired_ec2_state == 'running':
+                if inst['State']['Name'] in ('pending', 'running'):
+                    unchanged.add(inst['InstanceId'])
+                    continue
+                elif inst['State']['Name'] == 'stopping':
+                    await_instances([inst['InstanceId']], desired_module_state='stopped', force_wait=True)
+
+                if module.check_mode:
+                    changed.add(inst['InstanceId'])
+                    continue
+
+                resp = client.start_instances(aws_retry=True, InstanceIds=[inst['InstanceId']])
+                for i in resp['StartingInstances']:
+                    changed.add(i['InstanceId'])
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            try:
+                failure_reason = to_native(e.message)
+            except AttributeError:
+                failure_reason = to_native(e)
+
+    if changed:
+        await_instances(ids=list(changed) + list(unchanged), desired_module_state=desired_module_state)
+
+    change_failed = list(to_change - changed)
+
+    if instances:
+        instances = find_instances(ids=list(i['InstanceId'] for i in instances))
+    return changed, change_failed, instances, failure_reason
+
+
+def pretty_instance(i):
+    instance = camel_dict_to_snake_dict(i, ignore_list=['Tags'])
+    instance['tags'] = boto3_tag_list_to_ansible_dict(i.get('Tags', {}))
+    return instance
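+
+# determine_iam_role() below accepts either a bare instance-profile name or a
+# full instance-profile ARN (arn:aws:iam::<account>:instance-profile/<name>);
+# it always returns the ARN, calling IAM GetInstanceProfile when given a name.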
+
+
+def determine_iam_role(name_or_arn):
+    result = parse_aws_arn(name_or_arn)
+    if result and result['service'] == 'iam' and result['resource'].startswith('instance-profile/'):
+        return name_or_arn
+    iam = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
+    try:
+        role = iam.get_instance_profile(InstanceProfileName=name_or_arn, aws_retry=True)
+        return role['InstanceProfile']['Arn']
+    except is_boto3_error_code('NoSuchEntity') as e:
+        module.fail_json_aws(e, msg="Could not find iam_instance_profile {0}".format(name_or_arn))
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg="An error occurred while searching for iam_instance_profile {0}. "
+                                    "Please try supplying the full ARN.".format(name_or_arn))
+
+
+def handle_existing(existing_matches, state):
+    tags = module.params.get('tags')
+    purge_tags = module.params.get('purge_tags')
+    name = module.params.get('name')
+
+    # Name is a tag rather than a direct parameter, we need to inject 'Name'
+    # into tags, but since tags isn't explicitly passed we'll treat it not being
+    # set as purge_tags == False
+    if name:
+        if tags is None:
+            purge_tags = False
+            tags = {}
+        tags.update({'Name': name})
+
+    changed = False
+    all_changes = list()
+
+    for instance in existing_matches:
+        changed |= ensure_ec2_tags(client, module, instance['InstanceId'], tags=tags, purge_tags=purge_tags)
+        changes = diff_instance_and_params(instance, module.params)
+        for c in changes:
+            if not module.check_mode:
+                try:
+                    client.modify_instance_attribute(aws_retry=True, **c)
+                except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+                    module.fail_json_aws(e, msg="Could not apply change {0} to existing instance.".format(str(c)))
+        all_changes.extend(changes)
+        changed |= bool(changes)
+    changed |= add_or_update_instance_profile(existing_matches[0], module.params.get('iam_instance_profile'))
+    changed |= change_network_attachments(existing_matches[0], module.params)
+
+    altered = find_instances(ids=[i['InstanceId'] for i in existing_matches])
+    alter_config_result = dict(
+        changed=changed,
+        instances=[pretty_instance(i) for i in altered],
+        instance_ids=[i['InstanceId'] for i in altered],
+        changes=all_changes,
+    )
+
+    state_results = ensure_instance_state(state)
+    alter_config_result['changed'] |= state_results.pop('changed', False)
+    result = {**state_results, **alter_config_result}
+
+    return result
+
+
+def enforce_count(existing_matches, module, desired_module_state):
+    exact_count = module.params.get('exact_count')
+
+    try:
+        current_count = len(existing_matches)
+        if current_count == exact_count:
+            module.exit_json(
+                changed=False,
+                instances=[pretty_instance(i) for i in existing_matches],
+                instance_ids=[i["InstanceId"] for i in existing_matches],
+                msg=f"{exact_count} instances already running, nothing to do.",
+            )
+
+        elif current_count < exact_count:
+            to_launch = exact_count - current_count
+            module.params['to_launch'] = to_launch
+            # launch instances
+            try:
+                ensure_present(existing_matches=existing_matches, desired_module_state=desired_module_state)
+            except botocore.exceptions.ClientError as e:
+                module.fail_json_aws(e, msg='Unable to launch instances')
+        elif current_count > exact_count:
+            to_terminate = current_count - exact_count
+            # sort the instances from least recent to most recent based on launch time
+            existing_matches = sorted(existing_matches, key=lambda inst: inst['LaunchTime'])
+            # get the instance ids of instances with the count tag on them
+            all_instance_ids = [x['InstanceId'] for x in existing_matches]
+            terminate_ids = all_instance_ids[0:to_terminate]
+            if module.check_mode:
+                module.exit_json(
+                    changed=True,
+                    terminated_ids=terminate_ids,
+                    instance_ids=all_instance_ids,
+                    msg=f"Would have terminated the following instances if not in check mode: {terminate_ids}",
+                )
+            # terminate instances
+            try:
+                client.terminate_instances(aws_retry=True, InstanceIds=terminate_ids)
+                await_instances(terminate_ids, desired_module_state='terminated', force_wait=True)
+            except is_boto3_error_code('InvalidInstanceID.NotFound'):
+                pass
+            except botocore.exceptions.ClientError as e:  # pylint: disable=duplicate-except
+                module.fail_json_aws(e, msg='Unable to terminate instances')
+            # include data for all matched instances in addition to the list of
+            # terminations, allowing for recovery of metadata from the destructive operation
+            module.exit_json(
+                changed=True,
+                msg='Successfully terminated instances.',
+                terminated_ids=terminate_ids,
+                instance_ids=all_instance_ids,
+                instances=existing_matches,
+            )
+
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+        module.fail_json_aws(e, msg="Failed to enforce instance count")
+
+
+def ensure_present(existing_matches, desired_module_state):
+    tags = dict(module.params.get('tags') or {})
+    name = module.params.get('name')
+    if name:
+        tags['Name'] = name
+
+    try:
+        instance_spec = build_run_instance_spec(module.params)
+        # If check mode is enabled, suspend the 'ensure' logic and report what would change.
+        if module.check_mode:
+            if existing_matches:
+                instance_ids = [x["InstanceId"] for x in existing_matches]
+                module.exit_json(
+                    changed=True,
+                    instance_ids=instance_ids,
+                    instances=existing_matches,
+                    spec=instance_spec,
+                    msg="Would have launched instances if not in check_mode.",
+                )
+            else:
+                module.exit_json(
+                    changed=True,
+                    spec=instance_spec,
+                    msg="Would have launched instances if not in check_mode.",
+                )
+        instance_response = run_instances(**instance_spec)
+        instances = instance_response['Instances']
+        instance_ids = [i['InstanceId'] for i in instances]
+
+        # Wait for instances to exist in the EC2 API before
+        # attempting to modify them
+        await_instances(instance_ids, desired_module_state='present', force_wait=True)
+
+        for ins in instances:
+            # Wait for instances to exist (don't check state)
+            try:
+                AWSRetry.jittered_backoff(
+                    catch_extra_error_codes=['InvalidInstanceID.NotFound'],
+                )(
+                    client.describe_instance_status
+                )(
+                    InstanceIds=[ins['InstanceId']],
+                    IncludeAllInstances=True,
+                )
+            except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+                module.fail_json_aws(e, msg="Failed to fetch status of new EC2 instance")
+            changes = diff_instance_and_params(ins, module.params, skip=['UserData', 'EbsOptimized'])
+            for c in changes:
+                try:
+                    client.modify_instance_attribute(aws_retry=True, **c)
+                except botocore.exceptions.ClientError as e:
+                    module.fail_json_aws(e, msg="Could not apply change {0} to new instance.".format(str(c)))
+        if existing_matches:
+            # If we came from enforce_count, create a second list to distinguish
+            # between existing and new instances when returning the entire cohort
+            all_instance_ids = [x["InstanceId"] for x in existing_matches] + instance_ids
+        if not module.params.get("wait"):
+            if existing_matches:
+                module.exit_json(
+                    changed=True,
+                    changed_ids=instance_ids,
+                    instance_ids=all_instance_ids,
+                    spec=instance_spec,
+                )
+            else:
+                module.exit_json(
+                    changed=True,
+                    instance_ids=instance_ids,
+                    spec=instance_spec,
+                )
+        await_instances(instance_ids, desired_module_state=desired_module_state)
+        instances = find_instances(ids=instance_ids)
+
+        if existing_matches:
+            all_instances = existing_matches + instances
+            module.exit_json(
+                changed=True,
+                changed_ids=instance_ids,
+                instance_ids=all_instance_ids,
+                instances=[pretty_instance(i) for i in all_instances],
+                spec=instance_spec,
+            )
+        else:
+            module.exit_json(
+                changed=True,
+                instance_ids=instance_ids,
+                instances=[pretty_instance(i) for i in instances],
+                spec=instance_spec,
+            )
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+        module.fail_json_aws(e, msg="Failed to create new EC2 instance")
+
+
+def run_instances(**instance_spec):
+    try:
+        return client.run_instances(aws_retry=True, **instance_spec)
+    except is_boto3_error_message('Invalid IAM Instance Profile ARN'):
+        # If the instance profile has just been created, it takes some time to be visible to EC2,
+        # so we wait 10 seconds and retry the run_instances call.
+        time.sleep(10)
+        return client.run_instances(aws_retry=True, **instance_spec)
+
+
+def build_filters():
+    filters = {
+        # all states except shutting-down and terminated
+        'instance-state-name': ['pending', 'running', 'stopping', 'stopped'],
+    }
+    if isinstance(module.params.get('instance_ids'), string_types):
+        filters['instance-id'] = [module.params.get('instance_ids')]
+    elif isinstance(module.params.get('instance_ids'), list) and len(module.params.get('instance_ids')):
+        filters['instance-id'] = module.params.get('instance_ids')
+    else:
+        if not module.params.get('vpc_subnet_id'):
+            if module.params.get('network'):
+                # grab AZ from one of the ENIs
+                ints = module.params.get('network').get('interfaces')
+                if ints:
+                    filters['network-interface.network-interface-id'] = []
+                    for i in ints:
+                        if isinstance(i, dict):
+                            i = i['id']
+                        filters['network-interface.network-interface-id'].append(i)
+            else:
+                sub = get_default_subnet(get_default_vpc(), availability_zone=module.params.get('availability_zone'))
+                filters['subnet-id'] = sub['SubnetId']
+        else:
+            filters['subnet-id'] = [module.params.get('vpc_subnet_id')]
+
+        if module.params.get('name'):
+            filters['tag:Name'] = [module.params.get('name')]
+        elif module.params.get('tags'):
+            name_tag = module.params.get('tags').get('Name', None)
+            if name_tag:
+                filters['tag:Name'] = [name_tag]
+
+        if module.params.get('image_id'):
+            filters['image-id'] = [module.params.get('image_id')]
+        elif (module.params.get('image') or {}).get('id'):
+            filters['image-id'] = [module.params.get('image', {}).get('id')]
+    return filters
+
+
+def main():
+    global module
+    global client
+
+    argument_spec = dict(
+        state=dict(default='present', choices=['present', 'started', 'running', 'stopped', 'restarted', 'rebooted', 'terminated', 'absent']),
+        wait=dict(default=True, type='bool'),
+        wait_timeout=dict(default=600, type='int'),
+        count=dict(type='int'),
+        exact_count=dict(type='int'),
+        image=dict(type='dict'),
+        image_id=dict(type='str'),
+        instance_type=dict(type='str'),
+        user_data=dict(type='str'),
+        aap_callback=dict(
+            type='dict', aliases=['tower_callback'],
+            required_if=[
+                ('windows', False, ('tower_address', 'job_template_id', 'host_config_key',), False),
+            ],
+            options=dict(
+                windows=dict(type='bool', default=False),
+                set_password=dict(type='str', no_log=True),
+                tower_address=dict(type='str'),
+                job_template_id=dict(type='str'),
+                host_config_key=dict(type='str', no_log=True),
+            ),
+        ),
+        ebs_optimized=dict(type='bool'),
+        vpc_subnet_id=dict(type='str', aliases=['subnet_id']),
+        availability_zone=dict(type='str'),
+        security_groups=dict(default=[], type='list', elements='str'),
+        security_group=dict(type='str'),
+        iam_instance_profile=dict(type='str', aliases=['instance_role']),
+        name=dict(type='str'),
+        tags=dict(type='dict', aliases=['resource_tags']),
+        purge_tags=dict(type='bool', default=True),
+        filters=dict(type='dict', default=None),
+        launch_template=dict(type='dict'),
+        key_name=dict(type='str'),
+        cpu_credit_specification=dict(type='str', choices=['standard', 'unlimited']),
+        cpu_options=dict(type='dict', options=dict(
+            core_count=dict(type='int', required=True),
+            threads_per_core=dict(type='int', choices=[1, 2], required=True)
+        )),
+        tenancy=dict(type='str', choices=['dedicated', 'default']),
+        placement_group=dict(type='str'),
+        instance_initiated_shutdown_behavior=dict(type='str', choices=['stop', 'terminate']),
+        termination_protection=dict(type='bool'),
+        hibernation_options=dict(type='bool', default=False),
+        detailed_monitoring=dict(type='bool'),
+        instance_ids=dict(default=[], type='list', elements='str'),
+        network=dict(default=None, type='dict'),
+        volumes=dict(default=None, type='list', elements='dict'),
+        metadata_options=dict(
+            type='dict',
+            options=dict(
+                http_endpoint=dict(choices=['enabled', 'disabled'], default='enabled'),
+                http_put_response_hop_limit=dict(type='int', default=1),
+                http_tokens=dict(choices=['optional', 'required'], default='optional'),
+                http_protocol_ipv6=dict(choices=['disabled', 'enabled'], default='disabled'),
+                instance_metadata_tags=dict(choices=['disabled', 'enabled'], default='disabled'),
+            )
+        ),
+    )
+    # running/present are synonyms
+    # as are terminated/absent
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        mutually_exclusive=[
+            ['security_groups', 'security_group'],
+            ['availability_zone', 'vpc_subnet_id'],
+            ['aap_callback', 'user_data'],
+            ['image_id', 'image'],
+            ['exact_count', 'count'],
+            ['exact_count', 'instance_ids'],
+        ],
+        supports_check_mode=True
+    )
+
+    if not module.params.get('instance_type') and not module.params.get('launch_template'):
+        if module.params.get('state') not in ('absent', 'stopped'):
+            if module.params.get('count') or module.params.get('exact_count'):
+                module.deprecate("The default value for instance_type has been deprecated; in the future you must set an instance_type or a launch_template",
+                                 date='2023-01-01', collection_name='amazon.aws')
+    result = dict()
+
+    if module.params.get('network'):
+        if module.params.get('network').get('interfaces'):
+            if module.params.get('security_group'):
+                module.fail_json(msg="Parameter network.interfaces can't be used with security_group")
+            if module.params.get('security_groups'):
+                module.fail_json(msg="Parameter network.interfaces can't be used with security_groups")
+
+    state = module.params.get('state')
+
+    retry_decorator = AWSRetry.jittered_backoff(
+        catch_extra_error_codes=[
+            'IncorrectState',
+            'InsufficientInstanceCapacity',
+        ]
+    )
+    client = module.client('ec2', retry_decorator=retry_decorator)
+
+    if module.params.get('filters') is None:
+        module.params['filters'] = build_filters()
+
+    existing_matches = find_instances(filters=module.params.get('filters'))
+
+    if state in ('terminated', 'absent'):
+        if existing_matches:
+            result = ensure_instance_state(state)
+        else:
+            result = dict(
+                msg='No matching instances found',
+                changed=False,
+            )
+    elif module.params.get('exact_count'):
+        enforce_count(existing_matches, module, desired_module_state=state)
+    elif existing_matches and not module.params.get('count'):
+        for match in existing_matches:
+            warn_if_public_ip_assignment_changed(match)
+            warn_if_cpu_options_changed(match)
+        result = handle_existing(existing_matches, state)
+    else:
+        result = ensure_present(existing_matches=existing_matches, desired_module_state=state)
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_instance_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_instance_info.py
new file mode 100644
index 000000000..e1ef2ec41
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_instance_info.py
@@ -0,0 +1,588 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_instance_info
+version_added: 1.0.0
+short_description: Gather information about ec2 instances in AWS
+description:
+  - Gather information about ec2 instances in AWS.
+author:
+  - Michael Schuett (@michaeljs1990)
+  - Rob White (@wimnat)
+options:
+  instance_ids:
+    description:
+      - If you specify one or more instance IDs, only instances that have the specified IDs are returned.
+    required: false
+    type: list
+    elements: str
+    default: []
+  filters:
+    description:
+      - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
+        U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html) for possible filters. Filter
+        names and values are case sensitive.
+    required: false
+    default: {}
+    type: dict
+  minimum_uptime:
+    description:
+      - Minimum running uptime in minutes of instances. For example, if I(uptime) is C(60), return all instances that have run more than 60 minutes.
+    required: false
+    aliases: ['uptime']
+    type: int
+
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all instances
+  amazon.aws.ec2_instance_info:
+
+- name: Gather information about all instances in AZ ap-southeast-2a
+  amazon.aws.ec2_instance_info:
+    filters:
+      availability-zone: ap-southeast-2a
+
+- name: Gather information about a particular instance using ID
+  amazon.aws.ec2_instance_info:
+    instance_ids:
+      - i-12345678
+
+- name: Gather information about any instance with a tag key Name and value Example
+  amazon.aws.ec2_instance_info:
+    filters:
+      "tag:Name": Example
+
+- name: Gather information about any instance in states "shutting-down", "stopping", "stopped"
+  amazon.aws.ec2_instance_info:
+    filters:
+      instance-state-name: [ "shutting-down", "stopping", "stopped" ]
+
+- name: Gather information about any instance with Name beginning with RHEL and an uptime of at least 60 minutes
+  amazon.aws.ec2_instance_info:
+    region: "{{ ec2_region }}"
+    uptime: 60
+    filters:
+      "tag:Name": "RHEL-*"
+      instance-state-name: [ "running" ]
+  register: ec2_node_info
+
+'''
+
+RETURN = r'''
+instances:
+    description: A list of ec2 instances.
+    returned: always
+    type: complex
+    contains:
+        ami_launch_index:
+            description: The AMI launch index, which can be used to find this instance in the launch group.
+            returned: always
+            type: int
+            sample: 0
+        architecture:
+            description: The architecture of the image.
+            returned: always
+            type: str
+            sample: x86_64
+        block_device_mappings:
+            description: Any block device mapping entries for the instance.
+            returned: always
+            type: complex
+            contains:
+                device_name:
+                    description: The device name exposed to the instance (for example, /dev/sdh or xvdh).
+                    returned: always
+                    type: str
+                    sample: /dev/sdh
+                ebs:
+                    description: Parameters used to automatically set up EBS volumes when the instance is launched.
+                    returned: always
+                    type: complex
+                    contains:
+                        attach_time:
+                            description: The time stamp when the attachment initiated.
+                            returned: always
+                            type: str
+                            sample: "2017-03-23T22:51:24+00:00"
+                        delete_on_termination:
+                            description: Indicates whether the volume is deleted on instance termination.
+                            returned: always
+                            type: bool
+                            sample: true
+                        status:
+                            description: The attachment state.
+                            returned: always
+                            type: str
+                            sample: attached
+                        volume_id:
+                            description: The ID of the EBS volume.
+                            returned: always
+                            type: str
+                            sample: vol-12345678
+        cpu_options:
+            description: The CPU options set for the instance.
+            returned: always
+            type: complex
+            contains:
+                core_count:
+                    description: The number of CPU cores for the instance.
+                    returned: always
+                    type: int
+                    sample: 1
+                threads_per_core:
+                    description: The number of threads per CPU core. On supported instances, a value of 1 means Intel Hyper-Threading Technology is disabled.
+                    returned: always
+                    type: int
+                    sample: 1
+        client_token:
+            description: The idempotency token you provided when you launched the instance, if applicable.
+            returned: always
+            type: str
+            sample: mytoken
+        ebs_optimized:
+            description: Indicates whether the instance is optimized for EBS I/O.
+            returned: always
+            type: bool
+            sample: false
+        hypervisor:
+            description: The hypervisor type of the instance.
+            returned: always
+            type: str
+            sample: xen
+        iam_instance_profile:
+            description: The IAM instance profile associated with the instance, if applicable.
+            returned: always
+            type: complex
+            contains:
+                arn:
+                    description: The Amazon Resource Name (ARN) of the instance profile.
+                    returned: always
+                    type: str
+                    sample: "arn:aws:iam::123456789012:instance-profile/myprofile"
+                id:
+                    description: The ID of the instance profile.
+                    returned: always
+                    type: str
+                    sample: JFJ397FDG400FG9FD1N
+        image_id:
+            description: The ID of the AMI used to launch the instance.
+            returned: always
+            type: str
+            sample: ami-0011223344
+        instance_id:
+            description: The ID of the instance.
+            returned: always
+            type: str
+            sample: i-012345678
+        instance_type:
+            description: The instance type size of the running instance.
+            returned: always
+            type: str
+            sample: t2.micro
+        key_name:
+            description: The name of the key pair, if this instance was launched with an associated key pair.
+            returned: always
+            type: str
+            sample: my-key
+        launch_time:
+            description: The time the instance was launched.
+            returned: always
+            type: str
+            sample: "2017-03-23T22:51:24+00:00"
+        monitoring:
+            description: The monitoring for the instance.
+            returned: always
+            type: complex
+            contains:
+                state:
+                    description: Indicates whether detailed monitoring is enabled. Otherwise, basic monitoring is enabled.
+                    returned: always
+                    type: str
+                    sample: disabled
+        network_interfaces:
+            description: One or more network interfaces for the instance.
+            returned: always
+            type: complex
+            contains:
+                association:
+                    description: The association information for an Elastic IP address (IPv4) associated with the network interface.
+                    returned: always
+                    type: complex
+                    contains:
+                        ip_owner_id:
+                            description: The ID of the owner of the Elastic IP address.
+                            returned: always
+                            type: str
+                            sample: amazon
+                        public_dns_name:
+                            description: The public DNS name.
+                            returned: always
+                            type: str
+                            sample: ""
+                        public_ip:
+                            description: The public IP address or Elastic IP address bound to the network interface.
+                            returned: always
+                            type: str
+                            sample: 1.2.3.4
+                attachment:
+                    description: The network interface attachment.
+                    returned: always
+                    type: complex
+                    contains:
+                        attach_time:
+                            description: The time stamp when the attachment initiated.
+                            returned: always
+                            type: str
+                            sample: "2017-03-23T22:51:24+00:00"
+                        attachment_id:
+                            description: The ID of the network interface attachment.
+ returned: always + type: str + sample: eni-attach-3aff3f + delete_on_termination: + description: Indicates whether the network interface is deleted when the instance is terminated. + returned: always + type: bool + sample: true + device_index: + description: The index of the device on the instance for the network interface attachment. + returned: always + type: int + sample: 0 + status: + description: The attachment state. + returned: always + type: str + sample: attached + description: + description: The description. + returned: always + type: str + sample: My interface + groups: + description: One or more security groups. + returned: always + type: list + elements: dict + contains: + group_id: + description: The ID of the security group. + returned: always + type: str + sample: sg-abcdef12 + group_name: + description: The name of the security group. + returned: always + type: str + sample: mygroup + ipv6_addresses: + description: One or more IPv6 addresses associated with the network interface. + returned: always + type: list + elements: dict + contains: + ipv6_address: + description: The IPv6 address. + returned: always + type: str + sample: "2001:0db8:85a3:0000:0000:8a2e:0370:7334" + mac_address: + description: The MAC address. + returned: always + type: str + sample: "00:11:22:33:44:55" + network_interface_id: + description: The ID of the network interface. + returned: always + type: str + sample: eni-01234567 + owner_id: + description: The AWS account ID of the owner of the network interface. + returned: always + type: str + sample: 01234567890 + private_ip_address: + description: The IPv4 address of the network interface within the subnet. + returned: always + type: str + sample: 10.0.0.1 + private_ip_addresses: + description: The private IPv4 addresses associated with the network interface. + returned: always + type: list + elements: dict + contains: + association: + description: The association information for an Elastic IP address (IPv4) associated with the network interface. + returned: always + type: complex + contains: + ip_owner_id: + description: The ID of the owner of the Elastic IP address. + returned: always + type: str + sample: amazon + public_dns_name: + description: The public DNS name. + returned: always + type: str + sample: "" + public_ip: + description: The public IP address or Elastic IP address bound to the network interface. + returned: always + type: str + sample: 1.2.3.4 + primary: + description: Indicates whether this IPv4 address is the primary private IP address of the network interface. + returned: always + type: bool + sample: true + private_ip_address: + description: The private IPv4 address of the network interface. + returned: always + type: str + sample: 10.0.0.1 + source_dest_check: + description: Indicates whether source/destination checking is enabled. + returned: always + type: bool + sample: true + status: + description: The status of the network interface. + returned: always + type: str + sample: in-use + subnet_id: + description: The ID of the subnet for the network interface. + returned: always + type: str + sample: subnet-0123456 + vpc_id: + description: The ID of the VPC for the network interface. + returned: always + type: str + sample: vpc-0123456 + placement: + description: The location where the instance launched, if applicable. + returned: always + type: complex + contains: + availability_zone: + description: The Availability Zone of the instance. 
+ returned: always + type: str + sample: ap-southeast-2a + group_name: + description: The name of the placement group the instance is in (for cluster compute instances). + returned: always + type: str + sample: "" + tenancy: + description: The tenancy of the instance (if the instance is running in a VPC). + returned: always + type: str + sample: default + private_dns_name: + description: The private DNS name. + returned: always + type: str + sample: ip-10-0-0-1.ap-southeast-2.compute.internal + private_ip_address: + description: The IPv4 address of the network interface within the subnet. + returned: always + type: str + sample: 10.0.0.1 + product_codes: + description: One or more product codes. + returned: always + type: list + elements: dict + contains: + product_code_id: + description: The product code. + returned: always + type: str + sample: aw0evgkw8ef3n2498gndfgasdfsd5cce + product_code_type: + description: The type of product code. + returned: always + type: str + sample: marketplace + public_dns_name: + description: The public DNS name assigned to the instance. + returned: always + type: str + sample: + public_ip_address: + description: The public IPv4 address assigned to the instance. + returned: always + type: str + sample: 52.0.0.1 + root_device_name: + description: The device name of the root device. + returned: always + type: str + sample: /dev/sda1 + root_device_type: + description: The type of root device used by the AMI. + returned: always + type: str + sample: ebs + security_groups: + description: One or more security groups for the instance. + returned: always + type: list + elements: dict + contains: + group_id: + description: The ID of the security group. + returned: always + type: str + sample: sg-0123456 + group_name: + description: The name of the security group. + returned: always + type: str + sample: my-security-group + source_dest_check: + description: Indicates whether source/destination checking is enabled. + returned: always + type: bool + sample: true + state: + description: The current state of the instance. + returned: always + type: complex + contains: + code: + description: The low byte represents the state. + returned: always + type: int + sample: 16 + name: + description: The name of the state. + returned: always + type: str + sample: running + state_transition_reason: + description: The reason for the most recent state transition. + returned: always + type: str + sample: + subnet_id: + description: The ID of the subnet in which the instance is running. + returned: always + type: str + sample: subnet-00abcdef + tags: + description: Any tags assigned to the instance. + returned: always + type: dict + sample: + virtualization_type: + description: The type of virtualization of the AMI. + returned: always + type: str + sample: hvm + vpc_id: + description: The ID of the VPC the instance is in. 
+ returned: always + type: str + sample: vpc-0011223344 +''' + +import datetime + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict + + +@AWSRetry.jittered_backoff() +def _describe_instances(connection, **params): + paginator = connection.get_paginator('describe_instances') + return paginator.paginate(**params).build_full_result() + + +def list_ec2_instances(connection, module): + + instance_ids = module.params.get("instance_ids") + uptime = module.params.get('minimum_uptime') + filters = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + + try: + reservations = _describe_instances(connection, InstanceIds=instance_ids, Filters=filters) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to list ec2 instances") + + instances = [] + + if uptime: + timedelta = int(uptime) + oldest_launch_time = datetime.datetime.utcnow() - datetime.timedelta(minutes=timedelta) + # Keep only instances launched before the minimum-uptime cutoff + for reservation in reservations['Reservations']: + instances += [instance for instance in reservation['Instances'] if instance['LaunchTime'].replace(tzinfo=None) < oldest_launch_time] + else: + for reservation in reservations['Reservations']: + instances = instances + reservation['Instances'] + + # Turn the boto3 result into ansible_friendly_snaked_names + snaked_instances = [camel_dict_to_snake_dict(instance) for instance in instances] + + # Turn the boto3 tag list into an ansible-friendly tag dictionary + for instance in snaked_instances: + instance['tags'] = boto3_tag_list_to_ansible_dict(instance.get('tags', []), 'key', 'value') + + module.exit_json(instances=snaked_instances) + + +def main(): + + argument_spec = dict( + minimum_uptime=dict(required=False, type='int', default=None, aliases=['uptime']), + instance_ids=dict(default=[], type='list', elements='str'), + filters=dict(default={}, type='dict') + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['instance_ids', 'filters'] + ], + supports_check_mode=True, + ) + + try: + connection = module.client('ec2') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') + + list_ec2_instances(connection, module) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_key.py b/ansible_collections/amazon/aws/plugins/modules/ec2_key.py new file mode 100644 index 000000000..8358d9dba --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_key.py @@ -0,0 +1,397 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_key +version_added: 1.0.0 +short_description: Create or delete an EC2 key pair +description: + - Create or delete an EC2 key pair.
+options: + name: + description: + - Name of the key pair. + required: true + type: str + key_material: + description: + - Public key material. + required: false + type: str + force: + description: + - Force overwrite of already existing key pair if key has changed. + required: false + default: true + type: bool + state: + description: + - Create or delete keypair. + required: false + choices: [ present, absent ] + default: 'present' + type: str + key_type: + description: + - The type of key pair to create. + - Note that ED25519 keys are not supported for Windows instances, + EC2 Instance Connect, and EC2 Serial Console. + - By default Amazon will create an RSA key. + - Mutually exclusive with parameter I(key_material). + - Requires at least botocore version 1.21.23. + type: str + choices: + - rsa + - ed25519 + version_added: 3.1.0 +notes: + - Support for I(tags) and I(purge_tags) was added in release 2.1.0. +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags + - amazon.aws.boto3 + +author: + - "Vincent Viallet (@zbal)" + - "Prasad Katti (@prasadkatti)" +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: create a new EC2 key pair, returns generated private key + amazon.aws.ec2_key: + name: my_keypair + +- name: create key pair using provided key_material + amazon.aws.ec2_key: + name: my_keypair + key_material: 'ssh-rsa AAAAxyz...== me@example.com' + +- name: create key pair using key_material obtained using 'file' lookup plugin + amazon.aws.ec2_key: + name: my_keypair + key_material: "{{ lookup('file', '/path/to/public_key/id_rsa.pub') }}" + +- name: Create ED25519 key pair + amazon.aws.ec2_key: + name: my_keypair + key_type: ed25519 + +# try creating a key pair with the name of an already existing keypair +# but don't overwrite it even if the key is different (force=false) +- name: try creating a key pair with name of an already existing keypair + amazon.aws.ec2_key: + name: my_existing_keypair + key_material: 'ssh-rsa AAAAxyz...== me@example.com' + force: false + +- name: remove key pair by name + amazon.aws.ec2_key: + name: my_keypair + state: absent +''' + +RETURN = ''' +changed: + description: whether a keypair was created/deleted + returned: always + type: bool + sample: true +msg: + description: short message describing the action taken + returned: always + type: str + sample: key pair created +key: + description: details of the keypair (this is set to null when state is absent) + returned: always + type: complex + contains: + fingerprint: + description: fingerprint of the key + returned: when state is present + type: str + sample: 'b0:22:49:61:d9:44:9d:0c:7e:ac:8a:32:93:21:6c:e8:fb:59:62:43' + name: + description: name of the keypair + returned: when state is present + type: str + sample: my_keypair + id: + description: id of the keypair + returned: when state is present + type: str + sample: key-123456789abc + tags: + description: a dictionary representing the tags attached to the key pair + returned: when state is present + type: dict + sample: '{"my_key": "my value"}' + private_key: + description: private key of a newly created keypair + returned: when a new keypair is created by AWS (key_material is not provided) + type: str + sample: '-----BEGIN RSA PRIVATE KEY----- + MIIEowIBAAKC... 
+ -----END RSA PRIVATE KEY-----' + type: + description: type of a newly created keypair + returned: when a new keypair is created by AWS + type: str + sample: rsa + version_added: 3.1.0 +''' + +import uuid + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible.module_utils._text import to_bytes + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict + + +class Ec2KeyFailure(Exception): + def __init__(self, message=None, original_e=None): + super().__init__(message) + self.original_e = original_e + self.message = message + + +def _import_key_pair(ec2_client, name, key_material, tag_spec=None): + params = { + 'KeyName': name, + 'PublicKeyMaterial': to_bytes(key_material), + 'TagSpecifications': tag_spec + } + + params = scrub_none_parameters(params) + + try: + key = ec2_client.import_key_pair(aws_retry=True, **params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err: + raise Ec2KeyFailure(err, "error importing key") + return key + + +def extract_key_data(key, key_type=None): + data = { + 'name': key['KeyName'], + 'fingerprint': key['KeyFingerprint'], + 'id': key['KeyPairId'], + 'tags': boto3_tag_list_to_ansible_dict(key.get('Tags') or []), + # KeyMaterial is returned by create_key_pair, but not by describe_key_pairs + 'private_key': key.get('KeyMaterial'), + # KeyType is only set by describe_key_pairs + 'type': key.get('KeyType') or key_type + } + + return scrub_none_parameters(data) + + +def get_key_fingerprint(check_mode, ec2_client, key_material): + ''' + EC2's fingerprints are non-trivial to generate, so push this key + to a temporary name and make ec2 calculate the fingerprint for us. 
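+ In outline (an illustrative sketch using this module's own helpers; the + throwaway name shown is hypothetical): + temp_key = _import_key_pair(ec2_client, 'ansible-<uuid4>', key_material) + fingerprint = temp_key['KeyFingerprint'] + delete_key_pair(check_mode, ec2_client, 'ansible-<uuid4>', finish_task=False)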
+ http://blog.jbrowne.com/?p=23 + https://forums.aws.amazon.com/thread.jspa?messageID=352828 + ''' + # find an unused name + name_in_use = True + while name_in_use: + random_name = "ansible-" + str(uuid.uuid4()) + name_in_use = find_key_pair(ec2_client, random_name) + temp_key = _import_key_pair(ec2_client, random_name, key_material) + delete_key_pair(check_mode, ec2_client, random_name, finish_task=False) + return temp_key['KeyFingerprint'] + + +def find_key_pair(ec2_client, name): + try: + key = ec2_client.describe_key_pairs(aws_retry=True, KeyNames=[name]) + except is_boto3_error_code('InvalidKeyPair.NotFound'): + return None + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err: # pylint: disable=duplicate-except + raise Ec2KeyFailure(err, "error finding keypair") + + # guard against an empty response rather than indexing into it blindly + if not key.get('KeyPairs'): + return None + return key['KeyPairs'][0] + + +def _create_key_pair(ec2_client, name, tag_spec, key_type): + params = { + 'KeyName': name, + 'TagSpecifications': tag_spec, + 'KeyType': key_type, + } + + params = scrub_none_parameters(params) + + try: + key = ec2_client.create_key_pair(aws_retry=True, **params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err: + raise Ec2KeyFailure(err, "error creating key") + return key + + +def create_new_key_pair(ec2_client, name, key_material, key_type, tags, check_mode): + ''' + The key does not exist; create a new key pair. + ''' + if check_mode: + return {'changed': True, 'key': None, 'msg': 'key pair created'} + + tag_spec = boto3_tag_specifications(tags, ['key-pair']) + if key_material: + key = _import_key_pair(ec2_client, name, key_material, tag_spec) + else: + key = _create_key_pair(ec2_client, name, tag_spec, key_type) + key_data = extract_key_data(key, key_type) + + result = {'changed': True, 'key': key_data, 'msg': 'key pair created'} + return result + + +def update_key_pair_by_key_material(check_mode, ec2_client, name, key, key_material, tag_spec): + if check_mode: + return {'changed': True, 'key': None, 'msg': 'key pair updated'} + new_fingerprint = get_key_fingerprint(check_mode, ec2_client, key_material) + changed = False + msg = "key pair already exists" + if key['KeyFingerprint'] != new_fingerprint: + delete_key_pair(check_mode, ec2_client, name, finish_task=False) + key = _import_key_pair(ec2_client, name, key_material, tag_spec) + msg = "key pair updated" + changed = True + key_data = extract_key_data(key) + return {"changed": changed, "key": key_data, "msg": msg} + + +def update_key_pair_by_key_type(check_mode, ec2_client, name, key_type, tag_spec): + if check_mode: + return {'changed': True, 'key': None, 'msg': 'key pair updated'} + else: + delete_key_pair(check_mode, ec2_client, name, finish_task=False) + key = _create_key_pair(ec2_client, name, tag_spec, key_type) + key_data = extract_key_data(key, key_type) + return {'changed': True, 'key': key_data, 'msg': "key pair updated"} + + +def _delete_key_pair(ec2_client, key_name): + try: + ec2_client.delete_key_pair(aws_retry=True, KeyName=key_name) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err: + raise Ec2KeyFailure(err, "error deleting key") + + +def delete_key_pair(check_mode, ec2_client, name, finish_task=True): + key = find_key_pair(ec2_client, name) + + if key and check_mode: + result = {'changed': True, 'key': None, 'msg': 'key deleted'} + elif not key: + result = {'key': None, 'msg': 'key did not exist'} + else: + _delete_key_pair(ec2_client, name) + if not finish_task: + return + result =
{'changed': True, 'key': None, 'msg': 'key deleted'} + + return result + + +def handle_existing_key_pair_update(module, ec2_client, name, key): + key_material = module.params.get('key_material') + force = module.params.get('force') + key_type = module.params.get('key_type') + tags = module.params.get('tags') + purge_tags = module.params.get('purge_tags') + tag_spec = boto3_tag_specifications(tags, ['key-pair']) + check_mode = module.check_mode + if key_material and force: + result = update_key_pair_by_key_material(check_mode, ec2_client, name, key, key_material, tag_spec) + elif key_type and key_type != key['KeyType']: + result = update_key_pair_by_key_type(check_mode, ec2_client, name, key_type, tag_spec) + else: + changed = False + changed |= ensure_ec2_tags(ec2_client, module, key['KeyPairId'], tags=tags, purge_tags=purge_tags) + key = find_key_pair(ec2_client, name) + key_data = extract_key_data(key) + result = {"changed": changed, "key": key_data, "msg": "key pair already exists"} + return result + + +def main(): + + argument_spec = dict( + name=dict(required=True), + key_material=dict(no_log=False), + force=dict(type='bool', default=True), + state=dict(default='present', choices=['present', 'absent']), + tags=dict(type='dict', aliases=['resource_tags']), + purge_tags=dict(type='bool', default=True), + key_type=dict(type='str', choices=['rsa', 'ed25519']), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['key_material', 'key_type'] + ], + supports_check_mode=True + ) + + ec2_client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + + name = module.params['name'] + state = module.params.get('state') + key_material = module.params.get('key_material') + key_type = module.params.get('key_type') + tags = module.params.get('tags') + + result = {} + + if key_type: + module.require_botocore_at_least('1.21.23', reason='to set the key_type for a keypair') + try: + if state == 'absent': + result = delete_key_pair(module.check_mode, ec2_client, name) + + elif state == 'present': + # check if key already exists + key = find_key_pair(ec2_client, name) + if key: + result = handle_existing_key_pair_update(module, ec2_client, name, key) + else: + result = create_new_key_pair(ec2_client, name, key_material, key_type, tags, module.check_mode) + + except Ec2KeyFailure as e: + if e.original_e: + module.fail_json_aws(e.original_e, e.message) + else: + module.fail_json(e.message) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_metadata_facts.py b/ansible_collections/amazon/aws/plugins/modules/ec2_metadata_facts.py new file mode 100644 index 000000000..f7e9d509f --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_metadata_facts.py @@ -0,0 +1,629 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_metadata_facts +version_added: 1.0.0 +short_description: Gathers facts (instance metadata) about remote hosts within EC2 +author: + - Silviu Dicu (@silviud) + - Vinay Dandekar (@roadmapper) +description: + - This module fetches data from the instance metadata endpoint in EC2 as per + U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html). 
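+ # In outline, the exchange performed by the code below (see fetch_session_token() and _fetch()) is: + # PUT http://169.254.169.254/latest/api/token with header X-aws-ec2-metadata-token-ttl-seconds: 60 + # GET http://169.254.169.254/latest/meta-data/... with header X-aws-ec2-metadata-token: <token>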
+ - The module must be called from within the EC2 instance itself. + - The module is configured to utilize the session oriented Instance Metadata Service v2 (IMDSv2) + U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html). + - If the HttpEndpoint parameter + U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyInstanceMetadataOptions.html#API_ModifyInstanceMetadataOptions_RequestParameters) + is set to disabled for the EC2 instance, the module will return an error while retrieving a session token. +notes: + - Parameters to filter on ec2_metadata_facts may be added later. +''' + +EXAMPLES = ''' +# Gather EC2 metadata facts +- amazon.aws.ec2_metadata_facts: + +- debug: + msg: "This instance is a t1.micro" + when: ansible_ec2_instance_type == "t1.micro" +''' + +RETURN = ''' +ansible_facts: + description: Dictionary of new facts representing discovered properties of the EC2 instance. + returned: changed + type: complex + contains: + ansible_ec2_ami_id: + description: The AMI ID used to launch the instance. + type: str + sample: "ami-XXXXXXXX" + ansible_ec2_ami_launch_index: + description: + - If you started more than one instance at the same time, this value indicates the order in which the instance was launched. + - The value of the first instance launched is 0. + type: str + sample: "0" + ansible_ec2_ami_manifest_path: + description: + - The path to the AMI manifest file in Amazon S3. + - If you used an Amazon EBS-backed AMI to launch the instance, the returned result is unknown. + type: str + sample: "(unknown)" + ansible_ec2_ancestor_ami_ids: + description: + - The AMI IDs of any instances that were rebundled to create this AMI. + - This value will only exist if the AMI manifest file contained an ancestor-amis key. + type: str + sample: "(unknown)" + ansible_ec2_block_device_mapping_ami: + description: The virtual device that contains the root/boot file system. + type: str + sample: "/dev/sda1" + ansible_ec2_block_device_mapping_ebsN: + description: + - The virtual devices associated with Amazon EBS volumes, if any are present. + - Amazon EBS volumes are only available in metadata if they were present at launch time or when the instance was last started. + - The N indicates the index of the Amazon EBS volume (such as ebs1 or ebs2). + type: str + sample: "/dev/xvdb" + ansible_ec2_block_device_mapping_ephemeralN: + description: The virtual devices associated with ephemeral devices, if any are present. The N indicates the index of the ephemeral volume. + type: str + sample: "/dev/xvdc" + ansible_ec2_block_device_mapping_root: + description: + - The virtual devices or partitions associated with the root devices, or partitions on the virtual device, + where the root (/ or C) file system is associated with the given instance. + type: str + sample: "/dev/sda1" + ansible_ec2_block_device_mapping_swap: + description: The virtual devices associated with swap. Not always present. + type: str + sample: "/dev/sda2" + ansible_ec2_fws_instance_monitoring: + description: "Value showing whether the customer has enabled detailed one-minute monitoring in CloudWatch." + type: str + sample: "enabled" + ansible_ec2_hostname: + description: + - The private IPv4 DNS hostname of the instance. + - In cases where multiple network interfaces are present, this refers to the eth0 device (the device for which the device number is 0). 
+ type: str + sample: "ip-10-0-0-1.ec2.internal" + ansible_ec2_iam_info: + description: + - If there is an IAM role associated with the instance, contains information about the last time the instance profile was updated, + including the instance's LastUpdated date, InstanceProfileArn, and InstanceProfileId. Otherwise, not present. + type: complex + sample: "" + contains: + LastUpdated: + description: The last time the InstanceProfile association with the Instance changed. + type: str + InstanceProfileArn: + description: The ARN of the InstanceProfile associated with the Instance. + type: str + InstanceProfileId: + description: The ID of the InstanceProfile associated with the Instance. + type: str + ansible_ec2_iam_info_instanceprofilearn: + description: The IAM instance profile ARN. + type: str + sample: "arn:aws:iam::123456789012:instance-profile/role_name" + ansible_ec2_iam_info_instanceprofileid: + description: IAM instance profile ID. + type: str + sample: "" + ansible_ec2_iam_info_lastupdated: + description: IAM info last updated time. + type: str + sample: "2017-05-12T02:42:27Z" + ansible_ec2_iam_instance_profile_role: + description: IAM instance role. + type: str + sample: "role_name" + ansible_ec2_iam_security_credentials_role_name: + description: + - If there is an IAM role associated with the instance, role-name is the name of the role, + and the fact contains the temporary security credentials associated with the role. Otherwise, not present. + type: str + sample: "" + ansible_ec2_iam_security_credentials_role_name_accesskeyid: + description: IAM role access key ID. + type: str + sample: "" + ansible_ec2_iam_security_credentials_role_name_code: + description: IAM code. + type: str + sample: "Success" + ansible_ec2_iam_security_credentials_role_name_expiration: + description: IAM role credentials expiration time. + type: str + sample: "2017-05-12T09:11:41Z" + ansible_ec2_iam_security_credentials_role_name_lastupdated: + description: IAM role last updated time. + type: str + sample: "2017-05-12T02:40:44Z" + ansible_ec2_iam_security_credentials_role_name_secretaccesskey: + description: IAM role secret access key. + type: str + sample: "" + ansible_ec2_iam_security_credentials_role_name_token: + description: IAM role token. + type: str + sample: "" + ansible_ec2_iam_security_credentials_role_name_type: + description: IAM role type. + type: str + sample: "AWS-HMAC" + ansible_ec2_instance_action: + description: Notifies the instance that it should reboot in preparation for bundling. + type: str + sample: "none" + ansible_ec2_instance_id: + description: The ID of this instance. + type: str + sample: "i-XXXXXXXXXXXXXXXXX" + ansible_ec2_instance_identity_document: + description: JSON containing instance attributes, such as instance-id, private IP address, etc. + type: str + sample: "" + ansible_ec2_instance_identity_document_accountid: + description: The ID of the AWS account that launched the instance. + type: str + sample: "123456789012" + ansible_ec2_instance_identity_document_architecture: + description: Instance system architecture. + type: str + sample: "x86_64" + ansible_ec2_instance_identity_document_availabilityzone: + description: The Availability Zone in which the instance launched. + type: str + sample: "us-east-1a" + ansible_ec2_instance_identity_document_billingproducts: + description: Billing products for this instance. + type: str + sample: "" + ansible_ec2_instance_identity_document_devpayproductcodes: + description: Product codes for the launched AMI.
+ type: str + sample: "" + ansible_ec2_instance_identity_document_imageid: + description: The AMI ID used to launch the instance. + type: str + sample: "ami-01234567" + ansible_ec2_instance_identity_document_instanceid: + description: The ID of this instance. + type: str + sample: "i-0123456789abcdef0" + ansible_ec2_instance_identity_document_instancetype: + description: The type of instance. + type: str + sample: "m4.large" + ansible_ec2_instance_identity_document_kernelid: + description: The ID of the kernel launched with this instance, if applicable. + type: str + sample: "" + ansible_ec2_instance_identity_document_pendingtime: + description: The instance pending time. + type: str + sample: "2017-05-11T20:51:20Z" + ansible_ec2_instance_identity_document_privateip: + description: + - The private IPv4 address of the instance. + - In cases where multiple network interfaces are present, this refers to the eth0 device (the device for which the device number is 0). + type: str + sample: "10.0.0.1" + ansible_ec2_instance_identity_document_ramdiskid: + description: The ID of the RAM disk specified at launch time, if applicable. + type: str + sample: "" + ansible_ec2_instance_identity_document_region: + description: The Region in which the instance launched. + type: str + sample: "us-east-1" + ansible_ec2_instance_identity_document_version: + description: Identity document version. + type: str + sample: "2010-08-31" + ansible_ec2_instance_identity_pkcs7: + description: Used to verify the document's authenticity and content against the signature. + type: str + sample: "" + ansible_ec2_instance_identity_rsa2048: + description: Used to verify the document's authenticity and content against the signature. + type: str + sample: "" + ansible_ec2_instance_identity_signature: + description: Data that can be used by other parties to verify its origin and authenticity. + type: str + sample: "" + ansible_ec2_instance_life_cycle: + description: The purchasing option of the instance. + type: str + sample: "on-demand" + ansible_ec2_instance_tags_keys: + description: + - The list of tag keys of the instance. + - Returns an empty list if access to tags (InstanceMetadataTags) in instance metadata is not enabled. + type: list + elements: str + sample: ["tagKey1", "tag_key2"] + version_added: 5.5.0 + ansible_ec2_instance_type: + description: The type of the instance. + type: str + sample: "m4.large" + ansible_ec2_local_hostname: + description: + - The private IPv4 DNS hostname of the instance. + - In cases where multiple network interfaces are present, this refers to the eth0 device (the device for which the device number is 0). + type: str + sample: "ip-10-0-0-1.ec2.internal" + ansible_ec2_local_ipv4: + description: + - The private IPv4 address of the instance. + - In cases where multiple network interfaces are present, this refers to the eth0 device (the device for which the device number is 0). + type: str + sample: "10.0.0.1" + ansible_ec2_mac: + description: + - The instance's media access control (MAC) address. + - In cases where multiple network interfaces are present, this refers to the eth0 device (the device for which the device number is 0). + type: str + sample: "00:11:22:33:44:55" + ansible_ec2_metrics_vhostmd: + description: Metrics; no longer available. + type: str + sample: "" + ansible_ec2_network_interfaces_macs_mac_address_device_number: + description: + - The unique device number associated with that interface.
The device number corresponds to the device name; + for example, a device-number of 2 is for the eth2 device. + - This category corresponds to the DeviceIndex and device-index fields that are used by the Amazon EC2 API and the EC2 commands for the AWS CLI. + type: str + sample: "0" + ansible_ec2_network_interfaces_macs_mac_address_interface_id: + description: The elastic network interface ID. + type: str + sample: "eni-12345678" + ansible_ec2_network_interfaces_macs_mac_address_ipv4_associations_ip_address: + description: The private IPv4 addresses that are associated with each public-ip address and assigned to that interface. + type: str + sample: "" + ansible_ec2_network_interfaces_macs_mac_address_ipv6s: + description: The IPv6 addresses associated with the interface. Returned only for instances launched into a VPC. + type: str + sample: "" + ansible_ec2_network_interfaces_macs_mac_address_local_hostname: + description: The interface's local hostname. + type: str + sample: "" + ansible_ec2_network_interfaces_macs_mac_address_local_ipv4s: + description: The private IPv4 addresses associated with the interface. + type: str + sample: "" + ansible_ec2_network_interfaces_macs_mac_address_mac: + description: The instance's MAC address. + type: str + sample: "00:11:22:33:44:55" + ansible_ec2_network_interfaces_macs_mac_address_owner_id: + description: + - The ID of the owner of the network interface. + - In multiple-interface environments, an interface can be attached by a third party, such as Elastic Load Balancing. + - Traffic on an interface is always billed to the interface owner. + type: str + sample: "123456789012" + ansible_ec2_network_interfaces_macs_mac_address_public_hostname: + description: + - The interface's public DNS (IPv4). If the instance is in a VPC, + this category is only returned if the enableDnsHostnames attribute is set to true. + type: str + sample: "ec2-1-2-3-4.compute-1.amazonaws.com" + ansible_ec2_network_interfaces_macs_mac_address_public_ipv4s: + description: The Elastic IP addresses associated with the interface. There may be multiple IPv4 addresses on an instance. + type: str + sample: "1.2.3.4" + ansible_ec2_network_interfaces_macs_mac_address_security_group_ids: + description: The IDs of the security groups to which the network interface belongs. Returned only for instances launched into a VPC. + type: str + sample: "sg-01234567,sg-01234568" + ansible_ec2_network_interfaces_macs_mac_address_security_groups: + description: Security groups to which the network interface belongs. Returned only for instances launched into a VPC. + type: str + sample: "secgroup1,secgroup2" + ansible_ec2_network_interfaces_macs_mac_address_subnet_id: + description: The ID of the subnet in which the interface resides. Returned only for instances launched into a VPC. + type: str + sample: "subnet-01234567" + ansible_ec2_network_interfaces_macs_mac_address_subnet_ipv4_cidr_block: + description: The IPv4 CIDR block of the subnet in which the interface resides. Returned only for instances launched into a VPC. + type: str + sample: "10.0.1.0/24" + ansible_ec2_network_interfaces_macs_mac_address_subnet_ipv6_cidr_blocks: + description: The IPv6 CIDR block of the subnet in which the interface resides. Returned only for instances launched into a VPC. + type: str + sample: "" + ansible_ec2_network_interfaces_macs_mac_address_vpc_id: + description: The ID of the VPC in which the interface resides. Returned only for instances launched into a VPC. 
+ type: str + sample: "vpc-0123456" + ansible_ec2_network_interfaces_macs_mac_address_vpc_ipv4_cidr_block: + description: The IPv4 CIDR block of the VPC in which the interface resides. Returned only for instances launched into a VPC. + type: str + sample: "10.0.0.0/16" + ansible_ec2_network_interfaces_macs_mac_address_vpc_ipv4_cidr_blocks: + description: The IPv4 CIDR block of the VPC in which the interface resides. Returned only for instances launched into a VPC. + type: str + sample: "10.0.0.0/16" + ansible_ec2_network_interfaces_macs_mac_address_vpc_ipv6_cidr_blocks: + description: The IPv6 CIDR block of the VPC in which the interface resides. Returned only for instances launched into a VPC. + type: str + sample: "" + ansible_ec2_placement_availability_zone: + description: The Availability Zone in which the instance launched. + type: str + sample: "us-east-1a" + ansible_ec2_placement_region: + description: The Region in which the instance launched. + type: str + sample: "us-east-1" + ansible_ec2_product_codes: + description: Product codes associated with the instance, if any. + type: str + sample: "aw0evgkw8e5c1q413zgy5pjce" + ansible_ec2_profile: + description: EC2 instance hardware profile. + type: str + sample: "default-hvm" + ansible_ec2_public_hostname: + description: + - The instance's public DNS. If the instance is in a VPC, this category is only returned if the enableDnsHostnames attribute is set to true. + type: str + sample: "ec2-1-2-3-4.compute-1.amazonaws.com" + ansible_ec2_public_ipv4: + description: The public IPv4 address. If an Elastic IP address is associated with the instance, the value returned is the Elastic IP address. + type: str + sample: "1.2.3.4" + ansible_ec2_public_key: + description: Public key. Only available if supplied at instance launch time. + type: str + sample: "" + ansible_ec2_ramdisk_id: + description: The ID of the RAM disk specified at launch time, if applicable. + type: str + sample: "" + ansible_ec2_reservation_id: + description: The ID of the reservation. + type: str + sample: "r-0123456789abcdef0" + ansible_ec2_security_groups: + description: + - The names of the security groups applied to the instance. After launch, you can only change the security groups of instances running in a VPC. + - Such changes are reflected here and in network/interfaces/macs/mac/security-groups. + type: str + sample: "securitygroup1,securitygroup2" + ansible_ec2_services_domain: + description: The domain for AWS resources for the region; for example, amazonaws.com for us-east-1. + type: str + sample: "amazonaws.com" + ansible_ec2_services_partition: + description: + - The partition that the resource is in. For standard AWS regions, the partition is aws. + - If you have resources in other partitions, the partition is aws-partitionname. + - For example, the partition for resources in the China (Beijing) region is aws-cn. + type: str + sample: "aws" + ansible_ec2_spot_termination_time: + description: + - The approximate time, in UTC, that the operating system for your Spot instance will receive the shutdown signal. + - This item is present and contains a time value only if the Spot instance has been marked for termination by Amazon EC2. + - The termination-time item is not set to a time if you terminated the Spot instance yourself. + type: str + sample: "2015-01-05T18:02:00Z" + ansible_ec2_user_data: + description: The instance user data. 
+ type: str + sample: "#!/bin/bash" +''' + +import json +import re +import socket +import time + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_text +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.six.moves.urllib.parse import quote + +socket.setdefaulttimeout(5) + +# The ec2_metadata_facts module is a special case, while we generally dropped support for Python < 3.6 +# this module doesn't depend on the SDK and still has valid use cases for folks working with older +# OSes. +try: + json_decode_error = json.JSONDecodeError +except AttributeError: + json_decode_error = ValueError + + +class Ec2Metadata(object): + ec2_metadata_token_uri = 'http://169.254.169.254/latest/api/token' + ec2_metadata_uri = 'http://169.254.169.254/latest/meta-data/' + ec2_metadata_instance_tags_uri = 'http://169.254.169.254/latest/meta-data/tags/instance' + ec2_sshdata_uri = 'http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key' + ec2_userdata_uri = 'http://169.254.169.254/latest/user-data/' + ec2_dynamicdata_uri = 'http://169.254.169.254/latest/dynamic/' + + def __init__( + self, + module, + ec2_metadata_token_uri=None, + ec2_metadata_uri=None, + ec2_metadata_instance_tags_uri=None, + ec2_sshdata_uri=None, + ec2_userdata_uri=None, + ec2_dynamicdata_uri=None, + ): + self.module = module + self.uri_token = ec2_metadata_token_uri or self.ec2_metadata_token_uri + self.uri_meta = ec2_metadata_uri or self.ec2_metadata_uri + self.uri_instance_tags = ec2_metadata_instance_tags_uri or self.ec2_metadata_instance_tags_uri + self.uri_user = ec2_userdata_uri or self.ec2_userdata_uri + self.uri_ssh = ec2_sshdata_uri or self.ec2_sshdata_uri + self.uri_dynamic = ec2_dynamicdata_uri or self.ec2_dynamicdata_uri + self._data = {} + self._token = None + self._prefix = 'ansible_ec2_%s' + + def _fetch(self, url): + encoded_url = quote(url, safe='%/:=&?~#+!$,;\'@()*[]') + headers = {} + if self._token: + headers = {'X-aws-ec2-metadata-token': self._token} + response, info = fetch_url(self.module, encoded_url, headers=headers, force=True) + + if info.get('status') in (401, 403): + self.module.fail_json(msg='Failed to retrieve metadata from AWS: {0}'.format(info['msg']), response=info) + elif info.get('status') not in (200, 404): + time.sleep(3) + # request went bad, retry once then raise + self.module.warn('Retrying query to metadata service. 
First attempt failed: {0}'.format(info['msg'])) + response, info = fetch_url(self.module, encoded_url, headers=headers, force=True) + if info.get('status') not in (200, 404): + # fail out now + self.module.fail_json(msg='Failed to retrieve metadata from AWS: {0}'.format(info['msg']), response=info) + if response and info['status'] < 400: + data = response.read() + else: + data = None + return to_text(data) + + def _mangle_fields(self, fields, uri, filter_patterns=None): + filter_patterns = ['public-keys-0'] if filter_patterns is None else filter_patterns + + new_fields = {} + for key, value in fields.items(): + split_fields = key[len(uri):].split('/') + # Parse out the IAM role name (which is _not_ the same as the instance profile name) + if len(split_fields) == 3 and split_fields[0:2] == ['iam', 'security-credentials'] and ':' not in split_fields[2]: + new_fields[self._prefix % "iam-instance-profile-role"] = split_fields[2] + if len(split_fields) > 1 and split_fields[1]: + new_key = "-".join(split_fields) + new_fields[self._prefix % new_key] = value + else: + new_key = "".join(split_fields) + new_fields[self._prefix % new_key] = value + for pattern in filter_patterns: + for key in dict(new_fields): + match = re.search(pattern, key) + if match: + new_fields.pop(key) + return new_fields + + def fetch(self, uri, recurse=True): + raw_subfields = self._fetch(uri) + if not raw_subfields: + return + subfields = raw_subfields.split('\n') + for field in subfields: + if field.endswith('/') and recurse: + self.fetch(uri + field) + if uri.endswith('/'): + new_uri = uri + field + else: + new_uri = uri + '/' + field + if new_uri not in self._data and not new_uri.endswith('/'): + content = self._fetch(new_uri) + if field == 'security-groups' or field == 'security-group-ids': + sg_fields = ",".join(content.split('\n')) + self._data['%s' % (new_uri)] = sg_fields + else: + try: + json_dict = json.loads(content) + self._data['%s' % (new_uri)] = content + for (key, value) in json_dict.items(): + self._data['%s:%s' % (new_uri, key.lower())] = value + except (json_decode_error, AttributeError): + self._data['%s' % (new_uri)] = content # not a stringified JSON string + + def fix_invalid_varnames(self, data): + """Change ':'' and '-' to '_' to ensure valid template variable names""" + new_data = data.copy() + for key, value in data.items(): + if ':' in key or '-' in key: + newkey = re.sub(':|-', '_', key) + new_data[newkey] = value + del new_data[key] + + return new_data + + def fetch_session_token(self, uri_token): + """Used to get a session token for IMDSv2""" + headers = {'X-aws-ec2-metadata-token-ttl-seconds': '60'} + response, info = fetch_url(self.module, uri_token, method='PUT', headers=headers, force=True) + + if info.get('status') == 403: + self.module.fail_json(msg='Failed to retrieve metadata token from AWS: {0}'.format(info['msg']), response=info) + elif info.get('status') not in (200, 404): + time.sleep(3) + # request went bad, retry once then raise + self.module.warn('Retrying query to metadata service. 
First attempt failed: {0}'.format(info['msg'])) + response, info = fetch_url(self.module, uri_token, method='PUT', headers=headers, force=True) + if info.get('status') not in (200, 404): + # fail out now + self.module.fail_json(msg='Failed to retrieve metadata token from AWS: {0}'.format(info['msg']), response=info) + if response: + token_data = response.read() + else: + token_data = None + return to_text(token_data) + + def run(self): + self._token = self.fetch_session_token(self.uri_token) # create session token for IMDS + self.fetch(self.uri_meta) # populate _data with metadata + data = self._mangle_fields(self._data, self.uri_meta) + data[self._prefix % 'user-data'] = self._fetch(self.uri_user) + data[self._prefix % 'public-key'] = self._fetch(self.uri_ssh) + + self._data = {} # clear out metadata in _data + self.fetch(self.uri_dynamic) # populate _data with dynamic data + dyndata = self._mangle_fields(self._data, self.uri_dynamic) + data.update(dyndata) + data = self.fix_invalid_varnames(data) + + instance_tags_keys = self._fetch(self.uri_instance_tags) + instance_tags_keys = instance_tags_keys.split('\n') if instance_tags_keys != "None" else [] + data[self._prefix % 'instance_tags_keys'] = instance_tags_keys + + # Maintain old key for backwards compatibility + if 'ansible_ec2_instance_identity_document_region' in data: + data['ansible_ec2_placement_region'] = data['ansible_ec2_instance_identity_document_region'] + return data + + +def main(): + module = AnsibleModule( + argument_spec={}, + supports_check_mode=True, + ) + + ec2_metadata_facts = Ec2Metadata(module).run() + ec2_metadata_facts_result = dict(changed=False, ansible_facts=ec2_metadata_facts) + + module.exit_json(**ec2_metadata_facts_result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_security_group.py b/ansible_collections/amazon/aws/plugins/modules/ec2_security_group.py new file mode 100644 index 000000000..d4fa9b564 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_security_group.py @@ -0,0 +1,1483 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_security_group +version_added: 1.0.0 +author: + - "Andrew de Quincey (@adq)" + - "Razique Mahroua (@Razique)" +short_description: Maintain an EC2 security group +description: + - Maintains EC2 security groups. +options: + name: + description: + - Name of the security group. + - Exactly one of I(name) or I(group_id) is required. + - Required if I(state=present). + required: false + type: str + group_id: + description: + - ID of the security group to delete (only used when I(state=absent)). + - Exactly one of I(name) or I(group_id) is required. + required: false + type: str + description: + description: + - Description of the security group. Required when I(state=present). + required: false + type: str + vpc_id: + description: + - ID of the VPC to create the group in. + required: false + type: str + rules: + description: + - List of firewall inbound rules to enforce in this group (see example). If none are supplied, + no inbound rules will be enabled. The rules list may include the group's own name in I(group_name); + this allows idempotent loopback rules (e.g. allowing the group to access itself).
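+    # A minimal illustrative sketch of such a loopback rule (the group name "example" is hypothetical): + #   rules: + #     - proto: tcp + #       from_port: 5432 + #       to_port: 5432 + #       group_name: example   # the group grants access to itself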
+ required: false + type: list + elements: dict + suboptions: + cidr_ip: + type: str + description: + - The IPv4 CIDR range traffic is coming from. + - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) + and I(group_name). + - Support for passing nested lists of strings to I(cidr_ip) has been deprecated and will + be removed in a release after 2024-12-01. + cidr_ipv6: + type: str + description: + - The IPv6 CIDR range traffic is coming from. + - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) + and I(group_name). + - Support for passing nested lists of strings to I(cidr_ipv6) has been deprecated and will + be removed in a release after 2024-12-01. + ip_prefix: + type: str + description: + - The IP Prefix U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-prefix-lists.html) + that traffic is coming from. + - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) + and I(group_name). + group_id: + type: str + description: + - The ID of the Security Group that traffic is coming from. + - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) + and I(group_name). + group_name: + type: list + elements: str + description: + - Name of the Security Group that traffic is coming from. + - If the Security Group doesn't exist a new Security Group will be + created with I(group_desc) as the description. + - I(group_name) can accept values of type str and list. + - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) + and I(group_name). + group_desc: + type: str + description: + - If the I(group_name) is set and the Security Group doesn't exist a new Security Group will be + created with I(group_desc) as the description. + proto: + type: str + description: + - The IP protocol name (C(tcp), C(udp), C(icmp), C(icmpv6)) or + - number (U(https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers)) + - When using C(icmp) or C(icmpv6) as the protocol, you can pass + - the C(icmp_type) and C(icmp_code) parameters instead of + - C(from_port) and C(to_port). + from_port: + type: int + description: + - The start of the range of ports that traffic is coming from. + - A value can be between C(0) to C(65535). + - A value of C(-1) indicates all ports (only supported when I(proto=icmp)). + to_port: + type: int + description: + - The end of the range of ports that traffic is coming from. + - A value can be between C(0) to C(65535). + - A value of C(-1) indicates all ports (only supported when I(proto=icmp)). + icmp_type: + version_added: 3.3.0 + type: int + description: + - When using C(icmp) or C(icmpv6) as the protocol, allows you to + - specify the ICMP type to use. The option is mutually exclusive with C(from_port). + - A value of C(-1) indicates all ICMP types. + icmp_code: + version_added: 3.3.0 + type: int + description: + - When using C(icmp) or C(icmpv6) as the protocol, allows you to specify + - the ICMP code to use. The option is mutually exclusive with C(to_port). + - A value of C(-1) indicates all ICMP codes. + rule_desc: + type: str + description: A description for the rule. + rules_egress: + description: + - List of firewall outbound rules to enforce in this group (see example). If none are supplied, + a default all-out rule is assumed. If an empty list is supplied, no outbound rules will be enabled. 
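+    # Illustrative: passing "rules_egress: []" removes the default allow-all egress rule, while omitting rules_egress entirely leaves it in place.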
+ required: false + type: list + elements: dict + aliases: ['egress_rules'] + suboptions: + cidr_ip: + type: str + description: + - The IPv4 CIDR range traffic is going to. + - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) + and I(group_name). + - Support for passing nested lists of strings to I(cidr_ip) has been deprecated and will + be removed in a release after 2024-12-01. + cidr_ipv6: + type: str + description: + - The IPv6 CIDR range traffic is going to. + - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) + and I(group_name). + - Support for passing nested lists of strings to I(cidr_ipv6) has been deprecated and will + be removed in a release after 2024-12-01. + ip_prefix: + type: str + description: + - The IP Prefix U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-prefix-lists.html) + that traffic is going to. + - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) + and I(group_name). + group_id: + type: str + description: + - The ID of the Security Group that traffic is going to. + - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) + and I(group_name). + group_name: + type: str + description: + - Name of the Security Group that traffic is going to. + - If the Security Group doesn't exist a new Security Group will be + created with I(group_desc) as the description. + - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) + and I(group_name). + group_desc: + type: str + description: + - If the I(group_name) is set and the Security Group doesn't exist a new Security Group will be + created with I(group_desc) as the description. + proto: + type: str + description: + - The IP protocol name (C(tcp), C(udp), C(icmp), C(icmpv6)) or + - number (U(https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers)) + - When using C(icmp) or C(icmpv6) as the protocol, you can pass the + - C(icmp_type) and C(icmp_code) parameters instead of C(from_port) and C(to_port). + from_port: + type: int + description: + - The start of the range of ports that traffic is going to. + - A value can be between C(0) to C(65535). + - A value of C(-1) indicates all ports (only supported when I(proto=icmp)). + to_port: + type: int + description: + - The end of the range of ports that traffic is going to. + - A value can be between C(0) to C(65535). + - A value of C(-1) indicates all ports (only supported when I(proto=icmp)). + icmp_type: + version_added: 3.3.0 + type: int + description: + - When using C(icmp) or C(icmpv6) as the protocol, allows you to specify + - the ICMP type to use. The option is mutually exclusive with C(from_port). + - A value of C(-1) indicates all ICMP types. + icmp_code: + version_added: 3.3.0 + type: int + description: + - When using C(icmp) or C(icmpv6) as the protocol, allows you to specify + - the ICMP code to use. The option is mutually exclusive with C(to_port). + - A value of C(-1) indicates all ICMP codes. + rule_desc: + type: str + description: A description for the rule. + state: + description: + - Create or delete a security group. + required: false + default: 'present' + choices: [ "present", "absent" ] + aliases: [] + type: str + purge_rules: + description: + - Purge existing rules on security group that are not found in rules. + required: false + default: 'true' + aliases: [] + type: bool + purge_rules_egress: + description: + - Purge existing rules_egress on security group that are not found in rules_egress. 
+ required: false + default: 'true' + aliases: ['purge_egress_rules'] + type: bool + +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags + - amazon.aws.boto3 + +notes: + - If a rule declares a group_name and that group doesn't exist, it will be + automatically created. In that case, group_desc should be provided as well. + The module will refuse to create a depended-on group without a description. + - Prior to release 5.0.0 this module was called C(amazon.aws.ec2_group_info). The usage did not + change. +''' + +EXAMPLES = ''' +- name: example using security group rule descriptions + amazon.aws.ec2_security_group: + name: "{{ name }}" + description: sg with rule descriptions + vpc_id: vpc-xxxxxxxx + profile: "{{ aws_profile }}" + region: us-east-1 + rules: + - proto: tcp + ports: + - 80 + cidr_ip: 0.0.0.0/0 + rule_desc: allow all on port 80 + +- name: example using ICMP types and codes + amazon.aws.ec2_security_group: + name: "{{ name }}" + description: sg for ICMP + vpc_id: vpc-xxxxxxxx + profile: "{{ aws_profile }}" + region: us-east-1 + rules: + - proto: icmp + icmp_type: 3 + icmp_code: 1 + cidr_ip: 0.0.0.0/0 + +- name: example ec2 group + amazon.aws.ec2_security_group: + name: example + description: an example EC2 group + vpc_id: 12345 + region: eu-west-1 + aws_secret_key: SECRET + aws_access_key: ACCESS + rules: + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: 0.0.0.0/0 + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: 10.0.0.0/8 + - proto: tcp + from_port: 443 + to_port: 443 + # this should only be needed for EC2 Classic security group rules + # because in a VPC an ELB will use a user-account security group + group_id: amazon-elb/sg-87654321/amazon-elb-sg + - proto: tcp + from_port: 3306 + to_port: 3306 + group_id: 123456789012/sg-87654321/exact-name-of-sg + - proto: udp + from_port: 10050 + to_port: 10050 + cidr_ip: 10.0.0.0/8 + - proto: udp + from_port: 10051 + to_port: 10051 + group_id: sg-12345678 + - proto: icmp + from_port: 8 # icmp type, -1 = any type + to_port: -1 # icmp subtype, -1 = any subtype + cidr_ip: 10.0.0.0/8 + - proto: all + # the containing group name may be specified here + group_name: example + - proto: all + # in the 'proto' attribute, if you specify -1 (only supported when I(proto=icmp)), all, or a protocol number + # other than tcp, udp, icmp, or 58 (ICMPv6), traffic on all ports is allowed, regardless of any ports that + # you specify. + from_port: 10050 # this value is ignored + to_port: 10050 # this value is ignored + cidr_ip: 10.0.0.0/8 + + rules_egress: + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: 0.0.0.0/0 + cidr_ipv6: 64:ff9b::/96 + group_name: example-other + # description to use if example-other needs to be created + group_desc: other example EC2 group + +- name: example2 ec2 group + amazon.aws.ec2_security_group: + name: example2 + description: an example2 EC2 group + vpc_id: 12345 + region: eu-west-1 + rules: + # 'ports' rule keyword was introduced in version 2.4. It accepts a single + # port value or a list of values including ranges (from_port-to_port). + - proto: tcp + ports: 22 + group_name: example-vpn + - proto: tcp + ports: + - 80 + - 443 + - 8080-8099 + cidr_ip: 0.0.0.0/0 + # Rule sources list support was added in version 2.4. This allows to + # define multiple sources per source type as well as multiple source types per rule. 
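+    # For instance, the first rule below effectively grants 2 ports x 2 source groups = 4 port/source combinations in a single entry.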
+ - proto: tcp + ports: + - 6379 + - 26379 + group_name: + - example-vpn + - example-redis + - proto: tcp + ports: 5665 + group_name: example-vpn + cidr_ip: + - 172.16.1.0/24 + - 172.16.17.0/24 + cidr_ipv6: + - 2607:F8B0::/32 + - 64:ff9b::/96 + group_id: + - sg-edcd9784 + diff: True + +- name: "Delete group by its id" + amazon.aws.ec2_security_group: + region: eu-west-1 + group_id: sg-33b4ee5b + state: absent +''' + +RETURN = ''' +group_name: + description: Security group name + sample: My Security Group + type: str + returned: on create/update +group_id: + description: Security group id + sample: sg-abcd1234 + type: str + returned: on create/update +description: + description: Description of security group + sample: My Security Group + type: str + returned: on create/update +tags: + description: Tags associated with the security group + sample: + Name: My Security Group + Purpose: protecting stuff + type: dict + returned: on create/update +vpc_id: + description: ID of VPC to which the security group belongs + sample: vpc-abcd1234 + type: str + returned: on create/update +ip_permissions: + description: Inbound rules associated with the security group. + sample: + - from_port: 8182 + ip_protocol: tcp + ip_ranges: + - cidr_ip: "198.51.100.1/32" + ipv6_ranges: [] + prefix_list_ids: [] + to_port: 8182 + user_id_group_pairs: [] + type: list + returned: on create/update +ip_permissions_egress: + description: Outbound rules associated with the security group. + sample: + - ip_protocol: -1 + ip_ranges: + - cidr_ip: "0.0.0.0/0" + ipv6_ranges: [] + prefix_list_ids: [] + user_id_group_pairs: [] + type: list + returned: on create/update +owner_id: + description: AWS Account ID of the security group + sample: 123456789012 + type: int + returned: on create/update +''' + +import itertools +import json +import re +from collections import namedtuple +from copy import deepcopy +from ipaddress import IPv6Network +from ipaddress import ip_network +from time import sleep + +try: + from botocore.exceptions import BotoCoreError, ClientError +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils._text import to_text +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.network import to_ipv6_subnet +from ansible.module_utils.common.network import to_subnet +from ansible.module_utils.six import string_types + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_id +from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter + + +Rule = namedtuple('Rule', ['port_range', 'protocol', 'target', 'target_type', 'description']) +valid_targets = set(['ipv4', 'ipv6', 'group', 'ip_prefix']) +current_account_id = None + + +def rule_cmp(a, b): + """Compare rules without descriptions""" + for prop in ['port_range', 'protocol', 'target', 
+
+
+def rules_to_permissions(rules):
+ return [to_permission(rule) for rule in rules]
+
+
+def to_permission(rule):
+ # take a Rule, output the serialized grant
+ perm = {
+ 'IpProtocol': rule.protocol,
+ }
+ perm['FromPort'], perm['ToPort'] = rule.port_range
+ if rule.target_type == 'ipv4':
+ perm['IpRanges'] = [{
+ 'CidrIp': rule.target,
+ }]
+ if rule.description:
+ perm['IpRanges'][0]['Description'] = rule.description
+ elif rule.target_type == 'ipv6':
+ perm['Ipv6Ranges'] = [{
+ 'CidrIpv6': rule.target,
+ }]
+ if rule.description:
+ perm['Ipv6Ranges'][0]['Description'] = rule.description
+ elif rule.target_type == 'group':
+ if isinstance(rule.target, tuple):
+ pair = {}
+ if rule.target[0]:
+ pair['UserId'] = rule.target[0]
+ # group_id/group_name are mutually exclusive - give group_id more precedence as it is more specific
+ if rule.target[1]:
+ pair['GroupId'] = rule.target[1]
+ elif rule.target[2]:
+ pair['GroupName'] = rule.target[2]
+ perm['UserIdGroupPairs'] = [pair]
+ else:
+ perm['UserIdGroupPairs'] = [{
+ 'GroupId': rule.target
+ }]
+ if rule.description:
+ perm['UserIdGroupPairs'][0]['Description'] = rule.description
+ elif rule.target_type == 'ip_prefix':
+ perm['PrefixListIds'] = [{
+ 'PrefixListId': rule.target,
+ }]
+ if rule.description:
+ perm['PrefixListIds'][0]['Description'] = rule.description
+ elif rule.target_type not in valid_targets:
+ raise ValueError('Invalid target type for rule {0}'.format(rule))
+ return fix_port_and_protocol(perm)
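+
+
+# For example (illustration only):
+#   to_permission(Rule((80, 80), 'tcp', '198.51.100.0/24', 'ipv4', 'web'))
+#   -> {'IpProtocol': 'tcp', 'FromPort': 80, 'ToPort': 80,
+#       'IpRanges': [{'CidrIp': '198.51.100.0/24', 'Description': 'web'}]}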
+ """ + def ports_from_permission(p): + if 'FromPort' not in p and 'ToPort' not in p: + return (None, None) + return (int(perm['FromPort']), int(perm['ToPort'])) + + # outputs a rule tuple + for target_key, target_subkey, target_type in [ + ('IpRanges', 'CidrIp', 'ipv4'), + ('Ipv6Ranges', 'CidrIpv6', 'ipv6'), + ('PrefixListIds', 'PrefixListId', 'ip_prefix'), + ]: + if target_key not in perm: + continue + for r in perm[target_key]: + # there may be several IP ranges here, which is ok + yield Rule( + ports_from_permission(perm), + to_text(perm['IpProtocol']), + r[target_subkey], + target_type, + r.get('Description') + ) + if 'UserIdGroupPairs' in perm and perm['UserIdGroupPairs']: + for pair in perm['UserIdGroupPairs']: + target = ( + pair.get('UserId', current_account_id), + pair.get('GroupId', None), + None, + ) + if pair.get('UserId', '').startswith('amazon-'): + # amazon-elb and amazon-prefix rules don't need + # group-id specified, so remove it when querying + # from permission + target = ( + pair.get('UserId', None), + None, + pair.get('GroupName', None), + ) + elif 'VpcPeeringConnectionId' not in pair and pair['UserId'] != current_account_id: + # EC2-Classic cross-account + pass + elif 'VpcPeeringConnectionId' in pair: + # EC2-VPC cross-account VPC peering + target = ( + pair.get('UserId', None), + pair.get('GroupId', None), + None, + ) + + yield Rule( + ports_from_permission(perm), + to_text(perm['IpProtocol']), + target, + 'group', + pair.get('Description') + ) + + +# Wrap just this method so we can retry on missing groups +@AWSRetry.jittered_backoff(retries=5, delay=5, catch_extra_error_codes=['InvalidGroup.NotFound']) +def get_security_groups_with_backoff(client, **kwargs): + return client.describe_security_groups(**kwargs) + + +def sg_exists_with_backoff(client, **kwargs): + try: + return client.describe_security_groups(aws_retry=True, **kwargs) + except is_boto3_error_code('InvalidGroup.NotFound'): + return {'SecurityGroups': []} + + +def deduplicate_rules_args(rules): + """Returns unique rules""" + if rules is None: + return None + return list(dict(zip((json.dumps(r, sort_keys=True) for r in rules), rules)).values()) + + +def validate_rule(module, rule): + VALID_PARAMS = ( + 'cidr_ip', + 'cidr_ipv6', + 'ip_prefix', + 'group_id', + 'group_name', + 'group_desc', + 'proto', + 'from_port', + 'to_port', + 'icmp_type', + 'icmp_code', + 'icmp_keys', + 'rule_desc', + ) + if not isinstance(rule, dict): + module.fail_json(msg='Invalid rule parameter type [%s].' 
+
+
+def validate_rule(module, rule):
+ VALID_PARAMS = (
+ 'cidr_ip',
+ 'cidr_ipv6',
+ 'ip_prefix',
+ 'group_id',
+ 'group_name',
+ 'group_desc',
+ 'proto',
+ 'from_port',
+ 'to_port',
+ 'icmp_type',
+ 'icmp_code',
+ 'icmp_keys',
+ 'rule_desc',
+ )
+ if not isinstance(rule, dict):
+ module.fail_json(msg='Invalid rule parameter type [%s].' % type(rule))
+ for k in rule:
+ if k not in VALID_PARAMS:
+ module.fail_json(msg='Invalid rule parameter \'{0}\' for rule: {1}'.format(k, rule))
+
+ if 'group_id' in rule and 'cidr_ip' in rule:
+ module.fail_json(msg='Specify group_id OR cidr_ip, not both')
+ elif 'group_name' in rule and 'cidr_ip' in rule:
+ module.fail_json(msg='Specify group_name OR cidr_ip, not both')
+ elif 'group_id' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify group_id OR cidr_ipv6, not both")
+ elif 'group_name' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify group_name OR cidr_ipv6, not both")
+ elif 'cidr_ip' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify cidr_ip OR cidr_ipv6, not both")
+ elif 'group_id' in rule and 'group_name' in rule:
+ module.fail_json(msg='Specify group_id OR group_name, not both')
+ elif ('icmp_type' in rule or 'icmp_code' in rule) and 'ports' in rule:
+ module.fail_json(msg='Specify icmp_code/icmp_type OR ports, not both')
+ elif ('from_port' in rule or 'to_port' in rule) and ('icmp_type' in rule or 'icmp_code' in rule) and 'icmp_keys' not in rule:
+ module.fail_json(msg='Specify from_port/to_port OR icmp_type/icmp_code, not both')
+ elif ('icmp_type' in rule or 'icmp_code' in rule) and ('icmp' not in rule['proto']):
+ module.fail_json(msg='Specify proto: icmp or icmpv6 when using icmp_type/icmp_code')
+
+
+def get_target_from_rule(module, client, rule, name, group, groups, vpc_id):
+ """
+ Returns tuple of (target_type, target, group_created) after validating rule params.
+
+ rule: Dict describing a rule.
+ name: Name of the security group being managed.
+ groups: Dict of all available security groups.
+
+ AWS accepts an ip range or a security group as target of a rule. This
+ function validates the rule specification and returns either a non-None
+ group_id or a non-None ip range.
+
+ When using a security group as a target all 3 fields (OwnerId, GroupId, and
+ GroupName) need to exist in the target. This ensures consistency of the
+ values that will be compared to current_rules (from current_ingress and
+ current_egress) in wait_for_rule_propagation().
+ """
+ FOREIGN_SECURITY_GROUP_REGEX = r'^([^/]+)/?(sg-\S+)?/(\S+)'
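+ # For illustration, the pattern splits 'owner/sg-id/name' style targets:
+ #   'amazon-elb/sg-5a9c116a/amazon-elb-sg' -> ('amazon-elb', 'sg-5a9c116a', 'amazon-elb-sg')
+ #   'amazon-elb/amazon-elb-sg' -> ('amazon-elb', None, 'amazon-elb-sg')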
+ owner_id = current_account_id
+ group_id = None
+ group_name = None
+ target_group_created = False
+
+ validate_rule(module, rule)
+ if rule.get('group_id') and re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']):
+ # this is a foreign security group; since we can't fetch it, we must create a local representation of it
+ # Matches on groups like amazon-elb/sg-5a9c116a/amazon-elb-sg, amazon-elb/amazon-elb-sg,
+ # and peer-VPC groups like 0987654321/sg-1234567890/example
+ owner_id, group_id, group_name = re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']).groups()
+ group_instance = dict(UserId=owner_id, GroupId=group_id, GroupName=group_name)
+ groups[group_id] = group_instance
+ groups[group_name] = group_instance
+ if group_id and group_name:
+ if group_name.startswith('amazon-'):
+ # amazon-elb and amazon-prefix rules don't need group_id specified
+ group_id = None
+ else:
+ # group_id/group_name are mutually exclusive - give group_id more precedence as it is more specific
+ group_name = None
+ return 'group', (owner_id, group_id, group_name), False
+ elif 'group_id' in rule:
+ return 'group', (owner_id, rule['group_id'], None), False
+ elif 'group_name' in rule:
+ group_name = rule['group_name']
+ if group_name == name:
+ group_id = group['GroupId']
+ groups[group_id] = group
+ groups[group_name] = group
+ elif group_name in groups and group.get('VpcId') and groups[group_name].get('VpcId'):
+ # both are VPC groups, this is ok
+ group_id = groups[group_name]['GroupId']
+ elif group_name in groups and not (group.get('VpcId') or groups[group_name].get('VpcId')):
+ # both are EC2 classic, this is ok
+ group_id = groups[group_name]['GroupId']
+ else:
+ auto_group = None
+ filters = {'group-name': group_name}
+ if vpc_id:
+ filters['vpc-id'] = vpc_id
+ # if we got here, either the target group does not exist, or there
+ # is a mix of EC2 classic + VPC groups. Mixing of EC2 classic + VPC
+ # is bad, so we have to create a new SG because no compatible group
+ # exists
+ if not rule.get('group_desc', '').strip():
+ # retry describing the group once
+ try:
+ auto_group = get_security_groups_with_backoff(client, Filters=ansible_dict_to_boto3_filter_list(filters)).get('SecurityGroups', [])[0]
+ except (is_boto3_error_code('InvalidGroup.NotFound'), IndexError):
+ module.fail_json(msg="group %s will be automatically created by rule %s but "
+ "no description was provided" % (group_name, rule))
+ except ClientError as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e)
+ elif not module.check_mode:
+ params = dict(GroupName=group_name, Description=rule['group_desc'])
+ if vpc_id:
+ params['VpcId'] = vpc_id
+ try:
+ auto_group = client.create_security_group(aws_retry=True, **params)
+ get_waiter(
+ client, 'security_group_exists',
+ ).wait(
+ GroupIds=[auto_group['GroupId']],
+ )
+ except is_boto3_error_code('InvalidGroup.Duplicate'):
+ # The group exists, but didn't show up in any of our describe-security-groups calls
+ # Try searching on a filter for the name, and allow a retry window for AWS to update
+ # the model on their end.
+ try:
+ auto_group = get_security_groups_with_backoff(client, Filters=ansible_dict_to_boto3_filter_list(filters)).get('SecurityGroups', [])[0]
+ except IndexError:
+ module.fail_json(msg="Could not create or use existing group '{0}' in rule. Make sure the group exists".format(group_name))
+ except ClientError as e:
+ module.fail_json_aws(
+ e,
+ msg="Could not create or use existing group '{0}' in rule. Make sure the group exists".format(group_name))
+ if auto_group is not None:
+ group_id = auto_group['GroupId']
+ groups[group_id] = auto_group
+ groups[group_name] = auto_group
+ target_group_created = True
+ return 'group', (owner_id, group_id, None), target_group_created
+ elif 'cidr_ip' in rule:
+ return 'ipv4', validate_ip(module, rule['cidr_ip']), False
+ elif 'cidr_ipv6' in rule:
+ return 'ipv6', validate_ip(module, rule['cidr_ipv6']), False
+ elif 'ip_prefix' in rule:
+ return 'ip_prefix', rule['ip_prefix'], False
+
+ module.fail_json(msg="Could not match target for rule {0}".format(rule), failed_rule=rule)
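+
+
+# For example (illustration only): a peer-account rule such as
+#   {'proto': 'tcp', 'ports': 3306, 'group_id': '123456789012/sg-12345678/mysql'}
+# resolves to ('group', ('123456789012', 'sg-12345678', None), False).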
+
+
+def ports_expand(ports):
+ # takes a list of ports and returns a list of (port_from, port_to)
+ ports_expanded = []
+ for port in ports:
+ if not isinstance(port, string_types):
+ ports_expanded.append((port,) * 2)
+ elif '-' in port:
+ ports_expanded.append(tuple(int(p.strip()) for p in port.split('-', 1)))
+ else:
+ ports_expanded.append((int(port.strip()),) * 2)
+
+ return ports_expanded
+
+
+def rule_expand_ports(rule):
+ # takes a rule dict and returns a list of expanded rule dicts
+ # uses icmp_code and icmp_type instead of from_port and to_port when
+ # available.
+ if 'ports' not in rule:
+ non_icmp_params = any([
+ rule.get('icmp_type', None) is None, rule.get('icmp_code', None) is None])
+ conflict = not non_icmp_params and any([
+ rule.get('from_port', None), rule.get('to_port', None)])
+
+ if non_icmp_params:
+ if isinstance(rule.get('from_port'), string_types):
+ rule['from_port'] = int(rule.get('from_port'))
+ if isinstance(rule.get('to_port'), string_types):
+ rule['to_port'] = int(rule.get('to_port'))
+ else:
+ rule['from_port'] = int(rule.get('icmp_type')) if isinstance(rule.get('icmp_type'), string_types) else rule.get('icmp_type')
+ rule['to_port'] = int(rule.get('icmp_code')) if isinstance(rule.get('icmp_code'), string_types) else rule.get('icmp_code')
+ # Used temporarily to track the fact that icmp keys were converted
+ # to from_port/to_port
+ if not conflict:
+ rule['icmp_keys'] = True
+
+ return [rule]
+
+ ports = rule['ports'] if isinstance(rule['ports'], list) else [rule['ports']]
+
+ rule_expanded = []
+ for from_to in ports_expand(ports):
+ temp_rule = rule.copy()
+ del temp_rule['ports']
+ temp_rule['from_port'], temp_rule['to_port'] = sorted(from_to)
+ rule_expanded.append(temp_rule)
+
+ return rule_expanded
+
+
+def rules_expand_ports(rules):
+ # takes a list of rules and expands it based on 'ports'
+ if not rules:
+ return rules
+
+ return [rule for rule_complex in rules
+ for rule in rule_expand_ports(rule_complex)]
+
+
+def rule_expand_source(rule, source_type):
+ # takes a rule dict and returns a list of expanded rule dicts for specified source_type
+ sources = rule[source_type] if isinstance(rule[source_type], list) else [rule[source_type]]
+ source_types_all = ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name', 'ip_prefix')
+
+ rule_expanded = []
+ for source in sources:
+ temp_rule = rule.copy()
+ for s in source_types_all:
+ temp_rule.pop(s, None)
+ temp_rule[source_type] = source
+ rule_expanded.append(temp_rule)
+
+ return rule_expanded
+
+
+def rule_expand_sources(rule):
+ # takes a rule dict and returns a list of expanded rule dicts
+ source_types = (stype for stype in ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name', 'ip_prefix') if stype in rule)
+
+ return [r for stype in source_types
+ for r in rule_expand_source(rule, stype)]
+
+
+def rules_expand_sources(rules):
+ # takes a list of rules and expands it based on the source keys
+ # ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name', 'ip_prefix')
+ if not rules:
+ return rules
+
+ return [rule for rule_complex in rules
+ for rule in rule_expand_sources(rule_complex)]
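+
+
+# A quick sketch of the expansion (illustration only):
+#   rules_expand_ports([{'proto': 'tcp', 'ports': [22, '8080-8099'], 'cidr_ip': '10.0.0.0/8'}])
+#   -> [{'proto': 'tcp', 'from_port': 22, 'to_port': 22, 'cidr_ip': '10.0.0.0/8'},
+#       {'proto': 'tcp', 'from_port': 8080, 'to_port': 8099, 'cidr_ip': '10.0.0.0/8'}]
+# rules_expand_sources() then fans each rule out to one rule per source, so a
+# rule listing two cidr_ip entries becomes two single-source rules.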
+
+
+def update_rules_description(module, client, rule_type, group_id, ip_permissions):
+ if module.check_mode:
+ return
+ try:
+ if rule_type == "in":
+ client.update_security_group_rule_descriptions_ingress(
+ aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions)
+ if rule_type == "out":
+ client.update_security_group_rule_descriptions_egress(
+ aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions)
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to update rule description for group %s" % group_id)
+
+
+def fix_port_and_protocol(permission):
+ for key in ('FromPort', 'ToPort'):
+ if key in permission:
+ if permission[key] is None:
+ del permission[key]
+ else:
+ permission[key] = int(permission[key])
+
+ permission['IpProtocol'] = to_text(permission['IpProtocol'])
+
+ return permission
+
+
+def remove_old_permissions(client, module, revoke_ingress, revoke_egress, group_id):
+ if revoke_ingress:
+ revoke(client, module, revoke_ingress, group_id, 'in')
+ if revoke_egress:
+ revoke(client, module, revoke_egress, group_id, 'out')
+ return bool(revoke_ingress or revoke_egress)
+
+
+def revoke(client, module, ip_permissions, group_id, rule_type):
+ if not module.check_mode:
+ try:
+ if rule_type == 'in':
+ client.revoke_security_group_ingress(
+ aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions)
+ elif rule_type == 'out':
+ client.revoke_security_group_egress(
+ aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions)
+ except (BotoCoreError, ClientError) as e:
+ rules = 'ingress rules' if rule_type == 'in' else 'egress rules'
+ module.fail_json_aws(e, msg="Unable to revoke {0}: {1}".format(rules, ip_permissions))
+
+
+def add_new_permissions(client, module, new_ingress, new_egress, group_id):
+ if new_ingress:
+ authorize(client, module, new_ingress, group_id, 'in')
+ if new_egress:
+ authorize(client, module, new_egress, group_id, 'out')
+ return bool(new_ingress or new_egress)
+
+
+def authorize(client, module, ip_permissions, group_id, rule_type):
+ if not module.check_mode:
+ try:
+ if rule_type == 'in':
+ client.authorize_security_group_ingress(
+ aws_retry=True,
+ GroupId=group_id, IpPermissions=ip_permissions)
+ elif rule_type == 'out':
+ client.authorize_security_group_egress(
+ aws_retry=True,
+ GroupId=group_id, IpPermissions=ip_permissions)
+ except (BotoCoreError, ClientError) as e:
+ rules = 'ingress rules' if rule_type == 'in' else 'egress rules'
+ module.fail_json_aws(e, msg="Unable to authorize {0}: {1}".format(rules, ip_permissions))
+
+
+def validate_ip(module, cidr_ip):
+ split_addr = cidr_ip.split('/')
+ if len(split_addr) == 2:
+ # cidr_ip is an IPv4 or IPv6 CIDR that may or may not have host bits set
+ # Get the network bits if IPv4, and validate if IPv6.
+ try:
+ ip = to_subnet(split_addr[0], split_addr[1])
+ if ip != cidr_ip:
+ module.warn("One of your CIDR addresses ({0}) has host bits set. To get rid of this warning, "
+ "check the network mask and make sure that only network bits are set: {1}.".format(
+ cidr_ip, ip))
+ except ValueError:
+ # to_subnet throws a ValueError on IPv6 networks, so we should be working with v6 if we get here
+ try:
+ # ip_network() raises ValueError (caught below) when host bits are
+ # set; the isinstance() result itself is intentionally unused
+ isinstance(ip_network(to_text(cidr_ip)), IPv6Network)
+ ip = cidr_ip
+ except ValueError:
+ # If host bits are set on something other than a /128, ip_network() raises ValueError
+ # The ipv6_cidr in this case probably looks like "2001:DB8:A0B:12F0::1/64" and we just want the network bits
+ ip6 = to_ipv6_subnet(split_addr[0]) + "/" + split_addr[1]
+ if ip6 != cidr_ip:
+ module.warn("One of your IPv6 CIDR addresses ({0}) has host bits set. To get rid of this warning, "
+ "check the network mask and make sure that only network bits are set: {1}.".format(cidr_ip, ip6))
+ return ip6
+ return ip
+ return cidr_ip
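+
+
+# For example (illustration only): validate_ip() normalises '10.0.0.5/8' to
+# '10.0.0.0/8' with a warning, while an already-clean '192.0.2.0/24' is
+# returned unchanged.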
+
+
+def update_tags(client, module, group_id, current_tags, tags, purge_tags):
+ tags_need_modify, tags_to_delete = compare_aws_tags(current_tags, tags, purge_tags)
+
+ if not module.check_mode:
+ if tags_to_delete:
+ try:
+ client.delete_tags(aws_retry=True, Resources=[group_id], Tags=[{'Key': tag} for tag in tags_to_delete])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to delete tags {0}".format(tags_to_delete))
+
+ # Add/update tags
+ if tags_need_modify:
+ try:
+ client.create_tags(aws_retry=True, Resources=[group_id], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify))
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to add tags {0}".format(tags_need_modify))
+
+ return bool(tags_need_modify or tags_to_delete)
+
+
+def update_rule_descriptions(module, client, group_id, present_ingress, named_tuple_ingress_list, present_egress, named_tuple_egress_list):
+ changed = False
+ ingress_needs_desc_update = []
+ egress_needs_desc_update = []
+
+ for present_rule in present_egress:
+ needs_update = [r for r in named_tuple_egress_list if rule_cmp(r, present_rule) and r.description != present_rule.description]
+ for r in needs_update:
+ named_tuple_egress_list.remove(r)
+ egress_needs_desc_update.extend(needs_update)
+ for present_rule in present_ingress:
+ needs_update = [r for r in named_tuple_ingress_list if rule_cmp(r, present_rule) and r.description != present_rule.description]
+ for r in needs_update:
+ named_tuple_ingress_list.remove(r)
+ ingress_needs_desc_update.extend(needs_update)
+
+ if ingress_needs_desc_update:
+ update_rules_description(module, client, 'in', group_id, rules_to_permissions(ingress_needs_desc_update))
+ changed = True
+ if egress_needs_desc_update:
+ update_rules_description(module, client, 'out', group_id, rules_to_permissions(egress_needs_desc_update))
+ changed = True
+ return changed
+
+
+def create_security_group(client, module, name, description, vpc_id):
+ if not module.check_mode:
+ params = dict(GroupName=name, Description=description)
+ if vpc_id:
+ params['VpcId'] = vpc_id
+ try:
+ group = client.create_security_group(aws_retry=True, **params)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to create security group")
+ # When a group is created, an egress rule allowing all traffic to
+ # 0.0.0.0/0 is added automatically, but it's not reflected in the
+ # object returned by the AWS API call. We re-read the group to get
+ # an updated object; Amazon sometimes takes a couple of seconds to
+ # update the security group, so wait until it exists.
+ while True:
+ sleep(3)
+ group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
+ if not (group.get('VpcId') and not group.get('IpPermissionsEgress')):
+ break
+ return group
+ return None
+
+
+def wait_for_rule_propagation(module, client, group, desired_ingress, desired_egress, purge_ingress, purge_egress):
+ group_id = group['GroupId']
+ tries = 6
+
+ def await_rules(group, desired_rules, purge, rule_key):
+ for _i in range(tries):
+ current_rules = set(sum([list(rule_from_group_permission(p)) for p in group[rule_key]], []))
+ if purge and len(current_rules ^ set(desired_rules)) == 0:
+ return group
+ elif purge:
+ conflicts = current_rules ^ set(desired_rules)
+ # For cases where set comparison is equivalent, but invalid port/proto exist
+ for a, b in itertools.combinations(conflicts, 2):
+ if rule_cmp(a, b):
+ conflicts.discard(a)
+ conflicts.discard(b)
+ if not len(conflicts):
+ return group
+ elif current_rules.issuperset(desired_rules) and not purge:
+ return group
+ sleep(10)
+ group = get_security_groups_with_backoff(client, GroupIds=[group_id])['SecurityGroups'][0]
+ module.warn("Ran out of time waiting for {0} {1}. Current: {2}, Desired: {3}".format(group_id, rule_key, current_rules, desired_rules))
+ return group
+
+ group = get_security_groups_with_backoff(client, GroupIds=[group_id])['SecurityGroups'][0]
+ if 'VpcId' in group and module.params.get('rules_egress') is not None:
+ group = await_rules(group, desired_egress, purge_egress, 'IpPermissionsEgress')
+ return await_rules(group, desired_ingress, purge_ingress, 'IpPermissions')
+
+
+def group_exists(client, module, vpc_id, group_id, name):
+ params = {'Filters': []}
+ if group_id:
+ params['GroupIds'] = [group_id]
+ if name:
+ # Add name to filters rather than params['GroupNames']
+ # because params['GroupNames'] only checks the default vpc if no vpc is provided
+ params['Filters'].append({'Name': 'group-name', 'Values': [name]})
+ if vpc_id:
+ params['Filters'].append({'Name': 'vpc-id', 'Values': [vpc_id]})
+ # Don't filter by description to maintain backwards compatibility
+
+ try:
+ security_groups = sg_exists_with_backoff(client, **params).get('SecurityGroups', [])
+ all_groups = get_security_groups_with_backoff(client).get('SecurityGroups', [])
+ except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Error in describe_security_groups")
+
+ if security_groups:
+ groups = dict((group['GroupId'], group) for group in all_groups)
+ groups.update(dict((group['GroupName'], group) for group in all_groups))
+ if vpc_id:
+ vpc_wins = dict((group['GroupName'], group) for group in all_groups if group.get('VpcId') and group['VpcId'] == vpc_id)
+ groups.update(vpc_wins)
+ # maintain backwards compatibility by using the last matching group
+ return security_groups[-1], groups
+ return None, {}
+
+
+def get_diff_final_resource(client, module, security_group):
+ def get_account_id(security_group, module):
+ try:
+ owner_id = security_group.get('owner_id', current_account_id)
+ except (BotoCoreError, ClientError) as e:
+ owner_id = "Unable to determine owner_id: {0}".format(to_text(e))
+ return owner_id
+
+ def get_final_tags(security_group_tags, specified_tags, purge_tags):
+ if specified_tags is None:
+ return security_group_tags
+ tags_need_modify, tags_to_delete = 
compare_aws_tags(security_group_tags, specified_tags, purge_tags) + end_result_tags = dict((k, v) for k, v in specified_tags.items() if k not in tags_to_delete) + end_result_tags.update(dict((k, v) for k, v in security_group_tags.items() if k not in tags_to_delete)) + end_result_tags.update(tags_need_modify) + return end_result_tags + + def get_final_rules(client, module, security_group_rules, specified_rules, purge_rules): + if specified_rules is None: + return security_group_rules + if purge_rules: + final_rules = [] + else: + final_rules = list(security_group_rules) + specified_rules = flatten_nested_targets(module, deepcopy(specified_rules)) + for rule in specified_rules: + format_rule = { + 'from_port': None, 'to_port': None, 'ip_protocol': rule.get('proto', 'tcp'), + 'ip_ranges': [], 'ipv6_ranges': [], 'prefix_list_ids': [], 'user_id_group_pairs': [] + } + if rule.get('proto', 'tcp') in ('all', '-1', -1): + format_rule['ip_protocol'] = '-1' + format_rule.pop('from_port') + format_rule.pop('to_port') + elif rule.get('ports'): + if rule.get('ports') and (isinstance(rule['ports'], string_types) or isinstance(rule['ports'], int)): + rule['ports'] = [rule['ports']] + for port in rule.get('ports'): + if isinstance(port, string_types) and '-' in port: + format_rule['from_port'], format_rule['to_port'] = port.split('-') + else: + format_rule['from_port'] = format_rule['to_port'] = port + elif rule.get('from_port') or rule.get('to_port'): + format_rule['from_port'] = rule.get('from_port', rule.get('to_port')) + format_rule['to_port'] = rule.get('to_port', rule.get('from_port')) + for source_type in ('cidr_ip', 'cidr_ipv6', 'prefix_list_id'): + if rule.get(source_type): + rule_key = {'cidr_ip': 'ip_ranges', 'cidr_ipv6': 'ipv6_ranges', 'prefix_list_id': 'prefix_list_ids'}.get(source_type) + if rule.get('rule_desc'): + format_rule[rule_key] = [{source_type: rule[source_type], 'description': rule['rule_desc']}] + else: + if not isinstance(rule[source_type], list): + rule[source_type] = [rule[source_type]] + format_rule[rule_key] = [{source_type: target} for target in rule[source_type]] + if rule.get('group_id') or rule.get('group_name'): + rule_sg = group_exists(client, module, module.params['vpc_id'], rule.get('group_id'), rule.get('group_name'))[0] + if rule_sg is None: + # --diff during --check + format_rule['user_id_group_pairs'] = [{ + 'group_id': rule.get('group_id'), + 'group_name': rule.get('group_name'), + 'peering_status': None, + 'user_id': get_account_id(security_group, module), + 'vpc_id': module.params['vpc_id'], + 'vpc_peering_connection_id': None + }] + else: + rule_sg = camel_dict_to_snake_dict(rule_sg) + format_rule['user_id_group_pairs'] = [{ + 'description': rule_sg.get('description', rule_sg.get('group_desc')), + 'group_id': rule_sg.get('group_id', rule.get('group_id')), + 'group_name': rule_sg.get('group_name', rule.get('group_name')), + 'peering_status': rule_sg.get('peering_status'), + 'user_id': rule_sg.get('user_id', get_account_id(security_group, module)), + 'vpc_id': rule_sg.get('vpc_id', module.params['vpc_id']), + 'vpc_peering_connection_id': rule_sg.get('vpc_peering_connection_id') + }] + for k, v in list(format_rule['user_id_group_pairs'][0].items()): + if v is None: + format_rule['user_id_group_pairs'][0].pop(k) + final_rules.append(format_rule) + # Order final rules consistently + final_rules.sort(key=get_ip_permissions_sort_key) + return final_rules + + security_group_ingress = security_group.get('ip_permissions', []) + specified_ingress = module.params['rules'] 
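+ # For illustration: get_final_rules() above projects a requested rule such as
+ # {'proto': 'tcp', 'ports': [80], 'cidr_ip': '0.0.0.0/0', 'rule_desc': 'http'}
+ # into {'from_port': 80, 'to_port': 80, 'ip_protocol': 'tcp',
+ # 'ip_ranges': [{'cidr_ip': '0.0.0.0/0', 'description': 'http'}], ...},
+ # mirroring the snake_cased describe_security_groups output.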
+ purge_ingress = module.params['purge_rules'] + security_group_egress = security_group.get('ip_permissions_egress', []) + specified_egress = module.params['rules_egress'] + purge_egress = module.params['purge_rules_egress'] + return { + 'description': module.params['description'], + 'group_id': security_group.get('group_id', 'sg-xxxxxxxx'), + 'group_name': security_group.get('group_name', module.params['name']), + 'ip_permissions': get_final_rules(client, module, security_group_ingress, specified_ingress, purge_ingress), + 'ip_permissions_egress': get_final_rules(client, module, security_group_egress, specified_egress, purge_egress), + 'owner_id': get_account_id(security_group, module), + 'tags': get_final_tags(security_group.get('tags', {}), module.params['tags'], module.params['purge_tags']), + 'vpc_id': security_group.get('vpc_id', module.params['vpc_id'])} + + +def flatten_nested_targets(module, rules): + def _flatten(targets): + for target in targets: + if isinstance(target, list): + module.deprecate('Support for nested lists in cidr_ip and cidr_ipv6 has been ' + 'deprecated. The flatten filter can be used instead.', + date='2024-12-01', collection_name='amazon.aws') + for t in _flatten(target): + yield t + elif isinstance(target, string_types): + yield target + + if rules is not None: + for rule in rules: + target_list_type = None + if isinstance(rule.get('cidr_ip'), list): + target_list_type = 'cidr_ip' + elif isinstance(rule.get('cidr_ipv6'), list): + target_list_type = 'cidr_ipv6' + if target_list_type is not None: + rule[target_list_type] = list(_flatten(rule[target_list_type])) + return rules + + +def get_rule_sort_key(dicts): + if dicts.get('cidr_ip'): + return dicts.get('cidr_ip') + elif dicts.get('cidr_ipv6'): + return dicts.get('cidr_ipv6') + elif dicts.get('prefix_list_id'): + return dicts.get('prefix_list_id') + elif dicts.get('group_id'): + return dicts.get('group_id') + return None + + +def get_ip_permissions_sort_key(rule): + if rule.get('ip_ranges'): + rule.get('ip_ranges').sort(key=get_rule_sort_key) + return rule.get('ip_ranges')[0]['cidr_ip'] + elif rule.get('ipv6_ranges'): + rule.get('ipv6_ranges').sort(key=get_rule_sort_key) + return rule.get('ipv6_ranges')[0]['cidr_ipv6'] + elif rule.get('prefix_list_ids'): + rule.get('prefix_list_ids').sort(key=get_rule_sort_key) + return rule.get('prefix_list_ids')[0]['prefix_list_id'] + elif rule.get('user_id_group_pairs'): + rule.get('user_id_group_pairs').sort(key=get_rule_sort_key) + return rule.get('user_id_group_pairs')[0].get('group_id', '') + return None + + +def main(): + argument_spec = dict( + name=dict(), + group_id=dict(), + description=dict(), + vpc_id=dict(), + rules=dict(type='list', elements='dict'), + rules_egress=dict(type='list', elements='dict', aliases=['egress_rules']), + state=dict(default='present', type='str', choices=['present', 'absent']), + purge_rules=dict(default=True, required=False, type='bool'), + purge_rules_egress=dict(default=True, required=False, type='bool', aliases=['purge_egress_rules']), + tags=dict(required=False, type='dict', aliases=['resource_tags']), + purge_tags=dict(default=True, required=False, type='bool') + ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=[['name', 'group_id']], + required_if=[['state', 'present', ['name']]], + ) + + name = module.params['name'] + group_id = module.params['group_id'] + description = module.params['description'] + vpc_id = module.params['vpc_id'] + rules = 
flatten_nested_targets(module, deepcopy(module.params['rules'])) + rules_egress = flatten_nested_targets(module, deepcopy(module.params['rules_egress'])) + rules = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(rules))) + rules_egress = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(rules_egress))) + state = module.params.get('state') + purge_rules = module.params['purge_rules'] + purge_rules_egress = module.params['purge_rules_egress'] + tags = module.params['tags'] + purge_tags = module.params['purge_tags'] + + if state == 'present' and not description: + module.fail_json(msg='Must provide description when state is present.') + + changed = False + client = module.client('ec2', AWSRetry.jittered_backoff()) + + group, groups = group_exists(client, module, vpc_id, group_id, name) + group_created_new = not bool(group) + + global current_account_id + current_account_id = get_aws_account_id(module) + + before = {} + after = {} + + # Ensure requested group is absent + if state == 'absent': + if group: + # found a match, delete it + before = camel_dict_to_snake_dict(group, ignore_list=['Tags']) + before['tags'] = boto3_tag_list_to_ansible_dict(before.get('tags', [])) + try: + if not module.check_mode: + client.delete_security_group(aws_retry=True, GroupId=group['GroupId']) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to delete security group '%s'" % group) + else: + group = None + changed = True + else: + # no match found, no changes required + pass + + # Ensure requested group is present + elif state == 'present': + if group: + # existing group + before = camel_dict_to_snake_dict(group, ignore_list=['Tags']) + before['tags'] = boto3_tag_list_to_ansible_dict(before.get('tags', [])) + if group['Description'] != description: + module.warn("Group description does not match existing group. Descriptions cannot be changed without deleting " + "and re-creating the security group. 
Try using state=absent to delete, then rerunning this task.")
+ else:
+ # no match found, create it
+ group = create_security_group(client, module, name, description, vpc_id)
+ changed = True
+
+ if tags is not None and group is not None:
+ current_tags = boto3_tag_list_to_ansible_dict(group.get('Tags', []))
+ changed |= update_tags(client, module, group['GroupId'], current_tags, tags, purge_tags)
+
+ if group:
+ named_tuple_ingress_list = []
+ named_tuple_egress_list = []
+ current_ingress = sum([list(rule_from_group_permission(p)) for p in group['IpPermissions']], [])
+ current_egress = sum([list(rule_from_group_permission(p)) for p in group['IpPermissionsEgress']], [])
+
+ for new_rules, _rule_type, named_tuple_rule_list in [(rules, 'in', named_tuple_ingress_list),
+ (rules_egress, 'out', named_tuple_egress_list)]:
+ if new_rules is None:
+ continue
+ for rule in new_rules:
+ target_type, target, target_group_created = get_target_from_rule(
+ module, client, rule, name, group, groups, vpc_id)
+ changed |= target_group_created
+
+ rule.pop('icmp_type', None)
+ rule.pop('icmp_code', None)
+ rule.pop('icmp_keys', None)
+
+ if rule.get('proto', 'tcp') in ('all', '-1', -1):
+ rule['proto'] = '-1'
+ rule['from_port'] = None
+ rule['to_port'] = None
+
+ try:
+ int(rule.get('proto', 'tcp'))
+ rule['proto'] = to_text(rule.get('proto', 'tcp'))
+ rule['from_port'] = None
+ rule['to_port'] = None
+ except ValueError:
+ # rule does not use numeric protocol spec
+ pass
+ named_tuple_rule_list.append(
+ Rule(
+ port_range=(rule['from_port'], rule['to_port']),
+ protocol=to_text(rule.get('proto', 'tcp')),
+ target=target, target_type=target_type,
+ description=rule.get('rule_desc'),
+ )
+ )
+
+ if module.params.get('rules_egress') is None and 'VpcId' in group:
+ # when no egress rules are specified and we're in a VPC,
+ # we add in a default allow all out rule, which was the
+ # default behavior before egress rules were added
+ rule = Rule((None, None), '-1', '0.0.0.0/0', 'ipv4', None)
+ if rule in current_egress:
+ named_tuple_egress_list.append(rule)
+ else:
+ current_egress.append(rule)
+
+ # Lists used to decide which existing rules to purge
+ present_ingress = list(set(named_tuple_ingress_list).union(set(current_ingress)))
+ present_egress = list(set(named_tuple_egress_list).union(set(current_egress)))
+
+ if purge_rules:
+ revoke_ingress = []
+ for p in present_ingress:
+ if not any(rule_cmp(p, b) for b in named_tuple_ingress_list):
+ revoke_ingress.append(to_permission(p))
+ else:
+ revoke_ingress = []
+ if purge_rules_egress and module.params.get('rules_egress') is not None:
+ if module.params.get('rules_egress') == []:
+ revoke_egress = [
+ to_permission(r) for r in set(present_egress) - set(named_tuple_egress_list)
+ if r != Rule((None, None), '-1', '0.0.0.0/0', 'ipv4', None)
+ ]
+ else:
+ revoke_egress = []
+ for p in present_egress:
+ if not any(rule_cmp(p, b) for b in named_tuple_egress_list):
+ revoke_egress.append(to_permission(p))
+ else:
+ revoke_egress = []
+
+ # named_tuple_ingress_list and named_tuple_egress_list get updated by
+ # update_rule_descriptions(); deep copy these two lists to record the
+ # 'desired' ingress and egress security group permissions
+ desired_ingress = deepcopy(named_tuple_ingress_list)
+ desired_egress = deepcopy(named_tuple_egress_list)
+
+ changed |= update_rule_descriptions(module, client, group['GroupId'], present_ingress,
+ named_tuple_ingress_list, present_egress, named_tuple_egress_list)
+
+ # Revoke old rules
+ changed |= remove_old_permissions(client, module, revoke_ingress, revoke_egress, group['GroupId'])
+
+ new_ingress_permissions = rules_to_permissions(set(named_tuple_ingress_list) - set(current_ingress))
+ new_egress_permissions = rules_to_permissions(set(named_tuple_egress_list) - set(current_egress))
+ # Authorize new rules
+ changed |= add_new_permissions(client, module, new_ingress_permissions, new_egress_permissions, group['GroupId'])
+
+ if group_created_new and module.params.get('rules') is None and module.params.get('rules_egress') is None:
+ # A new group with no rules provided is already being awaited.
+ # When it is created we wait for the default egress rule to be added by AWS
+ security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
+ elif changed and not module.check_mode:
+ # keep polling until the current security group rules match the desired ingress and egress rules
+ security_group = wait_for_rule_propagation(module, client, group, desired_ingress, desired_egress, purge_rules, purge_rules_egress)
+ else:
+ security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
+ security_group = camel_dict_to_snake_dict(security_group, ignore_list=['Tags'])
+ security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', []))
+
+ else:
+ security_group = {'group_id': None}
+
+ if module._diff:
+ if module.params['state'] == 'present':
+ after = get_diff_final_resource(client, module, security_group)
+ if before.get('ip_permissions'):
+ before['ip_permissions'].sort(key=get_ip_permissions_sort_key)
+
+ security_group['diff'] = [{'before': before, 'after': after}]
+
+ module.exit_json(changed=changed, **security_group)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_security_group_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_security_group_info.py
new file mode 100644
index 000000000..3440f90e8
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_security_group_info.py
@@ -0,0 +1,305 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_security_group_info
+version_added: 1.0.0
+short_description: Gather information about EC2 security groups in AWS
+description:
+ - Gather information about EC2 security groups in AWS.
+author:
+- Henrique Rodrigues (@Sodki)
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
+ U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html) for
+ possible filters. Filter names and values are case sensitive. You can also use underscores (_)
+ instead of dashes (-) in the filter keys, which will take precedence in case of conflict.
+ required: false
+ default: {}
+ type: dict
+notes:
+ - By default, the module will return all security groups in a region. To limit results use the
+ appropriate filters.
+ - Prior to release 5.0.0 this module was called C(amazon.aws.ec2_group_info). The usage did not
+ change.
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all security groups
+- amazon.aws.ec2_security_group_info:
+
+# Gather information about all security groups in a specific VPC
+- amazon.aws.ec2_security_group_info:
+ filters:
+ vpc-id: vpc-12345678
+
+# Gather information about a security group
+- amazon.aws.ec2_security_group_info:
+ filters:
+ group-name: example-1
+
+# Gather information about a security group by ID
+- amazon.aws.ec2_security_group_info:
+ filters:
+ group-id: sg-12345678
+
+# Gather information about a security group with multiple filters, also mixing the use of underscores as filter keys
+- amazon.aws.ec2_security_group_info:
+ filters:
+ group_id: sg-12345678
+ vpc-id: vpc-12345678
+
+# Gather information about various security groups
+- amazon.aws.ec2_security_group_info:
+ filters:
+ group-name:
+ - example-1
+ - example-2
+ - example-3
+
+# Gather information about any security group with a tag key Name and value Example.
+# The quotes around "tag:Name" are important because of the colon in the key
+- amazon.aws.ec2_security_group_info:
+ filters:
+ "tag:Name": Example
+'''
+
+RETURN = '''
+security_groups:
+ description: Security groups that match the provided filters. Each element consists of a dict with all the information related to that security group.
+ type: list
+ returned: always
+ elements: dict
+ contains:
+ description:
+ description: The description of the security group.
+ returned: always
+ type: str
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ ip_permissions:
+ description: The inbound rules associated with the security group.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ ip_protocol:
+ description: The IP protocol name or number.
+ returned: always
+ type: str
+ ip_ranges:
+ description: The IPv4 ranges.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ cidr_ip:
+ description: The IPv4 CIDR range.
+ returned: always
+ type: str
+ ipv6_ranges:
+ description: The IPv6 ranges.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ cidr_ipv6:
+ description: The IPv6 CIDR range.
+ returned: always
+ type: str
+ prefix_list_ids:
+ description: The prefix list IDs.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ prefix_list_id:
+ description: The ID of the prefix.
+ returned: always
+ type: str
+ user_id_group_pairs:
+ description: The security group and AWS account ID pairs.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The security group ID of the pair.
+ returned: always
+ type: str
+ user_id:
+ description: The user ID of the pair.
+ returned: always
+ type: str
+ ip_permissions_egress:
+ description: The outbound rules associated with the security group.
+ returned: always + type: list + elements: dict + contains: + ip_protocol: + description: The IP protocol name or number. + returned: always + type: str + ip_ranges: + description: The IPv4 ranges. + returned: always + type: list + elements: dict + contains: + cidr_ip: + description: The IPv4 CIDR range. + returned: always + type: str + ipv6_ranges: + description: The IPv6 ranges. + returned: always + type: list + elements: dict + contains: + cidr_ipv6: + description: The IPv6 CIDR range. + returned: always + type: str + prefix_list_ids: + description: The prefix list IDs. + returned: always + type: list + elements: dict + contains: + prefix_list_id: + description: The ID of the prefix. + returned: always + type: str + user_id_group_pairs: + description: The security group and AWS account ID pairs. + returned: always + type: list + elements: dict + contains: + group_id: + description: The security group ID of the pair. + returned: always + type: str + user_id: + description: The user ID of the pair. + returned: always + type: str + owner_id: + description: The AWS account ID of the owner of the security group. + returned: always + type: str + tags: + description: The tags associated with the security group. + returned: always + type: dict + vpc_id: + description: The ID of the VPC for the security group. + returned: always + type: str + sample: [ + { + "description": "created by rds_instance integration tests", + "group_id": "sg-036496a610b79da88", + "group_name": "ansible-test-89355088-unknown5c5f67f3ad09-sg-1", + "ip_permissions": [], + "ip_permissions_egress": [ + { + "ip_protocol": "-1", + "ip_ranges": [ + { + "cidr_ip": "0.0.0.0/0" + } + ], + "ipv6_ranges": [], + "prefix_list_ids": [], + "user_id_group_pairs": [] + } + ], + "owner_id": "123456789012", + "tags": {}, + "vpc_id": "vpc-0bc3bb03f97405435" + } + ] +''' + +try: + from botocore.exceptions import BotoCoreError, ClientError +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict + + +def main(): + argument_spec = dict( + filters=dict(default={}, type='dict') + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + connection = module.client('ec2', AWSRetry.jittered_backoff()) + + # Replace filter key underscores with dashes, for compatibility, except if we're dealing with tags + filters = module.params.get("filters") + sanitized_filters = dict() + + for key in filters: + if key.startswith("tag:"): + sanitized_filters[key] = filters[key] + else: + sanitized_filters[key.replace("_", "-")] = filters[key] + + try: + security_groups = connection.describe_security_groups( + aws_retry=True, + Filters=ansible_dict_to_boto3_filter_list(sanitized_filters) + ) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg='Failed to describe security groups') + + snaked_security_groups = [] + for security_group in security_groups['SecurityGroups']: + # Modify boto3 tags list to be ansible friendly dict + # but don't camel case tags + security_group = camel_dict_to_snake_dict(security_group) + security_group['tags'] = 
boto3_tag_list_to_ansible_dict(security_group.get('tags', []), tag_name_key_name='key', tag_value_key_name='value')
+ snaked_security_groups.append(security_group)
+
+ module.exit_json(security_groups=snaked_security_groups)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot.py b/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot.py
new file mode 100644
index 000000000..62952cf32
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot.py
@@ -0,0 +1,421 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_snapshot
+version_added: 1.0.0
+short_description: Creates a snapshot from an existing volume
+description:
+ - Creates an EC2 snapshot from an existing EBS volume.
+options:
+ volume_id:
+ description:
+ - Volume from which to take the snapshot.
+ required: false
+ type: str
+ description:
+ description:
+ - Description to be applied to the snapshot.
+ required: false
+ type: str
+ instance_id:
+ description:
+ - Instance that has the required volume to snapshot mounted.
+ required: false
+ type: str
+ device_name:
+ description:
+ - Device name of a mounted volume to be snapshotted.
+ required: false
+ type: str
+ snapshot_tags:
+ description:
+ - A dictionary of tags to add to the snapshot.
+ - If the volume has a C(Name) tag this will be automatically added to the
+ snapshot.
+ type: dict
+ required: false
+ default: {}
+ wait:
+ description:
+ - Wait for the snapshot to be ready.
+ type: bool
+ required: false
+ default: true
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds.
+ required: false
+ default: 600
+ type: int
+ state:
+ description:
+ - Whether to create (C(present)) or delete (C(absent)) a snapshot.
+ required: false
+ default: present
+ choices: ['absent', 'present']
+ type: str
+ snapshot_id:
+ description:
+ - Snapshot ID to remove.
+ required: false
+ type: str
+ last_snapshot_min_age:
+ description:
+ - If the volume's most recent snapshot has started less than I(last_snapshot_min_age) minutes ago, a new snapshot will not be created.
+ required: false
+ default: 0
+ type: int
+author: "Will Thames (@willthames)"
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Simple snapshot of volume using volume_id
+- amazon.aws.ec2_snapshot:
+ volume_id: vol-abcdef12
+ description: snapshot of /data from DB123 taken 2013/11/28 12:18:32
+
+# Snapshot of volume mounted on device_name attached to instance_id
+- amazon.aws.ec2_snapshot:
+ instance_id: i-12345678
+ device_name: /dev/sdb1
+ description: snapshot of /data from DB123 taken 2013/11/28 12:18:32
+
+# Snapshot of volume with tagging
+- amazon.aws.ec2_snapshot:
+ instance_id: i-12345678
+ device_name: /dev/sdb1
+ snapshot_tags:
+ frequency: hourly
+ source: /data
+
+# Remove a snapshot
+- amazon.aws.ec2_snapshot:
+ snapshot_id: snap-abcd1234
+ state: absent
+
+# Create a snapshot only if the most recent one is older than 1 hour
+- amazon.aws.ec2_snapshot:
+ volume_id: vol-abcdef12
+ last_snapshot_min_age: 60
+'''
+
+RETURN = '''
+snapshot_id:
+ description: The ID of the snapshot. Each snapshot receives a unique identifier when it is created.
+ type: str
+ returned: always
+ sample: snap-01234567
+tags:
+ description: Any tags assigned to the snapshot.
+ type: dict
+ returned: always
+ sample: "{ 'Name': 'instance-name' }"
+volume_id:
+ description: The ID of the volume that was used to create the snapshot.
+ type: str
+ returned: always
+ sample: vol-01234567
+volume_size:
+ description: The size of the volume, in GiB.
+ type: int
+ returned: always
+ sample: 8
+'''
+
+import datetime
+
+try:
+ import botocore
+except ImportError:
+ pass # Taken care of by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+
+
+def _get_most_recent_snapshot(snapshots, max_snapshot_age_secs=None, now=None):
+ """
+ Gets the most recently created snapshot and optionally filters the result
+ if the snapshot is too old
+ :param snapshots: list of snapshots to search
+ :param max_snapshot_age_secs: filter the result if it's older than this
+ :param now: simulate time -- used for unit testing
+ :return: the most recent snapshot, or None if there is none or it is too old
+ """
+ if len(snapshots) == 0:
+ return None
+
+ if not now:
+ now = datetime.datetime.now(datetime.timezone.utc)
+
+ youngest_snapshot = max(snapshots, key=lambda s: s['StartTime'])
+ snapshot_start = youngest_snapshot['StartTime']
+ snapshot_age = now - snapshot_start
+
+ if max_snapshot_age_secs is not None:
+ if snapshot_age.total_seconds() > max_snapshot_age_secs:
+ return None
+
+ return youngest_snapshot
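+
+
+# For example (illustration only): given snapshots started 10 and 90 minutes
+# ago, _get_most_recent_snapshot(snaps, max_snapshot_age_secs=3600) returns
+# the 10-minute-old one, while max_snapshot_age_secs=300 yields None.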
+
+
+def get_volume_by_instance(module, ec2, device_name, instance_id):
+ try:
+ _filter = {
+ 'attachment.instance-id': instance_id,
+ 'attachment.device': device_name
+ }
+ volumes = ec2.describe_volumes(
+ aws_retry=True,
+ Filters=ansible_dict_to_boto3_filter_list(_filter)
+ )['Volumes']
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to describe Volume")
+
+ if not volumes:
+ module.fail_json(
+ msg="Could not find volume with name {0} attached to instance {1}".format(
+ device_name, instance_id
+ )
+ )
+
+ volume = volumes[0]
+ return volume
+
+
+def get_volume_by_id(module, ec2, volume):
+ try:
+ volumes = ec2.describe_volumes(
+ aws_retry=True,
+ VolumeIds=[volume],
+ )['Volumes']
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to describe Volume")
+
+ if not volumes:
+ module.fail_json(
+ msg="Could not find volume with id {0}".format(volume)
+ )
+
+ volume = volumes[0]
+ return volume
+
+
+@AWSRetry.jittered_backoff()
+def _describe_snapshots(ec2, **params):
+ paginator = ec2.get_paginator('describe_snapshots')
+ return paginator.paginate(**params).build_full_result()
+
+
+# Handle SnapshotCreationPerVolumeRateExceeded separately because we need a much
+# longer delay than normal
+@AWSRetry.jittered_backoff(catch_extra_error_codes=['SnapshotCreationPerVolumeRateExceeded'], delay=15)
+def _create_snapshot(ec2, **params):
+ # Fast retry on common failures ('global' rate limits)
+ return ec2.create_snapshot(aws_retry=True, **params)
+
+
+def get_snapshots_by_volume(module, ec2, volume_id):
+ _filter = {'volume-id': volume_id}
+ try:
+ results = _describe_snapshots(
+ ec2,
+ Filters=ansible_dict_to_boto3_filter_list(_filter)
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to describe snapshots from volume")
+
+ return results['Snapshots']
+
+
+def create_snapshot(module, ec2, description=None, wait=None,
+ wait_timeout=None, volume_id=None, instance_id=None,
+ snapshot_id=None, device_name=None, snapshot_tags=None,
+ last_snapshot_min_age=None):
+ snapshot = None
+ changed = False
+
+ if instance_id:
+ volume = get_volume_by_instance(
+ module, ec2, device_name, instance_id
+ )
+ volume_id = volume['VolumeId']
+ else:
+ volume = get_volume_by_id(module, ec2, volume_id)
+ if 'Tags' not in volume:
+ volume['Tags'] = []
+ if last_snapshot_min_age > 0:
+ current_snapshots = get_snapshots_by_volume(module, ec2, volume_id)
+ last_snapshot_min_age = last_snapshot_min_age * 60 # Convert to seconds
+ snapshot = _get_most_recent_snapshot(
+ current_snapshots,
+ max_snapshot_age_secs=last_snapshot_min_age
+ )
+ # Create a new snapshot if we didn't find an existing one to use
+ if snapshot is None:
+ volume_tags = boto3_tag_list_to_ansible_dict(volume['Tags'])
+ volume_name = volume_tags.get('Name')
+ _tags = dict()
+ if volume_name:
+ _tags['Name'] = volume_name
+ if snapshot_tags:
+ _tags.update(snapshot_tags)
+
+ params = {'VolumeId': volume_id}
+ if description:
+ params['Description'] = description
+ if _tags:
+ params['TagSpecifications'] = [{
+ 'ResourceType': 'snapshot',
+ 'Tags': ansible_dict_to_boto3_tag_list(_tags),
+ }]
+ try:
+ if module.check_mode:
+ module.exit_json(changed=True, msg='Would have created a snapshot if not in check mode',
+ volume_id=volume['VolumeId'], volume_size=volume['Size'])
+ snapshot = _create_snapshot(ec2, **params)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to create snapshot")
+ changed = True
+ if wait:
+ waiter = get_waiter(ec2, 'snapshot_completed')
+ try:
+ waiter.wait(
+ SnapshotIds=[snapshot['SnapshotId']],
+ WaiterConfig=dict(Delay=3, MaxAttempts=wait_timeout // 3)
+ )
+ except botocore.exceptions.WaiterError as e:
+ module.fail_json_aws(e, msg='Timed out while creating snapshot')
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(
+ e, msg='Error while waiting for snapshot creation'
+ )
+
+ _tags = boto3_tag_list_to_ansible_dict(snapshot['Tags'])
+ _snapshot = camel_dict_to_snake_dict(snapshot)
+ _snapshot['tags'] = _tags
+ results = {
+ 'snapshot_id': snapshot['SnapshotId'],
+ 'volume_id': snapshot['VolumeId'],
+ 'volume_size': snapshot['VolumeSize'],
+ 'tags': _tags,
+ 'snapshots': [_snapshot],
+ }
+
+ module.exit_json(changed=changed, **results)
+
+
+def delete_snapshot(module, ec2, snapshot_id):
+ if module.check_mode:
+ try:
+ _describe_snapshots(ec2, SnapshotIds=[snapshot_id])
+ module.exit_json(changed=True, msg='Would have deleted snapshot if not in check mode')
+ except is_boto3_error_code('InvalidSnapshot.NotFound'):
+ module.exit_json(changed=False, msg='Invalid snapshot ID - snapshot not found')
+ try:
+ ec2.delete_snapshot(aws_retry=True, SnapshotId=snapshot_id)
+ except 
is_boto3_error_code('InvalidSnapshot.NotFound'): + module.exit_json(changed=False) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to delete snapshot") + + # successful delete + module.exit_json(changed=True) + + +def create_snapshot_ansible_module(): + argument_spec = dict( + volume_id=dict(), + description=dict(), + instance_id=dict(), + snapshot_id=dict(), + device_name=dict(), + wait=dict(type='bool', default=True), + wait_timeout=dict(type='int', default=600), + last_snapshot_min_age=dict(type='int', default=0), + snapshot_tags=dict(type='dict', default=dict()), + state=dict(choices=['absent', 'present'], default='present'), + ) + mutually_exclusive = [ + ('instance_id', 'snapshot_id', 'volume_id'), + ] + required_if = [ + ('state', 'absent', ('snapshot_id',)), + ] + required_one_of = [ + ('instance_id', 'snapshot_id', 'volume_id'), + ] + required_together = [ + ('instance_id', 'device_name'), + ] + + module = AnsibleAWSModule( + argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + required_if=required_if, + required_one_of=required_one_of, + required_together=required_together, + supports_check_mode=True, + ) + + return module + + +def main(): + module = create_snapshot_ansible_module() + + volume_id = module.params.get('volume_id') + snapshot_id = module.params.get('snapshot_id') + description = module.params.get('description') + instance_id = module.params.get('instance_id') + device_name = module.params.get('device_name') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + last_snapshot_min_age = module.params.get('last_snapshot_min_age') + snapshot_tags = module.params.get('snapshot_tags') + state = module.params.get('state') + + ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10)) + + if state == 'absent': + delete_snapshot( + module=module, + ec2=ec2, + snapshot_id=snapshot_id, + ) + else: + create_snapshot( + module=module, + description=description, + wait=wait, + wait_timeout=wait_timeout, + ec2=ec2, + volume_id=volume_id, + instance_id=instance_id, + snapshot_id=snapshot_id, + device_name=device_name, + snapshot_tags=snapshot_tags, + last_snapshot_min_age=last_snapshot_min_age, + ) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot_info.py new file mode 100644 index 000000000..2b7b51158 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot_info.py @@ -0,0 +1,295 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: ec2_snapshot_info +version_added: 1.0.0 +short_description: Gathers information about EC2 volume snapshots in AWS +description: + - Gathers information about EC2 volume snapshots in AWS. +author: + - Rob White (@wimnat) + - Aubin Bikouo (@abikouo) +options: + snapshot_ids: + description: + - If you specify one or more snapshot IDs, only snapshots that have the specified IDs are returned. 
+    required: false
+    default: []
+    type: list
+    elements: str
+  owner_ids:
+    description:
+      - If you specify one or more snapshot owners, only snapshots from the specified owners and for which you have
+        access are returned.
+    required: false
+    default: []
+    type: list
+    elements: str
+  restorable_by_user_ids:
+    description:
+      - If you specify a list of restorable users, only snapshots with create snapshot permissions for those users are
+        returned.
+    required: false
+    default: []
+    type: list
+    elements: str
+  filters:
+    description:
+      - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
+        U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSnapshots.html) for possible filters. Filter
+        names and values are case sensitive.
+    required: false
+    type: dict
+    default: {}
+  max_results:
+    description:
+      - The maximum number of snapshot results returned in paginated output.
+      - When used, only a single page is returned, along with a C(next_token_id) response element.
+      - The remaining results of the initial request can be seen by sending another request with the returned C(next_token_id) value.
+      - This value can be between 5 and 1000; if I(max_results) is given a value larger than 1000, only 1000 results are returned.
+      - If this parameter is not used, then DescribeSnapshots returns all results.
+      - This parameter is mutually exclusive with I(snapshot_ids).
+    required: false
+    type: int
+  next_token_id:
+    description:
+      - Contains the value returned from a previous paginated request where I(max_results) was used and the results exceeded the value of that parameter.
+      - Pagination continues from the end of the previous results that returned the I(next_token_id) value.
+      - This parameter is mutually exclusive with I(snapshot_ids).
+    required: false
+    type: str
+notes:
+  - By default, the module will return all snapshots, including public ones. To limit results to snapshots owned by
+    the account, use the filter 'owner-id'.
+
+extends_documentation_fragment:
+  - amazon.aws.ec2
+  - amazon.aws.aws
+  - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all snapshots, including public ones
+- amazon.aws.ec2_snapshot_info:
+
+# Gather information about all snapshots owned by the account 123456789012
+- amazon.aws.ec2_snapshot_info:
+    filters:
+      owner-id: 123456789012
+
+# Or alternatively...
+- amazon.aws.ec2_snapshot_info:
+    owner_ids:
+      - 123456789012
+
+# Gather information about a particular snapshot using ID
+- amazon.aws.ec2_snapshot_info:
+    filters:
+      snapshot-id: snap-00112233
+
+# Or alternatively...
+- amazon.aws.ec2_snapshot_info:
+    snapshot_ids:
+      - snap-00112233
+
+# Gather information about any snapshot with a tag key Name and value Example
+- amazon.aws.ec2_snapshot_info:
+    filters:
+      "tag:Name": Example
+
+# Gather information about any snapshot with an error status
+- amazon.aws.ec2_snapshot_info:
+    filters:
+      status: error
+
+'''
+
+RETURN = r'''
+snapshots:
+  description: List of snapshots retrieved with their respective info.
+  type: list
+  returned: success
+  elements: dict
+  contains:
+    snapshot_id:
+      description: The ID of the snapshot. Each snapshot receives a unique identifier when it is created.
+      type: str
+      returned: always
+      sample: snap-01234567
+    volume_id:
+      description: The ID of the volume that was used to create the snapshot.
+ type: str + returned: always + sample: vol-01234567 + state: + description: The snapshot state (completed, pending or error). + type: str + returned: always + sample: completed + state_message: + description: + - Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy operation fails (for example, if the proper + AWS Key Management Service (AWS KMS) permissions are not obtained) this field displays error state details to help you diagnose why the + error occurred. + type: str + returned: always + sample: + start_time: + description: The time stamp when the snapshot was initiated. + type: str + returned: always + sample: "2015-02-12T02:14:02+00:00" + progress: + description: The progress of the snapshot, as a percentage. + type: str + returned: always + sample: "100%" + owner_id: + description: The AWS account ID of the EBS snapshot owner. + type: str + returned: always + sample: "123456789012" + description: + description: The description for the snapshot. + type: str + returned: always + sample: "My important backup" + volume_size: + description: The size of the volume, in GiB. + type: int + returned: always + sample: 8 + owner_alias: + description: The AWS account alias (for example, amazon, self) or AWS account ID that owns the snapshot. + type: str + returned: always + sample: "123456789012" + tags: + description: Any tags assigned to the snapshot. + type: dict + returned: always + sample: "{ 'my_tag_key': 'my_tag_value' }" + encrypted: + description: Indicates whether the snapshot is encrypted. + type: bool + returned: always + sample: "True" + kms_key_id: + description: + - The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to + protect the volume encryption key for the parent volume. + type: str + returned: always + sample: "74c9742a-a1b2-45cb-b3fe-abcdef123456" + data_encryption_key_id: + description: + - The data encryption key identifier for the snapshot. This value is a unique identifier that + corresponds to the data encryption key that was used to encrypt the original volume or snapshot copy. + type: str + returned: always + sample: "arn:aws:kms:ap-southeast-2:123456789012:key/74c9742a-a1b2-45cb-b3fe-abcdef123456" +next_token_id: + description: + - Contains the value returned from a previous paginated request where C(max_results) was used and the results exceeded the value of that parameter. + - This value is null when there are no more results to return. 
+ type: str + returned: when option C(max_results) is set in input +''' + +try: + from botocore.exceptions import ClientError +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict + + +def list_ec2_snapshots(connection, module): + + snapshot_ids = module.params.get("snapshot_ids") + owner_ids = [str(owner_id) for owner_id in module.params.get("owner_ids")] + restorable_by_user_ids = [str(user_id) for user_id in module.params.get("restorable_by_user_ids")] + filters = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + max_results = module.params.get('max_results') + next_token = module.params.get('next_token_id') + optional_param = {} + if max_results: + optional_param['MaxResults'] = max_results + if next_token: + optional_param['NextToken'] = next_token + + try: + snapshots = connection.describe_snapshots( + aws_retry=True, + SnapshotIds=snapshot_ids, OwnerIds=owner_ids, + RestorableByUserIds=restorable_by_user_ids, Filters=filters, + **optional_param) + except is_boto3_error_code('InvalidSnapshot.NotFound') as e: + if len(snapshot_ids) > 1: + module.warn("Some of your snapshots may exist, but %s" % str(e)) + snapshots = {'Snapshots': []} + except ClientError as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg='Failed to describe snapshots') + + result = {} + # Turn the boto3 result in to ansible_friendly_snaked_names + snaked_snapshots = [] + for snapshot in snapshots['Snapshots']: + snaked_snapshots.append(camel_dict_to_snake_dict(snapshot)) + + # Turn the boto3 result in to ansible friendly tag dictionary + for snapshot in snaked_snapshots: + if 'tags' in snapshot: + snapshot['tags'] = boto3_tag_list_to_ansible_dict(snapshot['tags'], 'key', 'value') + + result['snapshots'] = snaked_snapshots + + if snapshots.get('NextToken'): + result.update(camel_dict_to_snake_dict({'NextTokenId': snapshots.get('NextToken')})) + + module.exit_json(**result) + + +def main(): + + argument_spec = dict( + snapshot_ids=dict(default=[], type='list', elements='str'), + owner_ids=dict(default=[], type='list', elements='str'), + restorable_by_user_ids=dict(default=[], type='list', elements='str'), + filters=dict(default={}, type='dict'), + max_results=dict(type='int'), + next_token_id=dict(type='str') + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['snapshot_ids', 'owner_ids', 'restorable_by_user_ids', 'filters'], + ['snapshot_ids', 'max_results'], + ['snapshot_ids', 'next_token_id'] + ], + supports_check_mode=True + ) + + connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + + list_ec2_snapshots(connection, module) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_spot_instance.py b/ansible_collections/amazon/aws/plugins/modules/ec2_spot_instance.py new file mode 100644 index 000000000..a5d8f2ca8 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_spot_instance.py @@ -0,0 +1,650 @@ 
+#!/usr/bin/python +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_spot_instance +version_added: 2.0.0 +short_description: Request, stop, reboot or cancel spot instance +description: + - Creates or cancels spot instance requests. +author: + - Sri Rachana Achyuthuni (@srirachanaachyuthuni) +options: + zone_group: + description: + - Name for logical grouping of spot requests. + - All spot instances in the request are launched in the same availability zone. + type: str + client_token: + description: The idempotency token you provided when you launched the instance, if applicable. + type: str + count: + description: + - Number of instances to launch. + default: 1 + type: int + interruption: + description: + - The behavior when a Spot Instance is interrupted. + choices: [ "hibernate", "stop", "terminate" ] + type: str + default: terminate + launch_group: + description: + - Launch group for spot requests, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/how-spot-instances-work.html#spot-launch-group). + type: str + launch_specification: + description: + - The launch specification. + type: dict + suboptions: + security_group_ids: + description: + - Security group id (or list of ids) to use with the instance. + type: list + elements: str + security_groups: + description: + - Security group name (or list of group names) to use with the instance. + - Only supported with EC2 Classic. To launch in a VPC, use C(group_id) + type: list + elements: str + key_name: + description: + - Key to use on the instance. + - The SSH key must already exist in AWS in order to use this argument. + - Keys can be created / deleted using the M(amazon.aws.ec2_key) module. + type: str + subnet_id: + description: + - The ID of the subnet in which to launch the instance. + type: str + user_data: + description: + - The base64-encoded user data for the instance. User data is limited to 16 KB. + type: str + block_device_mappings: + description: + - A list of hash/dictionaries of volumes to add to the new instance. + type: list + elements: dict + suboptions: + device_name: + description: + - The device name (for example, /dev/sdh or xvdh ). + type: str + virtual_name: + description: + - The virtual device name + type: str + ebs: + description: + - Parameters used to automatically set up EBS volumes when the instance is launched, + see U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.request_spot_instances) + type: dict + no_device: + description: + - To omit the device from the block device mapping, specify an empty string. + type: str + ebs_optimized: + description: + - Whether instance is using optimized EBS volumes, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html). + default: false + type: bool + iam_instance_profile: + description: + - The IAM instance profile. + type: dict + suboptions: + arn: + description: + - The Amazon Resource Name (ARN) of the instance profile. + - Only one of I(arn) or I(name) may be specified. + type: str + name: + description: + - The name of the instance profile. + - Only one of I(arn) or I(name) may be specified. + type: str + image_id: + description: + - The ID of the AMI. 
+ type: str + instance_type: + description: + - Instance type to use for the instance, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). + - Required when creating a new instance. + type: str + kernel_id: + description: + - The ID of the kernel. + type: str + network_interfaces: + description: + - One or more network interfaces. If you specify a network interface, you must specify subnet IDs and security group IDs using the network interface. + type: list + elements: dict + default: [] + suboptions: + associate_public_ip_address: + description: + - Indicates whether to assign a public IPv4 address to an instance you launch in a VPC. + type: bool + delete_on_termination: + description: + - If set to true , the interface is deleted when the instance is terminated. + You can specify true only if creating a new network interface when launching an instance. + type: bool + description: + description: + - The description of the network interface. Applies only if creating a network interface when launching an instance. + type: str + device_index: + description: + - The position of the network interface in the attachment order. A primary network interface has a device index of 0. + - If you specify a network interface when launching an instance, you must specify the device index. + type: int + groups: + description: + - The IDs of the security groups for the network interface. Applies only if creating a network interface when launching an instance. + type: list + elements: str + ipv6_address_count: + description: + - A number of IPv6 addresses to assign to the network interface + type: int + ipv6_addresses: + description: + - One or more IPv6 addresses to assign to the network interface. + type: list + elements: dict + suboptions: + ipv6address: + description: The IPv6 address. + type: str + network_interface_id: + description: + - The ID of the network interface. + type: str + private_ip_address: + description: + - The private IPv4 address of the network interface + type: str + private_ip_addresses: + description: + - One or more private IPv4 addresses to assign to the network interface + type: list + elements: dict + secondary_private_ip_address_count: + description: + - The number of secondary private IPv4 addresses. + type: int + subnet_id: + description: + - The ID of the subnet associated with the network interface + type: str + associate_carrier_ip_address: + description: + - Indicates whether to assign a carrier IP address to the network interface. + type: bool + interface_type: + description: + - The type of network interface. + type: str + choices: ['interface', 'efa'] + network_card_index: + description: + - The index of the network card. + type: int + ipv4_prefixes: + description: + - One or more IPv4 delegated prefixes to be assigned to the network interface. + type: list + elements: dict + ipv4_prefix_count: + description: + - The number of IPv4 delegated prefixes to be automatically assigned to the network interface + type: int + ipv6_prefixes: + description: + - One or more IPv6 delegated prefixes to be assigned to the network interface + type: list + elements: dict + ipv6_prefix_count: + description: + - The number of IPv6 delegated prefixes to be automatically assigned to the network interface + type: int + placement: + description: + - The placement information for the instance. + type: dict + suboptions: + availability_zone: + description: + - The Availability Zone. + type: str + group_name: + description: + - The name of the placement group. 
+            type: str
+          tenancy:
+            description:
+              - The tenancy of the host.
+            type: str
+            choices: ['default', 'dedicated', 'host']
+            default: default
+      ramdisk_id:
+        description:
+          - The ID of the RAM disk.
+        type: str
+      monitoring:
+        description:
+          - Indicates whether basic or detailed monitoring is enabled for the instance.
+        type: dict
+        suboptions:
+          enabled:
+            description:
+              - Indicates whether detailed monitoring is enabled. Otherwise, basic monitoring is enabled.
+            type: bool
+            default: false
+  state:
+    description:
+      - Whether the spot request should be created or removed.
+      - When I(state=present), I(launch_specification) is required.
+      - When I(state=absent), I(spot_instance_request_ids) is required.
+    default: 'present'
+    choices: [ 'absent', 'present' ]
+    type: str
+  spot_price:
+    description:
+      - Maximum spot price to bid. If not set, a regular on-demand instance is requested.
+      - A spot request is made with this maximum bid. When it is filled, the instance is started.
+    type: str
+  spot_type:
+    description:
+      - The type of spot request.
+      - After being interrupted, a C(persistent) spot instance will be started once there is capacity to fill the request again.
+    default: 'one-time'
+    choices: [ "one-time", "persistent" ]
+    type: str
+  tags:
+    description:
+      - A dictionary of key-value pairs for tagging the Spot Instance request on creation.
+    type: dict
+  spot_instance_request_ids:
+    description:
+      - List of strings with IDs of spot requests to be cancelled.
+    type: list
+    elements: str
+  terminate_instances:
+    description:
+      - Boolean value to set whether or not to terminate instances associated with the spot request.
+      - Can be used only when I(state=absent).
+    default: False
+    type: bool
+    version_added: 5.4.0
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
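+
+# The next example is an illustrative sketch added for clarity (it is not one of
+# the upstream examples); the AMI ID and key pair name are placeholders. It
+# simply combines the documented spot_type and interruption options.
+- name: Persistent Spot request that stops (rather than terminates) on interruption
+  amazon.aws.ec2_spot_instance:
+    launch_specification:
+      image_id: ami-123456789
+      key_name: my-keypair
+      instance_type: t2.medium
+    spot_type: persistent
+    interruption: stop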
+ +- name: Simple Spot Request Creation + amazon.aws.ec2_spot_instance: + launch_specification: + image_id: ami-123456789 + key_name: my-keypair + instance_type: t2.medium + +- name: Spot Request Creation with more options + amazon.aws.ec2_spot_instance: + launch_specification: + image_id: ami-123456789 + key_name: my-keypair + instance_type: t2.medium + subnet_id: subnet-12345678 + block_device_mappings: + - device_name: /dev/sdb + ebs: + delete_on_termination: True + volume_type: gp3 + volume_size: 5 + - device_name: /dev/sdc + ebs: + delete_on_termination: True + volume_type: io2 + volume_size: 30 + network_interfaces: + - associate_public_ip_address: False + delete_on_termination: True + device_index: 0 + placement: + availability_zone: us-west-2a + monitoring: + enabled: False + spot_price: 0.002 + tags: + Environment: Testing + +- name: Spot Request Termination + amazon.aws.ec2_spot_instance: + spot_instance_request_ids: ['sir-12345678', 'sir-abcdefgh'] + state: absent +''' + +RETURN = ''' +spot_request: + description: The spot instance request details after creation + returned: when success + type: dict + sample: { + "create_time": "2021-08-23T22:59:12+00:00", + "instance_interruption_behavior": "terminate", + "launch_specification": { + "block_device_mappings": [ + { + "device_name": "/dev/sdb", + "ebs": { + "delete_on_termination": true, + "volume_size": 5, + "volume_type": "gp3" + } + } + ], + "ebs_optimized": false, + "iam_instance_profile": { + "arn": "arn:aws:iam::EXAMPLE:instance-profile/myinstanceprofile" + }, + "image_id": "ami-083ac7c7ecf9bb9b0", + "instance_type": "t2.small", + "key_name": "mykey", + "monitoring": { + "enabled": false + }, + "network_interfaces": [ + { + "associate_public_ip_address": false, + "delete_on_termination": true, + "device_index": 0 + } + ], + "placement": { + "availability_zone": "us-west-2a", + "tenancy": "default" + }, + "security_groups": [ + { + "group_name": "default" + } + ] + }, + "product_description": "Linux/UNIX", + "spot_instance_request_id": "sir-1234abcd", + "spot_price": "0.00600", + "state": "open", + "status": { + "code": "pending-evaluation", + "message": "Your Spot request has been submitted for review, and is pending evaluation.", + "update_time": "2021-08-23T22:59:12+00:00" + }, + "type": "one-time" + + } + +cancelled_spot_request: + description: The spot instance request details that has been cancelled + returned: always + type: str + sample: 'Spot requests with IDs: sir-1234abcd have been cancelled' +''' +# TODO: add support for datetime-based parameters +# import datetime +# import time + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code + + +def build_launch_specification(launch_spec): + """ + Remove keys that have a value of None from Launch Specification + Descend into these subkeys: + network_interfaces + block_device_mappings + monitoring + placement + 
iam_instance_profile + """ + assigned_keys = dict((k, v) for k, v in launch_spec.items() if v is not None) + + sub_key_to_build = ['placement', 'iam_instance_profile', 'monitoring'] + for subkey in sub_key_to_build: + if launch_spec[subkey] is not None: + assigned_keys[subkey] = dict((k, v) for k, v in launch_spec[subkey].items() if v is not None) + + if launch_spec['network_interfaces'] is not None: + interfaces = [] + for iface in launch_spec['network_interfaces']: + interfaces.append(dict((k, v) for k, v in iface.items() if v is not None)) + assigned_keys['network_interfaces'] = interfaces + + if launch_spec['block_device_mappings'] is not None: + block_devs = [] + for dev in launch_spec['block_device_mappings']: + block_devs.append( + dict((k, v) for k, v in dev.items() if v is not None)) + assigned_keys['block_device_mappings'] = block_devs + + return snake_dict_to_camel_dict(assigned_keys, capitalize_first=True) + + +def request_spot_instances(module, connection): + + # connection.request_spot_instances() always creates a new spot request + changed = True + + if module.check_mode: + module.exit_json(changed=changed) + + params = {} + + if module.params.get('launch_specification'): + params['LaunchSpecification'] = build_launch_specification(module.params.get('launch_specification')) + + if module.params.get('zone_group'): + params['AvailabilityZoneGroup'] = module.params.get('zone_group') + + if module.params.get('count'): + params['InstanceCount'] = module.params.get('count') + + if module.params.get('launch_group'): + params['LaunchGroup'] = module.params.get('launch_group') + + if module.params.get('spot_price'): + params['SpotPrice'] = module.params.get('spot_price') + + if module.params.get('spot_type'): + params['Type'] = module.params.get('spot_type') + + if module.params.get('client_token'): + params['ClientToken'] = module.params.get('client_token') + + if module.params.get('interruption'): + params['InstanceInterruptionBehavior'] = module.params.get('interruption') + + if module.params.get('tags'): + params['TagSpecifications'] = [{ + 'ResourceType': 'spot-instances-request', + 'Tags': ansible_dict_to_boto3_tag_list(module.params.get('tags')), + }] + + # TODO: add support for datetime-based parameters + # params['ValidFrom'] = module.params.get('valid_from') + # params['ValidUntil'] = module.params.get('valid_until') + + try: + request_spot_instance_response = (connection.request_spot_instances(aws_retry=True, **params))['SpotInstanceRequests'][0] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Error while creating the spot instance request') + + request_spot_instance_response['Tags'] = boto3_tag_list_to_ansible_dict(request_spot_instance_response.get('Tags', [])) + spot_request = camel_dict_to_snake_dict(request_spot_instance_response, ignore_list=['Tags']) + module.exit_json(spot_request=spot_request, changed=changed) + + +def cancel_spot_instance_requests(module, connection): + + changed = False + spot_instance_request_ids = module.params.get('spot_instance_request_ids') + requests_exist = dict() + try: + paginator = connection.get_paginator('describe_spot_instance_requests').paginate(SpotInstanceRequestIds=spot_instance_request_ids, + Filters=[{'Name': 'state', 'Values': ['open', 'active']}]) + jittered_retry = AWSRetry.jittered_backoff() + requests_exist = jittered_retry(paginator.build_full_result)() + except is_boto3_error_code('InvalidSpotInstanceRequestID.NotFound'): + 
requests_exist['SpotInstanceRequests'] = []
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg="Failure when describing spot requests")
+
+    try:
+        if len(requests_exist['SpotInstanceRequests']) > 0:
+            changed = True
+            if module.check_mode:
+                module.exit_json(changed=changed,
+                                 msg='Would have cancelled Spot request {0}'.format(spot_instance_request_ids))
+
+            connection.cancel_spot_instance_requests(aws_retry=True, SpotInstanceRequestIds=module.params.get('spot_instance_request_ids'))
+
+            if module.params.get("terminate_instances") is True:
+                associated_instances = [request["InstanceId"] for request in requests_exist["SpotInstanceRequests"]]
+                terminate_associated_instances(connection, module, associated_instances)
+
+            module.exit_json(changed=changed, msg='Cancelled Spot request {0}'.format(module.params.get('spot_instance_request_ids')))
+        else:
+            module.exit_json(changed=changed, msg='Spot request not found or already cancelled')
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg='Error while cancelling the spot instance request')
+
+
+def terminate_associated_instances(connection, module, instance_ids):
+    try:
+        connection.terminate_instances(aws_retry=True, InstanceIds=instance_ids)
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Unable to terminate instances")
+
+
+def main():
+    network_interface_options = dict(
+        associate_public_ip_address=dict(type='bool'),
+        delete_on_termination=dict(type='bool'),
+        description=dict(type='str'),
+        device_index=dict(type='int'),
+        groups=dict(type='list', elements='str'),
+        ipv6_address_count=dict(type='int'),
+        ipv6_addresses=dict(type='list', elements='dict', options=dict(ipv6address=dict(type='str'))),
+        network_interface_id=dict(type='str'),
+        private_ip_address=dict(type='str'),
+        private_ip_addresses=dict(type='list', elements='dict'),
+        secondary_private_ip_address_count=dict(type='int'),
+        subnet_id=dict(type='str'),
+        associate_carrier_ip_address=dict(type='bool'),
+        interface_type=dict(type='str', choices=['interface', 'efa']),
+        network_card_index=dict(type='int'),
+        ipv4_prefixes=dict(type='list', elements='dict'),
+        ipv4_prefix_count=dict(type='int'),
+        ipv6_prefixes=dict(type='list', elements='dict'),
+        ipv6_prefix_count=dict(type='int')
+    )
+    block_device_mappings_options = dict(
+        device_name=dict(type='str'),
+        virtual_name=dict(type='str'),
+        ebs=dict(type='dict'),
+        no_device=dict(type='str'),
+    )
+    monitoring_options = dict(
+        enabled=dict(type='bool', default=False)
+    )
+    placement_options = dict(
+        availability_zone=dict(type='str'),
+        group_name=dict(type='str'),
+        tenancy=dict(type='str', choices=['default', 'dedicated', 'host'], default='default')
+    )
+    iam_instance_profile_options = dict(
+        arn=dict(type='str'),
+        name=dict(type='str')
+    )
+    launch_specification_options = dict(
+        security_group_ids=dict(type='list', elements='str'),
+        security_groups=dict(type='list', elements='str'),
+        block_device_mappings=dict(type='list', elements='dict', options=block_device_mappings_options),
+        ebs_optimized=dict(type='bool', default=False),
+        iam_instance_profile=dict(type='dict', options=iam_instance_profile_options),
+        image_id=dict(type='str'),
+        instance_type=dict(type='str'),
+        kernel_id=dict(type='str'),
+        key_name=dict(type='str'),
+        monitoring=dict(type='dict', options=monitoring_options),
+        network_interfaces=dict(type='list', elements='dict', options=network_interface_options, default=[]),
+        placement=dict(type='dict', options=placement_options),
+        ramdisk_id=dict(type='str'),
+        user_data=dict(type='str'),
+        subnet_id=dict(type='str')
+    )
+
+    argument_spec = dict(
+        zone_group=dict(type='str'),
+        client_token=dict(type='str', no_log=False),
+        count=dict(type='int', default=1),
+        interruption=dict(type='str', default="terminate", choices=['hibernate', 'stop', 'terminate']),
+        launch_group=dict(type='str'),
+        launch_specification=dict(type='dict', options=launch_specification_options),
+        state=dict(default='present', choices=['present', 'absent']),
+        spot_price=dict(type='str'),
+        spot_type=dict(default='one-time', choices=["one-time", "persistent"]),
+        tags=dict(type='dict'),
+        # valid_from=dict(type='datetime', default=datetime.datetime.now()),
+        # valid_until=dict(type='datetime', default=(datetime.datetime.now() + datetime.timedelta(minutes=60))
+        spot_instance_request_ids=dict(type="list", elements="str"),
+        terminate_instances=dict(type="bool", default=False),
+    )
+
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True
+    )
+
+    state = module.params["state"]
+
+    if module.params.get("terminate_instances") and state != "absent":
+        module.fail_json(msg="terminate_instances can only be used when state is absent.")
+
+    connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+    if state == 'present':
+        request_spot_instances(module, connection)
+
+    if state == 'absent':
+        cancel_spot_instance_requests(module, connection)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_spot_instance_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_spot_instance_info.py
new file mode 100644
index 000000000..599db778b
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_spot_instance_info.py
@@ -0,0 +1,301 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_spot_instance_info
+version_added: 2.0.0
+short_description: Gather information about ec2 spot instance requests
+description:
+  - Describes the specified Spot Instance requests.
+author:
+  - Mandar Vijay Kulkarni (@mandar242)
+options:
+  filters:
+    description:
+      - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+      - Filter names and values are case sensitive.
+      - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSpotInstanceRequests.html) for possible filters.
+    required: false
+    default: {}
+    type: dict
+  spot_instance_request_ids:
+    description:
+      - One or more Spot Instance request IDs.
+    required: false
+    type: list
+    elements: str
+    default: []
+
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
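+
+# A minimal sketch added for illustration (not one of the upstream examples):
+# with neither spot_instance_request_ids nor filters set, all Spot Instance
+# requests in the region are described. The registered variable name is arbitrary.
+- name: describe all Spot Instance requests in the current region
+  amazon.aws.ec2_spot_instance_info:
+  register: spot_info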
+
+- name: describe the Spot Instance requests based on request IDs
+  amazon.aws.ec2_spot_instance_info:
+    spot_instance_request_ids:
+      - sir-12345678
+
+- name: describe the Spot Instance requests and filter results based on instance type
+  amazon.aws.ec2_spot_instance_info:
+    spot_instance_request_ids:
+      - sir-12345678
+      - sir-13579246
+      - sir-87654321
+    filters:
+      launch.instance-type: t3.medium
+
+- name: describe the Spot requests filtered using multiple filters
+  amazon.aws.ec2_spot_instance_info:
+    filters:
+      state: active
+      launch.block-device-mapping.device-name: /dev/sdb
+
+'''
+
+RETURN = '''
+spot_request:
+  description: The gathered information about specified spot instance requests.
+  returned: when success
+  type: list
+  elements: dict
+  contains:
+    create_time:
+      description: The date and time when the Spot Instance request was created.
+      returned: always
+      type: str
+    instance_id:
+      description: The instance ID, if an instance has been launched to fulfill the Spot Instance request.
+      returned: when instance exists
+      type: str
+    instance_interruption_behavior:
+      description: The behavior when a Spot Instance is interrupted.
+      returned: always
+      type: str
+    launch_specification:
+      description: Additional information for launching instances.
+      returned: always
+      type: dict
+      contains:
+        ebs_optimized:
+          description: Indicates whether the instance is optimized for EBS I/O.
+          returned: always
+          type: bool
+        image_id:
+          description: The ID of the AMI.
+          returned: always
+          type: str
+        instance_type:
+          description: The instance type.
+          returned: always
+          type: str
+        key_name:
+          description: The name of the key pair.
+          returned: always
+          type: str
+        monitoring:
+          description: Describes the monitoring of an instance.
+          returned: always
+          type: dict
+          contains:
+            enabled:
+              description: Indicates whether detailed monitoring is enabled.
+              returned: always
+              type: bool
+        placement:
+          description: The placement information for the instance.
+          returned: always
+          type: dict
+          contains:
+            availability_zone:
+              description: The name of the availability zone.
+              returned: always
+              type: str
+        security_groups:
+          description: List of security groups.
+          returned: always
+          type: list
+          elements: dict
+          contains:
+            group_id:
+              description: The ID of the security group.
+              returned: always
+              type: str
+            group_name:
+              description: The name of the security group.
+              returned: always
+              type: str
+        subnet_id:
+          description: The ID of the subnet.
+          returned: when creating a network interface when launching an instance
+          type: str
+    launched_availability_zone:
+      description: The availability zone in which the request is launched.
+      returned: always
+      type: str
+    product_description:
+      description: The product description associated with the Spot Instance.
+      returned: always
+      type: str
+    spot_instance_request_id:
+      description: The ID of the Spot Instance request.
+      returned: always
+      type: str
+    spot_price:
+      description: The maximum price per hour that you are willing to pay for a Spot Instance.
+      returned: always
+      type: str
+    state:
+      description: The state of the Spot Instance request.
+      returned: always
+      type: str
+    status:
+      description: Extra information about the status of the Spot Instance request.
+      returned: always
+      type: dict
+      contains:
+        code:
+          description:
+            - The status code.
+            - See U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-request-status.html#spot-instance-request-status-understand) for codes.
+ returned: always + type: str + message: + description: The description of the status code. + returned: always + type: str + update_time: + description: The date and time of the most recent status update in UTC format. + returned: always + type: str + tags: + description: List of tags associated with the resource. + returned: always + type: list + elements: dict + contains: + key: + description: The key of the tag. + returned: always + type: str + value: + description: The value of the tag. + returned: always + type: str + type: + description: The Spot Instance request type. + returned: always + type: str + valid_until: + description: The end date of the request in UTC format. + returned: always + type: str + sample: { + "create_time": "2021-09-01T21:05:57+00:00", + "instance_id": "i-08877936b801ac475", + "instance_interruption_behavior": "terminate", + "launch_specification": { + "ebs_optimized": false, + "image_id": "ami-0443305dabd4be2bc", + "instance_type": "t2.medium", + "key_name": "zuul", + "monitoring": { + "enabled": false + }, + "placement": { + "availability_zone": "us-east-2b" + }, + "security_groups": [ + { + "group_id": "sg-01f9833207d53b937", + "group_name": "default" + } + ], + "subnet_id": "subnet-07d906b8358869bda" + }, + "launched_availability_zone": "us-east-2b", + "product_description": "Linux/UNIX", + "spot_instance_request_id": "sir-c3cp9jsk", + "spot_price": "0.046400", + "state": "active", + "status": { + "code": "fulfilled", + "message": "Your spot request is fulfilled.", + "update_time": "2021-09-01T21:05:59+00:00" + }, + "tags": {}, + "type": "one-time", + "valid_until": "2021-09-08T21:05:57+00:00" + } +''' + + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list + + +def _describe_spot_instance_requests(connection, **params): + paginator = connection.get_paginator('describe_spot_instance_requests') + return paginator.paginate(**params).build_full_result() + + +def describe_spot_instance_requests(connection, module): + + params = {} + + if module.params.get('filters'): + params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters')) + if module.params.get('spot_instance_request_ids'): + params['SpotInstanceRequestIds'] = module.params.get('spot_instance_request_ids') + + try: + describe_spot_instance_requests_response = _describe_spot_instance_requests(connection, **params)['SpotInstanceRequests'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to describe spot instance requests') + + spot_request = [] + for response_list_item in describe_spot_instance_requests_response: + spot_request.append(camel_dict_to_snake_dict(response_list_item)) + + if len(spot_request) == 0: + module.exit_json(msg='No spot requests found for specified options') + + module.exit_json(spot_request=spot_request) + + +def main(): + + argument_spec = dict( + filters=dict(default={}, type='dict'), + spot_instance_request_ids=dict(default=[], type='list', elements='str'), + ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + try: + connection = module.client('ec2', 
retry_decorator=AWSRetry.jittered_backoff()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') + + describe_spot_instance_requests(connection, module) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_tag.py b/ansible_collections/amazon/aws/plugins/modules/ec2_tag.py new file mode 100644 index 000000000..6ccf687e3 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_tag.py @@ -0,0 +1,167 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_tag +version_added: 1.0.0 +short_description: Create and remove tags on ec2 resources +description: + - Creates, modifies and removes tags for any EC2 resource. + - Resources are referenced by their resource id (for example, an instance being i-XXXXXXX, a VPC being vpc-XXXXXXX). + - This module is designed to be used with complex args (tags), see the examples. +options: + resource: + description: + - The EC2 resource id. + required: true + type: str + state: + description: + - Whether the tags should be present or absent on the resource. + - The use of I(state=list) to interrogate the tags of an instance was + deprecated in release 1.0.0 and is no longer available in release 4.0.0. + The 'list' functionality has been moved to a dedicated module + M(amazon.aws.ec2_tag_info). + default: present + choices: ['present', 'absent'] + type: str + tags: + description: + - A dictionary of tags to add or remove from the resource. + - If the value provided for a key is not set and I(state=absent), the tag will be removed regardless of its current value. + type: dict + required: true + purge_tags: + description: + - Whether unspecified tags should be removed from the resource. + - Note that when combined with I(state=absent), specified tags with non-matching values are not purged. 
+ type: bool + default: false + +author: + - Lester Wade (@lwade) + - Paul Arthur (@flowerysong) +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 +- amazon.aws.boto3 +''' + +EXAMPLES = ''' +- name: Ensure tags are present on a resource + amazon.aws.ec2_tag: + region: eu-west-1 + resource: vol-XXXXXX + state: present + tags: + Name: ubervol + env: prod + +- name: Ensure all volumes are tagged + amazon.aws.ec2_tag: + region: eu-west-1 + resource: '{{ item.id }}' + state: present + tags: + Name: dbserver + Env: production + loop: '{{ ec2_vol.volumes }}' + +- name: Remove the Env tag + amazon.aws.ec2_tag: + region: eu-west-1 + resource: i-xxxxxxxxxxxxxxxxx + tags: + Env: + state: absent + +- name: Remove the Env tag if it's currently 'development' + amazon.aws.ec2_tag: + region: eu-west-1 + resource: i-xxxxxxxxxxxxxxxxx + tags: + Env: development + state: absent + +- name: Remove all tags except for Name from an instance + amazon.aws.ec2_tag: + region: eu-west-1 + resource: i-xxxxxxxxxxxxxxxxx + tags: + Name: '' + state: absent + purge_tags: true +''' + +RETURN = ''' +tags: + description: A dict containing the tags on the resource + returned: always + type: dict +added_tags: + description: A dict of tags that were added to the resource + returned: If tags were added + type: dict +removed_tags: + description: A dict of tags that were removed from the resource + returned: If tags were removed + type: dict +''' + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import describe_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import remove_ec2_tags + + +def main(): + argument_spec = dict( + resource=dict(required=True), + tags=dict(type='dict', required=True), + purge_tags=dict(type='bool', default=False), + state=dict(default='present', choices=['present', 'absent']), + ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + resource = module.params['resource'] + tags = module.params['tags'] + state = module.params['state'] + purge_tags = module.params['purge_tags'] + + result = {'changed': False} + + ec2 = module.client('ec2') + + current_tags = describe_ec2_tags(ec2, module, resource) + + if state == 'absent': + removed_tags = {} + for key in tags: + if key in current_tags and (tags[key] is None or current_tags[key] == tags[key]): + result['changed'] = True + removed_tags[key] = current_tags[key] + result['removed_tags'] = removed_tags + remove_ec2_tags(ec2, module, resource, removed_tags.keys()) + + if state == 'present': + tags_to_set, tags_to_unset = compare_aws_tags(current_tags, tags, purge_tags) + if tags_to_unset: + result['removed_tags'] = {} + for key in tags_to_unset: + result['removed_tags'][key] = current_tags[key] + result['added_tags'] = tags_to_set + result['changed'] = ensure_ec2_tags(ec2, module, resource, tags=tags, purge_tags=purge_tags) + + result['tags'] = describe_ec2_tags(ec2, module, resource) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_tag_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_tag_info.py new file mode 100644 index 000000000..6be536562 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_tag_info.py @@ 
-0,0 +1,73 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_tag_info +version_added: 1.0.0 +short_description: List tags on ec2 resources +description: + - Lists tags for any EC2 resource. + - Resources are referenced by their resource id (e.g. an instance being i-XXXXXXX, a vpc being vpc-XXXXXX). + - Resource tags can be managed using the M(amazon.aws.ec2_tag) module. +options: + resource: + description: + - The EC2 resource id (for example i-XXXXXX or vpc-XXXXXX). + required: true + type: str + +author: + - Mark Chappell (@tremble) +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 +- amazon.aws.boto3 +''' + +EXAMPLES = ''' +- name: Retrieve all tags on an instance + amazon.aws.ec2_tag_info: + region: eu-west-1 + resource: i-xxxxxxxxxxxxxxxxx + register: instance_tags + +- name: Retrieve all tags on a VPC + amazon.aws.ec2_tag_info: + region: eu-west-1 + resource: vpc-xxxxxxxxxxxxxxxxx + register: vpc_tags +''' + +RETURN = ''' +tags: + description: A dict containing the tags on the resource + returned: always + type: dict +''' + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import describe_ec2_tags + + +def main(): + argument_spec = dict( + resource=dict(required=True), + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + resource = module.params['resource'] + ec2 = module.client('ec2') + + current_tags = describe_ec2_tags(ec2, module, resource) + + module.exit_json(changed=False, tags=current_tags) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vol.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vol.py new file mode 100644 index 000000000..8afbc6e53 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vol.py @@ -0,0 +1,862 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_vol +version_added: 1.0.0 +short_description: Create and attach a volume, return volume ID and device map +description: + - Creates an EBS volume and optionally attaches it to an instance. + - If both I(instance) and I(name) are given and the instance has a device at the device name, then no volume is created and no attachment is made. +options: + instance: + description: + - Instance ID if you wish to attach the volume. + - Set to C(None) to detach the volume. + type: str + name: + description: + - Volume Name tag if you wish to attach an existing volume (requires instance). + type: str + id: + description: + - Volume ID if you wish to attach an existing volume (requires instance) or remove an existing volume. + type: str + volume_size: + description: + - Size of volume (in GiB) to create. + type: int + volume_type: + description: + - Type of EBS volume; C(standard) (magnetic), C(gp2) (SSD), C(gp3) (SSD), C(io1) (Provisioned IOPS), C(io2) (Provisioned IOPS), + C(st1) (Throughput Optimized HDD), C(sc1) (Cold HDD). + - C(standard) is the old EBS default and continues to remain the Ansible default for backwards compatibility. 
+    default: standard
+    choices: ['standard', 'gp2', 'io1', 'st1', 'sc1', 'gp3', 'io2']
+    type: str
+  iops:
+    description:
+      - The provisioned IOPS you want to associate with this volume (integer).
+    type: int
+  encrypted:
+    description:
+      - Enable encryption at rest for this volume.
+    default: false
+    type: bool
+  kms_key_id:
+    description:
+      - Specify the ID of the KMS key to use.
+    type: str
+  device_name:
+    description:
+      - Device ID to override device mapping. Assumes /dev/sdf for Linux/UNIX and /dev/xvdf for Windows.
+    type: str
+  delete_on_termination:
+    description:
+      - When set to C(true), the volume will be deleted upon instance termination.
+    type: bool
+    default: false
+  zone:
+    description:
+      - Zone in which to create the volume. If unset, the volume is created in the zone of the instance (if set).
+    aliases: ['availability_zone', 'aws_zone', 'ec2_zone']
+    type: str
+  snapshot:
+    description:
+      - Snapshot ID on which to base the volume.
+    type: str
+  state:
+    description:
+      - Whether to ensure the volume is present or absent.
+      - I(state=list) was deprecated in release 1.1.0 and is no longer available
+        with release 4.0.0.
+      - The C(list) functionality has been moved to a dedicated module M(amazon.aws.ec2_vol_info).
+    default: present
+    choices: ['absent', 'present']
+    type: str
+  modify_volume:
+    description:
+      - The volume won't be modified unless this key is C(true).
+    type: bool
+    default: false
+    version_added: 1.4.0
+  throughput:
+    description:
+      - Volume throughput in MB/s.
+      - This parameter is only valid for gp3 volumes.
+      - Valid range is from 125 to 1000.
+    type: int
+    version_added: 1.4.0
+  multi_attach:
+    description:
+      - If set to C(true), Multi-Attach will be enabled when creating the volume.
+      - When you create a new volume, Multi-Attach is disabled by default.
+      - This parameter is supported with io1 and io2 volumes only.
+    type: bool
+    version_added: 2.0.0
+  outpost_arn:
+    description:
+      - The Amazon Resource Name (ARN) of the Outpost.
+      - If set, allows creating the volume in an Outpost.
+    type: str
+    version_added: 3.1.0
+author:
+  - "Lester Wade (@lwade)"
+notes:
+  - Support for I(purge_tags) was added in release 1.5.0.
+extends_documentation_fragment:
+  - amazon.aws.aws
+  - amazon.aws.ec2
+  - amazon.aws.tags
+  - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Simple attachment action
+- amazon.aws.ec2_vol:
+    instance: XXXXXX
+    volume_size: 5
+    device_name: sdd
+    region: us-west-2
+
+# Example using custom iops params
+- amazon.aws.ec2_vol:
+    instance: XXXXXX
+    volume_size: 5
+    iops: 100
+    device_name: sdd
+    region: us-west-2
+
+# Example using snapshot id
+- amazon.aws.ec2_vol:
+    instance: XXXXXX
+    snapshot: "{{ snapshot }}"
+
+# Playbook example combined with instance launch
+- amazon.aws.ec2:
+    keypair: "{{ keypair }}"
+    image: "{{ image }}"
+    wait: true
+    count: 3
+  register: ec2
+- amazon.aws.ec2_vol:
+    instance: "{{ item.id }}"
+    volume_size: 5
+  loop: "{{ ec2.instances }}"
+  register: ec2_vol
+
+# Example: Launch an instance and then add a volume if not already attached
+# * Volume will be created with the given name if not already created.
+# * Nothing will happen if the volume is already attached.
+
+- amazon.aws.ec2:
+    keypair: "{{ keypair }}"
+    image: "{{ image }}"
+    zone: YYYYYY
+    id: my_instance
+    wait: true
+    count: 1
+  register: ec2
+
+- amazon.aws.ec2_vol:
+    instance: "{{ item.id }}"
+    name: my_existing_volume_Name_tag
+    device_name: /dev/xvdf
+  loop: "{{ ec2.instances }}"
+  register: ec2_vol
+
+# Remove a volume
+- amazon.aws.ec2_vol:
+    id: vol-XXXXXXXX
+    state: absent
+
+# Detach a volume (since 1.9)
+- amazon.aws.ec2_vol:
+    id: vol-XXXXXXXX
+    instance: None
+    region: us-west-2
+
+# Create new volume using SSD storage
+- amazon.aws.ec2_vol:
+    instance: XXXXXX
+    volume_size: 50
+    volume_type: gp2
+    device_name: /dev/xvdf
+
+# Create new volume with multi-attach enabled
+- amazon.aws.ec2_vol:
+    zone: XXXXXX
+    multi_attach: true
+    volume_size: 4
+    volume_type: io1
+    iops: 102
+
+# Attach an existing volume to instance. The volume will be deleted upon instance termination.
+- amazon.aws.ec2_vol:
+    instance: XXXXXX
+    id: XXXXXX
+    device_name: /dev/sdf
+    delete_on_termination: true
+'''
+
+RETURN = '''
+device:
+  description: Device name of the attached volume.
+  returned: when success
+  type: str
+  sample: "/dev/sdf"
+volume_id:
+  description: The ID of the volume.
+  returned: when success
+  type: str
+  sample: "vol-35b333d9"
+volume_type:
+  description: The volume type.
+  returned: when success
+  type: str
+  sample: "standard"
+volume:
+  description: A dictionary containing detailed attributes of the volume.
+  returned: when success
+  type: dict
+  sample: {
+    "attachment_set": [{
+        "attach_time": "2015-10-23T00:22:29.000Z",
+        "deleteOnTermination": "false",
+        "device": "/dev/sdf",
+        "instance_id": "i-8356263c",
+        "status": "attached"
+    }],
+    "create_time": "2015-10-21T14:36:08.870Z",
+    "encrypted": false,
+    "id": "vol-35b333d9",
+    "iops": null,
+    "size": 1,
+    "snapshot_id": "",
+    "status": "in-use",
+    "tags": {
+        "env": "dev"
+    },
+    "type": "standard",
+    "zone": "us-east-1b"
+  }
+'''
+
+import time
+
+from ansible_collections.amazon.aws.plugins.module_utils.arn import is_outpost_arn
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import describe_ec2_tags
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications
+
+
+try:
+    import botocore
+except ImportError:
+    pass  # Taken care of by AnsibleAWSModule
+
+
+def get_instance(module, ec2_conn, instance_id=None):
+    instance = None
+    if not instance_id:
+        return instance
+
+    try:
+        reservation_response = ec2_conn.describe_instances(aws_retry=True, InstanceIds=[instance_id])
+        instance = camel_dict_to_snake_dict(reservation_response['Reservations'][0]['Instances'][0])
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg='Error while getting instance with id {0}'.format(instance_id))
+
+    return instance
+
+
+def get_volume(module, ec2_conn, vol_id=None, fail_on_not_found=True):
+def get_volume(module, ec2_conn, vol_id=None, fail_on_not_found=True):
+    name = module.params.get('name')
+    param_id = module.params.get('id')
+    zone = module.params.get('zone')
+
+    if not vol_id:
+        vol_id = param_id
+
+    # If no name or id supplied, just try volume creation based on module parameters
+    if vol_id is None and name is None:
+        return None
+
+    find_params = dict()
+    vols = []
+
+    if vol_id:
+        find_params['VolumeIds'] = [vol_id]
+    elif name:
+        find_params['Filters'] = ansible_dict_to_boto3_filter_list({'tag:Name': name})
+    elif zone:
+        find_params['Filters'] = ansible_dict_to_boto3_filter_list({'availability-zone': zone})
+
+    try:
+        paginator = ec2_conn.get_paginator('describe_volumes')
+        vols = paginator.paginate(**find_params).build_full_result().get('Volumes', [])
+    except is_boto3_error_code('InvalidVolume.NotFound'):
+        module.exit_json(msg="Volume {0} does not exist".format(vol_id), changed=False)
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg='Error while getting EBS volumes with the parameters {0}'.format(find_params))
+
+    if not vols:
+        if fail_on_not_found and vol_id:
+            msg = "Could not find volume with id: {0}".format(vol_id)
+            if name:
+                msg += (" and name: {0}".format(name))
+            module.fail_json(msg=msg)
+        else:
+            return None
+
+    if len(vols) > 1:
+        module.fail_json(
+            msg="Found more than one volume in zone (if specified) with name: {0}".format(name),
+            found=[v['VolumeId'] for v in vols]
+        )
+    vol = camel_dict_to_snake_dict(vols[0])
+    return vol
+
+
+def get_volumes(module, ec2_conn):
+    instance = module.params.get('instance')
+
+    find_params = dict()
+    if instance:
+        find_params['Filters'] = ansible_dict_to_boto3_filter_list({'attachment.instance-id': instance})
+
+    vols = []
+    try:
+        vols_response = ec2_conn.describe_volumes(aws_retry=True, **find_params)
+        vols = [camel_dict_to_snake_dict(vol) for vol in vols_response.get('Volumes', [])]
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg='Error while getting EBS volumes')
+    return vols
+
+
+def delete_volume(module, ec2_conn, volume_id=None):
+    changed = False
+    if volume_id:
+        try:
+            ec2_conn.delete_volume(aws_retry=True, VolumeId=volume_id)
+            changed = True
+        except is_boto3_error_code('InvalidVolume.NotFound'):
+            module.exit_json(changed=False)
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
+            module.fail_json_aws(e, msg='Error while deleting volume')
+    return changed
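+
+
+# update_volume() below is a no-op unless modify_volume=true; when any
+# attribute differs it batches every change (type, size, IOPS, throughput,
+# Multi-Attach) into a single modify_volume API call.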
+def update_volume(module, ec2_conn, volume):
+    changed = False
+    req_obj = {'VolumeId': volume['volume_id']}
+
+    if module.params.get('modify_volume'):
+        target_type = module.params.get('volume_type')
+        original_type = None
+        type_changed = False
+        if target_type:
+            original_type = volume['volume_type']
+            if target_type != original_type:
+                type_changed = True
+                req_obj['VolumeType'] = target_type
+
+        iops_changed = False
+        target_iops = module.params.get('iops')
+        original_iops = volume.get('iops')
+        if target_iops:
+            if target_iops != original_iops:
+                iops_changed = True
+                req_obj['Iops'] = target_iops
+            else:
+                req_obj['Iops'] = original_iops
+        else:
+            # If no IOPS value is specified and the volume type was changed to gp3,
+            # the existing value is retained when it is valid for gp3; otherwise the
+            # default gp3 IOPS value (3000) is applied.
+            if type_changed and target_type == 'gp3':
+                if (original_iops and (int(original_iops) < 3000 or int(original_iops) > 16000)) or not original_iops:
+                    req_obj['Iops'] = 3000
+                    iops_changed = True
+
+        target_size = module.params.get('volume_size')
+        size_changed = False
+        if target_size:
+            original_size = volume['size']
+            if target_size != original_size:
+                size_changed = True
+                req_obj['Size'] = target_size
+
+        target_throughput = module.params.get('throughput')
+        throughput_changed = False
+        if target_throughput:
+            original_throughput = volume.get('throughput')
+            if target_throughput != original_throughput:
+                throughput_changed = True
+                req_obj['Throughput'] = target_throughput
+
+        target_multi_attach = module.params.get('multi_attach')
+        multi_attach_changed = False
+        if target_multi_attach is not None:
+            original_multi_attach = volume['multi_attach_enabled']
+            if target_multi_attach != original_multi_attach:
+                multi_attach_changed = True
+                req_obj['MultiAttachEnabled'] = target_multi_attach
+
+        changed = iops_changed or size_changed or type_changed or throughput_changed or multi_attach_changed
+
+    if changed:
+        if module.check_mode:
+            module.exit_json(changed=True, msg='Would have updated volume if not in check mode.')
+        response = ec2_conn.modify_volume(**req_obj)
+
+        volume['size'] = response.get('VolumeModification').get('TargetSize')
+        volume['volume_type'] = response.get('VolumeModification').get('TargetVolumeType')
+        volume['iops'] = response.get('VolumeModification').get('TargetIops')
+        volume['multi_attach_enabled'] = response.get('VolumeModification').get('TargetMultiAttachEnabled')
+        volume['throughput'] = response.get('VolumeModification').get('TargetThroughput')
+
+    return volume, changed
+
+
+def create_volume(module, ec2_conn, zone):
+    changed = False
+    iops = module.params.get('iops')
+    encrypted = module.params.get('encrypted')
+    kms_key_id = module.params.get('kms_key_id')
+    volume_size = module.params.get('volume_size')
+    volume_type = module.params.get('volume_type')
+    snapshot = module.params.get('snapshot')
+    throughput = module.params.get('throughput')
+    multi_attach = module.params.get('multi_attach')
+    outpost_arn = module.params.get('outpost_arn')
+    tags = module.params.get('tags') or {}
+    name = module.params.get('name')
+
+    volume = get_volume(module, ec2_conn)
+
+    if module.check_mode:
+        module.exit_json(changed=True, msg='Would have created a volume if not in check mode.')
+
+    if volume is None:
+
+        try:
+            changed = True
+            additional_params = dict()
+
+            if volume_size:
+                additional_params['Size'] = int(volume_size)
+
+            if kms_key_id:
+                additional_params['KmsKeyId'] = kms_key_id
+
+            if snapshot:
+                additional_params['SnapshotId'] = snapshot
+
+            if iops:
+                additional_params['Iops'] = int(iops)
+
+            # Use the default IOPS value if no iops has been specified when volume_type=gp3
+            if volume_type == 'gp3' and not iops:
+                additional_params['Iops'] = 3000
+
+            if throughput:
+                additional_params['Throughput'] = int(throughput)
+
+            if multi_attach:
+                additional_params['MultiAttachEnabled'] = True
+
+            if outpost_arn:
+                if is_outpost_arn(outpost_arn):
+                    additional_params['OutpostArn'] = outpost_arn
+                else:
+                    module.fail_json(msg='OutpostArn does not match the pattern specified in API specifications.')
+
+            if name:
+                tags['Name'] = name
+
+            if tags:
+                additional_params['TagSpecifications'] = boto3_tag_specifications(tags, types=['volume'])
+
+            create_vol_response = ec2_conn.create_volume(
+                aws_retry=True,
+                AvailabilityZone=zone,
+                Encrypted=encrypted,
+                VolumeType=volume_type,
+                **additional_params
+            )
+
+            waiter = ec2_conn.get_waiter('volume_available')
+            waiter.wait(
+                VolumeIds=[create_vol_response['VolumeId']],
+            )
+            volume = get_volume(module, ec2_conn, vol_id=create_vol_response['VolumeId'])
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, msg='Error while creating EBS volume')
+
+    return volume, changed
+
+
+def attach_volume(module, ec2_conn, volume_dict, instance_dict, device_name):
+    changed = False
+
+    # If device_name isn't set, make a choice based on best practices here:
+    # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html
+
+    # In future this needs to be more dynamic, but combining block device mapping
+    # best practices (bounds for devices, as above) with
+    # instance.block_device_mapping data would be tricky.
+
+    attachment_data = get_attachment_data(volume_dict, wanted_state='attached')
+    if attachment_data:
+        if module.check_mode:
+            if attachment_data[0].get('status') in ['attached', 'attaching']:
+                module.exit_json(changed=False, msg='IN CHECK MODE - volume already attached to instance: {0}.'.format(
+                    attachment_data[0].get('instance_id', None)))
+        if not volume_dict['multi_attach_enabled']:
+            # volumes without Multi-Attach enabled can be attached to one instance only
+            if attachment_data[0].get('instance_id', None) != instance_dict['instance_id']:
+                module.fail_json(msg="Volume {0} is already attached to another instance: {1}."
+                                 .format(volume_dict['volume_id'], attachment_data[0].get('instance_id', None)))
+            else:
+                return volume_dict, changed
+
+    try:
+        if module.check_mode:
+            module.exit_json(changed=True, msg='Would have attached volume if not in check mode.')
+        attach_response = ec2_conn.attach_volume(aws_retry=True, Device=device_name,
+                                                 InstanceId=instance_dict['instance_id'],
+                                                 VolumeId=volume_dict['volume_id'])
+
+        waiter = ec2_conn.get_waiter('volume_in_use')
+        waiter.wait(VolumeIds=[attach_response['VolumeId']])
+        changed = True
+
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg='Error while attaching EBS volume')
+
+    modify_dot_attribute(module, ec2_conn, instance_dict, device_name)
+
+    volume = get_volume(module, ec2_conn, vol_id=volume_dict['volume_id'])
+
+    return volume, changed
+
+
+def modify_dot_attribute(module, ec2_conn, instance_dict, device_name):
+    """ Modify delete_on_termination attribute """
+
+    delete_on_termination = module.params.get('delete_on_termination')
+    changed = False
+
+    # volume_in_use can return *shortly* before it appears on the instance
+    # description
+    mapped_block_device = None
+    _attempt = 0
+    while mapped_block_device is None:
+        _attempt += 1
+        instance_dict = get_instance(module, ec2_conn=ec2_conn, instance_id=instance_dict['instance_id'])
+        mapped_block_device = get_mapped_block_device(instance_dict=instance_dict, device_name=device_name)
+        if mapped_block_device is None:
+            if _attempt > 2:
+                module.fail_json(msg='Unable to find device on instance',
+                                 device=device_name, instance=instance_dict)
+            time.sleep(1)
+
+    if delete_on_termination != mapped_block_device['ebs'].get('delete_on_termination'):
+        try:
+            ec2_conn.modify_instance_attribute(
+                aws_retry=True,
InstanceId=instance_dict['instance_id'], + BlockDeviceMappings=[{ + "DeviceName": device_name, + "Ebs": { + "DeleteOnTermination": delete_on_termination + } + }] + ) + changed = True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, + msg='Error while modifying Block Device Mapping of instance {0}'.format(instance_dict['instance_id'])) + + return changed + + +def get_attachment_data(volume_dict, wanted_state=None): + attachment_data = [] + if not volume_dict: + return attachment_data + resource = volume_dict.get('attachments', []) + if wanted_state: + # filter 'state', return attachment matching wanted state + resource = [data for data in resource if data['state'] == wanted_state] + + for data in resource: + attachment_data.append({ + 'attach_time': data.get('attach_time', None), + 'device': data.get('device', None), + 'instance_id': data.get('instance_id', None), + 'status': data.get('state', None), + 'delete_on_termination': data.get('delete_on_termination', None) + }) + + return attachment_data + + +def detach_volume(module, ec2_conn, volume_dict): + changed = False + + attachment_data = get_attachment_data(volume_dict, wanted_state='attached') + # The ID of the instance must be specified if you are detaching a Multi-Attach enabled volume. + for attachment in attachment_data: + if module.check_mode: + module.exit_json(changed=True, msg='Would have detached volume if not in check mode.') + ec2_conn.detach_volume(aws_retry=True, InstanceId=attachment['instance_id'], VolumeId=volume_dict['volume_id']) + waiter = ec2_conn.get_waiter('volume_available') + waiter.wait( + VolumeIds=[volume_dict['volume_id']], + ) + changed = True + + volume_dict = get_volume(module, ec2_conn, vol_id=volume_dict['volume_id']) + return volume_dict, changed + + +def get_volume_info(module, volume, tags=None): + if not tags: + tags = boto3_tag_list_to_ansible_dict(volume.get('tags')) + attachment_data = get_attachment_data(volume) + volume_info = { + 'create_time': volume.get('create_time'), + 'encrypted': volume.get('encrypted'), + 'id': volume.get('volume_id'), + 'iops': volume.get('iops'), + 'size': volume.get('size'), + 'snapshot_id': volume.get('snapshot_id'), + 'status': volume.get('state'), + 'type': volume.get('volume_type'), + 'zone': volume.get('availability_zone'), + 'attachment_set': attachment_data, + 'multi_attach_enabled': volume.get('multi_attach_enabled'), + 'tags': tags + } + + volume_info['throughput'] = volume.get('throughput') + + return volume_info + + +def get_mapped_block_device(instance_dict=None, device_name=None): + mapped_block_device = None + if not instance_dict: + return mapped_block_device + if not device_name: + return mapped_block_device + + for device in instance_dict.get('block_device_mappings', []): + if device['device_name'] == device_name: + mapped_block_device = device + break + + return mapped_block_device + + +def ensure_tags(module, connection, res_id, res_type, tags, purge_tags): + if module.check_mode: + return {}, True + changed = ensure_ec2_tags(connection, module, res_id, res_type, tags, purge_tags, ['InvalidVolume.NotFound']) + final_tags = describe_ec2_tags(connection, module, res_id, res_type) + + return final_tags, changed + + +def main(): + argument_spec = dict( + instance=dict(), + id=dict(), + name=dict(), + volume_size=dict(type='int'), + volume_type=dict(default='standard', choices=['standard', 'gp2', 'io1', 'st1', 'sc1', 'gp3', 'io2']), + iops=dict(type='int'), + encrypted=dict(default=False, 
type='bool'), + kms_key_id=dict(), + device_name=dict(), + delete_on_termination=dict(default=False, type='bool'), + zone=dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']), + snapshot=dict(), + state=dict(default='present', choices=['absent', 'present']), + tags=dict(type='dict', aliases=['resource_tags']), + modify_volume=dict(default=False, type='bool'), + throughput=dict(type='int'), + outpost_arn=dict(type='str'), + purge_tags=dict(type='bool', default=True), + multi_attach=dict(type='bool'), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=[ + ['volume_type', 'io1', ['iops']], + ['volume_type', 'io2', ['iops']], + ], + supports_check_mode=True, + ) + + param_id = module.params.get('id') + name = module.params.get('name') + instance = module.params.get('instance') + volume_size = module.params.get('volume_size') + device_name = module.params.get('device_name') + zone = module.params.get('zone') + snapshot = module.params.get('snapshot') + state = module.params.get('state') + tags = module.params.get('tags') + iops = module.params.get('iops') + volume_type = module.params.get('volume_type') + throughput = module.params.get('throughput') + multi_attach = module.params.get('multi_attach') + + # Ensure we have the zone or can get the zone + if instance is None and zone is None and state == 'present': + module.fail_json(msg="You must specify either instance or zone") + + # Set volume detach flag + if instance == 'None' or instance == '': + instance = None + detach_vol_flag = True + else: + detach_vol_flag = False + + if iops: + if volume_type in ('gp2', 'st1', 'sc1', 'standard'): + module.fail_json(msg='IOPS is not supported for gp2, st1, sc1, or standard volumes.') + + if volume_type == 'gp3' and (int(iops) < 3000 or int(iops) > 16000): + module.fail_json(msg='For a gp3 volume type, IOPS values must be between 3000 and 16000.') + + if volume_type in ('io1', 'io2') and (int(iops) < 100 or int(iops) > 64000): + module.fail_json(msg='For io1 and io2 volume types, IOPS values must be between 100 and 64000.') + + if throughput: + if volume_type != 'gp3': + module.fail_json(msg='Throughput is only supported for gp3 volume.') + if throughput < 125 or throughput > 1000: + module.fail_json(msg='Throughput values must be between 125 and 1000.') + + if multi_attach is True and volume_type not in ('io1', 'io2'): + module.fail_json(msg='multi_attach is only supported for io1 and io2 volumes.') + + # Set changed flag + changed = False + + ec2_conn = module.client('ec2', AWSRetry.jittered_backoff()) + + # Here we need to get the zone info for the instance. This covers situation where + # instance is specified but zone isn't. + # Useful for playbooks chaining instance launch with volume create + attach and where the + # zone doesn't matter to the user. 
+
+    inst = None
+
+    # Delaying the checks until after the instance check allows us to get volume ids for existing volumes
+    # without needing to pass an unused volume_size
+    if not volume_size and not (param_id or name or snapshot):
+        module.fail_json(msg="You must specify volume_size or identify an existing volume by id, name, or snapshot")
+
+    # Try getting volume
+    volume = get_volume(module, ec2_conn, fail_on_not_found=False)
+    if state == 'present':
+        if instance:
+            inst = get_instance(module, ec2_conn, instance_id=instance)
+            zone = inst['placement']['availability_zone']
+
+            # Use platform attribute to guess whether the instance is Windows or Linux
+            if device_name is None:
+                if inst.get('platform', '') == 'Windows':
+                    device_name = '/dev/xvdf'
+                else:
+                    device_name = '/dev/sdf'
+
+            # Check if there is a volume already mounted there.
+            mapped_device = get_mapped_block_device(instance_dict=inst, device_name=device_name)
+            if mapped_device:
+                other_volume_mapped = False
+                if volume:
+                    if volume['volume_id'] != mapped_device['ebs']['volume_id']:
+                        other_volume_mapped = True
+                else:
+                    # No volume was found, so the device must be mapped to a different volume
+                    other_volume_mapped = True
+
+                if other_volume_mapped:
+                    module.exit_json(
+                        msg="Volume mapping for {0} already exists on instance {1}".format(device_name, instance),
+                        volume_id=mapped_device['ebs']['volume_id'],
+                        found_volume=volume,
+                        device=device_name,
+                        changed=False
+                    )
+
+        final_tags = None
+        tags_changed = False
+        if volume:
+            volume, changed = update_volume(module, ec2_conn, volume)
+            if name:
+                if not tags:
+                    tags = boto3_tag_list_to_ansible_dict(volume.get('tags'))
+                tags['Name'] = name
+            final_tags, tags_changed = ensure_tags(module, ec2_conn, volume['volume_id'], 'volume', tags, module.params.get('purge_tags'))
+        else:
+            volume, changed = create_volume(module, ec2_conn, zone=zone)
+
+        if detach_vol_flag:
+            volume, attach_changed = detach_volume(module, ec2_conn, volume_dict=volume)
+        elif inst is not None:
+            volume, attach_changed = attach_volume(module, ec2_conn, volume_dict=volume, instance_dict=inst, device_name=device_name)
+        else:
+            attach_changed = False
+
+        # Add device, volume_id and volume_type parameters separately to maintain backward compatibility
+        volume_info = get_volume_info(module, volume, tags=final_tags)
+
+        if tags_changed or attach_changed:
+            changed = True
+
+        module.exit_json(changed=changed, volume=volume_info, device=device_name,
+                         volume_id=volume_info['id'], volume_type=volume_info['type'])
+    elif state == 'absent':
+        if not name and not param_id:
+            module.fail_json(msg='A volume name or id is required for deletion')
+        if volume:
+            if module.check_mode:
+                module.exit_json(changed=True, msg='Would have deleted volume if not in check mode.')
+            detach_volume(module, ec2_conn, volume_dict=volume)
+            changed = delete_volume(module, ec2_conn, volume_id=volume['volume_id'])
+        module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vol_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vol_info.py
new file mode 100644
index 000000000..7cd376740
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vol_info.py
@@ -0,0 +1,213 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vol_info
+version_added: 1.0.0
+short_description: Gather information about EC2 volumes in AWS
+description:
+  - Gather information about EC2 volumes in AWS.
+author: "Rob White (@wimnat)"
+options:
+  filters:
+    type: dict
+    default: {}
+    description:
+      - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+      - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVolumes.html) for possible filters.
+extends_documentation_fragment:
+  - amazon.aws.aws
+  - amazon.aws.ec2
+  - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all volumes
+- amazon.aws.ec2_vol_info:
+
+# Gather information about a particular volume using volume ID
+- amazon.aws.ec2_vol_info:
+    filters:
+      volume-id: vol-00112233
+
+# Gather information about any volume with a tag key Name and value Example
+- amazon.aws.ec2_vol_info:
+    filters:
+      "tag:Name": Example
+
+# Gather information about any volume that is attached
+- amazon.aws.ec2_vol_info:
+    filters:
+      attachment.status: attached
+
+# Gather information about all volumes related to an EC2 Instance
+# register information to `volumes` variable
+# Replaces functionality of `amazon.aws.ec2_vol` - `state: list`
+- name: get volume(s) info from EC2 Instance
+  amazon.aws.ec2_vol_info:
+    filters:
+      attachment.instance-id: "i-000111222333"
+  register: volumes
+
+'''
+
+RETURN = '''
+volumes:
+    description: Volumes that match the provided filters. Each element consists of a dict with all the information related to that volume.
+    type: list
+    elements: dict
+    returned: always
+    contains:
+        attachment_set:
+            description:
+              - Information about the volume attachments.
+              - This was changed in version 2.0.0 from a dictionary to a list of dictionaries.
+            type: list
+            elements: dict
+            sample: [{
+                "attach_time": "2015-10-23T00:22:29.000Z",
+                "deleteOnTermination": "false",
+                "device": "/dev/sdf",
+                "instance_id": "i-8356263c",
+                "status": "attached"
+            }]
+        create_time:
+            description: The time stamp when volume creation was initiated.
+            type: str
+            sample: "2015-10-21T14:36:08.870Z"
+        encrypted:
+            description: Indicates whether the volume is encrypted.
+            type: bool
+            sample: False
+        id:
+            description: The ID of the volume.
+            type: str
+            sample: "vol-35b333d9"
+        iops:
+            description: The number of I/O operations per second (IOPS) that the volume supports.
+            type: int
+            sample: null
+        size:
+            description: The size of the volume, in GiBs.
+            type: int
+            sample: 1
+        snapshot_id:
+            description: The snapshot from which the volume was created, if applicable.
+            type: str
+            sample: ""
+        status:
+            description: The volume state.
+            type: str
+            sample: "in-use"
+        tags:
+            description: Any tags assigned to the volume.
+            type: dict
+            sample: {
+                env: "dev"
+            }
+        type:
+            description: The volume type. This can be standard, gp2, gp3, io1, io2, st1 or sc1.
+            type: str
+            sample: "standard"
+        zone:
+            description: The Availability Zone of the volume.
+            type: str
+            sample: "us-east-1b"
+        throughput:
+            description: The throughput that the volume supports, in MiB/s.
+ type: int + sample: 131 +''' + +try: + from botocore.exceptions import ClientError +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict + + +def get_volume_info(volume, region): + + attachment_data = [] + for data in volume["attachments"]: + attachment_data.append({ + 'attach_time': data.get('attach_time', None), + 'device': data.get('device', None), + 'instance_id': data.get('instance_id', None), + 'status': data.get('state', None), + 'delete_on_termination': data.get('delete_on_termination', None) + }) + + volume_info = { + 'create_time': volume["create_time"], + 'id': volume["volume_id"], + 'encrypted': volume["encrypted"], + 'iops': volume["iops"] if "iops" in volume else None, + 'size': volume["size"], + 'snapshot_id': volume["snapshot_id"], + 'status': volume["state"], + 'type': volume["volume_type"], + 'zone': volume["availability_zone"], + 'region': region, + 'attachment_set': attachment_data, + 'tags': boto3_tag_list_to_ansible_dict(volume['tags']) if "tags" in volume else None + } + + if 'throughput' in volume: + volume_info['throughput'] = volume["throughput"] + + return volume_info + + +@AWSRetry.jittered_backoff() +def describe_volumes_with_backoff(connection, filters): + paginator = connection.get_paginator('describe_volumes') + return paginator.paginate(Filters=filters).build_full_result() + + +def list_ec2_volumes(connection, module): + + # Replace filter key underscores with dashes, for compatibility, except if we're dealing with tags + sanitized_filters = module.params.get("filters") + for key in list(sanitized_filters): + if not key.startswith("tag:"): + sanitized_filters[key.replace("_", "-")] = sanitized_filters.pop(key) + volume_dict_array = [] + + try: + all_volumes = describe_volumes_with_backoff(connection, ansible_dict_to_boto3_filter_list(sanitized_filters)) + except ClientError as e: + module.fail_json_aws(e, msg="Failed to describe volumes.") + + for volume in all_volumes["Volumes"]: + volume = camel_dict_to_snake_dict(volume, ignore_list=['Tags']) + volume_dict_array.append(get_volume_info(volume, module.region)) + module.exit_json(volumes=volume_dict_array) + + +def main(): + argument_spec = dict(filters=dict(default={}, type='dict')) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + connection = module.client('ec2') + + list_ec2_volumes(connection, module) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option.py new file mode 100644 index 000000000..edfdf7be3 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option.py @@ -0,0 +1,537 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_vpc_dhcp_option +version_added: 1.0.0 +short_description: Manages DHCP Options, and can 
ensure the DHCP options for the given VPC match what's
+  requested
+description:
+  - This module removes or creates DHCP option sets, and can associate them with a VPC.
+  - Optionally, a new DHCP options set can be created that converges a VPC's existing
+    DHCP option set with values provided.
+  - When I(dhcp_options_id) is provided, the module will 1. remove it (if I(state=absent)),
+    2. ensure tags are applied (if I(state=present) and tags are provided),
+    3. attach it to a VPC (if I(state=present) and a I(vpc_id) is provided).
+  - If any of the optional values are missing, they will be treated
+    as a no-op (i.e., inherit what already exists for the VPC).
+  - To remove existing options while inheriting, supply an empty value
+    (e.g. set I(ntp_servers) to [] if you want to remove them from the VPC's options).
+author:
+  - "Joel Thompson (@joelthompson)"
+options:
+  domain_name:
+    description:
+      - The domain name to set in the DHCP option sets.
+    type: str
+  dns_servers:
+    description:
+      - A list of IP addresses to set the DNS servers for the VPC to.
+    type: list
+    elements: str
+  ntp_servers:
+    description:
+      - List of hosts to advertise as NTP servers for the VPC.
+    type: list
+    elements: str
+  netbios_name_servers:
+    description:
+      - List of hosts to advertise as NetBIOS servers.
+    type: list
+    elements: str
+  netbios_node_type:
+    description:
+      - NetBIOS node type to advertise in the DHCP options.
+        The AWS recommendation is to use 2 (when using NetBIOS name services).
+        U(https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html)
+    type: int
+  vpc_id:
+    description:
+      - VPC ID to associate with the requested DHCP option set.
+      - If no VPC ID is provided, and no matching option set is found then a new
+        DHCP option set is created.
+    type: str
+  delete_old:
+    description:
+      - Whether to delete the old VPC DHCP option set when associating a new one.
+      - This is primarily useful for debugging/development purposes when you
+        want to quickly roll back to the old option set. Note that this setting
+        will be ignored, and the old DHCP option set will be preserved, if it
+        is in use by any other VPC. (Otherwise, AWS will return an error.)
+    type: bool
+    default: true
+  inherit_existing:
+    description:
+      - For any DHCP options not specified in these parameters, whether to
+        inherit them from the options set already applied to I(vpc_id), or to
+        reset them to be empty.
+    type: bool
+    default: false
+  dhcp_options_id:
+    description:
+      - The resource_id of an existing DHCP options set.
+        If this is specified, then it will override other settings, except tags
+        (which will be updated to match).
+    type: str
+  state:
+    description:
+      - Create/assign or remove the DHCP options.
+        If I(state=absent), then a DHCP options set matched either
+        by id, or by tags and options, will be removed if possible.
+    default: present
+    choices: [ 'absent', 'present' ]
+    type: str
+notes:
+  - Support for I(purge_tags) was added in release 2.0.0.
+
+extends_documentation_fragment:
+  - amazon.aws.aws
+  - amazon.aws.ec2
+  - amazon.aws.tags
+  - amazon.aws.boto3
+'''
+
+RETURN = """
+changed:
+    description: Whether the DHCP options were changed.
+    type: bool
+    returned: always
+dhcp_options:
+    description: The DHCP options created, associated or found.
+    returned: when available
+    type: dict
+    contains:
+        dhcp_configurations:
+            description: The DHCP configuration for the option set.
+            type: list
+            sample:
+              - '{"key": "ntp-servers", "values": [{"value": "10.0.0.2"}, {"value": "10.0.1.2"}]}'
+              - '{"key": "netbios-name-servers", "values": [{"value": "10.0.0.1"}, {"value": "10.0.1.1"}]}'
+        dhcp_options_id:
+            description: The AWS resource id of the primary DHCP options set created or found.
+            type: str
+            sample: "dopt-0955331de6a20dd07"
+        owner_id:
+            description: The ID of the AWS account that owns the DHCP options set.
+            type: str
+            sample: "012345678912"
+        tags:
+            description: The tags to be applied to a DHCP options set.
+            type: list
+            sample:
+              - '{"Key": "CreatedBy", "Value": "ansible-test"}'
+              - '{"Key": "Collection", "Value": "amazon.aws"}'
+dhcp_options_id:
+    description: The AWS resource id of the primary DHCP options set created, found or removed.
+    type: str
+    returned: when available
+dhcp_config:
+    description: The boto2-style DHCP options created, associated or found.
+    returned: when available
+    type: dict
+    contains:
+        domain-name-servers:
+            description: The IP addresses of up to four domain name servers, or AmazonProvidedDNS.
+            returned: when available
+            type: list
+            sample:
+              - 10.0.0.1
+              - 10.0.1.1
+        domain-name:
+            description: The domain name for hosts in the DHCP option sets.
+            returned: when available
+            type: list
+            sample:
+              - "my.example.com"
+        ntp-servers:
+            description: The IP addresses of up to four Network Time Protocol (NTP) servers.
+            returned: when available
+            type: list
+            sample:
+              - 10.0.0.1
+              - 10.0.1.1
+        netbios-name-servers:
+            description: The IP addresses of up to four NetBIOS name servers.
+            returned: when available
+            type: list
+            sample:
+              - 10.0.0.1
+              - 10.0.1.1
+        netbios-node-type:
+            description: The NetBIOS node type (1, 2, 4, or 8).
+            returned: when available
+            type: str
+            sample: 2
+"""
+
+EXAMPLES = """
+# Completely overrides the VPC DHCP options associated with VPC vpc-123456 and deletes any existing
+# DHCP option set that may have been attached to that VPC.
+- amazon.aws.ec2_vpc_dhcp_option:
+    domain_name: "foo.example.com"
+    region: us-east-1
+    dns_servers:
+      - 10.0.0.1
+      - 10.0.1.1
+    ntp_servers:
+      - 10.0.0.2
+      - 10.0.1.2
+    netbios_name_servers:
+      - 10.0.0.1
+      - 10.0.1.1
+    netbios_node_type: 2
+    vpc_id: vpc-123456
+    delete_old: true
+    inherit_existing: false
+
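+
+# Example (illustrative): remove the NTP servers from the VPC's options while
+# inheriting everything else, by supplying an empty list as described above
+- amazon.aws.ec2_vpc_dhcp_option:
+    region: us-east-1
+    vpc_id: vpc-123456
+    ntp_servers: []
+    inherit_existing: true
+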
+
+# Ensure the DHCP option set for the VPC has 10.0.0.4 and 10.0.1.4 as the specified DNS servers, but
+# keep any other existing settings. Also, keep the old DHCP option set around.
+- amazon.aws.ec2_vpc_dhcp_option:
+    region: us-east-1
+    dns_servers:
+      - "{{ groups['dns-primary'] }}"
+      - "{{ groups['dns-secondary'] }}"
+    vpc_id: vpc-123456
+    inherit_existing: true
+    delete_old: false
+
+
+## Create a DHCP option set with 4.4.4.4 and 8.8.8.8 as the specified DNS servers, with tags
+## but do not assign to a VPC
+- amazon.aws.ec2_vpc_dhcp_option:
+    region: us-east-1
+    dns_servers:
+      - 4.4.4.4
+      - 8.8.8.8
+    tags:
+      Name: google servers
+      Environment: Test
+
+## Delete a DHCP options set that matches the tags and options specified
+- amazon.aws.ec2_vpc_dhcp_option:
+    region: us-east-1
+    dns_servers:
+      - 4.4.4.4
+      - 8.8.8.8
+    tags:
+      Name: google servers
+      Environment: Test
+    state: absent
+
+## Associate a DHCP options set with a VPC by ID
+- amazon.aws.ec2_vpc_dhcp_option:
+    region: us-east-1
+    dhcp_options_id: dopt-12345678
+    vpc_id: vpc-123456
+
+"""
+
+try:
+    import botocore
+except ImportError:
+    pass  # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import normalize_ec2_vpc_dhcp_config
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+
+
+def fetch_dhcp_options_for_vpc(client, module, vpc_id):
+    try:
+        vpcs = client.describe_vpcs(aws_retry=True, VpcIds=[vpc_id])['Vpcs']
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+        module.fail_json_aws(e, msg="Unable to describe vpc {0}".format(vpc_id))
+
+    if len(vpcs) != 1:
+        return None, None
+    try:
+        dhcp_options = client.describe_dhcp_options(aws_retry=True, DhcpOptionsIds=[vpcs[0]['DhcpOptionsId']])
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+        module.fail_json_aws(e, msg="Unable to describe dhcp option {0}".format(vpcs[0]['DhcpOptionsId']))
+
+    if len(dhcp_options['DhcpOptions']) != 1:
+        return None, None
+    return dhcp_options['DhcpOptions'][0]['DhcpConfigurations'], dhcp_options['DhcpOptions'][0]['DhcpOptionsId']
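+
+
+# fetch_dhcp_options_for_vpc() above returns a (DhcpConfigurations, DhcpOptionsId)
+# tuple, or (None, None) when the VPC or its option set cannot be described
+# unambiguously, so callers can always unpack two values.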
+def remove_dhcp_options_by_id(client, module, dhcp_options_id):
+    changed = False
+    # First, check if this dhcp option is associated to any other vpcs
+    try:
+        associations = client.describe_vpcs(aws_retry=True, Filters=[{'Name': 'dhcp-options-id', 'Values': [dhcp_options_id]}])
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+        module.fail_json_aws(e, msg="Unable to describe VPC associations for dhcp option id {0}".format(dhcp_options_id))
+    if len(associations['Vpcs']) > 0:
+        return changed
+
+    changed = True
+    if not module.check_mode:
+        try:
+            client.delete_dhcp_options(aws_retry=True, DhcpOptionsId=dhcp_options_id)
+        except is_boto3_error_code('InvalidDhcpOptionsID.NotFound'):
+            return False
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
+            module.fail_json_aws(e, msg="Unable to delete dhcp option {0}".format(dhcp_options_id))
+
+    return changed
+
+
+def match_dhcp_options(client, module, new_config):
+    """
+    Returns (True, DhcpOptionsId) if the module parameters match an existing option set; (False, None) otherwise.
+    Filters by tags, if any are specified.
+    """
+    try:
+        all_dhcp_options = client.describe_dhcp_options(aws_retry=True)
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+        module.fail_json_aws(e, msg="Unable to describe dhcp options")
+
+    for dopts in all_dhcp_options['DhcpOptions']:
+        if module.params['tags']:
+            # If we were given tags, try to match on them
+            boto_tags = ansible_dict_to_boto3_tag_list(module.params['tags'])
+            if dopts['DhcpConfigurations'] == new_config and dopts['Tags'] == boto_tags:
+                return True, dopts['DhcpOptionsId']
+        elif dopts['DhcpConfigurations'] == new_config:
+            return True, dopts['DhcpOptionsId']
+
+    return False, None
+
+
+def create_dhcp_config(module):
+    """
+    Convert provided parameters into a DhcpConfigurations list that conforms to what the API returns:
+    https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeDhcpOptions.html
+    [{'Key': 'domain-name',
+      'Values': [{'Value': 'us-west-2.compute.internal'}]},
+     {'Key': 'domain-name-servers',
+      'Values': [{'Value': 'AmazonProvidedDNS'}]},
+     ...],
+    """
+    new_config = []
+    params = module.params
+    if params['domain_name'] is not None:
+        new_config.append({'Key': 'domain-name', 'Values': [{'Value': params['domain_name']}]})
+    if params['dns_servers'] is not None:
+        dns_server_list = []
+        for server in params['dns_servers']:
+            dns_server_list.append({'Value': server})
+        new_config.append({'Key': 'domain-name-servers', 'Values': dns_server_list})
+    if params['ntp_servers'] is not None:
+        ntp_server_list = []
+        for server in params['ntp_servers']:
+            ntp_server_list.append({'Value': server})
+        new_config.append({'Key': 'ntp-servers', 'Values': ntp_server_list})
+    if params['netbios_name_servers'] is not None:
+        netbios_server_list = []
+        for server in params['netbios_name_servers']:
+            netbios_server_list.append({'Value': server})
+        new_config.append({'Key': 'netbios-name-servers', 'Values': netbios_server_list})
+    if params['netbios_node_type'] is not None:
+        new_config.append({'Key': 'netbios-node-type', 'Values': params['netbios_node_type']})
+
+    return new_config
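+
+
+# For example (assumed values), create_dhcp_config() above returns, for
+# domain_name='example.com' and dns_servers=['10.0.0.1', '10.0.1.1']:
+#   [{'Key': 'domain-name', 'Values': [{'Value': 'example.com'}]},
+#    {'Key': 'domain-name-servers', 'Values': [{'Value': '10.0.0.1'}, {'Value': '10.0.1.1'}]}]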
+def create_dhcp_option_set(client, module, new_config):
+    """
+    A CreateDhcpOptions object looks different than the object we create in create_dhcp_config()
+    This is the only place we use it, so create it now
+    https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateDhcpOptions.html
+    We have to do this after inheriting any existing_config, so we need to start with the object
+    that we made in create_dhcp_config().
+    normalize_ec2_vpc_dhcp_config() gives us the nicest format to work with for this.
+    """
+    changed = True
+    desired_config = normalize_ec2_vpc_dhcp_config(new_config)
+    create_config = []
+    tags_list = []
+
+    for option in ['domain-name', 'domain-name-servers', 'ntp-servers', 'netbios-name-servers']:
+        if desired_config.get(option):
+            create_config.append({'Key': option, 'Values': desired_config[option]})
+    if desired_config.get('netbios-node-type'):
+        # We need to listify this one
+        create_config.append({'Key': 'netbios-node-type', 'Values': [desired_config['netbios-node-type']]})
+
+    if module.params.get('tags'):
+        tags_list = boto3_tag_specifications(module.params['tags'], ['dhcp-options'])
+
+    try:
+        if not module.check_mode:
+            dhcp_options = client.create_dhcp_options(aws_retry=True, DhcpConfigurations=create_config, TagSpecifications=tags_list)
+            return changed, dhcp_options['DhcpOptions']['DhcpOptionsId']
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+        module.fail_json_aws(e, msg="Unable to create dhcp option set")
+
+    return changed, None
+
+
+def find_opt_index(config, option):
+    return next((i for i, item in enumerate(config) if item["Key"] == option), None)
+
+
+def inherit_dhcp_config(existing_config, new_config):
+    """
+    Compare two DhcpConfigurations lists and apply existing options to unset parameters
+
+    If there's an existing option config and the new option is not set or it's none,
+    inherit the existing config.
+    The configs are unordered lists of dicts with non-unique keys, so we have to find
+    the right list index for a given config option first.
+    """
+    changed = False
+    for option in ['domain-name', 'domain-name-servers', 'ntp-servers',
+                   'netbios-name-servers', 'netbios-node-type']:
+        existing_index = find_opt_index(existing_config, option)
+        new_index = find_opt_index(new_config, option)
+        # `if existing_index` evaluates to False on index 0, so be very specific and verbose
+        if existing_index is not None and new_index is None:
+            new_config.append(existing_config[existing_index])
+            changed = True
+
+    return changed, new_config
+
+
+def get_dhcp_options_info(client, module, dhcp_options_id):
+    # Return boto3-style details, consistent with the _info module
+
+    if module.check_mode and dhcp_options_id is None:
+        # We can't describe without an option id, we might get here when creating a new option set in check_mode
+        return None
+
+    try:
+        dhcp_option_info = client.describe_dhcp_options(aws_retry=True, DhcpOptionsIds=[dhcp_options_id])
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+        module.fail_json_aws(e, msg="Unable to describe dhcp options")
+
+    dhcp_options_set = dhcp_option_info['DhcpOptions'][0]
+    dhcp_option_info = {'DhcpOptionsId': dhcp_options_set['DhcpOptionsId'],
+                        'DhcpConfigurations': dhcp_options_set['DhcpConfigurations'],
+                        'Tags': boto3_tag_list_to_ansible_dict(dhcp_options_set.get('Tags', [{'Value': '', 'Key': 'Name'}]))}
+    return camel_dict_to_snake_dict(dhcp_option_info, ignore_list=['Tags'])
+
+
+def associate_options(client, module, vpc_id, dhcp_options_id):
+    try:
+        if not module.check_mode:
+            client.associate_dhcp_options(aws_retry=True, DhcpOptionsId=dhcp_options_id, VpcId=vpc_id)
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+        module.fail_json_aws(e, msg="Unable to associate dhcp option {0} to VPC {1}".format(dhcp_options_id, vpc_id))
+
+
+def main():
+    argument_spec = dict(
+        dhcp_options_id=dict(type='str', default=None),
+        domain_name=dict(type='str', default=None),
+        dns_servers=dict(type='list', elements='str', default=None),
+        ntp_servers=dict(type='list', elements='str', default=None),
+        netbios_name_servers=dict(type='list', elements='str', default=None),
+        netbios_node_type=dict(type='int', default=None),
+        vpc_id=dict(type='str', default=None),
+        delete_old=dict(type='bool', default=True),
+        inherit_existing=dict(type='bool', default=False),
+        tags=dict(type='dict', default=None, aliases=['resource_tags']),
+        purge_tags=dict(default=True, type='bool'),
+        state=dict(type='str', default='present', choices=['present', 'absent'])
+    )
+
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        check_boto3=False,
+        supports_check_mode=True
+    )
+
+    vpc_id = module.params['vpc_id']
+    delete_old = module.params['delete_old']
+    inherit_existing = module.params['inherit_existing']
+    tags = module.params['tags']
+    purge_tags = module.params['purge_tags']
+    state = module.params['state']
+    dhcp_options_id = module.params['dhcp_options_id']
+
+    found = False
+    changed = False
+    new_config = create_dhcp_config(module)
+    existing_config = None
+    existing_id = None
+
+    client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+    module.deprecate("The 'new_config' return key is deprecated and will be replaced by 'dhcp_config'. Both values are returned for now.",
+                     date='2022-12-01', collection_name='amazon.aws')
+    if state == 'absent':
+        if not dhcp_options_id:
+            # Look up the option id first by matching the supplied options
+            found, dhcp_options_id = match_dhcp_options(client, module, new_config)
+        if dhcp_options_id:
+            changed = remove_dhcp_options_by_id(client, module, dhcp_options_id)
+        module.exit_json(changed=changed, new_options={}, dhcp_options={})
+
+    if not dhcp_options_id:
+        # If we were given a vpc_id then we need to look at the configuration on that
+        if vpc_id:
+            existing_config, existing_id = fetch_dhcp_options_for_vpc(client, module, vpc_id)
+            # if we've been asked to inherit existing options, do that now
+            if inherit_existing and existing_config:
+                changed, new_config = inherit_dhcp_config(existing_config, new_config)
+            # Do the VPC's DHCP options already match what we're asked for? If so, we are done.
+            if existing_config:
+                if new_config == existing_config:
+                    dhcp_options_id = existing_id
+                    if tags or purge_tags:
+                        changed |= ensure_ec2_tags(client, module, dhcp_options_id, resource_type='dhcp-options',
+                                                   tags=tags, purge_tags=purge_tags)
+                    return_config = normalize_ec2_vpc_dhcp_config(new_config)
+                    results = get_dhcp_options_info(client, module, dhcp_options_id)
+                    module.exit_json(changed=changed, new_options=return_config, dhcp_options_id=dhcp_options_id, dhcp_options=results)
+        # If no vpc_id was given, or the options don't match then look for an existing set using tags
+        found, dhcp_options_id = match_dhcp_options(client, module, new_config)
+
+    else:
+        # Now let's cover the case where there are existing options that we were told about by id
+        # If a dhcp_options_id was supplied we don't look at options inside, just set tags (if given)
+        try:
+            # Preserve the boto2 module's behaviour of checking if the option set exists first,
+            # and return the same error message if it does not
+            client.describe_dhcp_options(aws_retry=True, DhcpOptionsIds=[dhcp_options_id])
+            # If that didn't fail, then we know the option ID exists
+            found = True
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            module.fail_json_aws(e, msg="a dhcp_options_id was supplied, but does not exist")
+
+    if not found:
+        # If we still don't have an options ID, create it
+        changed, dhcp_options_id = create_dhcp_option_set(client, module, new_config)
+    else:
+        if tags or purge_tags:
+            changed |= ensure_ec2_tags(client, module, dhcp_options_id, resource_type='dhcp-options',
+                                       tags=tags, purge_tags=purge_tags)
+
+    # If we were given a vpc_id, then attach the options we now have to that before we finish
+    if vpc_id:
+        associate_options(client, module, vpc_id, dhcp_options_id)
+        changed = True
+
+    if delete_old and existing_id:
+        remove_dhcp_options_by_id(client, module, existing_id)
+
+    return_config = normalize_ec2_vpc_dhcp_config(new_config)
+    results = get_dhcp_options_info(client, module, dhcp_options_id)
+    module.exit_json(changed=changed, new_options=return_config, dhcp_options_id=dhcp_options_id, dhcp_options=results, dhcp_config=return_config)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option_info.py
new file mode 100644
index 000000000..c5058bd7a
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option_info.py
@@ -0,0 +1,216 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_dhcp_option_info
+version_added: 1.0.0
+short_description: Gather information about DHCP options sets in AWS
+description:
+  - Gather information about DHCP options sets in AWS.
+author: "Nick Aslanidis (@naslanidis)"
+options:
+  filters:
+    description:
+      - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+        See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeDhcpOptions.html) for possible filters.
+    type: dict
+    default: {}
+  dhcp_options_ids:
+    description:
+      - Get details of specific DHCP option IDs.
+    type: list
+    elements: str
+  dry_run:
+    description:
+      - Checks whether you have the required permissions to view the DHCP
+        options.
+    type: bool
+    default: false
+extends_documentation_fragment:
+  - amazon.aws.aws
+  - amazon.aws.ec2
+  - amazon.aws.boto3
+'''

+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all DHCP Option sets for an account or profile
+  amazon.aws.ec2_vpc_dhcp_option_info:
+    region: ap-southeast-2
+    profile: production
+  register: dhcp_info
+
+- name: Gather information about a filtered list of DHCP Option sets
+  amazon.aws.ec2_vpc_dhcp_option_info:
+    region: ap-southeast-2
+    profile: production
+    filters:
+      "tag:Name": "abc-123"
+  register: dhcp_info
+
+- name: Gather information about a specific DHCP Option set by DhcpOptionId
+  amazon.aws.ec2_vpc_dhcp_option_info:
+    region: ap-southeast-2
+    profile: production
+    dhcp_options_ids:
+      - dopt-123fece2
+  register: dhcp_info
+
+'''
+
+RETURN = '''
+dhcp_options:
+    description: The DHCP options created, associated or found.
+    returned: always
+    type: list
+    elements: dict
+    contains:
+        dhcp_configurations:
+            description: The DHCP configuration for the option set.
+            type: list
+            elements: dict
+            contains:
+                key:
+                    description: The name of a DHCP option.
+                    returned: always
+                    type: str
+                values:
+                    description: List of values for the DHCP option.
+                    returned: always
+                    type: list
+                    elements: dict
+                    contains:
+                        value:
+                            description: The attribute value. This value is case-sensitive.
+                            returned: always
+                            type: str
+            sample:
+              - '{"key": "ntp-servers", "values": [{"value": "10.0.0.2"}, {"value": "10.0.1.2"}]}'
+              - '{"key": "netbios-name-servers", "values": [{"value": "10.0.0.1"}, {"value": "10.0.1.1"}]}'
+        dhcp_options_id:
+            description: The AWS resource id of the primary DHCP options set created or found.
+            type: str
+            sample: "dopt-0955331de6a20dd07"
+        owner_id:
+            description: The ID of the AWS account that owns the DHCP options set.
+            type: str
+            sample: "012345678912"
+        tags:
+            description: The tags to be applied to a DHCP options set.
+            type: list
+            elements: dict
+            sample:
+              - '{"Key": "CreatedBy", "Value": "ansible-test"}'
+              - '{"Key": "Collection", "Value": "amazon.aws"}'
+dhcp_config:
+    description: The boto2-style DHCP options created, associated or found. Provided for consistency with ec2_vpc_dhcp_option's C(dhcp_config).
+    returned: always
+    type: list
+    elements: dict
+    contains:
+        domain-name-servers:
+            description: The IP addresses of up to four domain name servers, or AmazonProvidedDNS.
+            returned: when available
+            type: list
+            sample:
+              - 10.0.0.1
+              - 10.0.1.1
+        domain-name:
+            description: The domain name for hosts in the DHCP option sets.
+            returned: when available
+            type: list
+            sample:
+              - "my.example.com"
+        ntp-servers:
+            description: The IP addresses of up to four Network Time Protocol (NTP) servers.
+            returned: when available
+            type: list
+            sample:
+              - 10.0.0.1
+              - 10.0.1.1
+        netbios-name-servers:
+            description: The IP addresses of up to four NetBIOS name servers.
+            returned: when available
+            type: list
+            sample:
+              - 10.0.0.1
+              - 10.0.1.1
+        netbios-node-type:
+            description: The NetBIOS node type (1, 2, 4, or 8).
+            returned: when available
+            type: str
+            sample: 2
+changed:
+    description: True if listing the DHCP options succeeds.
+    type: bool
+    returned: always
+'''
+
+try:
+    import botocore
+except ImportError:
+    pass  # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import normalize_ec2_vpc_dhcp_config
+
+
+def get_dhcp_options_info(dhcp_option):
+    dhcp_option_info = {'DhcpOptionsId': dhcp_option['DhcpOptionsId'],
+                        'DhcpConfigurations': dhcp_option['DhcpConfigurations'],
+                        'Tags': boto3_tag_list_to_ansible_dict(dhcp_option.get('Tags', [{'Value': '', 'Key': 'Name'}]))}
+    return dhcp_option_info
+
+
+def list_dhcp_options(client, module):
+    params = dict(Filters=ansible_dict_to_boto3_filter_list(module.params.get('filters')))
+
+    if module.params.get("dry_run"):
+        params['DryRun'] = True
+
+    if module.params.get("dhcp_options_ids"):
+        params['DhcpOptionsIds'] = module.params.get("dhcp_options_ids")
+
+    try:
+        all_dhcp_options = client.describe_dhcp_options(aws_retry=True, **params)
+    except botocore.exceptions.ClientError as e:
+        module.fail_json_aws(e)
+
+    normalized_config = [normalize_ec2_vpc_dhcp_config(config['DhcpConfigurations']) for config in all_dhcp_options['DhcpOptions']]
+    raw_config = [camel_dict_to_snake_dict(get_dhcp_options_info(option), ignore_list=['Tags']) for option in all_dhcp_options['DhcpOptions']]
+    return raw_config, normalized_config
+
+
+def main():
+    argument_spec = dict(
+        filters=dict(type='dict', default={}),
+        dry_run=dict(type='bool', default=False),
+        dhcp_options_ids=dict(type='list', elements='str'),
+    )
+
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True
+    )
+
+    client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+    results, normalized_config = list_dhcp_options(client, module)
+
+    module.exit_json(dhcp_options=results, dhcp_config=normalized_config)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint.py
new file mode 100644
index 000000000..080610eb6
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint.py
@@ -0,0 +1,482 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: ec2_vpc_endpoint
+short_description: Create and delete AWS VPC endpoints
+version_added: 1.0.0
+description:
+  - Creates AWS VPC endpoints.
+  - Deletes AWS VPC endpoints.
+  - This module supports check mode.
+options:
+  vpc_id:
+    description:
+      - The ID of the VPC in which to create the endpoint.
+      - Required when creating a VPC endpoint.
+    required: false
+    type: str
+  vpc_endpoint_type:
+    description:
+      - The type of endpoint.
+    required: false
+    default: Gateway
+    choices: [ "Interface", "Gateway", "GatewayLoadBalancer" ]
+    type: str
+    version_added: 1.5.0
+  vpc_endpoint_subnets:
+    description:
+      - The list of subnets to attach to the endpoint.
+      - Requires I(vpc_endpoint_type=GatewayLoadBalancer) or I(vpc_endpoint_type=Interface).
+    required: false
+    type: list
+    elements: str
+    version_added: 2.1.0
+  vpc_endpoint_security_groups:
+    description:
+      - The list of security groups to attach to the endpoint.
+      - Requires I(vpc_endpoint_type=GatewayLoadBalancer) or I(vpc_endpoint_type=Interface).
+    required: false
+    type: list
+    elements: str
+    version_added: 2.1.0
+  service:
+    description:
+      - An AWS supported VPC endpoint service. Use the M(amazon.aws.ec2_vpc_endpoint_info)
+        module to describe the supported endpoint services.
+      - Required when creating an endpoint.
+    required: false
+    type: str
+  policy:
+    description:
+      - A properly formatted JSON policy as string, see
+        U(https://github.com/ansible/ansible/issues/7005#issuecomment-42894813).
+        Cannot be used with I(policy_file).
+      - Optional when creating an endpoint. If not provided, AWS will
+        utilise a default policy which provides full access to the service.
+    required: false
+    type: json
+  policy_file:
+    description:
+      - The path to the properly JSON-formatted policy file, see
+        U(https://github.com/ansible/ansible/issues/7005#issuecomment-42894813)
+        on how to use it properly. Cannot be used with I(policy).
+      - Optional when creating an endpoint. If not provided, AWS will
+        utilise a default policy which provides full access to the service.
+      - This option has been deprecated and will be removed after 2022-12-01.
+        To maintain the existing functionality, please use the I(policy) option
+        and a file lookup.
+    required: false
+    aliases: [ "policy_path" ]
+    type: path
+  state:
+    description:
+      - C(present) to ensure resource is created.
+      - C(absent) to remove resource.
+    required: false
+    default: present
+    choices: [ "present", "absent" ]
+    type: str
+  wait:
+    description:
+      - When specified, will wait for status to reach C(available) for I(state=present).
+      - Unfortunately this is ignored for delete actions due to a difference in
+        behaviour from AWS.
+    required: false
+    default: false
+    type: bool
+  wait_timeout:
+    description:
+      - Used in conjunction with I(wait).
+      - Number of seconds to wait for status.
+      - Unfortunately this is ignored for delete actions due to a difference in
+        behaviour from AWS.
+    required: false
+    default: 320
+    type: int
+  route_table_ids:
+    description:
+      - List of one or more route table IDs to attach to the endpoint.
+      - A route is added to the route table with the destination of the
+        endpoint if provided.
+      - Route table IDs are only valid for C(Gateway) endpoints.
+    required: false
+    type: list
+    elements: str
+  vpc_endpoint_id:
+    description:
+      - The ID of a VPC endpoint to remove from the AWS account.
+      - Required if I(state=absent).
+    required: false
+    type: str
+  client_token:
+    description:
+      - Optional client token to ensure idempotency.
+    required: false
+    type: str
+author:
+  - Karen Cheng (@Etherdaemon)
+notes:
+  - Support for I(tags) and I(purge_tags) was added in release 1.5.0.
+extends_documentation_fragment:
+  - amazon.aws.aws
+  - amazon.aws.ec2
+  - amazon.aws.tags
+  - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
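+
+# Example (illustrative values only): create an Interface endpoint attached to
+# subnets and security groups, using the options documented above
+- name: Create new vpc endpoint for an interface service
+  amazon.aws.ec2_vpc_endpoint:
+    state: present
+    region: ap-southeast-2
+    vpc_id: vpc-12345678
+    service: com.amazonaws.ap-southeast-2.ec2
+    vpc_endpoint_type: Interface
+    vpc_endpoint_subnets:
+      - subnet-12345678
+    vpc_endpoint_security_groups:
+      - sg-12345678
+  register: new_interface_endpoint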
+ +- name: Create new vpc endpoint with a json template for policy + amazon.aws.ec2_vpc_endpoint: + state: present + region: ap-southeast-2 + vpc_id: vpc-12345678 + service: com.amazonaws.ap-southeast-2.s3 + policy: " {{ lookup( 'template', 'endpoint_policy.json.j2') }} " + route_table_ids: + - rtb-12345678 + - rtb-87654321 + register: new_vpc_endpoint + +- name: Create new vpc endpoint with the default policy + amazon.aws.ec2_vpc_endpoint: + state: present + region: ap-southeast-2 + vpc_id: vpc-12345678 + service: com.amazonaws.ap-southeast-2.s3 + route_table_ids: + - rtb-12345678 + - rtb-87654321 + register: new_vpc_endpoint + +- name: Create new vpc endpoint with json file + amazon.aws.ec2_vpc_endpoint: + state: present + region: ap-southeast-2 + vpc_id: vpc-12345678 + service: com.amazonaws.ap-southeast-2.s3 + policy_file: "{{ role_path }}/files/endpoint_policy.json" + route_table_ids: + - rtb-12345678 + - rtb-87654321 + register: new_vpc_endpoint + +- name: Delete newly created vpc endpoint + amazon.aws.ec2_vpc_endpoint: + state: absent + vpc_endpoint_id: "{{ new_vpc_endpoint.result['VpcEndpointId'] }}" + region: ap-southeast-2 +''' + +RETURN = r''' +endpoints: + description: The resulting endpoints from the module call + returned: success + type: list + sample: [ + { + "creation_timestamp": "2017-02-20T05:04:15+00:00", + "policy_document": { + "Id": "Policy1450910922815", + "Statement": [ + { + "Action": "s3:*", + "Effect": "Allow", + "Principal": "*", + "Resource": [ + "arn:aws:s3:::*/*", + "arn:aws:s3:::*" + ], + "Sid": "Stmt1450910920641" + } + ], + "Version": "2012-10-17" + }, + "route_table_ids": [ + "rtb-abcd1234" + ], + "service_name": "com.amazonaws.ap-southeast-2.s3", + "vpc_endpoint_id": "vpce-a1b2c3d4", + "vpc_id": "vpc-abbad0d0" + } + ] +''' + +import datetime +import json +import traceback + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.six import string_types +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications + + +def get_endpoints(client, module, endpoint_id=None): + params = dict() + if endpoint_id: + params['VpcEndpointIds'] = [endpoint_id] + else: + filters = list() + if module.params.get('service'): + filters.append({'Name': 'service-name', 'Values': [module.params.get('service')]}) + if module.params.get('vpc_id'): + filters.append({'Name': 'vpc-id', 'Values': [module.params.get('vpc_id')]}) + params['Filters'] = filters + try: + result = client.describe_vpc_endpoints(aws_retry=True, **params) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Failed to get endpoints") + + # normalize iso datetime fields in result + normalized_result = normalize_boto3_result(result) + return normalized_result + + +def match_endpoints(route_table_ids, service_name, vpc_id, endpoint): + found = False + 
    sorted_route_table_ids = []
+
+    if route_table_ids:
+        sorted_route_table_ids = sorted(route_table_ids)
+
+    if endpoint['VpcId'] == vpc_id and endpoint['ServiceName'] == service_name:
+        sorted_endpoint_rt_ids = sorted(endpoint['RouteTableIds'])
+        if sorted_endpoint_rt_ids == sorted_route_table_ids:
+            found = True
+    return found
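+
+# Illustrative only (assumed data, mirroring the comparison above): an endpoint
+# matches when the VPC, the service name and the *sorted* route table IDs all
+# agree, so ordering of the supplied route tables does not matter.
+#
+#   endpoint = {'VpcId': 'vpc-1', 'ServiceName': 'com.amazonaws.us-east-1.s3',
+#               'RouteTableIds': ['rtb-b', 'rtb-a']}
+#   match_endpoints(['rtb-a', 'rtb-b'], 'com.amazonaws.us-east-1.s3', 'vpc-1', endpoint)
+#   # -> True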
+
+
+def setup_creation(client, module):
+    endpoint_id = module.params.get('vpc_endpoint_id')
+    route_table_ids = module.params.get('route_table_ids')
+    service_name = module.params.get('service')
+    vpc_id = module.params.get('vpc_id')
+    changed = False
+
+    if not endpoint_id:
+        # Try to use the module parameters to match any existing endpoints
+        all_endpoints = get_endpoints(client, module, endpoint_id)
+        if len(all_endpoints['VpcEndpoints']) > 0:
+            for endpoint in all_endpoints['VpcEndpoints']:
+                if match_endpoints(route_table_ids, service_name, vpc_id, endpoint):
+                    endpoint_id = endpoint['VpcEndpointId']
+                    break
+
+    if endpoint_id:
+        # If we have an endpoint now, just ensure tags and exit
+        if module.params.get('tags'):
+            changed |= ensure_ec2_tags(client, module, endpoint_id,
+                                       resource_type='vpc-endpoint',
+                                       tags=module.params.get('tags'),
+                                       purge_tags=module.params.get('purge_tags'))
+        normalized_result = get_endpoints(client, module, endpoint_id=endpoint_id)['VpcEndpoints'][0]
+        return changed, camel_dict_to_snake_dict(normalized_result, ignore_list=['Tags'])
+
+    changed, result = create_vpc_endpoint(client, module)
+
+    return changed, camel_dict_to_snake_dict(result, ignore_list=['Tags'])
+
+
+def create_vpc_endpoint(client, module):
+    params = dict()
+    changed = False
+    token_provided = False
+    params['VpcId'] = module.params.get('vpc_id')
+    params['VpcEndpointType'] = module.params.get('vpc_endpoint_type')
+    params['ServiceName'] = module.params.get('service')
+
+    if module.params.get('vpc_endpoint_type') != 'Gateway' and module.params.get('route_table_ids'):
+        module.fail_json(msg="Route table IDs are only supported for Gateway type VPC Endpoint.")
+
+    if module.check_mode:
+        changed = True
+        result = 'Would have created VPC Endpoint if not in check mode'
+        module.exit_json(changed=changed, result=result)
+
+    if module.params.get('route_table_ids'):
+        params['RouteTableIds'] = module.params.get('route_table_ids')
+
+    if module.params.get('vpc_endpoint_subnets'):
+        params['SubnetIds'] = module.params.get('vpc_endpoint_subnets')
+
+    if module.params.get('vpc_endpoint_security_groups'):
+        params['SecurityGroupIds'] = module.params.get('vpc_endpoint_security_groups')
+
+    if module.params.get('client_token'):
+        token_provided = True
+        request_time = datetime.datetime.utcnow()
+        params['ClientToken'] = module.params.get('client_token')
+
+    policy = None
+    if module.params.get('policy'):
+        try:
+            policy = json.loads(module.params.get('policy'))
+        except ValueError as e:
+            # A ValueError carries no boto3-style 'response' attribute, so only
+            # the message and traceback can be reported here.
+            module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+    elif module.params.get('policy_file'):
+        try:
+            with open(module.params.get('policy_file'), 'r') as json_data:
+                policy = json.load(json_data)
+        except (OSError, json.JSONDecodeError) as e:
+            module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+    if policy:
+        params['PolicyDocument'] = json.dumps(policy)
+
+    if module.params.get('tags'):
+        params["TagSpecifications"] = boto3_tag_specifications(module.params.get('tags'), ['vpc-endpoint'])
+
+    try:
+        changed = True
+        result = client.create_vpc_endpoint(aws_retry=True, **params)['VpcEndpoint']
+        if token_provided and (request_time > result['creation_timestamp'].replace(tzinfo=None)):
+            changed = False
+        elif module.params.get('wait') and not module.check_mode:
+            try:
+                waiter = get_waiter(client, 'vpc_endpoint_exists')
+                waiter.wait(VpcEndpointIds=[result['VpcEndpointId']], WaiterConfig=dict(Delay=15, MaxAttempts=module.params.get('wait_timeout') // 15))
+            except botocore.exceptions.WaiterError as e:
+                module.fail_json_aws(e, msg='Error waiting for vpc endpoint to become available - please check the AWS console')
+            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
+                module.fail_json_aws(e, msg='Failure while waiting for status')
+
+    except is_boto3_error_code('IdempotentParameterMismatch'):  # pylint: disable=duplicate-except
+        module.fail_json(msg="IdempotentParameterMismatch - updates of endpoints are not allowed by the API")
+    except is_boto3_error_code('RouteAlreadyExists'):  # pylint: disable=duplicate-except
+        module.fail_json(msg="RouteAlreadyExists for one of the route tables - update is not allowed by the API")
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg="Failed to create VPC endpoint.")
+
+    # describe and normalize iso datetime fields in result after adding tags
+    normalized_result = get_endpoints(client, module, endpoint_id=result['VpcEndpointId'])['VpcEndpoints'][0]
+    return changed, normalized_result
+
+
+def setup_removal(client, module):
+    params = dict()
+    changed = False
+
+    if module.check_mode:
+        try:
+            exists = client.describe_vpc_endpoints(aws_retry=True, VpcEndpointIds=[module.params.get('vpc_endpoint_id')])
+            if exists:
+                result = {'msg': 'Would have deleted VPC Endpoint if not in check mode'}
+                changed = True
+        except is_boto3_error_code('InvalidVpcEndpointId.NotFound'):
+            result = {'msg': 'Endpoint does not exist, nothing to delete.'}
+            changed = False
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
+            module.fail_json_aws(e, msg="Failed to get endpoints")
+
+        return changed, result
+
+    if isinstance(module.params.get('vpc_endpoint_id'), string_types):
+        params['VpcEndpointIds'] = [module.params.get('vpc_endpoint_id')]
+    else:
+        params['VpcEndpointIds'] = module.params.get('vpc_endpoint_id')
+    try:
+        result = client.delete_vpc_endpoints(aws_retry=True, **params)['Unsuccessful']
+        if len(result) < len(params['VpcEndpointIds']):
+            changed = True
+        # For some reason delete_vpc_endpoints doesn't throw exceptions; it
+        # returns a list of failed 'results' instead. 
Throw these so we can + # catch them the way we expect + for r in result: + try: + raise botocore.exceptions.ClientError(r, 'delete_vpc_endpoints') + except is_boto3_error_code('InvalidVpcEndpoint.NotFound'): + continue + + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, "Failed to delete VPC endpoint") + return changed, result + + +def main(): + argument_spec = dict( + vpc_id=dict(), + vpc_endpoint_type=dict(default='Gateway', choices=['Interface', 'Gateway', 'GatewayLoadBalancer']), + vpc_endpoint_security_groups=dict(type='list', elements='str'), + vpc_endpoint_subnets=dict(type='list', elements='str'), + service=dict(), + policy=dict(type='json'), + policy_file=dict(type='path', aliases=['policy_path']), + state=dict(default='present', choices=['present', 'absent']), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='int', default=320, required=False), + route_table_ids=dict(type='list', elements='str'), + vpc_endpoint_id=dict(), + client_token=dict(no_log=False), + tags=dict(type='dict', aliases=['resource_tags']), + purge_tags=dict(type='bool', default=True), + ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[['policy', 'policy_file']], + required_if=[ + ['state', 'present', ['vpc_id', 'service']], + ['state', 'absent', ['vpc_endpoint_id']], + ], + ) + + # Validate Requirements + state = module.params.get('state') + + if module.params.get('policy_file'): + module.deprecate('The policy_file option has been deprecated and' + ' will be removed after 2022-12-01', + date='2022-12-01', collection_name='amazon.aws') + + if module.params.get('vpc_endpoint_type'): + if module.params.get('vpc_endpoint_type') == 'Gateway': + if module.params.get('vpc_endpoint_subnets') or module.params.get('vpc_endpoint_security_groups'): + module.fail_json(msg="Parameter vpc_endpoint_subnets and/or vpc_endpoint_security_groups can't be used with Gateway endpoint type") + + if module.params.get('vpc_endpoint_type') == 'GatewayLoadBalancer': + if module.params.get('vpc_endpoint_security_groups'): + module.fail_json(msg="Parameter vpc_endpoint_security_groups can't be used with GatewayLoadBalancer endpoint type") + + if module.params.get('vpc_endpoint_type') == 'Interface': + if module.params.get('vpc_endpoint_subnets') and not module.params.get('vpc_endpoint_security_groups'): + module.fail_json(msg="Parameter vpc_endpoint_security_groups must be set when endpoint type is Interface and vpc_endpoint_subnets is defined") + if not module.params.get('vpc_endpoint_subnets') and module.params.get('vpc_endpoint_security_groups'): + module.fail_json(msg="Parameter vpc_endpoint_subnets must be set when endpoint type is Interface and vpc_endpoint_security_groups is defined") + + try: + ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') + + # Ensure resource is present + if state == 'present': + (changed, results) = setup_creation(ec2, module) + else: + (changed, results) = setup_removal(ec2, module) + + module.exit_json(changed=changed, result=results) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint_info.py new file mode 100644 index 
000000000..11a362812
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint_info.py
@@ -0,0 +1,298 @@
+#!/usr/bin/python
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: ec2_vpc_endpoint_info
+short_description: Retrieves AWS VPC endpoint details using AWS methods
+version_added: 1.0.0
+description:
+  - Gets various details related to AWS VPC endpoints.
+options:
+  query:
+    description:
+      - Defaults to C(endpoints).
+      - Specifies the query action to take.
+      - I(query=endpoints) returns information about AWS VPC endpoints.
+      - Retrieving information about services using I(query=services) has been
+        deprecated in favour of the M(amazon.aws.ec2_vpc_endpoint_service_info) module.
+      - The I(query) option has been deprecated and will be removed after 2022-12-01.
+    required: False
+    choices:
+      - services
+      - endpoints
+    type: str
+  vpc_endpoint_ids:
+    description:
+      - The IDs of specific endpoints to retrieve the details of.
+    type: list
+    elements: str
+  filters:
+    description:
+      - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+        See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcEndpoints.html)
+        for possible filters.
+    type: dict
+    default: {}
+author: Karen Cheng (@Etherdaemon)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Simple example of listing all supported AWS services for VPC endpoints
+- name: List supported AWS endpoint services
+  amazon.aws.ec2_vpc_endpoint_info:
+    query: services
+    region: ap-southeast-2
+  register: supported_endpoint_services
+
+- name: Get all endpoints in ap-southeast-2 region
+  amazon.aws.ec2_vpc_endpoint_info:
+    query: endpoints
+    region: ap-southeast-2
+  register: existing_endpoints
+
+- name: Get all endpoints with specific filters
+  amazon.aws.ec2_vpc_endpoint_info:
+    query: endpoints
+    region: ap-southeast-2
+    filters:
+      vpc-id:
+        - vpc-12345678
+        - vpc-87654321
+      vpc-endpoint-state:
+        - available
+        - pending
+  register: existing_endpoints
+
+- name: Get details on specific endpoint
+  amazon.aws.ec2_vpc_endpoint_info:
+    query: endpoints
+    region: ap-southeast-2
+    vpc_endpoint_ids:
+      - vpce-12345678
+  register: endpoint_details
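+
+# A hedged variation on the queries above (the filter key follows the
+# DescribeVpcEndpoints API; the service name is a placeholder): list every
+# endpoint attached to a given service.
+- name: Get all endpoints for the S3 service
+  amazon.aws.ec2_vpc_endpoint_info:
+    query: endpoints
+    region: ap-southeast-2
+    filters:
+      service-name:
+        - com.amazonaws.ap-southeast-2.s3
+  register: s3_endpoints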
+'''
+
+RETURN = r'''
+service_names:
+  description: AWS VPC endpoint service names.
+  returned: I(query) is C(services)
+  type: list
+  elements: str
+  sample:
+    service_names:
+    - com.amazonaws.ap-southeast-2.s3
+vpc_endpoints:
+  description:
+    - A list of endpoints that match the query. Each endpoint has the keys creation_timestamp,
+      policy_document, route_table_ids, service_name, state, vpc_endpoint_id, vpc_id.
+  returned: I(query) is C(endpoints)
+  type: list
+  elements: dict
+  contains:
+    creation_timestamp:
+      description: The date and time that the endpoint was created.
+      returned: always
+      type: str
+    dns_entries:
+      description: List of DNS entries for the endpoint.
+      returned: always
+      type: list
+      elements: dict
+      contains:
+        dns_name:
+          description: The DNS name.
+          returned: always
+          type: str
+        hosted_zone_id:
+          description: The ID of the private hosted zone.
+          returned: always
+          type: str
+    groups:
+      description: List of security groups associated with the network interface.
+      returned: always
+      type: list
+      elements: dict
+      contains:
+        group_id:
+          description: The ID of the security group.
+          returned: always
+          type: str
+        group_name:
+          description: The name of the security group.
+          returned: always
+          type: str
+    network_interface_ids:
+      description: List of network interfaces for the endpoint.
+      returned: always
+      type: list
+      elements: str
+    owner_id:
+      description: The ID of the AWS account that owns the endpoint.
+      returned: always
+      type: str
+    policy_document:
+      description: The policy document associated with the endpoint.
+      returned: always
+      type: str
+    private_dns_enabled:
+      description: Indicates whether the VPC is associated with a private hosted zone.
+      returned: always
+      type: bool
+    requester_managed:
+      description: Indicates whether the endpoint is being managed by its service.
+      returned: always
+      type: bool
+    route_table_ids:
+      description: List of route table IDs associated with the endpoint.
+      returned: always
+      type: list
+      elements: str
+    service_name:
+      description: The name of the service to which the endpoint is associated.
+      returned: always
+      type: str
+    state:
+      description: The state of the endpoint.
+      returned: always
+      type: str
+    subnet_ids:
+      description: List of subnets associated with the endpoint.
+      returned: always
+      type: list
+      elements: str
+    tags:
+      description: List of tags associated with the endpoint.
+      returned: always
+      type: list
+      elements: dict
+    vpc_endpoint_id:
+      description: The ID of the endpoint.
+      returned: always
+      type: str
+    vpc_endpoint_type:
+      description: The type of endpoint.
+      returned: always
+      type: str
+    vpc_id:
+      description: The ID of the VPC.
+      returned: always
+      type: str
+  sample:
+    vpc_endpoints:
+    - creation_timestamp: "2017-02-16T11:06:48+00:00"
+      policy_document: >
+        "{\"Version\":\"2012-10-17\",\"Id\":\"Policy1450910922815\",
+        \"Statement\":[{\"Sid\":\"Stmt1450910920641\",\"Effect\":\"Allow\",
+        \"Principal\":\"*\",\"Action\":\"s3:*\",\"Resource\":[\"arn:aws:s3:::*/*\",\"arn:aws:s3:::*\"]}]}"
+      route_table_ids:
+      - rtb-abcd1234
+      service_name: "com.amazonaws.ap-southeast-2.s3"
+      state: "available"
+      vpc_endpoint_id: "vpce-abbad0d0"
+      vpc_id: "vpc-1111ffff"
+'''
+
+try:
+    import botocore
+except ImportError:
+    pass  # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+
+
+@AWSRetry.jittered_backoff()
+def _describe_endpoints(client, **params):
+    paginator = client.get_paginator('describe_vpc_endpoints')
+    return paginator.paginate(**params).build_full_result()
+
+
+@AWSRetry.jittered_backoff()
+def _describe_endpoint_services(client, **params):
+    paginator = client.get_paginator('describe_vpc_endpoint_services')
+    return paginator.paginate(**params).build_full_result()
+
+
+def get_supported_services(client, module):
+    try:
+        services = _describe_endpoint_services(client)
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+        module.fail_json_aws(e, msg="Failed to get endpoint services")
+
+    results = list(services['ServiceNames'])
+    return dict(service_names=results)
+
+
+def get_endpoints(client, module):
+    results = list()
+    params = dict()
+    params['Filters'] = 
ansible_dict_to_boto3_filter_list(module.params.get('filters')) + if module.params.get('vpc_endpoint_ids'): + params['VpcEndpointIds'] = module.params.get('vpc_endpoint_ids') + try: + results = _describe_endpoints(client, **params)['VpcEndpoints'] + results = normalize_boto3_result(results) + except is_boto3_error_code('InvalidVpcEndpointId.NotFound'): + module.exit_json(msg='VpcEndpoint {0} does not exist'.format(module.params.get('vpc_endpoint_ids')), vpc_endpoints=[]) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to get endpoints") + + return dict(vpc_endpoints=[camel_dict_to_snake_dict(result) for result in results]) + + +def main(): + argument_spec = dict( + query=dict(choices=['services', 'endpoints'], required=False), + filters=dict(default={}, type='dict'), + vpc_endpoint_ids=dict(type='list', elements='str'), + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + # Validate Requirements + try: + connection = module.client('ec2') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') + + query = module.params.get('query') + if query == 'endpoints': + module.deprecate('The query option has been deprecated and' + ' will be removed after 2022-12-01. Searching for' + ' `endpoints` is now the default and after' + ' 2022-12-01 this module will only support fetching' + ' endpoints.', + date='2022-12-01', collection_name='amazon.aws') + elif query == 'services': + module.deprecate('Support for fetching service information with this ' + 'module has been deprecated and will be removed after' + ' 2022-12-01. ' + 'Please use the ec2_vpc_endpoint_service_info module ' + 'instead.', date='2022-12-01', + collection_name='amazon.aws') + else: + query = 'endpoints' + + invocations = { + 'services': get_supported_services, + 'endpoints': get_endpoints, + } + results = invocations[query](connection, module) + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint_service_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint_service_info.py new file mode 100644 index 000000000..fefd39421 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint_service_info.py @@ -0,0 +1,180 @@ +#!/usr/bin/python +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +module: ec2_vpc_endpoint_service_info +short_description: Retrieves AWS VPC endpoint service details +version_added: 1.5.0 +description: + - Gets details related to AWS VPC Endpoint Services. +options: + filters: + description: + - A dict of filters to apply. + - Each dict item consists of a filter key and a filter value. + See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcEndpointServices.html) + for possible filters. + type: dict + default: {} + service_names: + description: + - A list of service names which can be used to narrow the search results. 
+    type: list
+    elements: str
+author:
+  - Mark Chappell (@tremble)
+extends_documentation_fragment:
+  - amazon.aws.aws
+  - amazon.aws.ec2
+  - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Simple example of listing all supported AWS services for VPC endpoints
+- name: List supported AWS endpoint services
+  amazon.aws.ec2_vpc_endpoint_service_info:
+    region: ap-southeast-2
+  register: supported_endpoint_services
+'''
+
+RETURN = r'''
+service_names:
+  description: List of supported AWS VPC endpoint service names.
+  returned: success
+  type: list
+  sample:
+    service_names:
+    - com.amazonaws.ap-southeast-2.s3
+service_details:
+  description: Detailed information about the AWS VPC endpoint services.
+  returned: success
+  type: complex
+  contains:
+    service_name:
+      returned: success
+      description: The name of the endpoint service.
+      type: str
+    service_id:
+      returned: success
+      description: The ID of the endpoint service.
+      type: str
+    service_type:
+      returned: success
+      description: The type of the service.
+      type: list
+    availability_zones:
+      returned: success
+      description: The Availability Zones in which the service is available.
+      type: list
+    owner:
+      returned: success
+      description: The AWS account ID of the service owner.
+      type: str
+    base_endpoint_dns_names:
+      returned: success
+      description: The DNS names for the service.
+      type: list
+    private_dns_name:
+      returned: success
+      description: The private DNS name for the service.
+      type: str
+    private_dns_names:
+      returned: success
+      description: The private DNS names assigned to the VPC endpoint service.
+      type: list
+    vpc_endpoint_policy_supported:
+      returned: success
+      description: Whether the service supports endpoint policies.
+      type: bool
+    acceptance_required:
+      returned: success
+      description:
+        Whether VPC endpoint connection requests to the service must be
+        accepted by the service owner.
+      type: bool
+    manages_vpc_endpoints:
+      returned: success
+      description: Whether the service manages its VPC endpoints.
+      type: bool
+    tags:
+      returned: success
+      description: A dict of tags associated with the service.
+      type: dict
+    private_dns_name_verification_state:
+      returned: success
+      description:
+        - The verification state of the VPC endpoint service.
+        - Consumers of an endpoint service cannot use the private name when the state is not C(verified).
+ type: str +''' + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry + + +# We're using a paginator so we can't use the client decorators +@AWSRetry.jittered_backoff() +def get_services(client, module): + paginator = client.get_paginator('describe_vpc_endpoint_services') + params = {} + if module.params.get("filters"): + params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + + if module.params.get("service_names"): + params['ServiceNames'] = module.params.get("service_names") + + results = paginator.paginate(**params).build_full_result() + return results + + +def normalize_service(service): + normalized = camel_dict_to_snake_dict(service, ignore_list=['Tags']) + normalized["tags"] = boto3_tag_list_to_ansible_dict(service.get('Tags')) + return normalized + + +def normalize_result(result): + normalized = {} + normalized['service_details'] = [normalize_service(service) for service in result.get('ServiceDetails')] + normalized['service_names'] = result.get('ServiceNames', []) + return normalized + + +def main(): + argument_spec = dict( + filters=dict(default={}, type='dict'), + service_names=dict(type='list', elements='str'), + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + # Validate Requirements + try: + client = module.client('ec2') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') + + try: + results = get_services(client, module) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to retrieve service details') + normalized_result = normalize_result(results) + + module.exit_json(changed=False, **normalized_result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_igw.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_igw.py new file mode 100644 index 000000000..99106b03c --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_igw.py @@ -0,0 +1,266 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_vpc_igw +version_added: 1.0.0 +short_description: Manage an AWS VPC Internet gateway +description: + - Manage an AWS VPC Internet gateway +author: Robert Estelle (@erydo) +options: + vpc_id: + description: + - The VPC ID for the VPC in which to manage the Internet Gateway. + required: true + type: str + state: + description: + - Create or terminate the IGW + default: present + choices: [ 'present', 'absent' ] + type: str +notes: +- Support for I(purge_tags) was added in release 1.3.0. 
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.tags
+- amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Ensure that the VPC has an Internet Gateway.
+# The Internet Gateway ID can be accessed via {{igw.gateway_id}} for use in setting up NATs etc.
+- name: Create Internet gateway
+  amazon.aws.ec2_vpc_igw:
+    vpc_id: vpc-abcdefgh
+    state: present
+  register: igw
+
+- name: Create Internet gateway with tags
+  amazon.aws.ec2_vpc_igw:
+    vpc_id: vpc-abcdefgh
+    state: present
+    tags:
+      Tag1: tag1
+      Tag2: tag2
+  register: igw
+
+- name: Delete Internet gateway
+  amazon.aws.ec2_vpc_igw:
+    state: absent
+    vpc_id: vpc-abcdefgh
+  register: vpc_igw_delete
+'''
+
+RETURN = '''
+changed:
+  description: If any changes have been made to the Internet Gateway.
+  type: bool
+  returned: always
+  sample:
+    changed: false
+gateway_id:
+  description: The unique identifier for the Internet Gateway.
+  type: str
+  returned: I(state=present)
+  sample:
+    gateway_id: "igw-XXXXXXXX"
+tags:
+  description: The tags associated with the Internet Gateway.
+  type: dict
+  returned: I(state=present)
+  sample:
+    tags:
+      "Ansible": "Test"
+vpc_id:
+  description: The VPC ID associated with the Internet Gateway.
+  type: str
+  returned: I(state=present)
+  sample:
+    vpc_id: "vpc-XXXXXXXX"
+'''
+
+try:
+    import botocore
+except ImportError:
+    pass  # caught by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+
+
+@AWSRetry.jittered_backoff(retries=10, delay=10)
+def describe_igws_with_backoff(connection, **params):
+    paginator = connection.get_paginator('describe_internet_gateways')
+    return paginator.paginate(**params).build_full_result()['InternetGateways']
+
+
+class AnsibleEc2Igw():
+
+    def __init__(self, module, results):
+        self._module = module
+        self._results = results
+        self._connection = self._module.client(
+            'ec2', retry_decorator=AWSRetry.jittered_backoff()
+        )
+        self._check_mode = self._module.check_mode
+
+    def process(self):
+        vpc_id = self._module.params.get('vpc_id')
+        state = self._module.params.get('state', 'present')
+        tags = self._module.params.get('tags')
+        purge_tags = self._module.params.get('purge_tags')
+
+        if state == 'present':
+            self.ensure_igw_present(vpc_id, tags, purge_tags)
+        elif state == 'absent':
+            self.ensure_igw_absent(vpc_id)
+
+    def get_matching_igw(self, vpc_id, gateway_id=None):
+        '''
+        Returns the internet gateway found.
+ Parameters: + vpc_id (str): VPC ID + gateway_id (str): Internet Gateway ID, if specified + Returns: + igw (dict): dict of igw found, None if none found + ''' + filters = ansible_dict_to_boto3_filter_list({'attachment.vpc-id': vpc_id}) + try: + # If we know the gateway_id, use it to avoid bugs with using filters + # See https://github.com/ansible-collections/amazon.aws/pull/766 + if not gateway_id: + igws = describe_igws_with_backoff(self._connection, Filters=filters) + else: + igws = describe_igws_with_backoff(self._connection, InternetGatewayIds=[gateway_id]) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self._module.fail_json_aws(e) + + igw = None + if len(igws) > 1: + self._module.fail_json( + msg='EC2 returned more than one Internet Gateway for VPC {0}, aborting' + .format(vpc_id)) + elif igws: + igw = camel_dict_to_snake_dict(igws[0]) + + return igw + + @staticmethod + def get_igw_info(igw, vpc_id): + return { + 'gateway_id': igw['internet_gateway_id'], + 'tags': boto3_tag_list_to_ansible_dict(igw['tags']), + 'vpc_id': vpc_id + } + + def ensure_igw_absent(self, vpc_id): + igw = self.get_matching_igw(vpc_id) + if igw is None: + return self._results + + if self._check_mode: + self._results['changed'] = True + return self._results + + try: + self._results['changed'] = True + self._connection.detach_internet_gateway( + aws_retry=True, + InternetGatewayId=igw['internet_gateway_id'], + VpcId=vpc_id + ) + self._connection.delete_internet_gateway( + aws_retry=True, + InternetGatewayId=igw['internet_gateway_id'] + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self._module.fail_json_aws(e, msg="Unable to delete Internet Gateway") + + return self._results + + def ensure_igw_present(self, vpc_id, tags, purge_tags): + igw = self.get_matching_igw(vpc_id) + + if igw is None: + if self._check_mode: + self._results['changed'] = True + self._results['gateway_id'] = None + return self._results + + try: + response = self._connection.create_internet_gateway(aws_retry=True) + + # Ensure the gateway exists before trying to attach it or add tags + waiter = get_waiter(self._connection, 'internet_gateway_exists') + waiter.wait(InternetGatewayIds=[response['InternetGateway']['InternetGatewayId']]) + + igw = camel_dict_to_snake_dict(response['InternetGateway']) + self._connection.attach_internet_gateway( + aws_retry=True, + InternetGatewayId=igw['internet_gateway_id'], + VpcId=vpc_id + ) + + # Ensure the gateway is attached before proceeding + waiter = get_waiter(self._connection, 'internet_gateway_attached') + waiter.wait(InternetGatewayIds=[igw['internet_gateway_id']]) + self._results['changed'] = True + except botocore.exceptions.WaiterError as e: + self._module.fail_json_aws(e, msg="No Internet Gateway exists.") + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self._module.fail_json_aws(e, msg='Unable to create Internet Gateway') + + # Modify tags + self._results['changed'] |= ensure_ec2_tags( + self._connection, self._module, igw['internet_gateway_id'], + resource_type='internet-gateway', tags=tags, purge_tags=purge_tags, + retry_codes='InvalidInternetGatewayID.NotFound' + ) + + # Update igw + igw = self.get_matching_igw(vpc_id, gateway_id=igw['internet_gateway_id']) + igw_info = self.get_igw_info(igw, vpc_id) + self._results.update(igw_info) + + return self._results + + +def main(): + argument_spec = dict( + vpc_id=dict(required=True), + state=dict(default='present', choices=['present', 
        'absent']),
+        tags=dict(required=False, type='dict', aliases=['resource_tags']),
+        purge_tags=dict(default=True, type='bool'),
+    )
+
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+    results = dict(
+        changed=False
+    )
+    igw_manager = AnsibleEc2Igw(module=module, results=results)
+    igw_manager.process()
+
+    module.exit_json(**results)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_igw_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_igw_info.py
new file mode 100644
index 000000000..5e7c1a0af
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_igw_info.py
@@ -0,0 +1,177 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_vpc_igw_info
+version_added: 1.0.0
+short_description: Gather information about internet gateways in AWS
+description:
+    - Gather information about internet gateways in AWS.
+author: "Nick Aslanidis (@naslanidis)"
+options:
+  filters:
+    description:
+      - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+        See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInternetGateways.html) for possible filters.
+    type: dict
+    default: {}
+  internet_gateway_ids:
+    description:
+      - Get details of specific Internet Gateway IDs. Provide this value as a list.
+    type: list
+    elements: str
+  convert_tags:
+    description:
+      - Convert tags from boto3 format (list of dictionaries) to the standard dictionary format.
+      - Prior to release 4.0.0 this defaulted to C(False).
+    default: True
+    type: bool
+    version_added: 1.3.0
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''

+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all Internet Gateways for an account or profile
+  amazon.aws.ec2_vpc_igw_info:
+    region: ap-southeast-2
+    profile: production
+  register: igw_info
+
+- name: Gather information about a filtered list of Internet Gateways
+  amazon.aws.ec2_vpc_igw_info:
+    region: ap-southeast-2
+    profile: production
+    filters:
+      "tag:Name": "igw-123"
+  register: igw_info
+
+- name: Gather information about a specific internet gateway by InternetGatewayId
+  amazon.aws.ec2_vpc_igw_info:
+    region: ap-southeast-2
+    profile: production
+    internet_gateway_ids: igw-c1231234
+  register: igw_info
+'''
+
+RETURN = r'''
+changed:
+  description: True if listing the internet gateways succeeds.
+  type: bool
+  returned: always
+  sample: false
+internet_gateways:
+  description: The internet gateways for the account.
+  returned: always
+  type: complex
+  contains:
+    attachments:
+      description: Any VPCs attached to the internet gateway.
+      returned: I(state=present)
+      type: complex
+      contains:
+        state:
+          description: The current state of the attachment.
+          returned: I(state=present)
+          type: str
+          sample: available
+        vpc_id:
+          description: The ID of the VPC.
+          returned: I(state=present)
+          type: str
+          sample: vpc-02123b67
+    internet_gateway_id:
+      description: The ID of the internet gateway.
+      returned: I(state=present)
+      type: str
+      sample: igw-2123634d
+    tags:
+      description: Any tags assigned to the internet gateway.
+      returned: I(state=present)
+      type: dict
+      sample:
+        tags:
+          "Ansible": "Test"
+'''
+
+try:
+    import botocore
+except ImportError:
+    pass  # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def get_internet_gateway_info(internet_gateway, convert_tags):
+    if convert_tags:
+        tags = boto3_tag_list_to_ansible_dict(internet_gateway['Tags'])
+        ignore_list = ["Tags"]
+    else:
+        tags = internet_gateway['Tags']
+        ignore_list = []
+    internet_gateway_info = {'InternetGatewayId': internet_gateway['InternetGatewayId'],
+                             'Attachments': internet_gateway['Attachments'],
+                             'Tags': tags}
+
+    internet_gateway_info = camel_dict_to_snake_dict(internet_gateway_info, ignore_list=ignore_list)
+    return internet_gateway_info
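+
+# Illustrative trace only (assumed input, not from the upstream tests): with
+# convert_tags=True the boto3 tag list is flattened and all keys are snake_cased.
+#
+#   igw = {'InternetGatewayId': 'igw-2123634d', 'Attachments': [],
+#          'Tags': [{'Key': 'Name', 'Value': 'example'}]}
+#   get_internet_gateway_info(igw, convert_tags=True)
+#   # -> {'internet_gateway_id': 'igw-2123634d', 'attachments': [],
+#   #     'tags': {'Name': 'example'}}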
+
+
+def list_internet_gateways(connection, module):
+    params = dict()
+
+    params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+    convert_tags = module.params.get('convert_tags')
+
+    if module.params.get("internet_gateway_ids"):
+        params['InternetGatewayIds'] = module.params.get("internet_gateway_ids")
+
+    try:
+        all_internet_gateways = connection.describe_internet_gateways(aws_retry=True, **params)
+    except is_boto3_error_code('InvalidInternetGatewayID.NotFound'):
+        module.fail_json(msg='InternetGateway not found')
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, 'Unable to describe internet gateways')
+
+    return [get_internet_gateway_info(igw, convert_tags)
+            for igw in all_internet_gateways['InternetGateways']]
+
+
+def main():
+    argument_spec = dict(
+        filters=dict(type='dict', default=dict()),
+        internet_gateway_ids=dict(type='list', default=None, elements='str'),
+        convert_tags=dict(type='bool', default=True),
+    )
+
+    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+    # Validate Requirements
+    try:
+        connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+    results = list_internet_gateways(connection, module)
+
+    module.exit_json(internet_gateways=results)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_nat_gateway.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_nat_gateway.py
new file mode 100644
index 000000000..38bdf34f5
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_nat_gateway.py
@@ -0,0 +1,967 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_vpc_nat_gateway
+version_added: 1.0.0
+short_description: Manage AWS VPC NAT Gateways
+description:
+  - Ensure the state of AWS VPC NAT Gateways based on their id, allocation and subnet ids.
+options:
+  state:
+    description:
+      - Ensure NAT Gateway is present or absent.
+    default: "present"
+    choices: ["present", "absent"]
+    type: str
+  nat_gateway_id:
+    description:
+      - The ID AWS dynamically allocates to the NAT Gateway on creation.
+        This is required when I(state=absent).
+    type: str
+  subnet_id:
+    description:
+      - The ID of the subnet to create the NAT Gateway in. This is required
+        when I(state=present).
+    type: str
+  allocation_id:
+    description:
+      - The ID of the Elastic IP allocation. If neither this nor I(eip_address)
+        is passed, an EIP is generated for this NAT Gateway.
+    type: str
+  connectivity_type:
+    description:
+      - Indicates whether the NAT gateway supports public or private connectivity.
+    choices: ["public", "private"]
+    default: "public"
+    type: str
+    version_added: 5.5.0
+  eip_address:
+    description:
+      - The Elastic IP address of the EIP you want attached to this NAT Gateway.
+        If this is not passed and the I(allocation_id) is not passed,
+        an EIP is generated for this NAT Gateway.
+    type: str
+  if_exist_do_not_create:
+    description:
+      - If a NAT Gateway already exists in the I(subnet_id), then do not create a new one.
+    required: false
+    default: false
+    type: bool
+  release_eip:
+    description:
+      - Deallocate the EIP from the VPC.
+      - Option is only valid with the absent state.
+      - You should use this with the I(wait) option, since you cannot release
+        an address while a delete operation is happening.
+    default: false
+    type: bool
+  wait:
+    description:
+      - Wait for operation to complete before returning.
+    default: false
+    type: bool
+  wait_timeout:
+    description:
+      - How many seconds to wait for an operation to complete before timing out.
+    default: 320
+    type: int
+  client_token:
+    description:
+      - Optional unique token to be used during create to ensure idempotency.
+        When specifying this option, ensure you specify the I(eip_address) parameter
+        as well, otherwise any subsequent runs will fail.
+    type: str
+author:
+  - Allen Sanabria (@linuxdynasty)
+  - Jon Hadfield (@jonhadfield)
+  - Karen Cheng (@Etherdaemon)
+  - Alina Buzachis (@alinabuzachis)
+notes:
+  - Support for I(tags) and I(purge_tags) was added in release 1.4.0.
+extends_documentation_fragment:
+  - amazon.aws.aws
+  - amazon.aws.ec2
+  - amazon.aws.tags
+  - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Create new nat gateway with client token.
+  amazon.aws.ec2_vpc_nat_gateway:
+    state: present
+    subnet_id: subnet-12345678
+    eip_address: 52.1.1.1
+    region: ap-southeast-2
+    client_token: abcd-12345678
+  register: new_nat_gateway
+
+- name: Create new nat gateway using an allocation-id and connectivity type.
+  amazon.aws.ec2_vpc_nat_gateway:
+    state: present
+    subnet_id: subnet-12345678
+    allocation_id: eipalloc-12345678
+    connectivity_type: "private"
+    region: ap-southeast-2
+  register: new_nat_gateway
+
+- name: Create new nat gateway, using an EIP address and wait for available status.
+  amazon.aws.ec2_vpc_nat_gateway:
+    state: present
+    subnet_id: subnet-12345678
+    eip_address: 52.1.1.1
+    wait: true
+    region: ap-southeast-2
+  register: new_nat_gateway
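+
+# A hedged variation on the examples above (IDs are placeholders): raise the
+# wait timeout above the 320 second default for slower regions.
+- name: Create new nat gateway with a longer wait timeout.
+  amazon.aws.ec2_vpc_nat_gateway:
+    state: present
+    subnet_id: subnet-12345678
+    allocation_id: eipalloc-12345678
+    wait: true
+    wait_timeout: 600
+    region: ap-southeast-2
+  register: new_nat_gateway
+
+- name: Create new nat gateway and allocate new EIP.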
+ amazon.aws.ec2_vpc_nat_gateway: + state: present + subnet_id: subnet-12345678 + wait: true + region: ap-southeast-2 + register: new_nat_gateway + +- name: Create new nat gateway and allocate new EIP if a nat gateway does not yet exist in the subnet. + amazon.aws.ec2_vpc_nat_gateway: + state: present + subnet_id: subnet-12345678 + wait: true + region: ap-southeast-2 + if_exist_do_not_create: true + register: new_nat_gateway + +- name: Delete nat gateway using discovered nat gateways from facts module. + amazon.aws.ec2_vpc_nat_gateway: + state: absent + region: ap-southeast-2 + wait: true + nat_gateway_id: "{{ item.NatGatewayId }}" + release_eip: true + register: delete_nat_gateway_result + loop: "{{ gateways_to_remove.result }}" + +- name: Delete nat gateway and wait for deleted status. + amazon.aws.ec2_vpc_nat_gateway: + state: absent + nat_gateway_id: nat-12345678 + wait: true + wait_timeout: 500 + region: ap-southeast-2 + +- name: Delete nat gateway and release EIP. + amazon.aws.ec2_vpc_nat_gateway: + state: absent + nat_gateway_id: nat-12345678 + release_eip: true + wait: true + wait_timeout: 300 + region: ap-southeast-2 + +- name: Create new nat gateway using allocation-id and tags. + amazon.aws.ec2_vpc_nat_gateway: + state: present + subnet_id: subnet-12345678 + allocation_id: eipalloc-12345678 + region: ap-southeast-2 + tags: + Tag1: tag1 + Tag2: tag2 + register: new_nat_gateway + +- name: Update tags without purge + amazon.aws.ec2_vpc_nat_gateway: + subnet_id: subnet-12345678 + allocation_id: eipalloc-12345678 + region: ap-southeast-2 + purge_tags: false + tags: + Tag3: tag3 + wait: true + register: update_tags_nat_gateway +''' + +RETURN = r''' +create_time: + description: The ISO 8601 date time format in UTC. + returned: In all cases. + type: str + sample: "2016-03-05T05:19:20.282000+00:00'" +nat_gateway_id: + description: id of the VPC NAT Gateway + returned: In all cases. + type: str + sample: "nat-0d1e3a878585988f8" +subnet_id: + description: id of the Subnet + returned: In all cases. + type: str + sample: "subnet-12345" +state: + description: The current state of the NAT Gateway. + returned: In all cases. + type: str + sample: "available" +tags: + description: The tags associated the VPC NAT Gateway. + type: dict + returned: When tags are present. + sample: + tags: + "Ansible": "Test" +vpc_id: + description: id of the VPC. + returned: In all cases. + type: str + sample: "vpc-12345" +nat_gateway_addresses: + description: List of dictionaries containing the public_ip, network_interface_id, private_ip, and allocation_id. + returned: In all cases. 
+  type: list
+  elements: dict
+  sample: [
+      {
+          'public_ip': '52.52.52.52',
+          'network_interface_id': 'eni-12345',
+          'private_ip': '10.0.0.100',
+          'allocation_id': 'eipalloc-12345'
+      }
+  ]
+'''
+
+import datetime
+
+try:
+    import botocore
+except ImportError:
+    pass  # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import describe_ec2_tags
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications
+
+
+@AWSRetry.jittered_backoff(retries=10)
+def _describe_nat_gateways(client, **params):
+    try:
+        paginator = client.get_paginator('describe_nat_gateways')
+        return paginator.paginate(**params).build_full_result()['NatGateways']
+    except is_boto3_error_code('InvalidNatGatewayID.NotFound'):
+        return None
+
+
+def wait_for_status(client, module, waiter_name, nat_gateway_id):
+    wait_timeout = module.params.get('wait_timeout')
+    try:
+        waiter = get_waiter(client, waiter_name)
+        attempts = 1 + int(wait_timeout / waiter.config.delay)
+        waiter.wait(
+            NatGatewayIds=[nat_gateway_id],
+            WaiterConfig={'MaxAttempts': attempts}
+        )
+    except botocore.exceptions.WaiterError as e:
+        module.fail_json_aws(e, msg="NAT gateway failed to reach expected state.")
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Unable to wait for NAT gateway state to update.")
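+
+# Illustrative call only (the waiter name comes from the module_utils waiters
+# registry; the gateway ID is a placeholder): block until a new gateway reports
+# 'available', honouring the module's wait_timeout parameter.
+#
+#   wait_for_status(client, module, 'nat_gateway_available', 'nat-0d1e3a878585988f8')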
+
+
+def get_nat_gateways(client, module, subnet_id=None, nat_gateway_id=None, states=None):
+    """Retrieve a list of NAT Gateways
+    Args:
+        client (botocore.client.EC2): Boto3 client
+        module: AnsibleAWSModule class instance
+
+    Kwargs:
+        subnet_id (str): The subnet_id the nat resides in.
+        nat_gateway_id (str): The Amazon NAT id.
+        states (list): States available (pending, failed, available, deleting, and deleted)
+            default=None
+
+    Basic Usage:
+        >>> client = boto3.client('ec2')
+        >>> module = AnsibleAWSModule(...)
+        >>> subnet_id = 'subnet-12345678'
+        >>> get_nat_gateways(client, module, subnet_id)
+        [
+            {
+                "create_time": "2016-03-05T00:33:21.209000+00:00",
+                "delete_time": "2016-03-05T00:36:37.329000+00:00",
+                "nat_gateway_addresses": [
+                    {
+                        "public_ip": "55.55.55.55",
+                        "network_interface_id": "eni-1234567",
+                        "private_ip": "10.0.0.102",
+                        "allocation_id": "eipalloc-1234567"
+                    }
+                ],
+                "nat_gateway_id": "nat-123456789",
+                "state": "deleted",
+                "subnet_id": "subnet-123456789",
+                "tags": {},
+                "vpc_id": "vpc-12345678"
+            }
+        ]
+
+    Returns:
+        list
+    """
+
+    params = dict()
+    existing_gateways = list()
+
+    if not states:
+        states = ['available', 'pending']
+    if nat_gateway_id:
+        params['NatGatewayIds'] = [nat_gateway_id]
+    else:
+        params['Filter'] = [
+            {
+                'Name': 'subnet-id',
+                'Values': [subnet_id]
+            },
+            {
+                'Name': 'state',
+                'Values': states
+            }
+        ]
+
+    try:
+        gateways = _describe_nat_gateways(client, **params)
+        if gateways:
+            for gw in gateways:
+                existing_gateways.append(camel_dict_to_snake_dict(gw))
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e)
+
+    return existing_gateways
+
+
+def gateway_in_subnet_exists(client, module, subnet_id, allocation_id=None):
+    """Retrieve all NAT Gateways for a subnet.
+    Args:
+        client (botocore.client.EC2): Boto3 client
+        module: AnsibleAWSModule class instance
+        subnet_id (str): The subnet_id the nat resides in.
+
+    Kwargs:
+        allocation_id (str): The EIP Amazon identifier.
+            default = None
+
+    Basic Usage:
+        >>> client = boto3.client('ec2')
+        >>> module = AnsibleAWSModule(...)
+        >>> subnet_id = 'subnet-1234567'
+        >>> allocation_id = 'eipalloc-1234567'
+        >>> gateway_in_subnet_exists(client, module, subnet_id, allocation_id)
+        (
+            [
+                {
+                    "create_time": "2016-03-05T00:33:21.209000+00:00",
+                    "delete_time": "2016-03-05T00:36:37.329000+00:00",
+                    "nat_gateway_addresses": [
+                        {
+                            "public_ip": "55.55.55.55",
+                            "network_interface_id": "eni-1234567",
+                            "private_ip": "10.0.0.102",
+                            "allocation_id": "eipalloc-1234567"
+                        }
+                    ],
+                    "nat_gateway_id": "nat-123456789",
+                    "state": "deleted",
+                    "subnet_id": "subnet-123456789",
+                    "tags": {},
+                    "vpc_id": "vpc-1234567"
+                }
+            ],
+            False
+        )
+
+    Returns:
+        Tuple (list, bool)
+    """
+
+    allocation_id_exists = False
+    gateways = []
+    states = ['available', 'pending']
+
+    gws_retrieved = (get_nat_gateways(client, module, subnet_id, states=states))
+
+    if gws_retrieved:
+        for gw in gws_retrieved:
+            for address in gw['nat_gateway_addresses']:
+                if allocation_id:
+                    if address.get('allocation_id') == allocation_id:
+                        allocation_id_exists = True
+                        gateways.append(gw)
+                else:
+                    gateways.append(gw)
+
+    return gateways, allocation_id_exists
+
+
+def get_eip_allocation_id_by_address(client, module, eip_address):
+    """Look up the allocation ID of an Elastic IP address
+    Args:
+        client (botocore.client.EC2): Boto3 client
+        module: AnsibleAWSModule class instance
+        eip_address (str): The Elastic IP Address of the EIP.
+
+    Basic Usage:
+        >>> client = boto3.client('ec2')
+        >>> module = AnsibleAWSModule(...)
+        >>> eip_address = '52.87.29.36'
+        >>> get_eip_allocation_id_by_address(client, module, eip_address)
+        (
+            'eipalloc-36014da3', ''
+        )
+
+    Returns:
+        Tuple (str, str)
+    """
+
+    params = {
+        'PublicIps': [eip_address],
+    }
+    allocation_id = None
+    msg = ''
+
+    try:
+        allocations = client.describe_addresses(aws_retry=True, **params)['Addresses']
+
+        if len(allocations) == 1:
+            allocation = allocations[0]
+        else:
+            allocation = None
+
+        if allocation:
+            if allocation.get('Domain') != 'vpc':
+                msg = (
+                    "EIP {0} is a non-VPC EIP, please allocate a VPC scoped EIP"
+                    .format(eip_address)
+                )
+            else:
+                allocation_id = allocation.get('AllocationId')
+
+    except is_boto3_error_code('InvalidAddress.Malformed'):
+        module.fail_json(msg='EIP address {0} is invalid.'.format(eip_address))
+    except is_boto3_error_code('InvalidAddress.NotFound'):  # pylint: disable=duplicate-except
+        msg = (
+            "EIP {0} does not exist".format(eip_address)
+        )
+        allocation_id = None
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg="Unable to describe EIP")
+
+    return allocation_id, msg
+
+
+def allocate_eip_address(client, module):
+    """Allocate a new VPC-scoped EIP from your EIP Pool
+    Args:
+        client (botocore.client.EC2): Boto3 client
+        module: AnsibleAWSModule class instance
+
+    Basic Usage:
+        >>> client = boto3.client('ec2')
+        >>> module = AnsibleAWSModule(...)
+        >>> allocate_eip_address(client, module)
+        (
+            True, '', ''
+        )
+
+    Returns:
+        Tuple (bool, str, str)
+    """
+
+    new_eip = None
+    msg = ''
+    params = {
+        'Domain': 'vpc',
+    }
+
+    if module.check_mode:
+        ip_allocated = True
+        new_eip = None
+        return ip_allocated, msg, new_eip
+
+    try:
+        new_eip = client.allocate_address(aws_retry=True, **params)['AllocationId']
+        ip_allocated = True
+        msg = 'eipalloc id {0} created'.format(new_eip)
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e)
+
+    return ip_allocated, msg, new_eip
+
+
+def release_address(client, module, allocation_id):
+    """Release an EIP from your EIP Pool
+    Args:
+        client (botocore.client.EC2): Boto3 client
+        module: AnsibleAWSModule class instance
+        allocation_id (str): The eip Amazon identifier.
+
+    Basic Usage:
+        >>> client = boto3.client('ec2')
+        >>> module = AnsibleAWSModule(...)
+        >>> allocation_id = "eipalloc-123456"
+        >>> release_address(client, module, allocation_id)
+        (
+            True, ''
+        )
+
+    Returns:
+        Tuple (bool, str)
+    """
+
+    msg = ''
+
+    if module.check_mode:
+        return True, ''
+
+    ip_released = False
+
+    try:
+        client.describe_addresses(aws_retry=True, AllocationIds=[allocation_id])
+    except is_boto3_error_code('InvalidAllocationID.NotFound') as e:
+        # IP address likely already released
+        # Happens with gateway in 'deleted' state that
+        # still lists associations
+        return True, e
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e)
+
+    try:
+        client.release_address(aws_retry=True, AllocationId=allocation_id)
+        ip_released = True
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e)
+
+    return ip_released, msg
+
+
+def create(client, module, subnet_id, allocation_id, tags, client_token=None,
+           wait=False, connectivity_type='public'):
+    """Create an Amazon NAT Gateway.
+    Args:
+        client (botocore.client.EC2): Boto3 client
+        module: AnsibleAWSModule class instance
+        subnet_id (str): The subnet_id the nat resides in
+        allocation_id (str): The eip Amazon identifier
+        connectivity_type (str): public or private connectivity support
+        tags (dict): Tags to associate to the NAT gateway
+
+    Kwargs:
+        wait (bool): Wait for the nat to be in the available state before returning.
+            default = False
+        client_token (str):
+            default = None
+
+    Basic Usage:
+        >>> client = boto3.client('ec2')
+        >>> module = AnsibleAWSModule(...)
+        >>> subnet_id = 'subnet-1234567'
+        >>> allocation_id = 'eipalloc-1234567'
+        >>> create(client, module, subnet_id, allocation_id, tags={}, wait=True, connectivity_type='public')
+        [
+            true,
+            {
+                "create_time": "2016-03-05T00:33:21.209000+00:00",
+                "delete_time": "2016-03-05T00:36:37.329000+00:00",
+                "nat_gateway_addresses": [
+                    {
+                        "public_ip": "55.55.55.55",
+                        "network_interface_id": "eni-1234567",
+                        "private_ip": "10.0.0.102",
+                        "allocation_id": "eipalloc-1234567"
+                    }
+                ],
+                "nat_gateway_id": "nat-123456789",
+                "state": "deleted",
+                "subnet_id": "subnet-1234567",
+                "tags": {},
+                "vpc_id": "vpc-1234567"
+            },
+            ""
+        ]
+
+    Returns:
+        Tuple (bool, dict, str)
+    """
+
+    params = {
+        'SubnetId': subnet_id,
+        'ConnectivityType': connectivity_type
+    }
+
+    if connectivity_type == "public":
+        params.update({'AllocationId': allocation_id})
+
+    request_time = datetime.datetime.utcnow()
+    changed = False
+    token_provided = False
+    result = {}
+    msg = ''
+
+    if client_token:
+        token_provided = True
+        params['ClientToken'] = client_token
+
+    if tags:
+        params["TagSpecifications"] = boto3_tag_specifications(tags, ['natgateway'])
+
+    if module.check_mode:
+        changed = True
+        return changed, result, msg
+
+    try:
+        result = camel_dict_to_snake_dict(
+            client.create_nat_gateway(aws_retry=True, **params)["NatGateway"]
+        )
+        changed = True
+
+        create_time = result['create_time'].replace(tzinfo=None)
+
+        if token_provided and (request_time > create_time):
+            changed = False
+
+        elif wait and result.get('state') != 'available':
+            wait_for_status(client, module, 'nat_gateway_available', result['nat_gateway_id'])
+
+            # Get new result
+            result = camel_dict_to_snake_dict(
+                _describe_nat_gateways(client, NatGatewayIds=[result['nat_gateway_id']])[0]
+            )
+
+    except is_boto3_error_code('IdempotentParameterMismatch') as e:
+        msg = (
+            'NAT Gateway does not support update and token has already been provided: {0}'.format(str(e))
+        )
+        changed = False
+        result = None
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e)
+
+    if result:
+        result['tags'] = describe_ec2_tags(client, module, result['nat_gateway_id'],
+                                           resource_type='natgateway')
+
+    return changed, result, msg
+
+
+def pre_create(client, module, subnet_id, tags, purge_tags, allocation_id=None, eip_address=None,
+               if_exist_do_not_create=False, wait=False, client_token=None, connectivity_type='public'):
+    """Create an Amazon NAT Gateway, or reuse a matching existing one where allowed.
+    Args:
+        client (botocore.client.EC2): Boto3 client
+        module: AnsibleAWSModule class instance
+        subnet_id (str): The subnet_id the nat resides in
+        tags (dict): Tags to associate to the NAT gateway
+        purge_tags (bool): If true, remove tags not listed in I(tags)
+
+    Kwargs:
+        allocation_id (str): The EIP Amazon identifier.
+            default = None
+        eip_address (str): The Elastic IP Address of the EIP.
+            default = None
+        if_exist_do_not_create (bool): if a NAT gateway already exists in this
+            subnet, then do not create another one.
+            default = False
+        wait (bool): Wait for the nat to be in the available state before returning.
+            default = False
+        client_token (str):
+            default = None
+
+    Basic Usage:
+        >>> client = boto3.client('ec2')
+        >>> module = AnsibleAWSModule(...)
+        >>> subnet_id = 'subnet-w4t12897'
+        >>> allocation_id = 'eipalloc-36014da3'
+        >>> pre_create(client, module, subnet_id, tags={}, purge_tags=True, allocation_id=allocation_id,
+        ...            if_exist_do_not_create=True, wait=True, connectivity_type='public')
+        [
+            true,
+            "",
+            {
+                "create_time": "2016-03-05T00:33:21.209000+00:00",
+                "delete_time": "2016-03-05T00:36:37.329000+00:00",
+                "nat_gateway_addresses": [
+                    {
+                        "public_ip": "52.87.29.36",
+                        "network_interface_id": "eni-5579742d",
+                        "private_ip": "10.0.0.102",
+                        "allocation_id": "eipalloc-36014da3"
+                    }
+                ],
+                "nat_gateway_id": "nat-03835afb6e31df79b",
+                "state": "deleted",
+                "subnet_id": "subnet-w4t12897",
+                "tags": {},
+                "vpc_id": "vpc-w68571b5"
+            }
+        ]
+
+    Returns:
+        Tuple (bool, str, dict)
+    """
+
+    changed = False
+    msg = ''
+    results = {}
+
+    if not allocation_id and not eip_address:
+        existing_gateways, allocation_id_exists = (
+            gateway_in_subnet_exists(client, module, subnet_id)
+        )
+
+        if len(existing_gateways) > 0 and if_exist_do_not_create:
+            results = existing_gateways[0]
+            changed |= ensure_ec2_tags(client, module, results['nat_gateway_id'],
+                                       resource_type='natgateway', tags=tags,
+                                       purge_tags=purge_tags)
+
+            results['tags'] = describe_ec2_tags(client, module, results['nat_gateway_id'],
+                                                resource_type='natgateway')
+
+            if changed:
+                return changed, msg, results
+
+            changed = False
+            msg = (
+                'NAT Gateway {0} already exists in subnet_id {1}'
+                .format(
+                    existing_gateways[0]['nat_gateway_id'], subnet_id
+                )
+            )
+            return changed, msg, results
+        else:
+            changed, msg, allocation_id = (
+                allocate_eip_address(client, module)
+            )
+
+            if not changed:
+                return changed, msg, dict()
+
+    elif eip_address or allocation_id:
+        if eip_address and not allocation_id:
+            allocation_id, msg = (
+                get_eip_allocation_id_by_address(
+                    client, module, eip_address
+                )
+            )
+            if not allocation_id:
+                changed = False
+                return changed, msg, dict()
+
+        existing_gateways, allocation_id_exists = (
+            gateway_in_subnet_exists(
+                client, module, subnet_id, allocation_id
+            )
+        )
+
+        if len(existing_gateways) > 0 and (allocation_id_exists or if_exist_do_not_create):
+            results = existing_gateways[0]
+            changed |= ensure_ec2_tags(client, module, results['nat_gateway_id'],
+                                       resource_type='natgateway', tags=tags,
+                                       purge_tags=purge_tags)
+
+            results['tags'] = describe_ec2_tags(client, module, results['nat_gateway_id'],
+                                                resource_type='natgateway')
+
+            if changed:
+                return changed, msg, results
+
+            changed = False
+            msg = (
+                'NAT Gateway {0} already exists in subnet_id {1}'
+                .format(
+                    existing_gateways[0]['nat_gateway_id'], subnet_id
+                )
+            )
+            return changed, msg, results
+
+    changed, results, msg = create(
+        client, module, subnet_id, allocation_id, tags, client_token, wait, connectivity_type
+    )
+
+    return changed, msg, results
+
+
+def remove(client, module, nat_gateway_id, wait=False, release_eip=False, connectivity_type='public'):
+    """Delete an Amazon NAT Gateway.
+    Args:
+        client (botocore.client.EC2): Boto3 client
+        module: AnsibleAWSModule class instance
+        nat_gateway_id (str): The NAT gateway ID
+
+    Kwargs:
+        wait (bool): Wait for the nat to be in the deleted state before returning.
+        release_eip (bool): Once the nat has been deleted, you can deallocate the eip from the vpc.
+        connectivity_type (str): private/public connection type
+
+    Basic Usage:
+        >>> client = boto3.client('ec2')
+        >>> module = AnsibleAWSModule(...)
+        >>> nat_gw_id = 'nat-03835afb6e31df79b'
+        >>> remove(client, module, nat_gw_id, wait=True, release_eip=True, connectivity_type='public')
+        [
+            true,
+            "",
+            {
+                "create_time": "2016-03-05T00:33:21.209000+00:00",
+                "delete_time": "2016-03-05T00:36:37.329000+00:00",
+                "nat_gateway_addresses": [
+                    {
+                        "public_ip": "52.87.29.36",
+                        "network_interface_id": "eni-5579742d",
+                        "private_ip": "10.0.0.102",
+                        "allocation_id": "eipalloc-36014da3"
+                    }
+                ],
+                "nat_gateway_id": "nat-03835afb6e31df79b",
+                "state": "deleted",
+                "subnet_id": "subnet-w4t12897",
+                "tags": {},
+                "vpc_id": "vpc-w68571b5"
+            }
+        ]
+
+    Returns:
+        Tuple (bool, str, dict)
+    """
+
+    allocation_id = None
+    params = {
+        'NatGatewayId': nat_gateway_id
+    }
+    changed = False
+    results = {}
+    states = ['pending', 'available']
+    msg = ''
+
+    if module.check_mode:
+        changed = True
+        return changed, msg, results
+
+    try:
+        gw_list = (
+            get_nat_gateways(
+                client, module, nat_gateway_id=nat_gateway_id,
+                states=states
+            )
+        )
+
+        if len(gw_list) == 1:
+            results = gw_list[0]
+            client.delete_nat_gateway(aws_retry=True, **params)
+            if connectivity_type == "public":
+                allocation_id = (
+                    results['nat_gateway_addresses'][0]['allocation_id']
+                )
+            changed = True
+            msg = (
+                'NAT gateway {0} is in a deleting state. Delete was successful'
+                .format(nat_gateway_id)
+            )
+
+            if wait and results.get('state') != 'deleted':
+                wait_for_status(client, module, 'nat_gateway_deleted', nat_gateway_id)
+
+                # Get new results
+                results = camel_dict_to_snake_dict(
+                    _describe_nat_gateways(client, NatGatewayIds=[nat_gateway_id])[0]
+                )
+                results['tags'] = describe_ec2_tags(client, module, nat_gateway_id,
+                                                    resource_type='natgateway')
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e)
+
+    if release_eip and allocation_id:
+        eip_released, msg = (
+            release_address(client, module, allocation_id))
+        if not eip_released:
+            module.fail_json(
+                msg="Failed to release EIP {0}: {1}".format(allocation_id, msg)
+            )
+
+    return changed, msg, results
+
+
+def main():
+    argument_spec = dict(
+        subnet_id=dict(type='str'),
+        eip_address=dict(type='str'),
+        allocation_id=dict(type='str'),
+        connectivity_type=dict(type='str', default='public', choices=['private', 'public']),
+        if_exist_do_not_create=dict(type='bool', default=False),
+        state=dict(default='present', choices=['present', 'absent']),
+        wait=dict(type='bool', default=False),
+        wait_timeout=dict(type='int', default=320, required=False),
+        release_eip=dict(type='bool', default=False),
+        nat_gateway_id=dict(type='str'),
+        client_token=dict(type='str', no_log=False),
+        tags=dict(required=False, type='dict', aliases=['resource_tags']),
+        purge_tags=dict(default=True, type='bool'),
+    )
+
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        mutually_exclusive=[
+            ['allocation_id', 'eip_address']
+        ],
+        required_if=[['state', 'absent', ['nat_gateway_id']],
+                     ['state', 'present', ['subnet_id']]],
+    )
+
+    state = module.params.get('state').lower()
+    subnet_id = module.params.get('subnet_id')
+    allocation_id = module.params.get('allocation_id')
+    connectivity_type = module.params.get('connectivity_type')
+    eip_address = module.params.get('eip_address')
+    nat_gateway_id = module.params.get('nat_gateway_id')
+    wait = module.params.get('wait')
+    release_eip = module.params.get('release_eip')
+    client_token = module.params.get('client_token')
+    if_exist_do_not_create = module.params.get('if_exist_do_not_create')
+    tags = module.params.get('tags')
+    purge_tags = module.params.get('purge_tags')
+
+    try:
+        client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg='Failed to connect to AWS.')
+
+    changed = False
+    msg = ''
+
+    if state == 'present':
+        changed, msg, results = (
+            pre_create(
+                client, module, subnet_id, tags, purge_tags, allocation_id, eip_address,
+                if_exist_do_not_create, wait, client_token, connectivity_type
+            )
+        )
+    else:
+        changed, msg, results = (
+            remove(
+                client, module, nat_gateway_id, wait, release_eip, connectivity_type
+            )
+        )
+
+    module.exit_json(msg=msg, changed=changed, **results)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_nat_gateway_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_nat_gateway_info.py
new file mode 100644
index 000000000..45c794e80
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_nat_gateway_info.py
@@ -0,0 +1,217 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: ec2_vpc_nat_gateway_info
+short_description: Retrieves AWS VPC Managed NAT Gateway details using AWS methods
+version_added: 1.0.0
+description:
+  - Gets various details related to AWS VPC managed NAT gateways.
+options:
+  nat_gateway_ids:
+    description:
+      - List of specific NAT gateway IDs to fetch details for.
+    type: list
+    elements: str
+    default: []
+  filters:
+    description:
+      - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+        See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNatGateways.html)
+        for possible filters.
+    type: dict
+    default: {}
+author: Karen Cheng (@Etherdaemon)
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''

+EXAMPLES = r'''
+# Simple example of listing all NAT gateways
+- name: List all managed NAT gateways in ap-southeast-2
+  amazon.aws.ec2_vpc_nat_gateway_info:
+    region: ap-southeast-2
+  register: all_ngws
+
+- name: Debugging the result
+  ansible.builtin.debug:
+    msg: "{{ all_ngws.result }}"
+
+- name: Get details on specific NAT gateways
+  amazon.aws.ec2_vpc_nat_gateway_info:
+    nat_gateway_ids:
+      - nat-1234567891234567
+      - nat-7654321987654321
+    region: ap-southeast-2
+  register: specific_ngws
+
+- name: Get all NAT gateways with specific filters
+  amazon.aws.ec2_vpc_nat_gateway_info:
+    region: ap-southeast-2
+    filters:
+      state: ['pending']
+  register: pending_ngws
+
+- name: Get NAT gateways with specific filter
+  amazon.aws.ec2_vpc_nat_gateway_info:
+    region: ap-southeast-2
+    filters:
+      subnet-id: subnet-12345678
+      state: ['available']
+  register: existing_nat_gateways
+'''
+
+RETURN = r'''
+changed:
+  description: True if listing the NAT gateways succeeds.
+  type: bool
+  returned: always
+  sample: false
+result:
+  description:
+    - The result of the describe, converted to ansible snake case style.
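+    - Each list element describes one NAT gateway.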
+    - See also U(http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.describe_nat_gateways).
+  returned: success
+  type: list
+  contains:
+    create_time:
+      description: The date and time the NAT gateway was created.
+      returned: always
+      type: str
+      sample: "2021-03-11T22:43:25+00:00"
+    delete_time:
+      description: The date and time the NAT gateway was deleted.
+      returned: when the NAT gateway has been deleted
+      type: str
+      sample: "2021-03-11T22:43:25+00:00"
+    nat_gateway_addresses:
+      description: List containing a dictionary with the IP addresses and network interface associated with the NAT gateway.
+      returned: always
+      type: list
+      contains:
+        allocation_id:
+          description: The allocation ID of the Elastic IP address that's associated with the NAT gateway.
+          returned: always
+          type: str
+          sample: eipalloc-0853e66a40803da76
+        network_interface_id:
+          description: The ID of the network interface associated with the NAT gateway.
+          returned: always
+          type: str
+          sample: eni-0a37acdbe306c661c
+        private_ip:
+          description: The private IP address associated with the Elastic IP address.
+          returned: always
+          type: str
+          sample: 10.0.238.227
+        public_ip:
+          description: The Elastic IP address associated with the NAT gateway.
+          returned: always
+          type: str
+          sample: 34.204.123.52
+    nat_gateway_id:
+      description: The ID of the NAT gateway.
+      returned: always
+      type: str
+      sample: nat-0c242a2397acf6173
+    state:
+      description: The state of the NAT gateway.
+      returned: always
+      type: str
+      sample: available
+    subnet_id:
+      description: The ID of the subnet in which the NAT gateway is located.
+      returned: always
+      type: str
+      sample: subnet-098c447465d4344f9
+    vpc_id:
+      description: The ID of the VPC in which the NAT gateway is located.
+      returned: always
+      type: str
+      sample: vpc-02f37f48438ab7d4c
+    tags:
+      description: Tags applied to the NAT gateway.
+      returned: always
+      type: dict
+      sample:
+        Tag1: tag1
+        Tag_2: tag_2
+'''
+
+
+try:
+    import botocore
+except ImportError:
+    pass  # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result
+
+
+@AWSRetry.jittered_backoff(retries=10)
+def _describe_nat_gateways(client, module, **params):
+    try:
+        paginator = client.get_paginator('describe_nat_gateways')
+        return paginator.paginate(**params).build_full_result()['NatGateways']
+    except is_boto3_error_code('InvalidNatGatewayID.NotFound'):
+        module.exit_json(msg="NAT gateway not found.")
+    except is_boto3_error_code('NatGatewayMalformed') as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg="NAT gateway id is malformed.")
+
+
+def get_nat_gateways(client, module):
+    params = dict()
+    nat_gateways = list()
+
+    params['Filter'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
+    params['NatGatewayIds'] = module.params.get('nat_gateway_ids')
+
+    try:
+        result = normalize_boto3_result(_describe_nat_gateways(client, module, **params))
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, 'Unable to describe NAT gateways.')
+
+    for gateway in result:
+        # Turn the boto3 result into ansible_friendly_snaked_names
+        converted_gateway = camel_dict_to_snake_dict(gateway)
+        if 'tags' in converted_gateway:
+            # Turn the boto3 result into an ansible friendly tag dictionary
+            converted_gateway['tags'] = boto3_tag_list_to_ansible_dict(converted_gateway['tags'])
+        nat_gateways.append(converted_gateway)
+
+    return nat_gateways
+
+
+def main():
+    argument_spec = dict(
+        filters=dict(default={}, type='dict'),
+        nat_gateway_ids=dict(default=[], type='list', elements='str'),
+    )
+
+    module = AnsibleAWSModule(argument_spec=argument_spec,
+                              supports_check_mode=True,)
+
+    try:
+        connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+    results = get_nat_gateways(connection, module)
+
+    module.exit_json(result=results)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net.py
new file mode 100644
index 000000000..c7430e989
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net.py
@@ -0,0 +1,720 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_net
+version_added: 1.0.0
+short_description: Configure AWS Virtual Private Clouds
+description:
+  - Create, modify, and terminate AWS Virtual Private Clouds (VPCs).
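+  - An existing VPC is identified by I(vpc_id), or by the combination of
+    I(name) and the first CIDR in I(cidr_block).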
+author: + - Jonathan Davila (@defionscode) + - Sloane Hertel (@s-hertel) +options: + name: + description: + - The name to give your VPC. This is used in combination with I(cidr_block) + to determine if a VPC already exists. + - The value of I(name) overrides any value set for C(Name) in the I(tags) + parameter. + - At least one of I(name) and I(vpc_id) must be specified. + - I(name) must be specified when creating a new VPC. + type: str + vpc_id: + version_added: 4.0.0 + description: + - The ID of the VPC. + - At least one of I(name) and I(vpc_id) must be specified. + - At least one of I(name) and I(cidr_block) must be specified. + type: str + cidr_block: + description: + - The primary CIDR of the VPC. + - The first in the list will be used as the primary CIDR + and is used in conjunction with I(name) to ensure idempotence. + - Required when I(vpc_id) is not set. + type: list + elements: str + ipv6_cidr: + description: + - Request an Amazon-provided IPv6 CIDR block with /56 prefix length. You cannot specify the range of IPv6 addresses, + or the size of the CIDR block. + - Default value is C(false) when creating a new VPC. + type: bool + purge_cidrs: + description: + - Remove CIDRs that are associated with the VPC and are not specified in I(cidr_block). + default: false + type: bool + tenancy: + description: + - Whether to be default or dedicated tenancy. + - This cannot be changed after the VPC has been created. + default: default + choices: [ 'default', 'dedicated' ] + type: str + dns_support: + description: + - Whether to enable AWS DNS support. + - Default value is C(true) when creating a new VPC. + type: bool + dns_hostnames: + description: + - Whether to enable AWS hostname support. + - Default value is C(true) when creating a new VPC. + type: bool + dhcp_opts_id: + description: + - The id of the DHCP options to use for this VPC. + type: str + state: + description: + - The state of the VPC. Either absent or present. + default: present + choices: [ 'present', 'absent' ] + type: str + multi_ok: + description: + - By default the module will not create another VPC if there is another VPC with the same name and CIDR block. + Specify I(multi_ok=true) if you want duplicate VPCs created. + type: bool + default: false +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags + - amazon.aws.boto3 +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
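+
+# Removal uses the same name/CIDR match as creation; the values below are illustrative.
+- name: remove a VPC by name and CIDR block
+  amazon.aws.ec2_vpc_net:
+    name: Module_dev2
+    cidr_block: 10.10.0.0/16
+    state: absent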
+ +- name: create a VPC with dedicated tenancy and a couple of tags + amazon.aws.ec2_vpc_net: + name: Module_dev2 + cidr_block: 10.10.0.0/16 + region: us-east-1 + tags: + module: ec2_vpc_net + this: works + tenancy: dedicated + +- name: create a VPC with dedicated tenancy and request an IPv6 CIDR + amazon.aws.ec2_vpc_net: + name: Module_dev2 + cidr_block: 10.10.0.0/16 + ipv6_cidr: True + region: us-east-1 + tenancy: dedicated +''' + +RETURN = ''' +vpc: + description: info about the VPC that was created or deleted + returned: always + type: complex + contains: + cidr_block: + description: The CIDR of the VPC + returned: always + type: str + sample: 10.0.0.0/16 + cidr_block_association_set: + description: IPv4 CIDR blocks associated with the VPC + returned: success + type: list + sample: + "cidr_block_association_set": [ + { + "association_id": "vpc-cidr-assoc-97aeeefd", + "cidr_block": "10.0.0.0/24", + "cidr_block_state": { + "state": "associated" + } + } + ] + classic_link_enabled: + description: indicates whether ClassicLink is enabled + returned: always + type: bool + sample: false + dhcp_options_id: + description: the id of the DHCP options associated with this VPC + returned: always + type: str + sample: dopt-12345678 + id: + description: VPC resource id + returned: always + type: str + sample: vpc-12345678 + name: + description: The Name tag of the VPC. + returned: When the Name tag has been set on the VPC + type: str + sample: MyVPC + version_added: 4.0.0 + instance_tenancy: + description: indicates whether VPC uses default or dedicated tenancy + returned: always + type: str + sample: default + ipv6_cidr_block_association_set: + description: IPv6 CIDR blocks associated with the VPC + returned: success + type: list + sample: + "ipv6_cidr_block_association_set": [ + { + "association_id": "vpc-cidr-assoc-97aeeefd", + "ipv6_cidr_block": "2001:db8::/56", + "ipv6_cidr_block_state": { + "state": "associated" + } + } + ] + is_default: + description: indicates whether this is the default VPC + returned: always + type: bool + sample: false + state: + description: state of the VPC + returned: always + type: str + sample: available + tags: + description: tags attached to the VPC, includes name + returned: always + type: complex + contains: + Name: + description: name tag for the VPC + returned: always + type: str + sample: pk_vpc4 + owner_id: + description: The AWS account which owns the VPC. 
+    returned: always
+    type: str
+    sample: 123456789012
+'''
+
+from time import sleep
+from time import time
+
+try:
+    import botocore
+except ImportError:
+    pass  # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.network import to_subnet
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+
+
+def vpc_exists(module, vpc, name, cidr_block, multi):
+    """Returns the ID of a matching VPC, or None if no single match exists.
+    A VPC is matched by its Name tag together with the supplied CIDR block(s);
+    if no VPC matches the full CIDR list, the first CIDR alone is tried, as
+    documented for C(cidr_block).
+    """
+    try:
+        vpc_filters = ansible_dict_to_boto3_filter_list({'tag:Name': name, 'cidr-block': cidr_block})
+        matching_vpcs = vpc.describe_vpcs(aws_retry=True, Filters=vpc_filters)['Vpcs']
+        # If no exact match on the full list of CIDRs is found, check for a match on the first CIDR only, as documented for C(cidr_block)
+        if not matching_vpcs:
+            vpc_filters = ansible_dict_to_boto3_filter_list({'tag:Name': name, 'cidr-block': [cidr_block[0]]})
+            matching_vpcs = vpc.describe_vpcs(aws_retry=True, Filters=vpc_filters)['Vpcs']
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Failed to describe VPCs")
+
+    if multi:
+        return None
+    elif len(matching_vpcs) == 1:
+        return matching_vpcs[0]['VpcId']
+    elif len(matching_vpcs) > 1:
+        module.fail_json(msg='Currently there are %d VPCs that have the same name and '
+                             'CIDR block you specified. If you would like to create '
+                             'the VPC anyway please pass True to the multi_ok param.'
% len(matching_vpcs)) + return None + + +def get_classic_link_status(module, connection, vpc_id): + try: + results = connection.describe_vpc_classic_link(aws_retry=True, VpcIds=[vpc_id]) + return results['Vpcs'][0].get('ClassicLinkEnabled') + except is_boto3_error_message('The functionality you requested is not available in this region.'): + return False + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to describe VPCs") + + +def wait_for_vpc_to_exist(module, connection, **params): + # wait for vpc to be available + try: + get_waiter(connection, 'vpc_exists').wait(**params) + except botocore.exceptions.WaiterError as e: + module.fail_json_aws(e, msg="VPC failed to reach expected state (exists)") + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to wait for VPC creation.") + + +def wait_for_vpc(module, connection, **params): + # wait for vpc to be available + try: + get_waiter(connection, 'vpc_available').wait(**params) + except botocore.exceptions.WaiterError as e: + module.fail_json_aws(e, msg="VPC failed to reach expected state (available)") + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to wait for VPC state to update.") + + +def get_vpc(module, connection, vpc_id, wait=True): + wait_for_vpc(module, connection, VpcIds=[vpc_id]) + try: + vpc_obj = connection.describe_vpcs(VpcIds=[vpc_id], aws_retry=True)['Vpcs'][0] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to describe VPCs") + + vpc_obj['ClassicLinkEnabled'] = get_classic_link_status(module, connection, vpc_id) + + return vpc_obj + + +def update_vpc_tags(connection, module, vpc_id, tags, name, purge_tags): + # Name is a tag rather than a direct parameter, we need to inject 'Name' + # into tags, but since tags isn't explicitly passed we'll treat it not being + # set as purge_tags == False + if name: + if purge_tags and tags is None: + purge_tags = False + tags = tags or {} + tags.update({'Name': name}) + + if tags is None: + return False + + changed = ensure_ec2_tags(connection, module, vpc_id, tags=tags, purge_tags=purge_tags) + if not changed or module.check_mode: + return changed + + return True + + +def update_dhcp_opts(connection, module, vpc_obj, dhcp_id): + if dhcp_id is None: + return False + if vpc_obj['DhcpOptionsId'] == dhcp_id: + return False + if module.check_mode: + return True + + try: + connection.associate_dhcp_options(DhcpOptionsId=dhcp_id, VpcId=vpc_obj['VpcId'], aws_retry=True) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to associate DhcpOptionsId {0}".format(dhcp_id)) + + return True + + +def create_vpc(connection, module, cidr_block, tenancy, tags, ipv6_cidr, name): + if module.check_mode: + module.exit_json(changed=True, msg="VPC would be created if not in check mode") + + create_args = dict( + CidrBlock=cidr_block, InstanceTenancy=tenancy, + ) + + if name: + tags = tags or {} + tags['Name'] = name + if tags: + create_args['TagSpecifications'] = boto3_tag_specifications(tags, 'vpc') + + # Defaults to False (including None) + if ipv6_cidr: + create_args['AmazonProvidedIpv6CidrBlock'] = True + + try: + vpc_obj = connection.create_vpc(aws_retry=True, **create_args) + except (botocore.exceptions.ClientError, 
botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, "Failed to create the VPC")
+
+    # Wait for the VPC to exist (up to 30 poll attempts)
+    wait_for_vpc_to_exist(
+        module, connection,
+        VpcIds=[vpc_obj['Vpc']['VpcId']],
+        WaiterConfig=dict(MaxAttempts=30)
+    )
+    # Wait for the VPC to enter an 'Available' State
+    wait_for_vpc(
+        module, connection,
+        VpcIds=[vpc_obj['Vpc']['VpcId']],
+        WaiterConfig=dict(MaxAttempts=30)
+    )
+
+    return vpc_obj['Vpc']['VpcId']
+
+
+def wait_for_vpc_attribute(connection, module, vpc_id, attribute, expected_value):
+    if expected_value is None:
+        return
+    if module.check_mode:
+        return
+
+    start_time = time()
+    updated = False
+    while time() < start_time + 300:
+        current_value = connection.describe_vpc_attribute(
+            Attribute=attribute,
+            VpcId=vpc_id,
+            aws_retry=True
+        )['{0}{1}'.format(attribute[0].upper(), attribute[1:])]['Value']
+        if current_value != expected_value:
+            sleep(3)
+        else:
+            updated = True
+            break
+    if not updated:
+        module.fail_json(msg="Failed to wait for {0} to be updated".format(attribute))
+
+
+def wait_for_vpc_ipv6_state(module, connection, vpc_id, ipv6_assoc_state):
+    """
+    If ipv6_assoc_state is True, wait for the VPC to be associated with at least one Amazon-provided IPv6 CIDR block.
+    If ipv6_assoc_state is False, wait for the VPC to be disassociated from all Amazon-provided IPv6 CIDR blocks.
+    """
+
+    if ipv6_assoc_state is None:
+        return
+    if module.check_mode:
+        return
+
+    start_time = time()
+    criteria_match = False
+    while time() < start_time + 300:
+        current_value = get_vpc(module, connection, vpc_id)
+        if current_value:
+            ipv6_set = current_value.get('Ipv6CidrBlockAssociationSet')
+            if ipv6_set:
+                if ipv6_assoc_state:
+                    # At least one 'Amazon' IPv6 CIDR block must be associated.
+                    for val in ipv6_set:
+                        if val.get('Ipv6Pool') == 'Amazon' and val.get("Ipv6CidrBlockState").get("State") == "associated":
+                            criteria_match = True
+                            break
+                    if criteria_match:
+                        break
+                else:
+                    # All 'Amazon' IPv6 CIDR blocks must be disassociated.
+                    expected_count = sum(
+                        [(val.get("Ipv6Pool") == "Amazon") for val in ipv6_set])
+                    actual_count = sum([(val.get('Ipv6Pool') == 'Amazon' and
+                                         val.get("Ipv6CidrBlockState").get("State") == "disassociated") for val in ipv6_set])
+                    if actual_count == expected_count:
+                        criteria_match = True
+                        break
+        sleep(3)
+    if not criteria_match:
+        module.fail_json(msg="Failed to wait for IPv6 CIDR association")
+
+
+def get_cidr_network_bits(module, cidr_block):
+    if cidr_block is None:
+        return None
+
+    fixed_cidrs = []
+    for cidr in cidr_block:
+        split_addr = cidr.split('/')
+        if len(split_addr) == 2:
+            # This is an IPv4 CIDR that may or may not have host bits set.
+            # Get the network bits.
+            valid_cidr = to_subnet(split_addr[0], split_addr[1])
+            if cidr != valid_cidr:
+                module.warn("One of your CIDR addresses ({0}) has host bits set. 
To get rid of this warning, " + "check the network mask and make sure that only network bits are set: {1}.".format(cidr, valid_cidr)) + fixed_cidrs.append(valid_cidr) + else: + # let AWS handle invalid CIDRs + fixed_cidrs.append(cidr) + return fixed_cidrs + + +def update_ipv6_cidrs(connection, module, vpc_obj, vpc_id, ipv6_cidr): + if ipv6_cidr is None: + return False + + # Fetch current state from vpc_object + current_ipv6_cidr = False + if 'Ipv6CidrBlockAssociationSet' in vpc_obj.keys(): + for ipv6_assoc in vpc_obj['Ipv6CidrBlockAssociationSet']: + if ipv6_assoc['Ipv6Pool'] == 'Amazon' and ipv6_assoc['Ipv6CidrBlockState']['State'] in ['associated', 'associating']: + current_ipv6_cidr = True + break + + if ipv6_cidr == current_ipv6_cidr: + return False + + if module.check_mode: + return True + + # There's no block associated, and we want one to be associated + if ipv6_cidr: + try: + connection.associate_vpc_cidr_block(AmazonProvidedIpv6CidrBlock=ipv6_cidr, VpcId=vpc_id, aws_retry=True) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, "Unable to associate IPv6 CIDR") + else: + for ipv6_assoc in vpc_obj['Ipv6CidrBlockAssociationSet']: + if ipv6_assoc['Ipv6Pool'] == 'Amazon' and ipv6_assoc['Ipv6CidrBlockState']['State'] in ['associated', 'associating']: + try: + connection.disassociate_vpc_cidr_block(AssociationId=ipv6_assoc['AssociationId'], aws_retry=True) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, "Unable to disassociate IPv6 CIDR {0}.".format(ipv6_assoc['AssociationId'])) + return True + + +def update_cidrs(connection, module, vpc_obj, vpc_id, cidr_block, purge_cidrs): + if cidr_block is None: + return False, None + + associated_cidrs = dict((cidr['CidrBlock'], cidr['AssociationId']) for cidr in vpc_obj.get('CidrBlockAssociationSet', []) + if cidr['CidrBlockState']['State'] not in ['disassociating', 'disassociated']) + + current_cidrs = set(associated_cidrs.keys()) + desired_cidrs = set(cidr_block) + if not purge_cidrs: + desired_cidrs = desired_cidrs.union(current_cidrs) + + cidrs_to_add = list(desired_cidrs.difference(current_cidrs)) + cidrs_to_remove = list(current_cidrs.difference(desired_cidrs)) + + if not cidrs_to_add and not cidrs_to_remove: + return False, None + + if module.check_mode: + return True, list(desired_cidrs) + + for cidr in cidrs_to_add: + try: + connection.associate_vpc_cidr_block(CidrBlock=cidr, VpcId=vpc_id, aws_retry=True) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, "Unable to associate CIDR {0}.".format(cidr)) + + for cidr in cidrs_to_remove: + association_id = associated_cidrs[cidr] + try: + connection.disassociate_vpc_cidr_block(AssociationId=association_id, aws_retry=True) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, "Unable to disassociate {0}. 
You must detach or delete all gateways and resources that "
+                                "are associated with the CIDR block before you can disassociate it.".format(association_id))
+    return True, list(desired_cidrs)
+
+
+def update_dns_enabled(connection, module, vpc_id, dns_support):
+    if dns_support is None:
+        return False
+
+    current_dns_enabled = connection.describe_vpc_attribute(Attribute='enableDnsSupport', VpcId=vpc_id, aws_retry=True)['EnableDnsSupport']['Value']
+    if current_dns_enabled == dns_support:
+        return False
+
+    if module.check_mode:
+        return True
+
+    try:
+        connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsSupport={'Value': dns_support}, aws_retry=True)
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, "Failed to update the enableDnsSupport attribute")
+    return True
+
+
+def update_dns_hostnames(connection, module, vpc_id, dns_hostnames):
+    if dns_hostnames is None:
+        return False
+
+    current_dns_hostnames = connection.describe_vpc_attribute(Attribute='enableDnsHostnames', VpcId=vpc_id, aws_retry=True)['EnableDnsHostnames']['Value']
+    if current_dns_hostnames == dns_hostnames:
+        return False
+
+    if module.check_mode:
+        return True
+
+    try:
+        connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsHostnames={'Value': dns_hostnames}, aws_retry=True)
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, "Failed to update the enableDnsHostnames attribute")
+    return True
+
+
+def delete_vpc(connection, module, vpc_id):
+    if vpc_id is None:
+        return False
+    if module.check_mode:
+        return True
+
+    try:
+        connection.delete_vpc(VpcId=vpc_id, aws_retry=True)
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(
+            e, msg="Failed to delete VPC {0}. You may want to use the ec2_vpc_subnet, ec2_vpc_igw, "
+            "and/or ec2_vpc_route_table modules to ensure that all dependent components are absent.".format(vpc_id)
+        )
+
+    return True
+
+
+def wait_for_updates(connection, module, vpc_id, ipv6_cidr, expected_cidrs, dns_support, dns_hostnames, tags, dhcp_id):
+
+    if module.check_mode:
+        return
+
+    if expected_cidrs:
+        wait_for_vpc(
+            module, connection,
+            VpcIds=[vpc_id],
+            Filters=[{'Name': 'cidr-block-association.cidr-block', 'Values': expected_cidrs}]
+        )
+    wait_for_vpc_ipv6_state(module, connection, vpc_id, ipv6_cidr)
+
+    if tags is not None:
+        tag_list = ansible_dict_to_boto3_tag_list(tags)
+        filters = [{'Name': 'tag:{0}'.format(t['Key']), 'Values': [t['Value']]} for t in tag_list]
+        wait_for_vpc(module, connection, VpcIds=[vpc_id], Filters=filters)
+
+    wait_for_vpc_attribute(connection, module, vpc_id, 'enableDnsSupport', dns_support)
+    wait_for_vpc_attribute(connection, module, vpc_id, 'enableDnsHostnames', dns_hostnames)
+
+    if dhcp_id is not None:
+        # Wait for DhcpOptionsId to be updated
+        filters = [{'Name': 'dhcp-options-id', 'Values': [dhcp_id]}]
+        wait_for_vpc(module, connection, VpcIds=[vpc_id], Filters=filters)
+
+    return
+
+
+def main():
+    argument_spec = dict(
+        name=dict(required=False),
+        vpc_id=dict(type='str', required=False, default=None),
+        cidr_block=dict(type='list', elements='str'),
+        ipv6_cidr=dict(type='bool', default=None),
+        tenancy=dict(choices=['default', 'dedicated'], default='default'),
+        dns_support=dict(type='bool'),
+        dns_hostnames=dict(type='bool'),
+        dhcp_opts_id=dict(),
+        tags=dict(type='dict', aliases=['resource_tags']),
+        purge_tags=dict(type='bool', default=True),
+        state=dict(choices=['present', 'absent'],
default='present'), + multi_ok=dict(type='bool', default=False), + purge_cidrs=dict(type='bool', default=False), + ) + required_one_of = [ + ['vpc_id', 'name'], + ['vpc_id', 'cidr_block'], + ] + + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_one_of=required_one_of, + supports_check_mode=True + ) + + name = module.params.get('name') + vpc_id = module.params.get('vpc_id') + cidr_block = module.params.get('cidr_block') + ipv6_cidr = module.params.get('ipv6_cidr') + purge_cidrs = module.params.get('purge_cidrs') + tenancy = module.params.get('tenancy') + dns_support = module.params.get('dns_support') + dns_hostnames = module.params.get('dns_hostnames') + dhcp_id = module.params.get('dhcp_opts_id') + tags = module.params.get('tags') + purge_tags = module.params.get('purge_tags') + state = module.params.get('state') + multi = module.params.get('multi_ok') + + changed = False + + connection = module.client( + 'ec2', + retry_decorator=AWSRetry.jittered_backoff( + retries=8, delay=3, catch_extra_error_codes=['InvalidVpcID.NotFound'] + ), + ) + + if dns_hostnames and not dns_support: + module.fail_json(msg='In order to enable DNS Hostnames you must also enable DNS support') + + cidr_block = get_cidr_network_bits(module, module.params.get('cidr_block')) + + if vpc_id is None: + vpc_id = vpc_exists(module, connection, name, cidr_block, multi) + + if state == 'present': + + # Check if VPC exists + if vpc_id is None: + if module.params.get('name') is None: + module.fail_json('The name parameter must be specified when creating a new VPC.') + vpc_id = create_vpc(connection, module, cidr_block[0], tenancy, tags, ipv6_cidr, name) + changed = True + vpc_obj = get_vpc(module, connection, vpc_id) + if len(cidr_block) > 1: + cidrs_changed, desired_cidrs = update_cidrs(connection, module, vpc_obj, vpc_id, cidr_block, purge_cidrs) + changed |= cidrs_changed + else: + desired_cidrs = None + # Set on-creation defaults + if dns_hostnames is None: + dns_hostnames = True + if dns_support is None: + dns_support = True + else: + vpc_obj = get_vpc(module, connection, vpc_id) + cidrs_changed, desired_cidrs = update_cidrs(connection, module, vpc_obj, vpc_id, cidr_block, purge_cidrs) + changed |= cidrs_changed + ipv6_changed = update_ipv6_cidrs(connection, module, vpc_obj, vpc_id, ipv6_cidr) + changed |= ipv6_changed + tags_changed = update_vpc_tags(connection, module, vpc_id, tags, name, purge_tags) + changed |= tags_changed + + dhcp_changed = update_dhcp_opts(connection, module, vpc_obj, dhcp_id) + changed |= dhcp_changed + dns_changed = update_dns_enabled(connection, module, vpc_id, dns_support) + changed |= dns_changed + hostnames_changed = update_dns_hostnames(connection, module, vpc_id, dns_hostnames) + changed |= hostnames_changed + + wait_for_updates(connection, module, vpc_id, ipv6_cidr, desired_cidrs, dns_support, dns_hostnames, tags, dhcp_id) + + updated_obj = get_vpc(module, connection, vpc_id) + final_state = camel_dict_to_snake_dict(updated_obj) + final_state['tags'] = boto3_tag_list_to_ansible_dict(updated_obj.get('Tags', [])) + final_state['name'] = final_state['tags'].get('Name', None) + final_state['id'] = final_state.pop('vpc_id') + + module.exit_json(changed=changed, vpc=final_state) + + elif state == 'absent': + changed = delete_vpc(connection, module, vpc_id) + module.exit_json(changed=changed, vpc={}) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net_info.py 
b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net_info.py new file mode 100644 index 000000000..e32b42d83 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net_info.py @@ -0,0 +1,271 @@ +#!/usr/bin/python +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_vpc_net_info +version_added: 1.0.0 +short_description: Gather information about ec2 VPCs in AWS +description: + - Gather information about ec2 VPCs in AWS +author: "Rob White (@wimnat)" +options: + vpc_ids: + description: + - A list of VPC IDs that exist in your account. + type: list + elements: str + default: [] + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. + See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html) for possible filters. + type: dict + default: {} +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 +- amazon.aws.boto3 +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Gather information about all VPCs +- amazon.aws.ec2_vpc_net_info: + +# Gather information about a particular VPC using VPC ID +- amazon.aws.ec2_vpc_net_info: + vpc_ids: vpc-00112233 + +# Gather information about any VPC with a tag key Name and value Example +- amazon.aws.ec2_vpc_net_info: + filters: + "tag:Name": Example + +''' + +RETURN = ''' +vpcs: + description: Returns an array of complex objects as described below. + returned: success + type: complex + contains: + id: + description: The ID of the VPC (for backwards compatibility). + returned: always + type: str + vpc_id: + description: The ID of the VPC. + returned: always + type: str + state: + description: The state of the VPC. + returned: always + type: str + tags: + description: A dict of tags associated with the VPC. + returned: always + type: dict + instance_tenancy: + description: The instance tenancy setting for the VPC. + returned: always + type: str + is_default: + description: True if this is the default VPC for account. + returned: always + type: bool + cidr_block: + description: The IPv4 CIDR block assigned to the VPC. + returned: always + type: str + classic_link_dns_supported: + description: True/False depending on attribute setting for classic link DNS support. + returned: always + type: bool + classic_link_enabled: + description: True/False depending on if classic link support is enabled. + returned: always + type: bool + enable_dns_hostnames: + description: True/False depending on attribute setting for DNS hostnames support. + returned: always + type: bool + enable_dns_support: + description: True/False depending on attribute setting for DNS support. + returned: always + type: bool + cidr_block_association_set: + description: An array of IPv4 cidr block association set information. + returned: always + type: complex + contains: + association_id: + description: The association ID. + returned: always + type: str + cidr_block: + description: The IPv4 CIDR block that is associated with the VPC. + returned: always + type: str + cidr_block_state: + description: A hash/dict that contains a single item. The state of the cidr block association. + returned: always + type: dict + contains: + state: + description: The CIDR block association state. 
+ returned: always + type: str + ipv6_cidr_block_association_set: + description: An array of IPv6 cidr block association set information. + returned: always + type: complex + contains: + association_id: + description: The association ID. + returned: always + type: str + ipv6_cidr_block: + description: The IPv6 CIDR block that is associated with the VPC. + returned: always + type: str + ipv6_cidr_block_state: + description: A hash/dict that contains a single item. The state of the cidr block association. + returned: always + type: dict + contains: + state: + description: The CIDR block association state. + returned: always + type: str + owner_id: + description: The AWS account which owns the VPC. + returned: always + type: str + sample: 123456789012 + dhcp_options_id: + description: The ID of the DHCP options associated with this VPC. + returned: always + type: str + sample: dopt-12345678 +''' + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict + + +def describe_vpcs(connection, module): + """ + Describe VPCs. + + connection : boto3 client connection object + module : AnsibleAWSModule object + """ + # collect parameters + filters = ansible_dict_to_boto3_filter_list(module.params.get('filters')) + vpc_ids = module.params.get('vpc_ids') + + # init empty list for return vars + vpc_info = list() + + # Get the basic VPC info + try: + response = connection.describe_vpcs(VpcIds=vpc_ids, Filters=filters, aws_retry=True) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to describe VPCs {0}".format(vpc_ids)) + + # We can get these results in bulk but still needs two separate calls to the API + cl_enabled = {} + cl_dns_support = {} + dns_support = {} + dns_hostnames = {} + # Loop through the results and add the other VPC attributes we gathered + for vpc in response['Vpcs']: + error_message = "Unable to describe VPC attribute {0} on VPC {1}" + cl_enabled = describe_classic_links(module, connection, vpc['VpcId'], 'ClassicLinkEnabled', error_message) + cl_dns_support = describe_classic_links(module, connection, vpc['VpcId'], 'ClassicLinkDnsSupported', error_message) + dns_support = describe_vpc_attribute(module, connection, vpc['VpcId'], 'enableDnsSupport', error_message) + dns_hostnames = describe_vpc_attribute(module, connection, vpc['VpcId'], 'enableDnsHostnames', error_message) + if cl_enabled: + # loop through the ClassicLink Enabled results and add the value for the correct VPC + for item in cl_enabled['Vpcs']: + if vpc['VpcId'] == item['VpcId']: + vpc['ClassicLinkEnabled'] = item.get('ClassicLinkEnabled', False) + if cl_dns_support: + # loop through the ClassicLink DNS support results and add the value for the correct VPC + for item in cl_dns_support['Vpcs']: + if vpc['VpcId'] == item['VpcId']: + vpc['ClassicLinkDnsSupported'] = item.get('ClassicLinkDnsSupported', False) + + # add the two DNS attributes + if dns_support: + vpc['EnableDnsSupport'] = 
dns_support['EnableDnsSupport'].get('Value')
+        if dns_hostnames:
+            vpc['EnableDnsHostnames'] = dns_hostnames['EnableDnsHostnames'].get('Value')
+        # for backwards compatibility
+        vpc['id'] = vpc['VpcId']
+        vpc_info.append(camel_dict_to_snake_dict(vpc))
+        # convert tag list to ansible dict
+        vpc_info[-1]['tags'] = boto3_tag_list_to_ansible_dict(vpc.get('Tags', []))
+
+    module.exit_json(vpcs=vpc_info)
+
+
+def describe_classic_links(module, connection, vpc, attribute, error_message):
+    result = None
+    try:
+        if attribute == "ClassicLinkEnabled":
+            result = connection.describe_vpc_classic_link(VpcIds=[vpc], aws_retry=True)
+        else:
+            result = connection.describe_vpc_classic_link_dns_support(VpcIds=[vpc], aws_retry=True)
+    except is_boto3_error_code('UnsupportedOperation'):
+        result = {'Vpcs': [{'VpcId': vpc}]}
+    except is_boto3_error_code('InvalidVpcID.NotFound'):
+        module.warn(error_message.format(attribute, vpc))
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg='Unable to describe if {0} is enabled'.format(attribute))
+    return result
+
+
+def describe_vpc_attribute(module, connection, vpc, attribute, error_message):
+    result = None
+    try:
+        return connection.describe_vpc_attribute(VpcId=vpc, Attribute=attribute, aws_retry=True)
+    except is_boto3_error_code('InvalidVpcID.NotFound'):
+        module.warn(error_message.format(attribute, vpc))
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg=error_message.format(attribute, vpc))
+    return result
+
+
+def main():
+    argument_spec = dict(
+        vpc_ids=dict(type='list', elements='str', default=[]),
+        filters=dict(type='dict', default={})
+    )
+
+    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+    connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+
+    describe_vpcs(connection, module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table.py
new file mode 100644
index 000000000..583a0a076
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table.py
@@ -0,0 +1,843 @@
+#!/usr/bin/python
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_vpc_route_table
+version_added: 1.0.0
+short_description: Manage route tables for AWS Virtual Private Clouds
+description:
+  - Manage route tables for AWS Virtual Private Clouds (VPCs).
+author:
+  - Robert Estelle (@erydo)
+  - Rob White (@wimnat)
+  - Will Thames (@willthames)
+options:
+  gateway_id:
+    description:
+      - The ID of the gateway to associate with the route table.
+      - If I(gateway_id) is C('None') or C(''), the gateway will be disassociated from the route table.
+    type: str
+    version_added: 3.2.0
+  lookup:
+    description:
+      - Look up the route table by either I(tags) or I(route_table_id).
+      - If I(lookup=tag) and I(tags) is not specified then no lookup for an
+        existing route table is performed and a new route table will be created.
+      - When using I(lookup=tag), finding multiple matches will result in
+        a failure and no changes will be made.
+      - To change the tags of a route table use I(lookup=id).
+      - I(vpc_id) must be specified when I(lookup=tag).
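+      - When I(lookup=id), the route table is located directly by I(route_table_id).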
+    default: tag
+    choices: [ 'tag', 'id' ]
+    type: str
+  propagating_vgw_ids:
+    description: Enable route propagation from virtual gateways specified by ID.
+    type: list
+    elements: str
+  purge_routes:
+    description: Purge existing routes that are not found in routes.
+    type: bool
+    default: True
+  purge_subnets:
+    description:
+      - Purge existing subnets that are not found in subnets.
+      - Ignored unless the subnets option is supplied.
+    default: True
+    type: bool
+  route_table_id:
+    description:
+      - The ID of the route table to update or delete.
+      - Required when I(lookup=id).
+    type: str
+  routes:
+    description:
+      - List of routes in the route table.
+      - Routes are specified as dicts containing the keys C(dest) and one of C(gateway_id),
+        C(instance_id), C(network_interface_id), or C(vpc_peering_connection_id).
+      - The value of C(dest) is used for the destination match. It may be an IPv4 CIDR block
+        or an IPv6 CIDR block.
+      - If I(gateway_id) is specified, you can refer to the VPC's IGW by using the value C(igw).
+      - Required when I(state=present).
+    type: list
+    elements: dict
+    default: []
+  state:
+    description: Create or destroy the VPC route table.
+    default: present
+    choices: [ 'present', 'absent' ]
+    type: str
+  subnets:
+    description: An array of subnets to add to this route table. Subnets may be specified
+      by either subnet ID, Name tag, or by a CIDR such as '10.0.0.0/24' or 'fd00::/8'.
+    type: list
+    elements: str
+  vpc_id:
+    description:
+      - VPC ID of the VPC in which to create the route table.
+      - Required when I(state=present) or I(lookup=tag).
+    type: str
+notes:
+  - Tags are used to uniquely identify route tables within a VPC when the I(route_table_id) is not supplied.
+extends_documentation_fragment:
+  - amazon.aws.aws
+  - amazon.aws.ec2
+  - amazon.aws.tags
+  - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Basic creation example:
+- name: Set up public subnet route table
+  amazon.aws.ec2_vpc_route_table:
+    vpc_id: vpc-1245678
+    region: us-west-1
+    tags:
+      Name: Public
+    subnets:
+      - "{{ jumpbox_subnet.subnet.id }}"
+      - "{{ frontend_subnet.subnet.id }}"
+      - "{{ vpn_subnet.subnet_id }}"
+    routes:
+      - dest: 0.0.0.0/0
+        gateway_id: "{{ igw.gateway_id }}"
+      - dest: ::/0
+        gateway_id: "{{ igw.gateway_id }}"
+  register: public_route_table
+
+- name: Create VPC gateway
+  amazon.aws.ec2_vpc_igw:
+    vpc_id: vpc-1245678
+  register: vpc_igw
+
+- name: Create gateway route table
+  amazon.aws.ec2_vpc_route_table:
+    vpc_id: vpc-1245678
+    tags:
+      Name: Gateway route table
+    gateway_id: "{{ vpc_igw.gateway_id }}"
+  register: gateway_route_table
+
+- name: Disassociate gateway from route table
+  amazon.aws.ec2_vpc_route_table:
+    vpc_id: vpc-1245678
+    tags:
+      Name: Gateway route table
+    gateway_id: None
+  register: gateway_route_table
+
+- name: Set up NAT-protected route table
+  amazon.aws.ec2_vpc_route_table:
+    vpc_id: vpc-1245678
+    region: us-west-1
+    tags:
+      Name: Internal
+    subnets:
+      - "{{ application_subnet.subnet.id }}"
+      - 'Database Subnet'
+      - '10.0.0.0/8'
+    routes:
+      - dest: 0.0.0.0/0
+        instance_id: "{{ nat.instance_id }}"
+  register: nat_route_table
+
+- name: delete route table
+  amazon.aws.ec2_vpc_route_table:
+    vpc_id: vpc-1245678
+    region: us-west-1
+    route_table_id: "{{ route_table.id }}"
+    lookup: id
+    state: absent
+'''
+
+RETURN = r'''
+route_table:
+  description: Route Table result.
+ returned: always + type: complex + contains: + associations: + description: List of associations between the route table and one or more subnets or a gateway. + returned: always + type: complex + contains: + association_state: + description: The state of the association. + returned: always + type: complex + contains: + state: + description: The state of the association. + returned: always + type: str + sample: associated + state_message: + description: Additional information about the state of the association. + returned: when available + type: str + sample: 'Creating association' + gateway_id: + description: ID of the internet gateway or virtual private gateway. + returned: when route table is a gateway route table + type: str + sample: igw-03312309 + main: + description: Whether this is the main route table. + returned: always + type: bool + sample: false + route_table_association_id: + description: ID of association between route table and subnet. + returned: always + type: str + sample: rtbassoc-ab47cfc3 + route_table_id: + description: ID of the route table. + returned: always + type: str + sample: rtb-bf779ed7 + subnet_id: + description: ID of the subnet. + returned: when route table is a subnet route table + type: str + sample: subnet-82055af9 + id: + description: ID of the route table (same as route_table_id for backwards compatibility). + returned: always + type: str + sample: rtb-bf779ed7 + propagating_vgws: + description: List of Virtual Private Gateways propagating routes. + returned: always + type: list + sample: [] + route_table_id: + description: ID of the route table. + returned: always + type: str + sample: rtb-bf779ed7 + routes: + description: List of routes in the route table. + returned: always + type: complex + contains: + destination_cidr_block: + description: IPv4 CIDR block of destination + returned: always + type: str + sample: 10.228.228.0/22 + destination_ipv6_cidr_block: + description: IPv6 CIDR block of destination + returned: when the route includes an IPv6 destination + type: str + sample: 2600:1f1c:1b3:8f00:8000::/65 + gateway_id: + description: ID of the gateway. + returned: when gateway is local or internet gateway + type: str + sample: local + instance_id: + description: ID of a NAT instance. + returned: when the route is via an EC2 instance + type: str + sample: i-abcd123456789 + instance_owner_id: + description: AWS account owning the NAT instance. + returned: when the route is via an EC2 instance + type: str + sample: 123456789012 + nat_gateway_id: + description: ID of the NAT gateway. + returned: when the route is via a NAT gateway + type: str + sample: local + origin: + description: mechanism through which the route is in the table. + returned: always + type: str + sample: CreateRouteTable + state: + description: state of the route. + returned: always + type: str + sample: active + tags: + description: Tags applied to the route table. + returned: always + type: dict + sample: + Name: Public route table + Public: 'true' + vpc_id: + description: ID for the VPC in which the route lives. 
+ returned: always + type: str + sample: vpc-6e2d2407 +''' + +import re +from time import sleep +from ipaddress import ip_network + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import describe_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter + + +@AWSRetry.jittered_backoff() +def describe_subnets_with_backoff(connection, **params): + paginator = connection.get_paginator('describe_subnets') + return paginator.paginate(**params).build_full_result()['Subnets'] + + +@AWSRetry.jittered_backoff() +def describe_igws_with_backoff(connection, **params): + paginator = connection.get_paginator('describe_internet_gateways') + return paginator.paginate(**params).build_full_result()['InternetGateways'] + + +@AWSRetry.jittered_backoff() +def describe_route_tables_with_backoff(connection, **params): + try: + paginator = connection.get_paginator('describe_route_tables') + return paginator.paginate(**params).build_full_result()['RouteTables'] + except is_boto3_error_code('InvalidRouteTableID.NotFound'): + return None + + +def find_subnets(connection, module, vpc_id, identified_subnets): + """ + Finds a list of subnets, each identified either by a raw ID, a unique + 'Name' tag, or a CIDR such as 10.0.0.0/8. 
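+
+    Editor's note: IDs are resolved directly, CIDRs via the 'cidr' filter and
+    names via the 'tag:Name' filter; a name that matches zero subnets, or more
+    than one, fails the module.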
+ """ + CIDR_RE = re.compile(r'^(\d{1,3}\.){3}\d{1,3}/\d{1,2}$') + SUBNET_RE = re.compile(r'^subnet-[A-z0-9]+$') + + subnet_ids = [] + subnet_names = [] + subnet_cidrs = [] + for subnet in (identified_subnets or []): + if re.match(SUBNET_RE, subnet): + subnet_ids.append(subnet) + elif re.match(CIDR_RE, subnet): + subnet_cidrs.append(subnet) + else: + subnet_names.append(subnet) + + subnets_by_id = [] + if subnet_ids: + filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id}) + try: + subnets_by_id = describe_subnets_with_backoff(connection, SubnetIds=subnet_ids, Filters=filters) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't find subnet with id %s" % subnet_ids) + + subnets_by_cidr = [] + if subnet_cidrs: + filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'cidr': subnet_cidrs}) + try: + subnets_by_cidr = describe_subnets_with_backoff(connection, Filters=filters) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't find subnet with cidr %s" % subnet_cidrs) + + subnets_by_name = [] + if subnet_names: + filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'tag:Name': subnet_names}) + try: + subnets_by_name = describe_subnets_with_backoff(connection, Filters=filters) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't find subnet with names %s" % subnet_names) + + for name in subnet_names: + matching_count = len([1 for s in subnets_by_name for t in s.get('Tags', []) if t['Key'] == 'Name' and t['Value'] == name]) + if matching_count == 0: + module.fail_json(msg='Subnet named "{0}" does not exist'.format(name)) + elif matching_count > 1: + module.fail_json(msg='Multiple subnets named "{0}"'.format(name)) + + return subnets_by_id + subnets_by_cidr + subnets_by_name + + +def find_igw(connection, module, vpc_id): + """ + Finds the Internet gateway for the given VPC ID. 
+ """ + filters = ansible_dict_to_boto3_filter_list({'attachment.vpc-id': vpc_id}) + try: + igw = describe_igws_with_backoff(connection, Filters=filters) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='No IGW found for VPC {0}'.format(vpc_id)) + if len(igw) == 1: + return igw[0]['InternetGatewayId'] + elif len(igw) == 0: + module.fail_json(msg='No IGWs found for VPC {0}'.format(vpc_id)) + else: + module.fail_json(msg='Multiple IGWs found for VPC {0}'.format(vpc_id)) + + +def tags_match(match_tags, candidate_tags): + return all((k in candidate_tags and candidate_tags[k] == v + for k, v in match_tags.items())) + + +def get_route_table_by_id(connection, module, route_table_id): + + route_table = None + try: + route_tables = describe_route_tables_with_backoff(connection, RouteTableIds=[route_table_id]) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't get route table") + if route_tables: + route_table = route_tables[0] + + return route_table + + +def get_route_table_by_tags(connection, module, vpc_id, tags): + count = 0 + route_table = None + filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id}) + try: + route_tables = describe_route_tables_with_backoff(connection, Filters=filters) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't get route table") + for table in route_tables: + this_tags = describe_ec2_tags(connection, module, table['RouteTableId']) + if tags_match(tags, this_tags): + route_table = table + count += 1 + + if count > 1: + module.fail_json(msg="Tags provided do not identify a unique route table") + else: + return route_table + + +def route_spec_matches_route(route_spec, route): + if route_spec.get('GatewayId') and 'nat-' in route_spec['GatewayId']: + route_spec['NatGatewayId'] = route_spec.pop('GatewayId') + if route_spec.get('GatewayId') and 'vpce-' in route_spec['GatewayId']: + if route_spec.get('DestinationCidrBlock', '').startswith('pl-'): + route_spec['DestinationPrefixListId'] = route_spec.pop('DestinationCidrBlock') + + return set(route_spec.items()).issubset(route.items()) + + +def route_spec_matches_route_cidr(route_spec, route): + if route_spec.get('DestinationCidrBlock') and route.get('DestinationCidrBlock'): + return route_spec.get('DestinationCidrBlock') == route.get('DestinationCidrBlock') + if route_spec.get('DestinationIpv6CidrBlock') and route.get('DestinationIpv6CidrBlock'): + return route_spec.get('DestinationIpv6CidrBlock') == route.get('DestinationIpv6CidrBlock') + return False + + +def rename_key(d, old_key, new_key): + d[new_key] = d.pop(old_key) + + +def index_of_matching_route(route_spec, routes_to_match): + for i, route in enumerate(routes_to_match): + if route_spec_matches_route(route_spec, route): + return "exact", i + elif 'Origin' in route and route['Origin'] != 'EnableVgwRoutePropagation': # only replace created routes + if route_spec_matches_route_cidr(route_spec, route): + return "replace", i + + +def ensure_routes(connection, module, route_table, route_specs, purge_routes): + routes_to_match = list(route_table['Routes']) + route_specs_to_create = [] + route_specs_to_recreate = [] + for route_spec in route_specs: + match = index_of_matching_route(route_spec, routes_to_match) + if match is None: + if route_spec.get('DestinationCidrBlock') or route_spec.get('DestinationIpv6CidrBlock'): + route_specs_to_create.append(route_spec) 
+ else: + module.warn("Skipping creating {0} because it has no destination cidr block. " + "To add VPC endpoints to route tables use the ec2_vpc_endpoint module.".format(route_spec)) + else: + if match[0] == "replace": + if route_spec.get('DestinationCidrBlock'): + route_specs_to_recreate.append(route_spec) + else: + module.warn("Skipping recreating route {0} because it has no destination cidr block.".format(route_spec)) + del routes_to_match[match[1]] + + routes_to_delete = [] + if purge_routes: + for route in routes_to_match: + if not route.get('DestinationCidrBlock'): + module.warn("Skipping purging route {0} because it has no destination cidr block. " + "To remove VPC endpoints from route tables use the ec2_vpc_endpoint module.".format(route)) + continue + if route['Origin'] == 'CreateRoute': + routes_to_delete.append(route) + + changed = bool(routes_to_delete or route_specs_to_create or route_specs_to_recreate) + if changed and not module.check_mode: + for route in routes_to_delete: + try: + connection.delete_route( + aws_retry=True, + RouteTableId=route_table['RouteTableId'], + DestinationCidrBlock=route['DestinationCidrBlock']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't delete route") + + for route_spec in route_specs_to_recreate: + try: + connection.replace_route(aws_retry=True, RouteTableId=route_table['RouteTableId'], **route_spec) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't recreate route") + + for route_spec in route_specs_to_create: + try: + connection.create_route(aws_retry=True, RouteTableId=route_table['RouteTableId'], **route_spec) + except is_boto3_error_code('RouteAlreadyExists'): + changed = False + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Couldn't create route") + + return changed + + +def ensure_subnet_association(connection, module, vpc_id, route_table_id, subnet_id): + filters = ansible_dict_to_boto3_filter_list({'association.subnet-id': subnet_id, 'vpc-id': vpc_id}) + try: + route_tables = describe_route_tables_with_backoff(connection, Filters=filters) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't get route tables") + for route_table in route_tables: + if route_table.get('RouteTableId'): + for association in route_table['Associations']: + if association['Main']: + continue + if association['SubnetId'] == subnet_id: + if route_table['RouteTableId'] == route_table_id: + return {'changed': False, 'association_id': association['RouteTableAssociationId']} + if module.check_mode: + return {'changed': True} + try: + connection.disassociate_route_table( + aws_retry=True, AssociationId=association['RouteTableAssociationId']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't disassociate subnet from route table") + + if module.check_mode: + return {'changed': True} + try: + association_id = connection.associate_route_table(aws_retry=True, + RouteTableId=route_table_id, + SubnetId=subnet_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't associate subnet with route table") + return {'changed': True, 'association_id': association_id} + + +def ensure_subnet_associations(connection, module, 
route_table, subnets, purge_subnets): + current_association_ids = [association['RouteTableAssociationId'] for association in route_table['Associations'] + if not association['Main'] and association.get('SubnetId')] + new_association_ids = [] + changed = False + for subnet in subnets: + result = ensure_subnet_association( + connection=connection, module=module, vpc_id=route_table['VpcId'], + route_table_id=route_table['RouteTableId'], subnet_id=subnet['SubnetId']) + changed = changed or result['changed'] + if changed and module.check_mode: + return True + new_association_ids.append(result['association_id']) + + if purge_subnets: + to_delete = [association_id for association_id in current_association_ids + if association_id not in new_association_ids] + for association_id in to_delete: + changed = True + if not module.check_mode: + try: + connection.disassociate_route_table(aws_retry=True, AssociationId=association_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't disassociate subnet from route table") + + return changed + + +def disassociate_gateway(connection, module, route_table): + # Delete all gateway associations that have state = associated + # Subnet associations are handled in its method + changed = False + associations_to_delete = [association['RouteTableAssociationId'] for association in route_table['Associations'] if not association['Main'] + and association.get('GatewayId') and association['AssociationState']['State'] in ['associated', 'associating']] + for association_id in associations_to_delete: + changed = True + if not module.check_mode: + try: + connection.disassociate_route_table(aws_retry=True, AssociationId=association_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't disassociate gateway from route table") + + return changed + + +def associate_gateway(connection, module, route_table, gateway_id): + filters = ansible_dict_to_boto3_filter_list({'association.gateway-id': gateway_id, 'vpc-id': route_table['VpcId']}) + try: + route_tables = describe_route_tables_with_backoff(connection, Filters=filters) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't get route tables") + for table in route_tables: + if table.get('RouteTableId'): + for association in table.get('Associations'): + if association['Main']: + continue + if association.get('GatewayId', '') == gateway_id and (association['AssociationState']['State'] in ['associated', 'associating']): + if table['RouteTableId'] == route_table['RouteTableId']: + return False + elif module.check_mode: + return True + else: + try: + connection.disassociate_route_table( + aws_retry=True, AssociationId=association['RouteTableAssociationId']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't disassociate gateway from route table") + + if not module.check_mode: + try: + connection.associate_route_table(aws_retry=True, + RouteTableId=route_table['RouteTableId'], + GatewayId=gateway_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't associate gateway with route table") + return True + + +def ensure_propagation(connection, module, route_table, propagating_vgw_ids): + changed = False + gateways = [gateway['GatewayId'] for gateway in route_table['PropagatingVgws']] + 
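+    # Editor's note: only additions are reconciled here; VGWs that are already
+    # propagating but absent from propagating_vgw_ids are left untouched, so this
+    # function never disables route propagation.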
vgws_to_add = set(propagating_vgw_ids) - set(gateways) + if vgws_to_add: + changed = True + if not module.check_mode: + for vgw_id in vgws_to_add: + try: + connection.enable_vgw_route_propagation( + aws_retry=True, + RouteTableId=route_table['RouteTableId'], + GatewayId=vgw_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't enable route propagation") + + return changed + + +def ensure_route_table_absent(connection, module): + + lookup = module.params.get('lookup') + route_table_id = module.params.get('route_table_id') + tags = module.params.get('tags') + vpc_id = module.params.get('vpc_id') + purge_subnets = module.params.get('purge_subnets') + + if lookup == 'tag': + if tags is not None: + route_table = get_route_table_by_tags(connection, module, vpc_id, tags) + else: + route_table = None + elif lookup == 'id': + route_table = get_route_table_by_id(connection, module, route_table_id) + + if route_table is None: + return {'changed': False} + + # disassociate subnets and gateway before deleting route table + if not module.check_mode: + ensure_subnet_associations(connection=connection, module=module, route_table=route_table, + subnets=[], purge_subnets=purge_subnets) + disassociate_gateway(connection=connection, module=module, route_table=route_table) + try: + connection.delete_route_table(aws_retry=True, RouteTableId=route_table['RouteTableId']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Error deleting route table") + + return {'changed': True} + + +def get_route_table_info(connection, module, route_table): + result = get_route_table_by_id(connection, module, route_table['RouteTableId']) + try: + result['Tags'] = describe_ec2_tags(connection, module, route_table['RouteTableId']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't get tags for route table") + result = camel_dict_to_snake_dict(result, ignore_list=['Tags']) + # backwards compatibility + result['id'] = result['route_table_id'] + return result + + +def create_route_spec(connection, module, vpc_id): + routes = module.params.get('routes') + for route_spec in routes: + + cidr_block_type = str(type(ip_network(route_spec['dest']))) + if "IPv4" in cidr_block_type: + rename_key(route_spec, 'dest', 'destination_cidr_block') + if "IPv6" in cidr_block_type: + rename_key(route_spec, 'dest', 'destination_ipv6_cidr_block') + + if route_spec.get('gateway_id') and route_spec['gateway_id'].lower() == 'igw': + igw = find_igw(connection, module, vpc_id) + route_spec['gateway_id'] = igw + if route_spec.get('gateway_id') and route_spec['gateway_id'].startswith('nat-'): + rename_key(route_spec, 'gateway_id', 'nat_gateway_id') + + return snake_dict_to_camel_dict(routes, capitalize_first=True) + + +def ensure_route_table_present(connection, module): + + gateway_id = module.params.get('gateway_id') + lookup = module.params.get('lookup') + propagating_vgw_ids = module.params.get('propagating_vgw_ids') + purge_routes = module.params.get('purge_routes') + purge_subnets = module.params.get('purge_subnets') + purge_tags = module.params.get('purge_tags') + route_table_id = module.params.get('route_table_id') + subnets = module.params.get('subnets') + tags = module.params.get('tags') + vpc_id = module.params.get('vpc_id') + routes = create_route_spec(connection, module, vpc_id) + + changed = False + tags_valid = False + + if lookup == 'tag': 
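+        # Editor's note: with lookup=tag and no tags supplied, route_table stays
+        # None and a brand-new route table is always created below.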
+ if tags is not None: + try: + route_table = get_route_table_by_tags(connection, module, vpc_id, tags) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Error finding route table with lookup 'tag'") + else: + route_table = None + elif lookup == 'id': + try: + route_table = get_route_table_by_id(connection, module, route_table_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Error finding route table with lookup 'id'") + + # If no route table returned then create new route table + if route_table is None: + changed = True + if not module.check_mode: + try: + route_table = connection.create_route_table(aws_retry=True, VpcId=vpc_id)['RouteTable'] + # try to wait for route table to be present before moving on + get_waiter( + connection, 'route_table_exists' + ).wait( + RouteTableIds=[route_table['RouteTableId']], + ) + except botocore.exceptions.WaiterError as e: + module.fail_json_aws(e, msg='Timeout waiting for route table creation') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Error creating route table") + else: + route_table = {"id": "rtb-xxxxxxxx", "route_table_id": "rtb-xxxxxxxx", "vpc_id": vpc_id} + module.exit_json(changed=changed, route_table=route_table) + + if routes is not None: + result = ensure_routes(connection=connection, module=module, route_table=route_table, + route_specs=routes, purge_routes=purge_routes) + changed = changed or result + + if propagating_vgw_ids is not None: + result = ensure_propagation(connection=connection, module=module, route_table=route_table, + propagating_vgw_ids=propagating_vgw_ids) + changed = changed or result + + if not tags_valid and tags is not None: + changed |= ensure_ec2_tags(connection, module, route_table['RouteTableId'], + tags=tags, purge_tags=purge_tags, + retry_codes=['InvalidRouteTableID.NotFound']) + route_table['Tags'] = describe_ec2_tags(connection, module, route_table['RouteTableId']) + + if subnets is not None: + associated_subnets = find_subnets(connection, module, vpc_id, subnets) + result = ensure_subnet_associations(connection=connection, module=module, route_table=route_table, + subnets=associated_subnets, purge_subnets=purge_subnets) + changed = changed or result + + if gateway_id == 'None' or gateway_id == '': + gateway_changed = disassociate_gateway(connection=connection, module=module, route_table=route_table) + elif gateway_id is not None: + gateway_changed = associate_gateway(connection=connection, module=module, route_table=route_table, gateway_id=gateway_id) + else: + gateway_changed = False + + changed = changed or gateway_changed + + if changed: + # pause to allow route table routes/subnets/associations to be updated before exiting with final state + sleep(5) + module.exit_json(changed=changed, route_table=get_route_table_info(connection, module, route_table)) + + +def main(): + argument_spec = dict( + gateway_id=dict(type='str'), + lookup=dict(default='tag', choices=['tag', 'id']), + propagating_vgw_ids=dict(type='list', elements='str'), + purge_routes=dict(default=True, type='bool'), + purge_subnets=dict(default=True, type='bool'), + purge_tags=dict(type='bool', default=True), + route_table_id=dict(), + routes=dict(default=[], type='list', elements='dict'), + state=dict(default='present', choices=['present', 'absent']), + subnets=dict(type='list', elements='str'), + tags=dict(type='dict', 
aliases=['resource_tags']),
+        vpc_id=dict()
+    )
+
+    module = AnsibleAWSModule(argument_spec=argument_spec,
+                              required_if=[['lookup', 'id', ['route_table_id']],
+                                           ['lookup', 'tag', ['vpc_id']],
+                                           ['state', 'present', ['vpc_id']]],
+                              supports_check_mode=True)
+
+    # The tests for route table existence use their own decorator; we can safely
+    # retry on InvalidRouteTableID.NotFound
+    retry_decorator = AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=['InvalidRouteTableID.NotFound'])
+    connection = module.client('ec2', retry_decorator=retry_decorator)
+
+    state = module.params.get('state')
+
+    if state == 'present':
+        result = ensure_route_table_present(connection, module)
+    elif state == 'absent':
+        result = ensure_route_table_absent(connection, module)
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table_info.py
new file mode 100644
index 000000000..b7b3c69d4
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table_info.py
@@ -0,0 +1,279 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ec2_vpc_route_table_info
+version_added: 1.0.0
+short_description: Gather information about EC2 VPC route tables in AWS
+description:
+    - Gather information about EC2 VPC route tables in AWS.
+author:
+- "Rob White (@wimnat)"
+- "Mark Chappell (@tremble)"
+options:
+  filters:
+    description:
+      - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+        See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRouteTables.html) for possible filters.
+    type: dict
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather information about all VPC route tables
+  amazon.aws.ec2_vpc_route_table_info:
+
+- name: Gather information about a particular VPC route table using route table ID
+  amazon.aws.ec2_vpc_route_table_info:
+    filters:
+      route-table-id: rtb-00112233
+
+- name: Gather information about any VPC route table with a tag key Name and value Example
+  amazon.aws.ec2_vpc_route_table_info:
+    filters:
+      "tag:Name": Example
+
+- name: Gather information about any VPC route table within VPC with ID vpc-abcdef00
+  amazon.aws.ec2_vpc_route_table_info:
+    filters:
+      vpc-id: vpc-abcdef00
+'''
+
+RETURN = r'''
+route_tables:
+  description:
+    - A list of dictionaries describing route tables.
+    - See also U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.describe_route_tables).
+  returned: always
+  type: complex
+  contains:
+    associations:
+      description: List of associations between the route table and one or more subnets or a gateway.
+      returned: always
+      type: complex
+      contains:
+        association_state:
+          description: The state of the association.
+          returned: always
+          type: complex
+          contains:
+            state:
+              description: The state of the association.
+              returned: always
+              type: str
+              sample: associated
+            state_message:
+              description: Additional information about the state of the association.
+ returned: when available + type: str + sample: 'Creating association' + gateway_id: + description: ID of the internet gateway or virtual private gateway. + returned: when route table is a gateway route table + type: str + sample: igw-03312309 + main: + description: Whether this is the main route table. + returned: always + type: bool + sample: false + route_table_association_id: + description: ID of association between route table and subnet. + returned: always + type: str + sample: rtbassoc-ab47cfc3 + route_table_id: + description: ID of the route table. + returned: always + type: str + sample: rtb-bf779ed7 + subnet_id: + description: ID of the subnet. + returned: when route table is a subnet route table + type: str + sample: subnet-82055af9 + id: + description: ID of the route table (same as route_table_id for backwards compatibility). + returned: always + type: str + sample: rtb-bf779ed7 + owner_id: + description: ID of the account which owns the route table. + returned: always + type: str + sample: '012345678912' + propagating_vgws: + description: List of Virtual Private Gateways propagating routes. + returned: always + type: list + sample: [] + route_table_id: + description: ID of the route table. + returned: always + type: str + sample: rtb-bf779ed7 + routes: + description: List of routes in the route table. + returned: always + type: complex + contains: + destination_cidr_block: + description: CIDR block of destination. + returned: always + type: str + sample: 10.228.228.0/22 + gateway_id: + description: ID of the gateway. + returned: when gateway is local or internet gateway + type: str + sample: local + instance_id: + description: + - ID of a NAT instance. + - Empty unless the route is via an EC2 instance. + returned: always + type: str + sample: i-abcd123456789 + instance_owner_id: + description: + - AWS account owning the NAT instance. + - Empty unless the route is via an EC2 instance. + returned: always + type: str + sample: 123456789012 + network_interface_id: + description: + - The ID of the network interface. + - Empty unless the route is via an EC2 instance. + returned: always + type: str + sample: 123456789012 + nat_gateway_id: + description: ID of the NAT gateway. + returned: when the route is via a NAT gateway. + type: str + sample: local + origin: + description: mechanism through which the route is in the table. + returned: always + type: str + sample: CreateRouteTable + state: + description: state of the route. + returned: always + type: str + sample: active + tags: + description: Tags applied to the route table. + returned: always + type: dict + sample: + Name: Public route table + Public: 'true' + vpc_id: + description: ID for the VPC in which the route lives. 
+ returned: always + type: str + sample: vpc-6e2d2407 +''' + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict + + +@AWSRetry.jittered_backoff() +def describe_route_tables_with_backoff(connection, **params): + try: + paginator = connection.get_paginator('describe_route_tables') + return paginator.paginate(**params).build_full_result() + except is_boto3_error_code('InvalidRouteTableID.NotFound'): + return None + + +def normalize_route(route): + # Historically these were all there, but set to null when empty' + for legacy_key in ['DestinationCidrBlock', 'GatewayId', 'InstanceId', + 'Origin', 'State', 'NetworkInterfaceId']: + if legacy_key not in route: + route[legacy_key] = None + route['InterfaceId'] = route['NetworkInterfaceId'] + return route + + +def normalize_association(assoc): + # Name change between boto v2 and boto v3, return both + assoc['Id'] = assoc['RouteTableAssociationId'] + return assoc + + +def normalize_route_table(table): + table['tags'] = boto3_tag_list_to_ansible_dict(table['Tags']) + table['Associations'] = [normalize_association(assoc) for assoc in table['Associations']] + table['Routes'] = [normalize_route(route) for route in table['Routes']] + table['Id'] = table['RouteTableId'] + del table['Tags'] + return camel_dict_to_snake_dict(table, ignore_list=['tags']) + + +def normalize_results(results): + """ + We used to be a boto v2 module, make sure that the old return values are + maintained and the shape of the return values are what people expect + """ + + routes = [normalize_route_table(route) for route in results['RouteTables']] + del results['RouteTables'] + results = camel_dict_to_snake_dict(results) + results['route_tables'] = routes + return results + + +def list_ec2_vpc_route_tables(connection, module): + + filters = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + + try: + results = describe_route_tables_with_backoff(connection, Filters=filters) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Failed to get route tables") + + results = normalize_results(results) + module.exit_json(changed=False, **results) + + +def main(): + argument_spec = dict( + filters=dict(default=None, type='dict'), + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, + supports_check_mode=True) + + connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10)) + + list_ec2_vpc_route_tables(connection, module) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet.py new file mode 100644 index 000000000..ae806ae14 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet.py @@ -0,0 +1,574 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ 
import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_subnet
+version_added: 1.0.0
+short_description: Manage subnets in AWS virtual private clouds
+description:
+  - Manage subnets in AWS virtual private clouds.
+author:
+  - Robert Estelle (@erydo)
+  - Brad Davidson (@brandond)
+options:
+  az:
+    description:
+      - The availability zone for the subnet.
+      - Required if I(outpost_arn) is set.
+    type: str
+  cidr:
+    description:
+      - The CIDR block for the subnet. E.g. C(192.0.2.0/24).
+    type: str
+    required: true
+  ipv6_cidr:
+    description:
+      - The IPv6 CIDR block for the subnet.
+      - The VPC must have a /56 block assigned and this value must be a valid IPv6 /64 that falls in the VPC range.
+      - Required if I(assign_instances_ipv6=true).
+    type: str
+    default: ''
+  outpost_arn:
+    description:
+      - The Amazon Resource Name (ARN) of the Outpost.
+      - If set, allows creation of the subnet in an Outpost.
+      - If I(outpost_arn) is set, I(az) must also be specified.
+    type: str
+    default: ''
+  state:
+    description:
+      - Create or remove the subnet.
+    default: present
+    choices: [ 'present', 'absent' ]
+    type: str
+  vpc_id:
+    description:
+      - VPC ID of the VPC in which to create or delete the subnet.
+    required: true
+    type: str
+  map_public:
+    description:
+      - Whether instances launched into the subnet should default to being assigned a public IP address.
+    type: bool
+    default: false
+  assign_instances_ipv6:
+    description:
+      - Whether instances launched into the subnet should default to being automatically assigned an IPv6 address.
+      - If I(assign_instances_ipv6=true), I(ipv6_cidr) must also be specified.
+    type: bool
+    default: false
+  wait:
+    description:
+      - Whether to wait for changes to complete.
+    type: bool
+    default: true
+  wait_timeout:
+    description:
+      - Number of seconds to wait for changes to complete.
+      - Ignored unless I(wait=True).
+    default: 300
+    type: int
+  tags:
+    default: {}
+extends_documentation_fragment:
+  - amazon.aws.aws
+  - amazon.aws.ec2
+  - amazon.aws.tags
+  - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
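+
+# Editor's addition (illustrative): a public subnet pinned to an availability
+# zone with auto-assigned public IPs; vpc-123456 is a placeholder ID.
+- name: Create public subnet in a specific availability zone
+  amazon.aws.ec2_vpc_subnet:
+    state: present
+    vpc_id: vpc-123456
+    cidr: 10.0.2.0/24
+    az: us-east-1a
+    map_public: true
+    tags:
+      Name: Public A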
+ +- name: Create subnet for database servers + amazon.aws.ec2_vpc_subnet: + state: present + vpc_id: vpc-123456 + cidr: 10.0.1.16/28 + tags: + Name: Database Subnet + register: database_subnet + +- name: Remove subnet for database servers + amazon.aws.ec2_vpc_subnet: + state: absent + vpc_id: vpc-123456 + cidr: 10.0.1.16/28 + +- name: Create subnet with IPv6 block assigned + amazon.aws.ec2_vpc_subnet: + state: present + vpc_id: vpc-123456 + cidr: 10.1.100.0/24 + ipv6_cidr: 2001:db8:0:102::/64 + +- name: Remove IPv6 block assigned to subnet + amazon.aws.ec2_vpc_subnet: + state: present + vpc_id: vpc-123456 + cidr: 10.1.100.0/24 + ipv6_cidr: '' +''' + +RETURN = ''' +subnet: + description: Dictionary of subnet values + returned: I(state=present) + type: complex + contains: + id: + description: Subnet resource id + returned: I(state=present) + type: str + sample: subnet-b883b2c4 + cidr_block: + description: The IPv4 CIDR of the Subnet + returned: I(state=present) + type: str + sample: "10.0.0.0/16" + ipv6_cidr_block: + description: The IPv6 CIDR block actively associated with the Subnet + returned: I(state=present) + type: str + sample: "2001:db8:0:102::/64" + availability_zone: + description: Availability zone of the Subnet + returned: I(state=present) + type: str + sample: us-east-1a + state: + description: state of the Subnet + returned: I(state=present) + type: str + sample: available + tags: + description: tags attached to the Subnet, includes name + returned: I(state=present) + type: dict + sample: {"Name": "My Subnet", "env": "staging"} + map_public_ip_on_launch: + description: whether public IP is auto-assigned to new instances + returned: I(state=present) + type: bool + sample: false + assign_ipv6_address_on_creation: + description: whether IPv6 address is auto-assigned to new instances + returned: I(state=present) + type: bool + sample: false + vpc_id: + description: the id of the VPC where this Subnet exists + returned: I(state=present) + type: str + sample: vpc-67236184 + available_ip_address_count: + description: number of available IPv4 addresses + returned: I(state=present) + type: str + sample: 251 + default_for_az: + description: indicates whether this is the default Subnet for this Availability Zone + returned: I(state=present) + type: bool + sample: false + ipv6_association_id: + description: The IPv6 association ID for the currently associated CIDR + returned: I(state=present) + type: str + sample: subnet-cidr-assoc-b85c74d2 + ipv6_cidr_block_association_set: + description: An array of IPv6 cidr block association set information. + returned: I(state=present) + type: complex + contains: + association_id: + description: The association ID + returned: always + type: str + ipv6_cidr_block: + description: The IPv6 CIDR block that is associated with the subnet. + returned: always + type: str + ipv6_cidr_block_state: + description: A hash/dict that contains a single item. The state of the cidr block association. + returned: always + type: dict + contains: + state: + description: The CIDR block association state. 
+ returned: always + type: str +''' + + +import time + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible.module_utils._text import to_text +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.arn import is_outpost_arn +from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter + + +def get_subnet_info(subnet): + if 'Subnets' in subnet: + return [get_subnet_info(s) for s in subnet['Subnets']] + elif 'Subnet' in subnet: + subnet = camel_dict_to_snake_dict(subnet['Subnet']) + else: + subnet = camel_dict_to_snake_dict(subnet) + + if 'tags' in subnet: + subnet['tags'] = boto3_tag_list_to_ansible_dict(subnet['tags']) + else: + subnet['tags'] = dict() + + if 'subnet_id' in subnet: + subnet['id'] = subnet['subnet_id'] + del subnet['subnet_id'] + + subnet['ipv6_cidr_block'] = '' + subnet['ipv6_association_id'] = '' + ipv6set = subnet.get('ipv6_cidr_block_association_set') + if ipv6set: + for item in ipv6set: + if item.get('ipv6_cidr_block_state', {}).get('state') in ('associated', 'associating'): + subnet['ipv6_cidr_block'] = item['ipv6_cidr_block'] + subnet['ipv6_association_id'] = item['association_id'] + + return subnet + + +def waiter_params(module, params, start_time): + remaining_wait_timeout = int(module.params['wait_timeout'] + start_time - time.time()) + params['WaiterConfig'] = {'Delay': 5, 'MaxAttempts': remaining_wait_timeout // 5} + return params + + +def handle_waiter(conn, module, waiter_name, params, start_time): + try: + get_waiter(conn, waiter_name).wait( + **waiter_params(module, params, start_time) + ) + except botocore.exceptions.WaiterError as e: + module.fail_json_aws(e, "Failed to wait for updates to complete") + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, "An exception happened while trying to wait for updates") + + +def create_subnet(conn, module, vpc_id, cidr, ipv6_cidr=None, outpost_arn=None, az=None, start_time=None): + wait = module.params['wait'] + + params = dict(VpcId=vpc_id, + CidrBlock=cidr) + + if ipv6_cidr: + params['Ipv6CidrBlock'] = ipv6_cidr + + if az: + params['AvailabilityZone'] = az + + if outpost_arn: + if is_outpost_arn(outpost_arn): + params['OutpostArn'] = outpost_arn + else: + module.fail_json('OutpostArn does not match the pattern specified in API specifications.') + + try: + subnet = get_subnet_info(conn.create_subnet(aws_retry=True, **params)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't create subnet") + + # Sometimes AWS takes its time to create a subnet and so using + # new subnets's id to do things like create tags results in + # exception. 
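+    # Editor's note: waiter_params() converts the remaining wait_timeout budget
+    # into a WaiterConfig, e.g. 300 seconds left yields
+    # {'Delay': 5, 'MaxAttempts': 60}, so successive waits in one run share a
+    # single overall timeout.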
+ if wait and subnet.get('state') != 'available': + handle_waiter(conn, module, 'subnet_exists', {'SubnetIds': [subnet['id']]}, start_time) + handle_waiter(conn, module, 'subnet_available', {'SubnetIds': [subnet['id']]}, start_time) + subnet['state'] = 'available' + + return subnet + + +def ensure_tags(conn, module, subnet, tags, purge_tags, start_time): + + changed = ensure_ec2_tags( + conn, module, subnet['id'], + resource_type='subnet', + purge_tags=purge_tags, + tags=tags, + retry_codes=['InvalidSubnetID.NotFound']) + + if module.params['wait'] and not module.check_mode: + # Wait for tags to be updated + filters = [{'Name': 'tag:{0}'.format(k), 'Values': [v]} for k, v in tags.items()] + handle_waiter(conn, module, 'subnet_exists', + {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time) + + return changed + + +def ensure_map_public(conn, module, subnet, map_public, check_mode, start_time): + if check_mode: + return + try: + conn.modify_subnet_attribute(aws_retry=True, SubnetId=subnet['id'], + MapPublicIpOnLaunch={'Value': map_public}) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't modify subnet attribute") + + +def ensure_assign_ipv6_on_create(conn, module, subnet, assign_instances_ipv6, check_mode, start_time): + if check_mode: + return + try: + conn.modify_subnet_attribute(aws_retry=True, SubnetId=subnet['id'], + AssignIpv6AddressOnCreation={'Value': assign_instances_ipv6}) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't modify subnet attribute") + + +def disassociate_ipv6_cidr(conn, module, subnet, start_time): + if subnet.get('assign_ipv6_address_on_creation'): + ensure_assign_ipv6_on_create(conn, module, subnet, False, False, start_time) + + try: + conn.disassociate_subnet_cidr_block(aws_retry=True, AssociationId=subnet['ipv6_association_id']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't disassociate ipv6 cidr block id {0} from subnet {1}" + .format(subnet['ipv6_association_id'], subnet['id'])) + + # Wait for cidr block to be disassociated + if module.params['wait']: + filters = ansible_dict_to_boto3_filter_list( + {'ipv6-cidr-block-association.state': ['disassociated'], + 'vpc-id': subnet['vpc_id']} + ) + handle_waiter(conn, module, 'subnet_exists', + {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time) + + +def ensure_ipv6_cidr_block(conn, module, subnet, ipv6_cidr, check_mode, start_time): + wait = module.params['wait'] + changed = False + + if subnet['ipv6_association_id'] and not ipv6_cidr: + if not check_mode: + disassociate_ipv6_cidr(conn, module, subnet, start_time) + changed = True + + if ipv6_cidr: + filters = ansible_dict_to_boto3_filter_list({'ipv6-cidr-block-association.ipv6-cidr-block': ipv6_cidr, + 'vpc-id': subnet['vpc_id']}) + + try: + _subnets = conn.describe_subnets(aws_retry=True, Filters=filters) + check_subnets = get_subnet_info(_subnets) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't get subnet info") + + if check_subnets and check_subnets[0]['ipv6_cidr_block']: + module.fail_json(msg="The IPv6 CIDR '{0}' conflicts with another subnet".format(ipv6_cidr)) + + if subnet['ipv6_association_id']: + if not check_mode: + disassociate_ipv6_cidr(conn, module, subnet, start_time) + changed = True + + try: + if not check_mode: + associate_resp = 
conn.associate_subnet_cidr_block(aws_retry=True, SubnetId=subnet['id'], + Ipv6CidrBlock=ipv6_cidr) + changed = True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't associate ipv6 cidr {0} to {1}".format(ipv6_cidr, subnet['id'])) + else: + if not check_mode and wait: + filters = ansible_dict_to_boto3_filter_list( + {'ipv6-cidr-block-association.state': ['associated'], + 'vpc-id': subnet['vpc_id']} + ) + handle_waiter(conn, module, 'subnet_exists', + {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time) + + if associate_resp.get('Ipv6CidrBlockAssociation', {}).get('AssociationId'): + subnet['ipv6_association_id'] = associate_resp['Ipv6CidrBlockAssociation']['AssociationId'] + subnet['ipv6_cidr_block'] = associate_resp['Ipv6CidrBlockAssociation']['Ipv6CidrBlock'] + if subnet['ipv6_cidr_block_association_set']: + subnet['ipv6_cidr_block_association_set'][0] = camel_dict_to_snake_dict(associate_resp['Ipv6CidrBlockAssociation']) + else: + subnet['ipv6_cidr_block_association_set'].append(camel_dict_to_snake_dict(associate_resp['Ipv6CidrBlockAssociation'])) + + return changed + + +def get_matching_subnet(conn, module, vpc_id, cidr): + filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'cidr-block': cidr}) + try: + _subnets = conn.describe_subnets(aws_retry=True, Filters=filters) + subnets = get_subnet_info(_subnets) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't get matching subnet") + + if subnets: + return subnets[0] + + return None + + +def ensure_subnet_present(conn, module): + subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr']) + changed = False + + # Initialize start so max time does not exceed the specified wait_timeout for multiple operations + start_time = time.time() + + if subnet is None: + if not module.check_mode: + subnet = create_subnet(conn, module, module.params['vpc_id'], module.params['cidr'], + ipv6_cidr=module.params['ipv6_cidr'], outpost_arn=module.params['outpost_arn'], + az=module.params['az'], start_time=start_time) + changed = True + # Subnet will be None when check_mode is true + if subnet is None: + return { + 'changed': changed, + 'subnet': {} + } + if module.params['wait']: + handle_waiter(conn, module, 'subnet_exists', {'SubnetIds': [subnet['id']]}, start_time) + + if module.params['ipv6_cidr'] != subnet.get('ipv6_cidr_block'): + if ensure_ipv6_cidr_block(conn, module, subnet, module.params['ipv6_cidr'], module.check_mode, start_time): + changed = True + + if module.params['map_public'] != subnet['map_public_ip_on_launch']: + ensure_map_public(conn, module, subnet, module.params['map_public'], module.check_mode, start_time) + changed = True + + if module.params['assign_instances_ipv6'] != subnet.get('assign_ipv6_address_on_creation'): + ensure_assign_ipv6_on_create(conn, module, subnet, module.params['assign_instances_ipv6'], module.check_mode, start_time) + changed = True + + if module.params['tags'] != subnet['tags']: + stringified_tags_dict = dict((to_text(k), to_text(v)) for k, v in module.params['tags'].items()) + if ensure_tags(conn, module, subnet, stringified_tags_dict, module.params['purge_tags'], start_time): + changed = True + + subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr']) + if not module.check_mode and module.params['wait']: + # GET calls are not monotonic for map_public_ip_on_launch and 
assign_ipv6_address_on_creation + # so we only wait for those if necessary just before returning the subnet + subnet = ensure_final_subnet(conn, module, subnet, start_time) + + return { + 'changed': changed, + 'subnet': subnet + } + + +def ensure_final_subnet(conn, module, subnet, start_time): + for _rewait in range(0, 30): + map_public_correct = False + assign_ipv6_correct = False + + if module.params['map_public'] == subnet['map_public_ip_on_launch']: + map_public_correct = True + else: + if module.params['map_public']: + handle_waiter(conn, module, 'subnet_has_map_public', {'SubnetIds': [subnet['id']]}, start_time) + else: + handle_waiter(conn, module, 'subnet_no_map_public', {'SubnetIds': [subnet['id']]}, start_time) + + if module.params['assign_instances_ipv6'] == subnet.get('assign_ipv6_address_on_creation'): + assign_ipv6_correct = True + else: + if module.params['assign_instances_ipv6']: + handle_waiter(conn, module, 'subnet_has_assign_ipv6', {'SubnetIds': [subnet['id']]}, start_time) + else: + handle_waiter(conn, module, 'subnet_no_assign_ipv6', {'SubnetIds': [subnet['id']]}, start_time) + + if map_public_correct and assign_ipv6_correct: + break + + time.sleep(5) + subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr']) + + return subnet + + +def ensure_subnet_absent(conn, module): + subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr']) + if subnet is None: + return {'changed': False} + + try: + if not module.check_mode: + conn.delete_subnet(aws_retry=True, SubnetId=subnet['id']) + if module.params['wait']: + handle_waiter(conn, module, 'subnet_deleted', {'SubnetIds': [subnet['id']]}, time.time()) + return {'changed': True} + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't delete subnet") + + +def main(): + argument_spec = dict( + az=dict(default=None, required=False), + cidr=dict(required=True), + ipv6_cidr=dict(default='', required=False), + outpost_arn=dict(default='', type='str', required=False), + state=dict(default='present', choices=['present', 'absent']), + tags=dict(default={}, required=False, type='dict', aliases=['resource_tags']), + vpc_id=dict(required=True), + map_public=dict(default=False, required=False, type='bool'), + assign_instances_ipv6=dict(default=False, required=False, type='bool'), + wait=dict(type='bool', default=True), + wait_timeout=dict(type='int', default=300, required=False), + purge_tags=dict(default=True, type='bool') + ) + + required_if = [('assign_instances_ipv6', True, ['ipv6_cidr'])] + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if) + + if module.params.get('outpost_arn') and not module.params.get('az'): + module.fail_json(msg="To specify OutpostArn, you must specify the Availability Zone of the Outpost subnet.") + + if module.params.get('assign_instances_ipv6') and not module.params.get('ipv6_cidr'): + module.fail_json(msg="assign_instances_ipv6 is True but ipv6_cidr is None or an empty string") + + retry_decorator = AWSRetry.jittered_backoff(retries=10) + connection = module.client('ec2', retry_decorator=retry_decorator) + + state = module.params.get('state') + + try: + if state == 'present': + result = ensure_subnet_present(connection, module) + elif state == 'absent': + result = ensure_subnet_absent(connection, module) + except botocore.exceptions.ClientError as e: + module.fail_json_aws(e) + + module.exit_json(**result) + + +if __name__ 
== '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet_info.py new file mode 100644 index 000000000..bbf1b976a --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet_info.py @@ -0,0 +1,225 @@ +#!/usr/bin/python +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ec2_vpc_subnet_info +version_added: 1.0.0 +short_description: Gather information about ec2 VPC subnets in AWS +description: + - Gather information about ec2 VPC subnets in AWS +author: "Rob White (@wimnat)" +options: + subnet_ids: + description: + - A list of subnet IDs to gather information for. + aliases: ['subnet_id'] + type: list + elements: str + default: [] + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. + See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSubnets.html) for possible filters. + type: dict + default: {} +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 +- amazon.aws.boto3 +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Gather information about all VPC subnets +- amazon.aws.ec2_vpc_subnet_info: + +# Gather information about a particular VPC subnet using ID +- amazon.aws.ec2_vpc_subnet_info: + subnet_ids: subnet-00112233 + +# Gather information about any VPC subnet with a tag key Name and value Example +- amazon.aws.ec2_vpc_subnet_info: + filters: + "tag:Name": Example + +# Gather information about any VPC subnet within VPC with ID vpc-abcdef00 +- amazon.aws.ec2_vpc_subnet_info: + filters: + vpc-id: vpc-abcdef00 + +# Gather information about a set of VPC subnets, publicA, publicB and publicC within a +# VPC with ID vpc-abcdef00 and then use the jinja map function to return the +# subnet_ids as a list. + +- amazon.aws.ec2_vpc_subnet_info: + filters: + vpc-id: vpc-abcdef00 + "tag:Name": "{{ item }}" + loop: + - publicA + - publicB + - publicC + register: subnet_info + +- set_fact: + subnet_ids: "{{ subnet_info.results | sum(attribute='subnets', start=[]) | map(attribute='subnet_id') }}" +''' + +RETURN = ''' +subnets: + description: Returns an array of complex objects as described below. + returned: success + type: complex + contains: + subnet_id: + description: The ID of the Subnet. + returned: always + type: str + id: + description: The ID of the Subnet (for backwards compatibility). + returned: always + type: str + vpc_id: + description: The ID of the VPC . + returned: always + type: str + state: + description: The state of the subnet. + returned: always + type: str + tags: + description: A dict of tags associated with the Subnet. + returned: always + type: dict + map_public_ip_on_launch: + description: True/False depending on attribute setting for public IP mapping. + returned: always + type: bool + default_for_az: + description: True if this is the default subnet for AZ. + returned: always + type: bool + cidr_block: + description: The IPv4 CIDR block assigned to the subnet. + returned: always + type: str + available_ip_address_count: + description: Count of available IPs in subnet. + returned: always + type: str + availability_zone: + description: The availability zone where the subnet exists. 
+ returned: always + type: str + assign_ipv6_address_on_creation: + description: True/False depending on attribute setting for IPv6 address assignment. + returned: always + type: bool + ipv6_cidr_block_association_set: + description: An array of IPv6 cidr block association set information. + returned: always + type: complex + contains: + association_id: + description: The association ID + returned: always + type: str + ipv6_cidr_block: + description: The IPv6 CIDR block that is associated with the subnet. + returned: always + type: str + ipv6_cidr_block_state: + description: A hash/dict that contains a single item. The state of the cidr block association. + returned: always + type: dict + contains: + state: + description: The CIDR block association state. + returned: always + type: str +''' + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict + + +@AWSRetry.exponential_backoff() +def describe_subnets_with_backoff(connection, subnet_ids, filters): + """ + Describe Subnets with AWSRetry backoff throttling support. + + connection : boto3 client connection object + subnet_ids : list of subnet ids for which to gather information + filters : additional filters to apply to request + """ + return connection.describe_subnets(SubnetIds=subnet_ids, Filters=filters) + + +def describe_subnets(connection, module): + """ + Describe Subnets. 
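+    Converts the module's filters to boto3 format, backfills the legacy 'id'
+    key on each result, and exits the module with snake_cased results.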
+ + module : AnsibleAWSModule object + connection : boto3 client connection object + """ + # collect parameters + filters = ansible_dict_to_boto3_filter_list(module.params.get('filters')) + subnet_ids = module.params.get('subnet_ids') + + if subnet_ids is None: + # Set subnet_ids to empty list if it is None + subnet_ids = [] + + # init empty list for return vars + subnet_info = list() + + # Get the basic VPC info + try: + response = describe_subnets_with_backoff(connection, subnet_ids, filters) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to describe subnets') + + for subnet in response['Subnets']: + # for backwards compatibility + subnet['id'] = subnet['SubnetId'] + subnet_info.append(camel_dict_to_snake_dict(subnet)) + # convert tag list to ansible dict + subnet_info[-1]['tags'] = boto3_tag_list_to_ansible_dict(subnet.get('Tags', [])) + + module.exit_json(subnets=subnet_info) + + +def main(): + argument_spec = dict( + subnet_ids=dict(type='list', elements='str', default=[], aliases=['subnet_id']), + filters=dict(type='dict', default={}) + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + connection = module.client('ec2') + + describe_subnets(connection, module) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/elb_application_lb.py b/ansible_collections/amazon/aws/plugins/modules/elb_application_lb.py new file mode 100644 index 000000000..6f9cd1c86 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/elb_application_lb.py @@ -0,0 +1,835 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: elb_application_lb +version_added: 5.0.0 +short_description: Manage an Application Load Balancer +description: + - Manage an AWS Application Elastic Load Balancer. See U(https://aws.amazon.com/blogs/aws/new-aws-application-load-balancer/) for details. + - This module was originally added to C(community.aws) in release 1.0.0. +author: + - "Rob White (@wimnat)" +options: + access_logs_enabled: + description: + - Whether or not to enable access logs. + - When set, I(access_logs_s3_bucket) must also be set. + type: bool + access_logs_s3_bucket: + description: + - The name of the S3 bucket for the access logs. + - The bucket must exist in the same + region as the load balancer and have a bucket policy that grants Elastic Load Balancing permission to write to the bucket. + - Required if access logs in Amazon S3 are enabled. + - When set, I(access_logs_enabled) must also be set. + type: str + access_logs_s3_prefix: + description: + - The prefix for the log location in the S3 bucket. + - If you don't specify a prefix, the access logs are stored in the root of the bucket. 
+ - Cannot begin or end with a slash. + type: str + deletion_protection: + description: + - Indicates whether deletion protection for the ALB is enabled. + - Defaults to C(False). + type: bool + http2: + description: + - Indicates whether to enable HTTP2 routing. + - Defaults to C(True). + type: bool + http_desync_mitigation_mode: + description: + - Determines how the load balancer handles requests that might pose a security risk to an application. + - Defaults to C('defensive'). + type: str + choices: ['monitor', 'defensive', 'strictest'] + version_added: 3.2.0 + version_added_collection: community.aws + http_drop_invalid_header_fields: + description: + - Indicates whether HTTP headers with invalid header fields are removed by the load balancer C(True) or routed to targets C(False). + - Defaults to C(False). + type: bool + version_added: 3.2.0 + version_added_collection: community.aws + http_x_amzn_tls_version_and_cipher_suite: + description: + - Indicates whether the two headers (C(x-amzn-tls-version) and C(x-amzn-tls-cipher-suite)), which contain information + about the negotiated TLS version and cipher suite, are added to the client request before sending it to the target. + - Defaults to C(False). + type: bool + version_added: 3.2.0 + version_added_collection: community.aws + http_xff_client_port: + description: + - Indicates whether the X-Forwarded-For header should preserve the source port that the client used to connect to the load balancer. + - Defaults to C(False). + type: bool + version_added: 3.2.0 + version_added_collection: community.aws + idle_timeout: + description: + - The number of seconds to wait before an idle connection is closed. + type: int + listeners: + description: + - A list of dicts containing listeners to attach to the ALB. See examples for detail of the dict required. Note that listener keys + are CamelCased. + type: list + elements: dict + suboptions: + Port: + description: The port on which the load balancer is listening. + required: true + type: int + Protocol: + description: The protocol for connections from clients to the load balancer. + required: true + type: str + Certificates: + description: The SSL server certificate. + type: list + elements: dict + suboptions: + CertificateArn: + description: The Amazon Resource Name (ARN) of the certificate. + type: str + SslPolicy: + description: The security policy that defines which ciphers and protocols are supported. + type: str + DefaultActions: + description: The default actions for the listener. + required: true + type: list + elements: dict + suboptions: + Type: + description: The type of action. + type: str + TargetGroupArn: + description: + - The Amazon Resource Name (ARN) of the target group. + - Mutually exclusive with I(TargetGroupName). + type: str + TargetGroupName: + description: + - The name of the target group. + - Mutually exclusive with I(TargetGroupArn). + type: str + Rules: + type: list + elements: dict + description: + - A list of ALB Listener Rules. + - 'For the complete documentation of possible Conditions and Actions please see the boto3 documentation:' + - 'https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.create_rule' + - > + Keep in mind that AWS uses default values for parameters that are not requested. For example for I(Scope) + and I(SessionTimeout) when the action type is C(authenticate-oidc). + suboptions: + Conditions: + type: list + description: Conditions which must be met for the actions to be applied. + elements: dict + Priority: + type: int + description: The rule priority.
+ Actions: + type: list + description: Actions to apply if all of the rule's conditions are met. + elements: dict + name: + description: + - The name of the load balancer. This name must be unique within your AWS account, can have a maximum of 32 characters, must contain only alphanumeric + characters or hyphens, and must not begin or end with a hyphen. + required: true + type: str + purge_listeners: + description: + - If C(true), existing listeners will be purged from the ALB to match exactly what is defined by the I(listeners) parameter. + - If the I(listeners) parameter is not set then listeners will not be modified. + default: true + type: bool + subnets: + description: + - A list of the IDs of the subnets to attach to the load balancer. You can specify only one subnet per Availability Zone. You must specify subnets from + at least two Availability Zones. + - Required if I(state=present). + type: list + elements: str + security_groups: + description: + - A list of the names or IDs of the security groups to assign to the load balancer. + - Required if I(state=present). + - If C([]), the VPC's default security group will be used. + type: list + elements: str + scheme: + description: + - Internet-facing or internal load balancer. An ALB scheme cannot be modified after creation. + default: internet-facing + choices: [ 'internet-facing', 'internal' ] + type: str + state: + description: + - Create or destroy the load balancer. + default: present + choices: [ 'present', 'absent' ] + type: str + wait: + description: + - Wait for the load balancer to have a state of 'active' before completing. A status check is + performed every 15 seconds until a successful state is reached. An error is returned after 40 failed checks. + default: false + type: bool + wait_timeout: + description: + - The time in seconds to use in conjunction with I(wait). + type: int + purge_rules: + description: + - When set to C(no), keep the existing load balancer rules in place. Will modify and add, but will not delete. + default: true + type: bool + ip_address_type: + description: + - Sets the type of IP addresses used by the subnets of the specified Application Load Balancer. + choices: [ 'ipv4', 'dualstack' ] + type: str + waf_fail_open: + description: + - Indicates whether to allow an AWS WAF-enabled load balancer to route requests to targets if it is unable to forward the request to AWS WAF. + - Defaults to C(False). + type: bool + version_added: 3.2.0 + version_added_collection: community.aws +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags + - amazon.aws.boto3 + +notes: + - Listeners are matched based on port. If a listener's port is changed then a new listener will be created. + - Listener rules are matched based on priority. If a rule's priority is changed then a new rule will be created. +''' + +EXAMPLES = r''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Create an ALB and attach a listener +- amazon.aws.elb_application_lb: + name: myalb + security_groups: + - sg-12345678 + - my-sec-group + subnets: + - subnet-012345678 + - subnet-abcdef000 + listeners: + - Protocol: HTTP # Required. The protocol for connections from clients to the load balancer (HTTP or HTTPS) (case-sensitive). + Port: 80 # Required. The port on which the load balancer is listening. + # The security policy that defines which ciphers and protocols are supported. The default is the current predefined security policy.
+ SslPolicy: ELBSecurityPolicy-2015-05 + Certificates: # The ARN of the certificate (only one certificate ARN should be provided) + - CertificateArn: arn:aws:iam::123456789012:server-certificate/test.domain.com + DefaultActions: + - Type: forward # Required. + TargetGroupName: # Required. The name of the target group + state: present + +# Create an ALB and attach a listener with logging enabled +- amazon.aws.elb_application_lb: + access_logs_enabled: true + access_logs_s3_bucket: mybucket + access_logs_s3_prefix: "logs" + name: myalb + security_groups: + - sg-12345678 + - my-sec-group + subnets: + - subnet-012345678 + - subnet-abcdef000 + listeners: + - Protocol: HTTP # Required. The protocol for connections from clients to the load balancer (HTTP or HTTPS) (case-sensitive). + Port: 80 # Required. The port on which the load balancer is listening. + # The security policy that defines which ciphers and protocols are supported. The default is the current predefined security policy. + SslPolicy: ELBSecurityPolicy-2015-05 + Certificates: # The ARN of the certificate (only one certificate ARN should be provided) + - CertificateArn: arn:aws:iam::123456789012:server-certificate/test.domain.com + DefaultActions: + - Type: forward # Required. + TargetGroupName: # Required. The name of the target group + state: present + +# Create an ALB with listeners and rules +- amazon.aws.elb_application_lb: + name: test-alb + subnets: + - subnet-12345678 + - subnet-87654321 + security_groups: + - sg-12345678 + scheme: internal + listeners: + - Protocol: HTTPS + Port: 443 + DefaultActions: + - Type: forward + TargetGroupName: test-target-group + Certificates: + - CertificateArn: arn:aws:iam::123456789012:server-certificate/test.domain.com + SslPolicy: ELBSecurityPolicy-2015-05 + Rules: + - Conditions: + - Field: path-pattern + Values: + - '/test' + Priority: '1' + Actions: + - TargetGroupName: test-target-group + Type: forward + - Conditions: + - Field: path-pattern + Values: + - "/redirect-path/*" + Priority: '2' + Actions: + - Type: redirect + RedirectConfig: + Host: "#{host}" + Path: "/example/redir" # or /#{path} + Port: "#{port}" + Protocol: "#{protocol}" + Query: "#{query}" + StatusCode: "HTTP_302" # or HTTP_301 + - Conditions: + - Field: path-pattern + Values: + - "/fixed-response-path/" + Priority: '3' + Actions: + - Type: fixed-response + FixedResponseConfig: + ContentType: "text/plain" + MessageBody: "This is the page you're looking for" + StatusCode: "200" + - Conditions: + - Field: host-header + Values: + - "hostname.domain.com" + - "alternate.domain.com" + Priority: '4' + Actions: + - TargetGroupName: test-target-group + Type: forward + state: present + +# Remove an ALB +- amazon.aws.elb_application_lb: + name: myalb + state: absent + +''' + +RETURN = r''' +access_logs_s3_bucket: + description: The name of the S3 bucket for the access logs. + returned: when state is present + type: str + sample: "mys3bucket" +access_logs_s3_enabled: + description: Indicates whether access logs stored in Amazon S3 are enabled. + returned: when state is present + type: bool + sample: true +access_logs_s3_prefix: + description: The prefix for the location in the S3 bucket. + returned: when state is present + type: str + sample: "my/logs" +availability_zones: + description: The Availability Zones for the load balancer.
+ returned: when state is present + type: list + sample: [{ "load_balancer_addresses": [], "subnet_id": "subnet-aabbccddff", "zone_name": "ap-southeast-2a" }] +canonical_hosted_zone_id: + description: The ID of the Amazon Route 53 hosted zone associated with the load balancer. + returned: when state is present + type: str + sample: "ABCDEF12345678" +changed: + description: Whether an ALB was created/updated/deleted. + returned: always + type: bool + sample: true +created_time: + description: The date and time the load balancer was created. + returned: when state is present + type: str + sample: "2015-02-12T02:14:02+00:00" +deletion_protection_enabled: + description: Indicates whether deletion protection is enabled. + returned: when state is present + type: bool + sample: true +dns_name: + description: The public DNS name of the load balancer. + returned: when state is present + type: str + sample: "internal-my-elb-123456789.ap-southeast-2.elb.amazonaws.com" +idle_timeout_timeout_seconds: + description: The idle timeout value, in seconds. + returned: when state is present + type: int + sample: 60 +ip_address_type: + description: The type of IP addresses used by the subnets for the load balancer. + returned: when state is present + type: str + sample: "ipv4" +listeners: + description: Information about the listeners. + returned: when state is present + type: complex + contains: + listener_arn: + description: The Amazon Resource Name (ARN) of the listener. + returned: when state is present + type: str + sample: "" + load_balancer_arn: + description: The Amazon Resource Name (ARN) of the load balancer. + returned: when state is present + type: str + sample: "" + port: + description: The port on which the load balancer is listening. + returned: when state is present + type: int + sample: 80 + protocol: + description: The protocol for connections from clients to the load balancer. + returned: when state is present + type: str + sample: "HTTPS" + certificates: + description: The SSL server certificate. + returned: when state is present + type: complex + contains: + certificate_arn: + description: The Amazon Resource Name (ARN) of the certificate. + returned: when state is present + type: str + sample: "" + ssl_policy: + description: The security policy that defines which ciphers and protocols are supported. + returned: when state is present + type: str + sample: "" + default_actions: + description: The default actions for the listener. + returned: when state is present + type: complex + contains: + type: + description: The type of action. + returned: when state is present + type: str + sample: "" + target_group_arn: + description: The Amazon Resource Name (ARN) of the target group. + returned: when state is present + type: str + sample: "" +load_balancer_arn: + description: The Amazon Resource Name (ARN) of the load balancer. + returned: when state is present + type: str + sample: "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:loadbalancer/app/my-alb/001122334455" +load_balancer_name: + description: The name of the load balancer. + returned: when state is present + type: str + sample: "my-alb" +routing_http2_enabled: + description: Indicates whether HTTP/2 is enabled. + returned: when state is present + type: bool + sample: true +routing_http_desync_mitigation_mode: + description: Determines how the load balancer handles requests that might pose a security risk to an application.
+ returned: when state is present + type: str + sample: "defensive" +routing_http_drop_invalid_header_fields_enabled: + description: Indicates whether HTTP headers with invalid header fields are removed by the load balancer (true) or routed to targets (false). + returned: when state is present + type: bool + sample: false +routing_http_x_amzn_tls_version_and_cipher_suite_enabled: + description: Indicates whether the two headers are added to the client request before sending it to the target. + returned: when state is present + type: bool + sample: false +routing_http_xff_client_port_enabled: + description: Indicates whether the X-Forwarded-For header should preserve the source port that the client used to connect to the load balancer. + returned: when state is present + type: bool + sample: false +scheme: + description: Internet-facing or internal load balancer. + returned: when state is present + type: str + sample: "internal" +security_groups: + description: The IDs of the security groups for the load balancer. + returned: when state is present + type: list + sample: ['sg-0011223344'] +state: + description: The state of the load balancer. + returned: when state is present + type: dict + sample: {'code': 'active'} +tags: + description: The tags attached to the load balancer. + returned: when state is present + type: dict + sample: { + 'Tag': 'Example' + } +type: + description: The type of load balancer. + returned: when state is present + type: str + sample: "application" +vpc_id: + description: The ID of the VPC for the load balancer. + returned: when state is present + type: str + sample: "vpc-0011223344" +waf_fail_open_enabled: + description: Indicates whether to allow an AWS WAF-enabled load balancer to route requests to targets if it is unable to forward the request to AWS WAF. + returned: when state is present + type: bool + sample: false +''' +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import ( + ApplicationLoadBalancer, + ELBListener, + ELBListenerRule, + ELBListenerRules, + ELBListeners, +) +from ansible_collections.amazon.aws.plugins.module_utils.elb_utils import get_elb_listener_rules + + +@AWSRetry.jittered_backoff() +def describe_sgs_with_backoff(connection, **params): + paginator = connection.get_paginator('describe_security_groups') + return paginator.paginate(**params).build_full_result()['SecurityGroups'] + + +def find_default_sg(connection, module, vpc_id): + """ + Finds the default security group for the given VPC ID.
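+    Returns the GroupId of the group named 'default'; fails the module if the
+    lookup errors, matches no group, or matches more than one group.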
+ """ + filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'group-name': 'default'}) + try: + sg = describe_sgs_with_backoff(connection, Filters=filters) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='No default security group found for VPC {0}'.format(vpc_id)) + if len(sg) == 1: + return sg[0]['GroupId'] + elif len(sg) == 0: + module.fail_json(msg='No default security group found for VPC {0}'.format(vpc_id)) + else: + module.fail_json(msg='Multiple security groups named "default" found for VPC {0}'.format(vpc_id)) + + +def create_or_update_alb(alb_obj): + """Create ALB or modify main attributes. json_exit here""" + if alb_obj.elb: + # ALB exists so check subnets, security groups and tags match what has been passed + # Subnets + if not alb_obj.compare_subnets(): + if alb_obj.module.check_mode: + alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') + alb_obj.modify_subnets() + + # Security Groups + if not alb_obj.compare_security_groups(): + if alb_obj.module.check_mode: + alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') + alb_obj.modify_security_groups() + + # ALB attributes + if not alb_obj.compare_elb_attributes(): + if alb_obj.module.check_mode: + alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') + alb_obj.update_elb_attributes() + alb_obj.modify_elb_attributes() + + # Tags - only need to play with tags if tags parameter has been set to something + if alb_obj.tags is not None: + + tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(alb_obj.elb['tags']), + boto3_tag_list_to_ansible_dict(alb_obj.tags), alb_obj.purge_tags) + + # Exit on check_mode + if alb_obj.module.check_mode and (tags_need_modify or tags_to_delete): + alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') + + # Delete necessary tags + if tags_to_delete: + alb_obj.delete_tags(tags_to_delete) + + # Add/update tags + if tags_need_modify: + alb_obj.modify_tags() + + else: + # Create load balancer + if alb_obj.module.check_mode: + alb_obj.module.exit_json(changed=True, msg='Would have created ALB if not in check mode.') + alb_obj.create_elb() + + # Add ALB attributes + alb_obj.update_elb_attributes() + alb_obj.modify_elb_attributes() + + # Listeners + listeners_obj = ELBListeners(alb_obj.connection, alb_obj.module, alb_obj.elb['LoadBalancerArn']) + listeners_to_add, listeners_to_modify, listeners_to_delete = listeners_obj.compare_listeners() + + # Exit on check_mode + if alb_obj.module.check_mode and (listeners_to_add or listeners_to_modify or listeners_to_delete): + alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') + + # Delete listeners + for listener_to_delete in listeners_to_delete: + listener_obj = ELBListener(alb_obj.connection, alb_obj.module, listener_to_delete, alb_obj.elb['LoadBalancerArn']) + listener_obj.delete() + listeners_obj.changed = True + + # Add listeners + for listener_to_add in listeners_to_add: + listener_obj = ELBListener(alb_obj.connection, alb_obj.module, listener_to_add, alb_obj.elb['LoadBalancerArn']) + listener_obj.add() + listeners_obj.changed = True + + # Modify listeners + for listener_to_modify in listeners_to_modify: + listener_obj = ELBListener(alb_obj.connection, alb_obj.module, listener_to_modify, alb_obj.elb['LoadBalancerArn']) + listener_obj.modify() + listeners_obj.changed = True + 
+ # If listeners changed, mark ALB as changed + if listeners_obj.changed: + alb_obj.changed = True + + # Rules of each listener + for listener in listeners_obj.listeners: + if 'Rules' in listener: + rules_obj = ELBListenerRules(alb_obj.connection, alb_obj.module, alb_obj.elb['LoadBalancerArn'], listener['Rules'], listener['Port']) + rules_to_add, rules_to_modify, rules_to_delete = rules_obj.compare_rules() + + # Exit on check_mode + if alb_obj.module.check_mode and (rules_to_add or rules_to_modify or rules_to_delete): + alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') + + # Delete rules + if alb_obj.module.params['purge_rules']: + for rule in rules_to_delete: + rule_obj = ELBListenerRule(alb_obj.connection, alb_obj.module, {'RuleArn': rule}, rules_obj.listener_arn) + rule_obj.delete() + alb_obj.changed = True + + # Add rules + for rule in rules_to_add: + rule_obj = ELBListenerRule(alb_obj.connection, alb_obj.module, rule, rules_obj.listener_arn) + rule_obj.create() + alb_obj.changed = True + + # Modify rules + for rule in rules_to_modify: + rule_obj = ELBListenerRule(alb_obj.connection, alb_obj.module, rule, rules_obj.listener_arn) + rule_obj.modify() + alb_obj.changed = True + + # Update ALB ip address type only if option has been provided + if alb_obj.module.params.get('ip_address_type') and alb_obj.elb_ip_addr_type != alb_obj.module.params.get('ip_address_type'): + # Exit on check_mode + if alb_obj.module.check_mode: + alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') + + alb_obj.modify_ip_address_type(alb_obj.module.params.get('ip_address_type')) + + # Exit on check_mode - no changes + if alb_obj.module.check_mode: + alb_obj.module.exit_json(changed=False, msg='IN CHECK MODE - no changes to make to ALB specified.') + + # Get the ALB again + alb_obj.update() + + # Get the ALB listeners again + listeners_obj.update() + + # Update the ALB attributes + alb_obj.update_elb_attributes() + + # Convert to snake_case and merge in everything we want to return to the user + snaked_alb = camel_dict_to_snake_dict(alb_obj.elb) + snaked_alb.update(camel_dict_to_snake_dict(alb_obj.elb_attributes)) + snaked_alb['listeners'] = [] + for listener in listeners_obj.current_listeners: + # For each listener, get listener rules + listener['rules'] = get_elb_listener_rules(alb_obj.connection, alb_obj.module, listener['ListenerArn']) + snaked_alb['listeners'].append(camel_dict_to_snake_dict(listener)) + + # Change tags to ansible friendly dict + snaked_alb['tags'] = boto3_tag_list_to_ansible_dict(snaked_alb['tags']) + + # ip address type + snaked_alb['ip_address_type'] = alb_obj.get_elb_ip_address_type() + + alb_obj.module.exit_json(changed=alb_obj.changed, **snaked_alb) + + +def delete_alb(alb_obj): + + if alb_obj.elb: + + # Exit on check_mode + if alb_obj.module.check_mode: + alb_obj.module.exit_json(changed=True, msg='Would have deleted ALB if not in check mode.') + + listeners_obj = ELBListeners(alb_obj.connection, alb_obj.module, alb_obj.elb['LoadBalancerArn']) + for listener_to_delete in [i['ListenerArn'] for i in listeners_obj.current_listeners]: + listener_obj = ELBListener(alb_obj.connection, alb_obj.module, listener_to_delete, alb_obj.elb['LoadBalancerArn']) + listener_obj.delete() + + alb_obj.delete() + + else: + + # Exit on check_mode - no changes + if alb_obj.module.check_mode: + alb_obj.module.exit_json(changed=False, msg='IN CHECK MODE - ALB already absent.') + + alb_obj.module.exit_json(changed=alb_obj.changed) + + 
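+# Both paths above follow the same reconciliation shape: compare the desired
+# state with what exists, short-circuit with changed=True in check mode, and
+# only then mutate. A minimal illustrative sketch of that shape follows;
+# _reconcile_sketch and its callables are hypothetical helpers for exposition
+# only, not part of this module's API.
+def _reconcile_sketch(module, in_desired_state, apply_change):
+    """Apply one idempotent change while honouring check mode (sketch only)."""
+    # Already converged: nothing to do.
+    if in_desired_state():
+        return False
+    # Check mode: report the pending change without touching AWS.
+    if module.check_mode:
+        module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.')
+    # Otherwise perform the mutation for real.
+    apply_change()
+    return True
+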
+def main(): + + argument_spec = dict( + access_logs_enabled=dict(type='bool'), + access_logs_s3_bucket=dict(type='str'), + access_logs_s3_prefix=dict(type='str'), + deletion_protection=dict(type='bool'), + http2=dict(type='bool'), + http_desync_mitigation_mode=dict(type='str', choices=['monitor', 'defensive', 'strictest']), + http_drop_invalid_header_fields=dict(type='bool'), + http_x_amzn_tls_version_and_cipher_suite=dict(type='bool'), + http_xff_client_port=dict(type='bool'), + idle_timeout=dict(type='int'), + listeners=dict(type='list', + elements='dict', + options=dict( + Protocol=dict(type='str', required=True), + Port=dict(type='int', required=True), + SslPolicy=dict(type='str'), + Certificates=dict(type='list', elements='dict'), + DefaultActions=dict(type='list', required=True, elements='dict'), + Rules=dict(type='list', elements='dict') + ) + ), + name=dict(required=True, type='str'), + purge_listeners=dict(default=True, type='bool'), + purge_tags=dict(default=True, type='bool'), + subnets=dict(type='list', elements='str'), + security_groups=dict(type='list', elements='str'), + scheme=dict(default='internet-facing', choices=['internet-facing', 'internal']), + state=dict(choices=['present', 'absent'], default='present'), + tags=dict(type='dict', aliases=['resource_tags']), + waf_fail_open=dict(type='bool'), + wait_timeout=dict(type='int'), + wait=dict(default=False, type='bool'), + purge_rules=dict(default=True, type='bool'), + ip_address_type=dict(type='str', choices=['ipv4', 'dualstack']) + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, + required_if=[ + ('state', 'present', ['subnets', 'security_groups']) + ], + required_together=[ + ['access_logs_enabled', 'access_logs_s3_bucket'] + ], + supports_check_mode=True, + ) + + # Quick check of listeners parameters + listeners = module.params.get("listeners") + if listeners is not None: + for listener in listeners: + if listener.get('Protocol') == 'HTTPS': + if listener.get('SslPolicy') is None: + module.fail_json(msg="'SslPolicy' is a required listener dict key when Protocol = HTTPS") + + if listener.get('Certificates') is None: + module.fail_json(msg="'Certificates' is a required listener dict key when Protocol = HTTPS") + + connection = module.client('elbv2') + connection_ec2 = module.client('ec2') + + state = module.params.get("state") + + alb = ApplicationLoadBalancer(connection, connection_ec2, module) + + # Update security group if default is specified + if alb.elb and module.params.get('security_groups') == []: + module.params['security_groups'] = [find_default_sg(connection_ec2, module, alb.elb['VpcId'])] + alb = ApplicationLoadBalancer(connection, connection_ec2, module) + + if state == 'present': + create_or_update_alb(alb) + elif state == 'absent': + delete_alb(alb) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/elb_application_lb_info.py b/ansible_collections/amazon/aws/plugins/modules/elb_application_lb_info.py new file mode 100644 index 000000000..42ad25a85 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/elb_application_lb_info.py @@ -0,0 +1,343 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: elb_application_lb_info +version_added: 5.0.0 +short_description: Gather
information about Application Load Balancers in AWS +description: + - Gather information about Application Load Balancers in AWS + - This module was originally added to C(community.aws) in release 1.0.0. +author: + - Rob White (@wimnat) +options: + load_balancer_arns: + description: + - The Amazon Resource Names (ARN) of the load balancers. You can specify up to 20 load balancers in a single call. + required: false + type: list + elements: str + names: + description: + - The names of the load balancers. + required: false + type: list + elements: str + +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.boto3 + +''' + +EXAMPLES = r''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: Gather information about all ALBs + amazon.aws.elb_application_lb_info: + +- name: Gather information about a particular ALB given its ARN + amazon.aws.elb_application_lb_info: + load_balancer_arns: + - "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:loadbalancer/app/my-alb/aabbccddeeff" + +- name: Gather information about ALBs named 'alb1' and 'alb2' + amazon.aws.elb_application_lb_info: + names: + - alb1 + - alb2 + +- name: Gather information about specific ALB + amazon.aws.elb_application_lb_info: + names: "alb-name" + region: "aws-region" + register: alb_info +- ansible.builtin.debug: + var: alb_info +''' + +RETURN = r''' +load_balancers: + description: a list of load balancers + returned: always + type: complex + contains: + access_logs_s3_bucket: + description: The name of the S3 bucket for the access logs. + type: str + sample: "mys3bucket" + access_logs_s3_enabled: + description: Indicates whether access logs stored in Amazon S3 are enabled. + type: bool + sample: true + access_logs_s3_prefix: + description: The prefix for the location in the S3 bucket. + type: str + sample: "my/logs" + availability_zones: + description: The Availability Zones for the load balancer. + type: list + sample: [{ "load_balancer_addresses": [], "subnet_id": "subnet-aabbccddff", "zone_name": "ap-southeast-2a" }] + canonical_hosted_zone_id: + description: The ID of the Amazon Route 53 hosted zone associated with the load balancer. + type: str + sample: "ABCDEF12345678" + created_time: + description: The date and time the load balancer was created. + type: str + sample: "2015-02-12T02:14:02+00:00" + deletion_protection_enabled: + description: Indicates whether deletion protection is enabled. + type: bool + sample: true + dns_name: + description: The public DNS name of the load balancer. + type: str + sample: "internal-my-alb-123456789.ap-southeast-2.elb.amazonaws.com" + idle_timeout_timeout_seconds: + description: The idle timeout value, in seconds. + type: int + sample: 60 + ip_address_type: + description: The type of IP addresses used by the subnets for the load balancer. + type: str + sample: "ipv4" + listeners: + description: Information about the listeners. + type: complex + contains: + listener_arn: + description: The Amazon Resource Name (ARN) of the listener. + type: str + sample: "" + load_balancer_arn: + description: The Amazon Resource Name (ARN) of the load balancer. + type: str + sample: "" + port: + description: The port on which the load balancer is listening. + type: int + sample: 80 + protocol: + description: The protocol for connections from clients to the load balancer. + type: str + sample: "HTTPS" + certificates: + description: The SSL server certificate. 
+ type: complex + contains: + certificate_arn: + description: The Amazon Resource Name (ARN) of the certificate. + type: str + sample: "" + ssl_policy: + description: The security policy that defines which ciphers and protocols are supported. + type: str + sample: "" + default_actions: + description: The default actions for the listener. + type: complex + contains: + type: + description: The type of action. + type: str + sample: "" + target_group_arn: + description: The Amazon Resource Name (ARN) of the target group. + type: str + sample: "" + load_balancer_arn: + description: The Amazon Resource Name (ARN) of the load balancer. + type: str + sample: "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:loadbalancer/app/my-alb/001122334455" + load_balancer_name: + description: The name of the load balancer. + type: str + sample: "my-alb" + routing_http2_enabled: + description: Indicates whether HTTP/2 is enabled. + type: bool + sample: true + routing_http_desync_mitigation_mode: + description: Determines how the load balancer handles requests that might pose a security risk to an application. + type: str + sample: "defensive" + routing_http_drop_invalid_header_fields_enabled: + description: Indicates whether HTTP headers with invalid header fields are removed by the load balancer (true) or routed to targets (false). + type: bool + sample: false + routing_http_x_amzn_tls_version_and_cipher_suite_enabled: + description: Indicates whether the two headers are added to the client request before sending it to the target. + type: bool + sample: false + routing_http_xff_client_port_enabled: + description: Indicates whether the X-Forwarded-For header should preserve the source port that the client used to connect to the load balancer. + type: bool + sample: false + scheme: + description: Internet-facing or internal load balancer. + type: str + sample: "internal" + security_groups: + description: The IDs of the security groups for the load balancer. + type: list + sample: ['sg-0011223344'] + state: + description: The state of the load balancer. + type: dict + sample: {'code': 'active'} + tags: + description: The tags attached to the load balancer. + type: dict + sample: { + 'Tag': 'Example' + } + type: + description: The type of load balancer. + type: str + sample: "application" + vpc_id: + description: The ID of the VPC for the load balancer. + type: str + sample: "vpc-0011223344" + waf_fail_open_enabled: + description: Indicates whether to allow an AWS WAF-enabled load balancer to route requests to targets + if it is unable to forward the request to AWS WAF.
+ type: bool + sample: false +''' + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, boto3_tag_list_to_ansible_dict + + +@AWSRetry.jittered_backoff(retries=10) +def get_paginator(connection, **kwargs): + paginator = connection.get_paginator('describe_load_balancers') + return paginator.paginate(**kwargs).build_full_result() + + +def get_alb_listeners(connection, module, alb_arn): + + try: + return connection.describe_listeners(LoadBalancerArn=alb_arn)['Listeners'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to describe alb listeners") + + +def get_listener_rules(connection, module, listener_arn): + + try: + return connection.describe_rules(ListenerArn=listener_arn)['Rules'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to describe listener rules") + + +def get_load_balancer_attributes(connection, module, load_balancer_arn): + + try: + load_balancer_attributes = boto3_tag_list_to_ansible_dict(connection.describe_load_balancer_attributes(LoadBalancerArn=load_balancer_arn)['Attributes']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to describe load balancer attributes") + + # Replace '.' with '_' in attribute key names to make it more Ansibley + for k, v in list(load_balancer_attributes.items()): + load_balancer_attributes[k.replace('.', '_')] = v + del load_balancer_attributes[k] + + return load_balancer_attributes + + +def get_load_balancer_tags(connection, module, load_balancer_arn): + + try: + return boto3_tag_list_to_ansible_dict(connection.describe_tags(ResourceArns=[load_balancer_arn])['TagDescriptions'][0]['Tags']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to describe load balancer tags") + + +def get_load_balancer_ipaddresstype(connection, module, load_balancer_arn): + try: + return connection.describe_load_balancers(LoadBalancerArns=[load_balancer_arn])['LoadBalancers'][0]['IpAddressType'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to describe load balancer ip address type") + + +def list_load_balancers(connection, module): + load_balancer_arns = module.params.get("load_balancer_arns") + names = module.params.get("names") + + try: + if not load_balancer_arns and not names: + load_balancers = get_paginator(connection) + if load_balancer_arns: + load_balancers = get_paginator(connection, LoadBalancerArns=load_balancer_arns) + if names: + load_balancers = get_paginator(connection, Names=names) + except is_boto3_error_code('LoadBalancerNotFound'): + module.exit_json(load_balancers=[]) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to list load balancers") + + for load_balancer in load_balancers['LoadBalancers']: + # Get the attributes for each alb + load_balancer.update(get_load_balancer_attributes(connection, module, 
load_balancer['LoadBalancerArn'])) + + # Get the listeners for each alb + load_balancer['listeners'] = get_alb_listeners(connection, module, load_balancer['LoadBalancerArn']) + + # For each listener, get listener rules + for listener in load_balancer['listeners']: + listener['rules'] = get_listener_rules(connection, module, listener['ListenerArn']) + + # Get ALB ip address type + load_balancer['IpAddressType'] = get_load_balancer_ipaddresstype(connection, module, load_balancer['LoadBalancerArn']) + + # Turn the boto3 result in to ansible_friendly_snaked_names + snaked_load_balancers = [camel_dict_to_snake_dict(load_balancer) for load_balancer in load_balancers['LoadBalancers']] + + # Get tags for each load balancer + for snaked_load_balancer in snaked_load_balancers: + snaked_load_balancer['tags'] = get_load_balancer_tags(connection, module, snaked_load_balancer['load_balancer_arn']) + + module.exit_json(load_balancers=snaked_load_balancers) + + +def main(): + + argument_spec = dict( + load_balancer_arns=dict(type='list', elements='str'), + names=dict(type='list', elements='str') + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + mutually_exclusive=[['load_balancer_arns', 'names']], + supports_check_mode=True, + ) + + try: + connection = module.client('elbv2', retry_decorator=AWSRetry.jittered_backoff(retries=10)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') + + list_load_balancers(connection, module) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/elb_classic_lb.py b/ansible_collections/amazon/aws/plugins/modules/elb_classic_lb.py new file mode 100644 index 000000000..5d49d92f6 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/elb_classic_lb.py @@ -0,0 +1,2147 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: elb_classic_lb +version_added: 1.0.0 +description: + - Creates, updates or destroys an Amazon Elastic Load Balancer (ELB). + - This module was renamed from C(amazon.aws.ec2_elb_lb) to M(amazon.aws.elb_classic_lb) in version + 2.1.0 of the amazon.aws collection. +short_description: Creates, updates or destroys an Amazon ELB +author: + - "Jim Dalton (@jsdalton)" + - "Mark Chappell (@tremble)" +options: + state: + description: + - Create or destroy the ELB. + type: str + choices: [ absent, present ] + required: true + name: + description: + - The name of the ELB. + - The name of an ELB must be less than 32 characters and unique per-region per-account. + type: str + required: true + listeners: + description: + - List of ports/protocols for this ELB to listen on (see examples). + - Required when I(state=present) and the ELB doesn't exist. + type: list + elements: dict + suboptions: + load_balancer_port: + description: + - The port on which the load balancer will listen. + type: int + required: True + instance_port: + description: + - The port on which the instance is listening. + type: int + required: True + ssl_certificate_id: + description: + - The Amazon Resource Name (ARN) of the SSL certificate. + type: str + protocol: + description: + - The transport protocol to use for routing. + - Valid values are C(HTTP), C(HTTPS), C(TCP), or C(SSL). 
+ type: str + required: True + instance_protocol: + description: + - The protocol to use for routing traffic to instances. + - Valid values are C(HTTP), C(HTTPS), C(TCP), or C(SSL). + type: str + proxy_protocol: + description: + - Enable proxy protocol for the listener. + - Beware, ELB controls for the proxy protocol are based on the + I(instance_port). If you have multiple listeners talking to + the same I(instance_port), this will affect all of them. + type: bool + purge_listeners: + description: + - Purge existing listeners on ELB that are not found in I(listeners). + type: bool + default: true + instance_ids: + description: + - List of instance ids to attach to this ELB. + type: list + elements: str + purge_instance_ids: + description: + - Purge existing instance ids on ELB that are not found in I(instance_ids). + type: bool + default: false + zones: + description: + - List of availability zones to enable on this ELB. + - Mutually exclusive with I(subnets). + type: list + elements: str + purge_zones: + description: + - Purge existing availability zones on ELB that are not found in I(zones). + type: bool + default: false + security_group_ids: + description: + - A list of security groups to apply to the ELB. + type: list + elements: str + security_group_names: + description: + - A list of security group names to apply to the ELB. + type: list + elements: str + health_check: + description: + - A dictionary of health check configuration settings (see examples). + type: dict + suboptions: + ping_protocol: + description: + - The protocol which the ELB health check will use when performing a + health check. + - Valid values are C('HTTP'), C('HTTPS'), C('TCP') and C('SSL'). + required: true + type: str + ping_path: + description: + - The URI path which the ELB health check will query when performing a + health check. + - Required when I(ping_protocol=HTTP) or I(ping_protocol=HTTPS). + required: false + type: str + ping_port: + description: + - The TCP port to which the ELB will connect when performing a + health check. + required: true + type: int + interval: + description: + - The approximate interval, in seconds, between health checks of an individual instance. + required: true + type: int + timeout: + description: + - The amount of time, in seconds, after which no response means a failed health check. + aliases: ['response_timeout'] + required: true + type: int + unhealthy_threshold: + description: + - The number of consecutive health check failures required before moving + the instance to the Unhealthy state. + required: true + type: int + healthy_threshold: + description: + - The number of consecutive health check successes required before moving + the instance to the Healthy state. + required: true + type: int + access_logs: + description: + - A dictionary of access logs configuration settings (see examples). + type: dict + suboptions: + enabled: + description: + - When set to C(True) will configure delivery of access logs to an S3 + bucket. + - When set to C(False) will disable delivery of access logs. + required: false + type: bool + default: true + s3_location: + description: + - The S3 bucket to deliver access logs to. + - See U(https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-access-logs.html) + for more information about the necessary S3 bucket policies. + - Required when I(enabled=True). + required: false + type: str + s3_prefix: + description: + - Where in the S3 bucket to deliver the logs.
+ - If the prefix is not provided or set to C(""), the log is placed at the root level of the bucket. + required: false + type: str + default: "" + interval: + description: + - The interval for publishing the access logs to S3. + required: false + type: int + default: 60 + choices: [ 5, 60 ] + subnets: + description: + - A list of VPC subnets to use when creating the ELB. + - Mutually exclusive with I(zones). + type: list + elements: str + purge_subnets: + description: + - Purge existing subnets on the ELB that are not found in I(subnets). + - Because it is not permitted to add multiple subnets from the same + availability zone, subnets to be purged will be removed before new + subnets are added. This may cause a brief outage if you try to replace + all subnets at once. + type: bool + default: false + scheme: + description: + - The scheme to use when creating the ELB. + - For a private VPC-visible ELB use C(internal). + - If you choose to update your scheme with a different value the ELB will be destroyed and + a new ELB created. + - Defaults to I(scheme=internet-facing). + type: str + choices: ["internal", "internet-facing"] + connection_draining_timeout: + description: + - Wait a specified timeout allowing connections to drain before terminating an instance. + - Set to C(0) to disable connection draining. + type: int + idle_timeout: + description: + - ELB connections from clients and to servers are timed out after this amount of time. + type: int + cross_az_load_balancing: + description: + - Distribute load across all configured Availability Zones. + - Defaults to C(false). + type: bool + stickiness: + description: + - A dictionary of stickiness policy settings. + - Policy will be applied to all listeners (see examples). + type: dict + suboptions: + type: + description: + - The type of stickiness policy to apply. + - Required if I(enabled=true). + - Ignored if I(enabled=false). + required: false + type: 'str' + choices: ['application','loadbalancer'] + enabled: + description: + - When I(enabled=false) session stickiness will be disabled for all listeners. + required: false + type: bool + default: true + cookie: + description: + - The name of the application cookie used for stickiness. + - Required if I(enabled=true) and I(type=application). + - Ignored if I(enabled=false). + required: false + type: str + expiration: + description: + - The time period, in seconds, after which the cookie should be considered stale. + - If this parameter is not specified, the stickiness session lasts for the duration of the browser session. + - Ignored if I(enabled=false). + required: false + type: int + wait: + description: + - When creating, deleting, or adding instances to an ELB, if I(wait=true) + Ansible will wait for both the load balancer and related network interfaces + to finish creating/deleting. + - Support for waiting when adding instances was added in release 2.1.0. + type: bool + default: false + wait_timeout: + description: + - Used in conjunction with wait. Number of seconds to wait for the ELB to be terminated. + - A maximum of 600 seconds (10 minutes) is allowed. + type: int + default: 180 + +notes: + - The ec2_elb fact previously set by this module was deprecated in release 2.1.0 and since release + 4.0.0 is no longer set. + - Support for I(purge_tags) was added in release 2.1.0. 
+ +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags + - amazon.aws.boto3 +''' + +EXAMPLES = """ +# Note: None of these examples set aws_access_key, aws_secret_key, or region. +# It is assumed that their matching environment variables are set. + +# Basic provisioning example (non-VPC) + +- amazon.aws.elb_classic_lb: + name: "test-please-delete" + state: present + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http # options are http, https, ssl, tcp + load_balancer_port: 80 + instance_port: 80 + proxy_protocol: True + - protocol: https + load_balancer_port: 443 + instance_protocol: http # optional, defaults to value of protocol setting + instance_port: 80 + # ssl certificate required for https or ssl + ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert" + +# Internal ELB example + +- amazon.aws.elb_classic_lb: + name: "test-vpc" + scheme: internal + state: present + instance_ids: + - i-abcd1234 + purge_instance_ids: true + subnets: + - subnet-abcd1234 + - subnet-1a2b3c4d + listeners: + - protocol: http # options are http, https, ssl, tcp + load_balancer_port: 80 + instance_port: 80 + +# Configure a health check and the access logs +- amazon.aws.elb_classic_lb: + name: "test-please-delete" + state: present + zones: + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + health_check: + ping_protocol: http # options are http, https, ssl, tcp + ping_port: 80 + ping_path: "/index.html" # not required for tcp or ssl + response_timeout: 5 # seconds + interval: 30 # seconds + unhealthy_threshold: 2 + healthy_threshold: 10 + access_logs: + interval: 5 # minutes (defaults to 60) + s3_location: "my-bucket" # This value is required if access_logs is set + s3_prefix: "logs" + +# Ensure ELB is gone +- amazon.aws.elb_classic_lb: + name: "test-please-delete" + state: absent + +# Ensure ELB is gone and wait for check (for default timeout) +- amazon.aws.elb_classic_lb: + name: "test-please-delete" + state: absent + wait: true + +# Ensure ELB is gone and wait for check with timeout value +- amazon.aws.elb_classic_lb: + name: "test-please-delete" + state: absent + wait: true + wait_timeout: 600 + +# Normally, this module will purge any listeners that exist on the ELB +# but aren't specified in the listeners parameter. If purge_listeners is +# false it leaves them alone +- amazon.aws.elb_classic_lb: + name: "test-please-delete" + state: present + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + purge_listeners: false + +# Normally, this module will leave availability zones that are enabled +# on the ELB alone. If purge_zones is true, then any extraneous zones +# will be removed +- amazon.aws.elb_classic_lb: + name: "test-please-delete" + state: present + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + purge_zones: true + +# Creates an ELB and assigns a list of subnets to it.
+- amazon.aws.elb_classic_lb: + state: present + name: 'New ELB' + security_group_ids: 'sg-123456, sg-67890' + subnets: 'subnet-123456,subnet-67890' + purge_subnets: true + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + +# Create an ELB with connection draining, increased idle timeout and cross availability +# zone load balancing +- amazon.aws.elb_classic_lb: + name: "New ELB" + state: present + connection_draining_timeout: 60 + idle_timeout: 300 + cross_az_load_balancing: "yes" + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + +# Create an ELB with load balancer stickiness enabled +- amazon.aws.elb_classic_lb: + name: "New ELB" + state: present + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + stickiness: + type: loadbalancer + enabled: true + expiration: 300 + +# Create an ELB with application stickiness enabled +- amazon.aws.elb_classic_lb: + name: "New ELB" + state: present + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + stickiness: + type: application + enabled: true + cookie: SESSIONID + +# Create an ELB and add tags +- amazon.aws.elb_classic_lb: + name: "New ELB" + state: present + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + tags: + Name: "New ELB" + stack: "production" + client: "Bob" + +# Delete all tags from an ELB +- amazon.aws.elb_classic_lb: + name: "New ELB" + state: present + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + tags: {} +""" + +RETURN = ''' +elb: + description: Load Balancer attributes + returned: always + type: dict + contains: + app_cookie_policy: + description: The name of the policy used to control if the ELB is using an application cookie stickiness policy. + type: str + sample: ec2-elb-lb-AppCookieStickinessPolicyType + returned: when state is not 'absent' + backends: + description: A description of the backend policy applied to the ELB (instance-port:policy-name). + type: str + sample: 8181:ProxyProtocol-policy + returned: when state is not 'absent' + connection_draining_timeout: + description: The maximum time, in seconds, to keep the existing connections open before deregistering the instances. + type: int + sample: 25 + returned: when state is not 'absent' + cross_az_load_balancing: + description: Either C('yes') if cross-AZ load balancing is enabled, or C('no') if cross-AZ load balancing is disabled. + type: str + sample: 'yes' + returned: when state is not 'absent' + dns_name: + description: The DNS name of the ELB. + type: str + sample: internal-ansible-test-935c585850ac-1516306744.us-east-1.elb.amazonaws.com + returned: when state is not 'absent' + health_check: + description: A dictionary describing the health check used for the ELB. + type: dict + returned: when state is not 'absent' + contains: + healthy_threshold: + description: The number of consecutive successful health checks before marking an instance as healthy. + type: int + sample: 2 + interval: + description: The time, in seconds, between each health check. + type: int + sample: 10 + target: + description: The Protocol, Port, and for HTTP(S) health checks the path tested by the health check.
+          type: str
+          sample: TCP:22
+        timeout:
+          description: The time, in seconds, after which an in-progress health check is considered failed due to a timeout.
+          type: int
+          sample: 5
+        unhealthy_threshold:
+          description: The number of consecutive failed health checks before marking an instance as unhealthy.
+          type: int
+          sample: 2
+    hosted_zone_id:
+      description: The ID of the Amazon Route 53 hosted zone for the load balancer.
+      type: str
+      sample: Z35SXDOTRQ7X7K
+      returned: when state is not 'absent'
+    hosted_zone_name:
+      description: The DNS name of the load balancer when using a custom hostname.
+      type: str
+      sample: 'ansible-module.example'
+      returned: when state is not 'absent'
+    idle_timeout:
+      description: The length of time before an idle connection is dropped by the ELB.
+      type: int
+      sample: 50
+      returned: when state is not 'absent'
+    in_service_count:
+      description: The number of instances attached to the ELB in an in-service state.
+      type: int
+      sample: 1
+      returned: when state is not 'absent'
+    instance_health:
+      description: A list of dictionaries describing the health of each instance attached to the ELB.
+      type: list
+      elements: dict
+      returned: when state is not 'absent'
+      contains:
+        description:
+          description: A human readable description of why the instance is not in service.
+          type: str
+          sample: N/A
+          returned: when state is not 'absent'
+        instance_id:
+          description: The ID of the instance.
+          type: str
+          sample: i-03dcc8953a03d6435
+          returned: when state is not 'absent'
+        reason_code:
+          description: A code describing why the instance is not in service.
+          type: str
+          sample: N/A
+          returned: when state is not 'absent'
+        state:
+          description: The current service state of the instance.
+          type: str
+          sample: InService
+          returned: when state is not 'absent'
+    instances:
+      description: A list of the IDs of instances attached to the ELB.
+      type: list
+      elements: str
+      sample: ['i-03dcc8953a03d6435']
+      returned: when state is not 'absent'
+    lb_cookie_policy:
+      description: The name of the policy used to control if the ELB is using a cookie stickiness policy.
+      type: str
+      sample: ec2-elb-lb-LBCookieStickinessPolicyType
+      returned: when state is not 'absent'
+    listeners:
+      description:
+        - A list of lists describing the listeners attached to the ELB.
+        - The nested list contains the listener port, the instance port, the listener protocol, the instance protocol,
+          and, where appropriate, the ID of the SSL certificate for the port.
+      type: list
+      elements: list
+      sample: [[22, 22, 'TCP', 'TCP'], [80, 8181, 'HTTP', 'HTTP']]
+      returned: when state is not 'absent'
+    name:
+      description: The name of the ELB. This name is unique per-region, per-account.
+      type: str
+      sample: ansible-test-935c585850ac
+      returned: when state is not 'absent'
+    out_of_service_count:
+      description: The number of instances attached to the ELB in an out-of-service state.
+      type: int
+      sample: 0
+      returned: when state is not 'absent'
+    proxy_policy:
+      description: The name of the policy used to control if the ELB operates using the Proxy protocol.
+      type: str
+      sample: ProxyProtocol-policy
+      returned: when the proxy protocol policy exists.
+    region:
+      description: The AWS region in which the ELB is running.
+      type: str
+      sample: us-east-1
+      returned: always
+    scheme:
+      description: Whether the ELB is an C('internal') or a C('internet-facing') load balancer.
+      type: str
+      sample: internal
+      returned: when state is not 'absent'
+    security_group_ids:
+      description: A list of the IDs of the Security Groups attached to the ELB.
+      type: list
+      elements: str
+      sample: ['sg-0c12ebd82f2fb97dc', 'sg-01ec7378d0c7342e6']
+      returned: when state is not 'absent'
+    status:
+      description: A minimal description of the current state of the ELB. Valid values are C('exists'), C('gone'), C('deleted'), C('created').
+      type: str
+      sample: exists
+      returned: always
+    subnets:
+      description: A list of the subnet IDs attached to the ELB.
+      type: list
+      elements: str
+      sample: ['subnet-00d9d0f70c7e5f63c', 'subnet-03fa5253586b2d2d5']
+      returned: when state is not 'absent'
+    tags:
+      description: A dictionary describing the tags attached to the ELB.
+      type: dict
+      sample: {'Name': 'ansible-test-935c585850ac', 'ExampleTag': 'Example Value'}
+      returned: when state is not 'absent'
+    unknown_instance_state_count:
+      description: The number of instances attached to the ELB in an unknown state.
+      type: int
+      sample: 0
+      returned: when state is not 'absent'
+    zones:
+      description: A list of the availability zones in which the ELB is running.
+      type: list
+      elements: str
+      sample: ['us-east-1b', 'us-east-1a']
+      returned: when state is not 'absent'
+'''
+
+try:
+    import botocore
+except ImportError:
+    pass  # Taken care of by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+
+
+class ElbManager(object):
+    """Handles ELB creation and destruction"""
+
+    def __init__(self, module):
+
+        self.module = module
+
+        self.name = module.params['name']
+        self.listeners = module.params['listeners']
+        self.purge_listeners = module.params['purge_listeners']
+        self.instance_ids = module.params['instance_ids']
+        self.purge_instance_ids = module.params['purge_instance_ids']
+        self.zones = module.params['zones']
+        self.purge_zones = module.params['purge_zones']
+        self.health_check = module.params['health_check']
+        self.access_logs = module.params['access_logs']
+        self.subnets = module.params['subnets']
+        self.purge_subnets = module.params['purge_subnets']
+        self.scheme = module.params['scheme']
+        self.connection_draining_timeout = module.params['connection_draining_timeout']
+        self.idle_timeout = module.params['idle_timeout']
+        self.cross_az_load_balancing = module.params['cross_az_load_balancing']
+        self.stickiness = module.params['stickiness']
+        self.wait = module.params['wait']
+        self.wait_timeout = module.params['wait_timeout']
+        self.tags = module.params['tags']
+        self.purge_tags = module.params['purge_tags']
+
+        self.changed = False
+        self.status = 'gone'
+
+        retry_decorator = AWSRetry.jittered_backoff()
+        self.client = self.module.client('elb', retry_decorator=retry_decorator)
+        self.ec2_client = self.module.client('ec2', retry_decorator=retry_decorator)
+
+        security_group_names = module.params['security_group_names']
+        self.security_group_ids = module.params['security_group_ids']
+
+        self._update_descriptions()
+
+        if security_group_names:
+            # Use the subnets attached to the ELB to find which VPC we're in
+            # and limit the search
+            if self.elb and self.elb.get('Subnets', None):
+                subnets = set(self.elb.get('Subnets') + list(self.subnets or []))
+            else:
+                subnets = set(self.subnets)
+            if subnets:
+                vpc_id = self._get_vpc_from_subnets(subnets)
+            else:
+                vpc_id = None
+            try:
+                self.security_group_ids = self._get_ec2_security_group_ids_from_names(
+                    sec_group_list=security_group_names, vpc_id=vpc_id)
+            except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+                module.fail_json_aws(e, msg="Failed to convert security group names to IDs, try using security group IDs rather than names")
+
+    def _update_descriptions(self):
+        try:
+            self.elb = self._get_elb()
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            self.module.fail_json_aws(e, msg='Unable to describe load balancer')
+        try:
+            self.elb_attributes = self._get_elb_attributes()
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            self.module.fail_json_aws(e, msg='Unable to describe load balancer attributes')
+        try:
+            self.elb_policies = self._get_elb_policies()
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            self.module.fail_json_aws(e, msg='Unable to describe load balancer policies')
+        try:
+            self.elb_health = self._get_elb_instance_health()
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg='Unable to describe load balancer instance health')
+
+    # We have a number of complex parameters which can't be validated by
+    # AnsibleModule or are only required if the ELB doesn't exist.
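+    # The individual _validate_*() helpers below call fail_json() directly as
+    # soon as they find a fatal problem; the accumulated 'problem_found' flags
+    # are effectively informational.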
+
+    def validate_params(self, state=None):
+        problem_found = False
+        # Validate that protocol is one of the permitted values
+        problem_found |= self._validate_listeners(self.listeners)
+        problem_found |= self._validate_health_check(self.health_check)
+        problem_found |= self._validate_stickiness(self.stickiness)
+        if state == 'present':
+            # When creating a new ELB
+            problem_found |= self._validate_creation_requirements()
+        problem_found |= self._validate_access_logs(self.access_logs)
+
+    # Pass check_mode down through to the module
+    @property
+    def check_mode(self):
+        return self.module.check_mode
+
+    def _get_elb_policies(self):
+        try:
+            attributes = self.client.describe_load_balancer_policies(LoadBalancerName=self.name)
+        except is_boto3_error_code(['LoadBalancerNotFound', 'LoadBalancerAttributeNotFoundException']):
+            return {}
+        except is_boto3_error_code('AccessDenied'):  # pylint: disable=duplicate-except
+            # Be forgiving if we can't see the policies
+            # Note: This will break idempotency if someone can set but not describe
+            self.module.warn('Access Denied trying to describe load balancer policies')
+            return {}
+        return attributes['PolicyDescriptions']
+
+    def _get_elb_instance_health(self):
+        try:
+            instance_health = self.client.describe_instance_health(LoadBalancerName=self.name)
+        except is_boto3_error_code(['LoadBalancerNotFound', 'LoadBalancerAttributeNotFoundException']):
+            return []
+        except is_boto3_error_code('AccessDenied'):  # pylint: disable=duplicate-except
+            # Be forgiving if we can't see the instance health
+            # Note: This will break idempotency if someone can set but not describe
+            self.module.warn('Access Denied trying to describe instance health')
+            return []
+        return instance_health['InstanceStates']
+
+    def _get_elb_attributes(self):
+        try:
+            attributes = self.client.describe_load_balancer_attributes(LoadBalancerName=self.name)
+        except is_boto3_error_code(['LoadBalancerNotFound', 'LoadBalancerAttributeNotFoundException']):
+            return {}
+        except is_boto3_error_code('AccessDenied'):  # pylint: disable=duplicate-except
+            # Be forgiving if we can't see the attributes
+            # Note: This will break idempotency if someone can set but not describe
+            self.module.warn('Access Denied trying to describe load balancer attributes')
+            return {}
+        return attributes['LoadBalancerAttributes']
+
+    def _get_elb(self):
+        try:
+            elbs = self._describe_loadbalancer(self.name)
+        except is_boto3_error_code('LoadBalancerNotFound'):
+            return None
+
+        # Shouldn't happen, but Amazon could change the rules on us...
+
+        if len(elbs) > 1:
+            self.module.fail_json(msg='Found multiple ELBs with name {0}'.format(self.name))
+
+        self.status = 'exists' if self.status == 'gone' else self.status
+
+        return elbs[0]
+
+    def _delete_elb(self):
+        # True if succeeds, exception raised if not
+        try:
+            if not self.check_mode:
+                self.client.delete_load_balancer(aws_retry=True, LoadBalancerName=self.name)
+            self.changed = True
+            self.status = 'deleted'
+        except is_boto3_error_code('LoadBalancerNotFound'):
+            return False
+        return True
+
+    def _create_elb(self):
+        listeners = list(self._format_listener(l) for l in self.listeners)
+        if not self.scheme:
+            self.scheme = 'internet-facing'
+        params = dict(
+            LoadBalancerName=self.name,
+            AvailabilityZones=self.zones,
+            SecurityGroups=self.security_group_ids,
+            Subnets=self.subnets,
+            Listeners=listeners,
+            Scheme=self.scheme)
+        params = scrub_none_parameters(params)
+        if self.tags:
+            params['Tags'] = ansible_dict_to_boto3_tag_list(self.tags)
+
+        if not self.check_mode:
+            self.client.create_load_balancer(aws_retry=True, **params)
+            # create_load_balancer only returns the DNS name
+            self.elb = self._get_elb()
+        self.changed = True
+        self.status = 'created'
+        return True
+
+    def _format_listener(self, listener, inject_protocol=False):
+        """Formats listener into the format needed by the
+        ELB API"""
+
+        listener = scrub_none_parameters(listener)
+
+        for protocol in ['protocol', 'instance_protocol']:
+            if protocol in listener:
+                listener[protocol] = listener[protocol].upper()
+
+        if inject_protocol and 'instance_protocol' not in listener:
+            listener['instance_protocol'] = listener['protocol']
+
+        # Remove proxy_protocol, it has to be handled as a policy
+        listener.pop('proxy_protocol', None)
+
+        ssl_id = listener.pop('ssl_certificate_id', None)
+
+        formatted_listener = snake_dict_to_camel_dict(listener, True)
+        if ssl_id:
+            formatted_listener['SSLCertificateId'] = ssl_id
+
+        return formatted_listener
+
+    def _format_healthcheck_target(self):
+        """Compose target string from healthcheck parameters"""
+        protocol = self.health_check['ping_protocol'].upper()
+        path = ""
+
+        if protocol in ['HTTP', 'HTTPS'] and 'ping_path' in self.health_check:
+            path = self.health_check['ping_path']
+
+        return "%s:%s%s" % (protocol, self.health_check['ping_port'], path)
+
+    def _format_healthcheck(self):
+        return dict(
+            Target=self._format_healthcheck_target(),
+            Timeout=self.health_check['timeout'],
+            Interval=self.health_check['interval'],
+            UnhealthyThreshold=self.health_check['unhealthy_threshold'],
+            HealthyThreshold=self.health_check['healthy_threshold'],
+        )
+
+    def ensure_ok(self):
+        """Create the ELB"""
+        if not self.elb:
+            try:
+                self._create_elb()
+            except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+                self.module.fail_json_aws(e, msg="Failed to create load balancer")
+            try:
+                self.elb_attributes = self._get_elb_attributes()
+            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                self.module.fail_json_aws(e, msg='Unable to describe load balancer attributes')
+            self._wait_created()
+
+        # Some attributes are configured on creation, others need to be updated
+        # after creation.  Skip updates for those set on creation
+        else:
+            if self._check_scheme():
+                # XXX We should probably set 'None' parameters based on the
+                # current state prior to deletion
+
+                # the only way to change the scheme is by recreating the resource
+                self.ensure_gone()
+                # We need to wait for it to be gone-gone
+                self._wait_gone(True)
+                try:
+                    self._create_elb()
+                except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+                    self.module.fail_json_aws(e, msg="Failed to recreate load balancer")
+                try:
+                    self.elb_attributes = self._get_elb_attributes()
+                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                    self.module.fail_json_aws(e, msg='Unable to describe load balancer attributes')
+            else:
+                self._set_subnets()
+                self._set_zones()
+                self._set_security_groups()
+                self._set_elb_listeners()
+                self._set_tags()
+
+        self._set_health_check()
+        self._set_elb_attributes()
+        self._set_backend_policies()
+        self._set_stickiness_policies()
+        self._set_instance_ids()
+
+#        if self._check_attribute_support('access_log'):
+#            self._set_access_log()
+
+    def ensure_gone(self):
+        """Destroy the ELB"""
+        if self.elb:
+            try:
+                self._delete_elb()
+            except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+                self.module.fail_json_aws(e, msg="Failed to delete load balancer")
+            self._wait_gone()
+
+    def _wait_gone(self, wait=None):
+        if not wait and not self.wait:
+            return
+        try:
+            self._wait_for_elb_removed()
+            # Unfortunately even though the ELB itself is removed quickly
+            # the interfaces take longer so reliant security groups cannot
+            # be deleted until the interface has registered as removed.
+            self._wait_for_elb_interface_removed()
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed while waiting for load balancer deletion")
+
+    def _wait_created(self, wait=False):
+        if not wait and not self.wait:
+            return
+        try:
+            self._wait_for_elb_created()
+            # Can take longer than creation
+            self._wait_for_elb_interface_created()
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed while waiting for load balancer creation")
+
+    def get_load_balancer(self):
+        self._update_descriptions()
+        elb = dict(self.elb or {})
+        if not elb:
+            return {}
+
+        elb['LoadBalancerAttributes'] = self.elb_attributes
+        elb['LoadBalancerPolicies'] = self.elb_policies
+        load_balancer = camel_dict_to_snake_dict(elb)
+        try:
+            load_balancer['tags'] = self._get_tags()
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed to get load balancer tags")
+
+        return load_balancer
+
+    def get_info(self):
+        self._update_descriptions()
+
+        if not self.elb:
+            return dict(
+                name=self.name,
+                status=self.status,
+                region=self.module.region
+            )
+        check_elb = dict(self.elb)
+        check_elb_attrs = dict(self.elb_attributes or {})
+        check_policies = check_elb.get('Policies', {})
+        try:
+            lb_cookie_policy = check_policies['LBCookieStickinessPolicies'][0]['PolicyName']
+        except (KeyError, IndexError):
+            lb_cookie_policy = None
+        try:
+            app_cookie_policy = check_policies['AppCookieStickinessPolicies'][0]['PolicyName']
+        except (KeyError, IndexError):
+            app_cookie_policy = None
+
+        health_check = camel_dict_to_snake_dict(check_elb.get('HealthCheck', {}))
+
+        backend_policies = list()
+        for port, policies in self._get_backend_policies().items():
+            for policy in policies:
+                backend_policies.append("{0}:{1}".format(port, policy))
+
+        info = dict(
+            name=check_elb.get('LoadBalancerName'),
+            dns_name=check_elb.get('DNSName'),
+            zones=check_elb.get('AvailabilityZones'),
+            security_group_ids=check_elb.get('SecurityGroups'),
+            status=self.status,
+            subnets=check_elb.get('Subnets'),
+            scheme=check_elb.get('Scheme'),
+            hosted_zone_name=check_elb.get('CanonicalHostedZoneName'),
+            hosted_zone_id=check_elb.get('CanonicalHostedZoneNameID'),
+            lb_cookie_policy=lb_cookie_policy,
+            app_cookie_policy=app_cookie_policy,
+            proxy_policy=self._get_proxy_protocol_policy(),
+            backends=backend_policies,
+            instances=self._get_instance_ids(),
+            out_of_service_count=0,
+            in_service_count=0,
+            unknown_instance_state_count=0,
+            region=self.module.region,
+            health_check=health_check,
+        )
+
+        instance_health = camel_dict_to_snake_dict(dict(InstanceHealth=self.elb_health))
+        info.update(instance_health)
+
+        # instance state counts: InService or OutOfService
+        if info['instance_health']:
+            for instance_state in info['instance_health']:
+                if instance_state['state'] == "InService":
+                    info['in_service_count'] += 1
+                elif instance_state['state'] == "OutOfService":
+                    info['out_of_service_count'] += 1
+                else:
+                    info['unknown_instance_state_count'] += 1
+
+        listeners = check_elb.get('ListenerDescriptions', [])
+        if listeners:
+            info['listeners'] = list(
+                self._api_listener_as_tuple(l['Listener']) for l in listeners
+            )
+        else:
+            info['listeners'] = []
+
+        try:
+            info['connection_draining_timeout'] = check_elb_attrs['ConnectionDraining']['Timeout']
+        except KeyError:
+            pass
+        try:
+            info['idle_timeout'] = check_elb_attrs['ConnectionSettings']['IdleTimeout']
+        except KeyError:
+            pass
+        try:
+            is_enabled = check_elb_attrs['CrossZoneLoadBalancing']['Enabled']
+            info['cross_az_load_balancing'] = 'yes' if is_enabled else 'no'
+        except KeyError:
+            pass
+
+        # return stickiness info?
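+
+        # Tags aren't included in the describe_load_balancers output, so they
+        # are fetched with a separate describe_tags call.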
+        try:
+            info['tags'] = self._get_tags()
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed to get load balancer tags")
+
+        return info
+
+    @property
+    def _waiter_config(self):
+        delay = min(10, self.wait_timeout)
+        max_attempts = (self.wait_timeout // delay)
+        return {'Delay': delay, 'MaxAttempts': max_attempts}
+
+    def _wait_for_elb_created(self):
+        if self.check_mode:
+            return True
+
+        waiter = get_waiter(self.client, 'load_balancer_created')
+
+        try:
+            waiter.wait(
+                WaiterConfig=self._waiter_config,
+                LoadBalancerNames=[self.name],
+            )
+        except botocore.exceptions.WaiterError as e:
+            self.module.fail_json_aws(e, 'Timeout waiting for ELB creation')
+
+        return True
+
+    def _wait_for_elb_interface_created(self):
+        if self.check_mode:
+            return True
+        waiter = get_waiter(self.ec2_client, 'network_interface_available')
+
+        filters = ansible_dict_to_boto3_filter_list(
+            {'requester-id': 'amazon-elb',
+             'description': 'ELB {0}'.format(self.name)}
+        )
+
+        try:
+            waiter.wait(
+                WaiterConfig=self._waiter_config,
+                Filters=filters,
+            )
+        except botocore.exceptions.WaiterError as e:
+            self.module.fail_json_aws(e, 'Timeout waiting for ELB Interface creation')
+
+        return True
+
+    def _wait_for_elb_removed(self):
+        if self.check_mode:
+            return True
+
+        waiter = get_waiter(self.client, 'load_balancer_deleted')
+
+        try:
+            waiter.wait(
+                WaiterConfig=self._waiter_config,
+                LoadBalancerNames=[self.name],
+            )
+        except botocore.exceptions.WaiterError as e:
+            self.module.fail_json_aws(e, 'Timeout waiting for ELB removal')
+
+        return True
+
+    def _wait_for_elb_interface_removed(self):
+        if self.check_mode:
+            return True
+
+        waiter = get_waiter(self.ec2_client, 'network_interface_deleted')
+
+        filters = ansible_dict_to_boto3_filter_list(
+            {'requester-id': 'amazon-elb',
+             'description': 'ELB {0}'.format(self.name)}
+        )
+
+        try:
+            waiter.wait(
+                WaiterConfig=self._waiter_config,
+                Filters=filters,
+            )
+        except botocore.exceptions.WaiterError as e:
+            self.module.fail_json_aws(e, 'Timeout waiting for ELB Interface removal')
+
+        return True
+
+    def _wait_for_instance_state(self, waiter_name, instances):
+        if not instances:
+            return False
+
+        if self.check_mode:
+            return True
+
+        waiter = get_waiter(self.client, waiter_name)
+
+        instance_list = list(dict(InstanceId=instance) for instance in instances)
+
+        try:
+            waiter.wait(
+                WaiterConfig=self._waiter_config,
+                LoadBalancerName=self.name,
+                Instances=instance_list,
+            )
+        except botocore.exceptions.WaiterError as e:
+            self.module.fail_json_aws(e, 'Timeout waiting for ELB Instance State')
+
+        return True
+
+    def _create_elb_listeners(self, listeners):
+        """Takes a list of listener definitions and creates them"""
+        if not listeners:
+            return False
+        self.changed = True
+        if self.check_mode:
+            return True
+
+        self.client.create_load_balancer_listeners(
+            aws_retry=True,
+            LoadBalancerName=self.name,
+            Listeners=listeners,
+        )
+        return True
+
+    def _delete_elb_listeners(self, ports):
+        """Takes a list of listener ports and deletes them from the ELB"""
+        if not ports:
+            return False
+        self.changed = True
+        if self.check_mode:
+            return True
+
+        self.client.delete_load_balancer_listeners(
+            aws_retry=True,
+            LoadBalancerName=self.name,
+            LoadBalancerPorts=ports,
+        )
+        return True
+
+    def _set_elb_listeners(self):
+        """
+        Creates listeners specified by self.listeners; overwrites existing
+        listeners on these ports; removes extraneous listeners
+        """
+
+        if not self.listeners:
+            return False
+
+        # We can't use sets here: dicts aren't hashable, so convert to the boto3
+        # format and use a generator to filter
+        new_listeners = list(self._format_listener(l, True) for l in self.listeners)
+        existing_listeners = list(l['Listener'] for l in self.elb['ListenerDescriptions'])
+        listeners_to_remove = list(l for l in existing_listeners if l not in new_listeners)
+        listeners_to_add = list(l for l in new_listeners if l not in existing_listeners)
+
+        changed = False
+
+        if self.purge_listeners:
+            ports_to_remove = list(l['LoadBalancerPort'] for l in listeners_to_remove)
+        else:
+            old_ports = set(l['LoadBalancerPort'] for l in listeners_to_remove)
+            new_ports = set(l['LoadBalancerPort'] for l in listeners_to_add)
+            # If we're not purging, then we need to remove Listeners
+            # where the full definition doesn't match, but the port does
+            ports_to_remove = list(old_ports & new_ports)
+
+        # Update is a delete then add, so do the deletion first
+        try:
+            changed |= self._delete_elb_listeners(ports_to_remove)
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed to remove listeners from load balancer")
+        try:
+            changed |= self._create_elb_listeners(listeners_to_add)
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed to add listeners to load balancer")
+
+        return changed
+
+    def _api_listener_as_tuple(self, listener):
+        """Adds ssl_certificate_id to ELB API tuple if present"""
+        base_tuple = [
+            listener.get('LoadBalancerPort'),
+            listener.get('InstancePort'),
+            listener.get('Protocol'),
+            listener.get('InstanceProtocol'),
+        ]
+        if listener.get('SSLCertificateId', False):
+            base_tuple.append(listener.get('SSLCertificateId'))
+        return tuple(base_tuple)
+
+    def _attach_subnets(self, subnets):
+        if not subnets:
+            return False
+        self.changed = True
+        if self.check_mode:
+            return True
+        self.client.attach_load_balancer_to_subnets(
+            aws_retry=True,
+            LoadBalancerName=self.name,
+            Subnets=subnets)
+        return True
+
+    def _detach_subnets(self, subnets):
+        if not subnets:
+            return False
+        self.changed = True
+        if self.check_mode:
+            return True
+        self.client.detach_load_balancer_from_subnets(
+            aws_retry=True,
+            LoadBalancerName=self.name,
+            Subnets=subnets)
+        return True
+
+    def _set_subnets(self):
+        """Determine which subnets need to be attached or detached on the ELB"""
+        # Subnets parameter not set, nothing to change
+        if self.subnets is None:
+            return False
+
+        changed = False
+
+        if self.purge_subnets:
+            subnets_to_detach = list(set(self.elb['Subnets']) - set(self.subnets))
+        else:
+            subnets_to_detach = list()
+        subnets_to_attach = list(set(self.subnets) - set(self.elb['Subnets']))
+
+        # You can't add multiple subnets from the same AZ.  Remove first, then
+        # add.
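+        # (attach_load_balancer_to_subnets rejects a request that would leave
+        # two subnets attached in the same AZ.)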
+
+        try:
+            changed |= self._detach_subnets(subnets_to_detach)
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed to detach subnets from load balancer")
+        try:
+            changed |= self._attach_subnets(subnets_to_attach)
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed to attach subnets to load balancer")
+
+        return changed
+
+    def _check_scheme(self):
+        """Determine if the current scheme is different than the scheme of the ELB"""
+        if self.scheme:
+            if self.elb['Scheme'] != self.scheme:
+                return True
+        return False
+
+    def _enable_zones(self, zones):
+        if not zones:
+            return False
+        self.changed = True
+        if self.check_mode:
+            return True
+
+        try:
+            self.client.enable_availability_zones_for_load_balancer(
+                aws_retry=True,
+                LoadBalancerName=self.name,
+                AvailabilityZones=zones,
+            )
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg='Failed to enable zones for load balancer')
+        return True
+
+    def _disable_zones(self, zones):
+        if not zones:
+            return False
+        self.changed = True
+        if self.check_mode:
+            return True
+
+        try:
+            self.client.disable_availability_zones_for_load_balancer(
+                aws_retry=True,
+                LoadBalancerName=self.name,
+                AvailabilityZones=zones,
+            )
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg='Failed to disable zones for load balancer')
+        return True
+
+    def _set_zones(self):
+        """Determine which zones need to be enabled or disabled on the ELB"""
+        # zones parameter not set, nothing to change
+        if self.zones is None:
+            return False
+
+        changed = False
+
+        if self.purge_zones:
+            zones_to_disable = list(set(self.elb['AvailabilityZones']) - set(self.zones))
+        else:
+            zones_to_disable = list()
+        zones_to_enable = list(set(self.zones) - set(self.elb['AvailabilityZones']))
+
+        # Add before we remove to reduce the chance of an outage if someone
+        # replaces all zones at once
+        try:
+            changed |= self._enable_zones(zones_to_enable)
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed to enable zone on load balancer")
+        try:
+            changed |= self._disable_zones(zones_to_disable)
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed to disable zone on load balancer")
+
+        return changed
+
+    def _set_security_groups(self):
+        if not self.security_group_ids:
+            return False
+        # Security Group Names should already be converted to IDs by this point.
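+        # Compare as sets: the API doesn't guarantee the order in which
+        # security groups are returned.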
+        if set(self.elb['SecurityGroups']) == set(self.security_group_ids):
+            return False
+
+        self.changed = True
+
+        if self.check_mode:
+            return True
+
+        try:
+            self.client.apply_security_groups_to_load_balancer(
+                aws_retry=True,
+                LoadBalancerName=self.name,
+                SecurityGroups=self.security_group_ids,
+            )
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed to apply security groups to load balancer")
+        return True
+
+    def _set_health_check(self):
+        """Set health check values on the ELB as needed"""
+        if not self.health_check:
+            return False
+
+        health_check_config = self._format_healthcheck()
+
+        if self.elb and health_check_config == self.elb['HealthCheck']:
+            return False
+
+        self.changed = True
+        if self.check_mode:
+            return True
+        try:
+            self.client.configure_health_check(
+                aws_retry=True,
+                LoadBalancerName=self.name,
+                HealthCheck=health_check_config,
+            )
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed to apply healthcheck to load balancer")
+
+        return True
+
+    def _set_elb_attributes(self):
+        attributes = {}
+        if self.cross_az_load_balancing is not None:
+            attr = dict(Enabled=self.cross_az_load_balancing)
+            if not self.elb_attributes.get('CrossZoneLoadBalancing', None) == attr:
+                attributes['CrossZoneLoadBalancing'] = attr
+
+        if self.idle_timeout is not None:
+            attr = dict(IdleTimeout=self.idle_timeout)
+            if not self.elb_attributes.get('ConnectionSettings', None) == attr:
+                attributes['ConnectionSettings'] = attr
+
+        if self.connection_draining_timeout is not None:
+            curr_attr = dict(self.elb_attributes.get('ConnectionDraining', {}))
+            if self.connection_draining_timeout == 0:
+                attr = dict(Enabled=False)
+                curr_attr.pop('Timeout', None)
+            else:
+                attr = dict(Enabled=True, Timeout=self.connection_draining_timeout)
+            if not curr_attr == attr:
+                attributes['ConnectionDraining'] = attr
+
+        if self.access_logs is not None:
+            curr_attr = dict(self.elb_attributes.get('AccessLog', {}))
+            # For disabling we only need to compare and pass 'Enabled'
+            if not self.access_logs.get('enabled'):
+                curr_attr = dict(Enabled=curr_attr.get('Enabled', False))
+                attr = dict(Enabled=self.access_logs.get('enabled'))
+            else:
+                attr = dict(
+                    Enabled=True,
+                    S3BucketName=self.access_logs['s3_location'],
+                    S3BucketPrefix=self.access_logs.get('s3_prefix', ''),
+                    EmitInterval=self.access_logs.get('interval', 60),
+                )
+            if not curr_attr == attr:
+                attributes['AccessLog'] = attr
+
+        if not attributes:
+            return False
+
+        self.changed = True
+        if self.check_mode:
+            return True
+
+        try:
+            self.client.modify_load_balancer_attributes(
+                aws_retry=True,
+                LoadBalancerName=self.name,
+                LoadBalancerAttributes=attributes
+            )
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed to apply load balancer attributes")
+
+    def _proxy_policy_name(self):
+        return 'ProxyProtocol-policy'
+
+    def _policy_name(self, policy_type):
+        return 'ec2-elb-lb-{0}'.format(policy_type)
+
+    def _get_listener_policies(self):
+        """Get a list of listener policies mapped to the LoadBalancerPort"""
+        if not self.elb:
+            return {}
+        listener_descriptions = self.elb.get('ListenerDescriptions', [])
+        policies = {l['LoadBalancerPort']: l['PolicyNames'] for l in listener_descriptions}
+        return policies
+
+    def _set_listener_policies(self, port, policies):
+        self.changed = True
+        if self.check_mode:
+            return True
+
+        try:
+            self.client.set_load_balancer_policies_of_listener(
+                aws_retry=True,
+                LoadBalancerName=self.name,
+                LoadBalancerPort=port,
+                PolicyNames=list(policies),
+            )
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed to set load balancer listener policies",
+                                      port=port, policies=policies)
+
+        return True
+
+    def _get_stickiness_policies(self):
+        """Get a list of AppCookieStickinessPolicyType and LBCookieStickinessPolicyType policies"""
+        return list(p['PolicyName'] for p in self.elb_policies if p['PolicyTypeName'] in ['AppCookieStickinessPolicyType', 'LBCookieStickinessPolicyType'])
+
+    def _get_app_stickiness_policy_map(self):
+        """Get a mapping of App Cookie Stickiness policy names to their definitions"""
+        policies = self.elb.get('Policies', {}).get('AppCookieStickinessPolicies', [])
+        return {p['PolicyName']: p for p in policies}
+
+    def _get_lb_stickiness_policy_map(self):
+        """Get a mapping of LB Cookie Stickiness policy names to their definitions"""
+        policies = self.elb.get('Policies', {}).get('LBCookieStickinessPolicies', [])
+        return {p['PolicyName']: p for p in policies}
+
+    def _purge_stickiness_policies(self):
+        """Removes all stickiness policies from all of the ELB's listeners"""
+        # Used when purging stickiness policies or updating a policy (you can't
+        # update a policy while it's connected to a Listener)
+        stickiness_policies = set(self._get_stickiness_policies())
+        listeners = self.elb['ListenerDescriptions']
+        changed = False
+        for listener in listeners:
+            port = listener['Listener']['LoadBalancerPort']
+            policies = set(listener['PolicyNames'])
+            new_policies = set(policies - stickiness_policies)
+            if policies != new_policies:
+                changed |= self._set_listener_policies(port, new_policies)
+
+        return changed
+
+    def _set_stickiness_policies(self):
+        if self.stickiness is None:
+            return False
+
+        # Make sure that the list of policies and listeners is up to date, we're
+        # going to make changes to all listeners
+        self._update_descriptions()
+
+        if not self.stickiness['enabled']:
+            return self._purge_stickiness_policies()
+
+        if self.stickiness['type'] == 'loadbalancer':
+            policy_name = self._policy_name('LBCookieStickinessPolicyType')
+            expiration = self.stickiness.get('expiration')
+            if not expiration:
+                expiration = 0
+            policy_description = dict(
+                PolicyName=policy_name,
+                CookieExpirationPeriod=expiration,
+            )
+            existing_policies = self._get_lb_stickiness_policy_map()
+            add_method = self.client.create_lb_cookie_stickiness_policy
+        elif self.stickiness['type'] == 'application':
+            policy_name = self._policy_name('AppCookieStickinessPolicyType')
+            policy_description = dict(
+                PolicyName=policy_name,
+                CookieName=self.stickiness.get('cookie', 0)
+            )
+            existing_policies = self._get_app_stickiness_policy_map()
+            add_method = self.client.create_app_cookie_stickiness_policy
+        else:
+            # We shouldn't get here...
+
+            self.module.fail_json(
+                msg='Unknown stickiness policy {0}'.format(
+                    self.stickiness['type']
+                )
+            )
+
+        changed = False
+        # To update a policy we need to delete then re-add, and we can only
+        # delete if the policy isn't attached to a listener
+        if policy_name in existing_policies:
+            if existing_policies[policy_name] != policy_description:
+                changed |= self._purge_stickiness_policies()
+
+        if changed:
+            self._update_descriptions()
+
+        changed |= self._set_stickiness_policy(
+            method=add_method,
+            description=policy_description,
+            existing_policies=existing_policies,
+        )
+
+        listeners = self.elb['ListenerDescriptions']
+        for listener in listeners:
+            changed |= self._set_lb_stickiness_policy(
+                listener=listener,
+                policy=policy_name
+            )
+        return changed
+
+    def _delete_loadbalancer_policy(self, policy_name):
+        self.changed = True
+        if self.check_mode:
+            return True
+
+        try:
+            self.client.delete_load_balancer_policy(
+                LoadBalancerName=self.name,
+                PolicyName=policy_name,
+            )
+        except is_boto3_error_code('InvalidConfigurationRequest'):
+            # Already deleted
+            return False
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
+            self.module.fail_json_aws(e, msg="Failed to delete load balancer policy {0}".format(policy_name))
+        return True
+
+    def _set_stickiness_policy(self, method, description, existing_policies=None):
+        changed = False
+        if existing_policies:
+            policy_name = description['PolicyName']
+            if policy_name in existing_policies:
+                if existing_policies[policy_name] == description:
+                    return False
+                if existing_policies[policy_name] != description:
+                    changed |= self._delete_loadbalancer_policy(policy_name)
+
+        self.changed = True
+        changed = True
+
+        if self.check_mode:
+            return changed
+
+        # This needs to be in place for comparisons, but not passed to the
+        # method.
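+        # AWS treats a missing CookieExpirationPeriod as 'expire with the
+        # browser session', so a 0/None value is dropped rather than sent.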
+        if not description.get('CookieExpirationPeriod', None):
+            description.pop('CookieExpirationPeriod', None)
+
+        try:
+            method(
+                aws_retry=True,
+                LoadBalancerName=self.name,
+                **description
+            )
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed to create load balancer stickiness policy",
+                                      description=description)
+        return changed
+
+    def _set_lb_stickiness_policy(self, listener, policy):
+        port = listener['Listener']['LoadBalancerPort']
+        stickiness_policies = set(self._get_stickiness_policies())
+        changed = False
+
+        policies = set(listener['PolicyNames'])
+        new_policies = list(policies - stickiness_policies)
+        new_policies.append(policy)
+
+        if policies != set(new_policies):
+            changed |= self._set_listener_policies(port, new_policies)
+
+        return changed
+
+    def _get_backend_policies(self):
+        """Get a list of backend policies mapped to the InstancePort"""
+        if not self.elb:
+            return {}
+        server_descriptions = self.elb.get('BackendServerDescriptions', [])
+        policies = {b['InstancePort']: b['PolicyNames'] for b in server_descriptions}
+        return policies
+
+    def _get_proxy_protocol_policy(self):
+        """Returns the name of the ProxyPolicy if created"""
+        all_proxy_policies = self._get_proxy_policies()
+        if not all_proxy_policies:
+            return None
+        if len(all_proxy_policies) == 1:
+            return all_proxy_policies[0]
+        return all_proxy_policies
+
+    def _get_proxy_policies(self):
+        """Get a list of ProxyProtocolPolicyType policies"""
+        return list(p['PolicyName'] for p in self.elb_policies if p['PolicyTypeName'] == 'ProxyProtocolPolicyType')
+
+    def _get_policy_map(self):
+        """Get a mapping of Policy names to their definitions"""
+        return {p['PolicyName']: p for p in self.elb_policies}
+
+    def _set_backend_policies(self):
+        """Sets policies for all backends"""
+        # Currently only supports setting ProxyProtocol policies
+        if not self.listeners:
+            return False
+
+        backend_policies = self._get_backend_policies()
+        proxy_policies = set(self._get_proxy_policies())
+
+        proxy_ports = dict()
+        for listener in self.listeners:
+            proxy_protocol = listener.get('proxy_protocol', None)
+            # Only look at the listeners for which proxy_protocol is defined
+            if proxy_protocol is None:
+                continue
+            instance_port = listener.get('instance_port')
+            if proxy_ports.get(instance_port, None) is not None:
+                if proxy_ports[instance_port] != proxy_protocol:
+                    self.module.fail_json(
+                        msg='proxy_protocol set to conflicting values for listeners'
+                            ' on port {0}'.format(instance_port))
+            proxy_ports[instance_port] = proxy_protocol
+
+        if not proxy_ports:
+            return False
+
+        changed = False
+
+        # If anyone's set proxy_protocol to true, make sure we have our policy
+        # in place.
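+        # The policy object is created once, then attached to each relevant
+        # backend port below.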
+ proxy_policy_name = self._proxy_policy_name() + if any(proxy_ports.values()): + changed |= self._set_proxy_protocol_policy(proxy_policy_name) + + for port in proxy_ports: + current_policies = set(backend_policies.get(port, [])) + new_policies = list(current_policies - proxy_policies) + if proxy_ports[port]: + new_policies.append(proxy_policy_name) + + changed |= self._set_backend_policy(port, new_policies) + + return changed + + def _set_backend_policy(self, port, policies): + backend_policies = self._get_backend_policies() + current_policies = set(backend_policies.get(port, [])) + + if current_policies == set(policies): + return False + + self.changed = True + + if self.check_mode: + return True + + try: + self.client.set_load_balancer_policies_for_backend_server( + aws_retry=True, + LoadBalancerName=self.name, + InstancePort=port, + PolicyNames=policies, + ) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Failed to set load balancer backend policies", + port=port, policies=policies) + + return True + + def _set_proxy_protocol_policy(self, policy_name): + """Install a proxy protocol policy if needed""" + policy_map = self._get_policy_map() + + policy_attributes = [dict(AttributeName='ProxyProtocol', AttributeValue='true')] + + proxy_policy = dict( + PolicyName=policy_name, + PolicyTypeName='ProxyProtocolPolicyType', + PolicyAttributeDescriptions=policy_attributes, + ) + + existing_policy = policy_map.get(policy_name) + if proxy_policy == existing_policy: + return False + + if existing_policy is not None: + self.module.fail_json( + msg="Unable to configure ProxyProtocol policy. " + "Policy with name {0} already exists and doesn't match.".format(policy_name), + policy=proxy_policy, existing_policy=existing_policy, + ) + + proxy_policy['PolicyAttributes'] = proxy_policy.pop('PolicyAttributeDescriptions') + proxy_policy['LoadBalancerName'] = self.name + self.changed = True + + if self.check_mode: + return True + + try: + self.client.create_load_balancer_policy( + aws_retry=True, + **proxy_policy + ) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Failed to create load balancer policy", policy=proxy_policy) + + return True + + def _get_instance_ids(self): + """Get the current list of instance ids installed in the elb""" + elb = self.elb or {} + return list(i['InstanceId'] for i in elb.get('Instances', [])) + + def _change_instances(self, method, instances): + if not instances: + return False + + self.changed = True + if self.check_mode: + return True + + instance_id_list = list({'InstanceId': i} for i in instances) + try: + method( + aws_retry=True, + LoadBalancerName=self.name, + Instances=instance_id_list, + ) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Failed to change instance registration", + instances=instance_id_list, name=self.name) + return True + + def _set_instance_ids(self): + """Register or deregister instances from an lb instance""" + new_instances = self.instance_ids or [] + existing_instances = self._get_instance_ids() + + instances_to_add = set(new_instances) - set(existing_instances) + if self.purge_instance_ids: + instances_to_remove = set(existing_instances) - set(new_instances) + else: + instances_to_remove = [] + + changed = False + + changed |= self._change_instances(self.client.register_instances_with_load_balancer, + instances_to_add) + if self.wait: 
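+            # Block until the newly registered instances pass the ELB health
+            # check (the 'instance_in_service' waiter).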
+            self._wait_for_instance_state('instance_in_service', list(instances_to_add))
+        changed |= self._change_instances(self.client.deregister_instances_from_load_balancer,
+                                          instances_to_remove)
+        if self.wait:
+            self._wait_for_instance_state('instance_deregistered', list(instances_to_remove))
+
+        return changed
+
+    def _get_tags(self):
+        tags = self.client.describe_tags(aws_retry=True,
+                                         LoadBalancerNames=[self.name])
+        if not tags:
+            return {}
+        try:
+            tags = tags['TagDescriptions'][0]['Tags']
+        except (KeyError, TypeError):
+            return {}
+        return boto3_tag_list_to_ansible_dict(tags)
+
+    def _add_tags(self, tags_to_set):
+        if not tags_to_set:
+            return False
+        self.changed = True
+        if self.check_mode:
+            return True
+        tags_to_add = ansible_dict_to_boto3_tag_list(tags_to_set)
+        self.client.add_tags(LoadBalancerNames=[self.name], Tags=tags_to_add)
+        return True
+
+    def _remove_tags(self, tags_to_unset):
+        if not tags_to_unset:
+            return False
+        self.changed = True
+        if self.check_mode:
+            return True
+        tags_to_remove = [dict(Key=tagkey) for tagkey in tags_to_unset]
+        self.client.remove_tags(LoadBalancerNames=[self.name], Tags=tags_to_remove)
+        return True
+
+    def _set_tags(self):
+        """Add/Delete tags"""
+        if self.tags is None:
+            return False
+
+        try:
+            current_tags = self._get_tags()
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed to get load balancer tags")
+
+        tags_to_set, tags_to_unset = compare_aws_tags(current_tags, self.tags,
+                                                      self.purge_tags)
+
+        changed = False
+        try:
+            changed |= self._remove_tags(tags_to_unset)
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed to remove load balancer tags")
+        try:
+            changed |= self._add_tags(tags_to_set)
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed to add load balancer tags")
+
+        return changed
+
+    def _validate_stickiness(self, stickiness):
+        problem_found = False
+        if not stickiness:
+            return problem_found
+        if not stickiness['enabled']:
+            return problem_found
+        if stickiness['type'] == 'application':
+            if not stickiness.get('cookie'):
+                problem_found = True
+                self.module.fail_json(
+                    msg='cookie must be specified when stickiness type is "application"',
+                    stickiness=stickiness,
+                )
+            if stickiness.get('expiration'):
+                self.module.warn('expiration is ignored when stickiness type is "application"')
+        if stickiness['type'] == 'loadbalancer':
+            if stickiness.get('cookie'):
+                self.module.warn('cookie is ignored when stickiness type is "loadbalancer"')
+        return problem_found
+
+    def _validate_access_logs(self, access_logs):
+        problem_found = False
+        if not access_logs:
+            return problem_found
+        if not access_logs['enabled']:
+            return problem_found
+        if not access_logs.get('s3_location', None):
+            problem_found = True
+            self.module.fail_json(
+                msg='s3_location must be provided when access_logs is enabled')
+        return problem_found
+
+    def _validate_creation_requirements(self):
+        if self.elb:
+            return False
+        problem_found = False
+        if not self.subnets and not self.zones:
+            problem_found = True
+            self.module.fail_json(
+                msg='One of subnets or zones must be provided when creating an ELB')
+        if not self.listeners:
+            problem_found = True
+            self.module.fail_json(
+                msg='listeners must be provided when creating an ELB')
+        return problem_found
+
+    def _validate_listeners(self, listeners):
+        if not listeners:
+            return False
+        return any(self._validate_listener(listener) for listener in listeners)
+
+    def _validate_listener(self, listener):
+        problem_found = False
+        if not listener:
+            return problem_found
+        for protocol in ['instance_protocol', 'protocol']:
+            value = listener.get(protocol, None)
+            problem = self._validate_protocol(value)
+            problem_found |= problem
+            if problem:
+                self.module.fail_json(
+                    msg='Invalid protocol ({0}) in listener'.format(value),
+                    listener=listener)
+        return problem_found
+
+    def _validate_health_check(self, health_check):
+        if not health_check:
+            return False
+        protocol = health_check['ping_protocol']
+        if self._validate_protocol(protocol):
+            self.module.fail_json(
+                msg='Invalid protocol ({0}) defined in health check'.format(protocol),
+                health_check=health_check,)
+        if protocol.upper() in ['HTTP', 'HTTPS']:
+            if not health_check['ping_path']:
+                self.module.fail_json(
+                    msg='For HTTP and HTTPS health checks a ping_path must be provided',
+                    health_check=health_check,)
+        return False
+
+    def _validate_protocol(self, protocol):
+        if not protocol:
+            return False
+        return protocol.upper() not in ['HTTP', 'HTTPS', 'TCP', 'SSL']
+
+    @AWSRetry.jittered_backoff()
+    def _describe_loadbalancer(self, lb_name):
+        paginator = self.client.get_paginator('describe_load_balancers')
+        return paginator.paginate(LoadBalancerNames=[lb_name]).build_full_result()['LoadBalancerDescriptions']
+
+    def _get_vpc_from_subnets(self, subnets):
+        if not subnets:
+            return None
+
+        subnet_details = self._describe_subnets(list(subnets))
+        vpc_ids = set(subnet['VpcId'] for subnet in subnet_details)
+
+        if not vpc_ids:
+            return None
+        if len(vpc_ids) > 1:
+            self.module.fail_json(msg="Subnets for an ELB may not span multiple VPCs",
+                                  subnets=subnet_details, vpc_ids=vpc_ids)
+        return vpc_ids.pop()
+
+    @AWSRetry.jittered_backoff()
+    def _describe_subnets(self, subnet_ids):
+        paginator = self.ec2_client.get_paginator('describe_subnets')
+        return paginator.paginate(SubnetIds=subnet_ids).build_full_result()['Subnets']
+
+    # Wrap it so we get the backoff
+    @AWSRetry.jittered_backoff()
+    def _get_ec2_security_group_ids_from_names(self, **params):
+        return get_ec2_security_group_ids_from_names(ec2_connection=self.ec2_client, **params)
+
+
+def main():
+
+    access_log_spec = dict(
+        enabled=dict(required=False, type='bool', default=True),
+        s3_location=dict(required=False, type='str'),
+        s3_prefix=dict(required=False, type='str', default=""),
+        interval=dict(required=False, type='int', default=60, choices=[5, 60]),
+    )
+
+    stickiness_spec = dict(
+        type=dict(required=False, type='str', choices=['application', 'loadbalancer']),
+        enabled=dict(required=False, type='bool', default=True),
+        cookie=dict(required=False, type='str'),
+        expiration=dict(required=False, type='int')
+    )
+
+    healthcheck_spec = dict(
+        ping_protocol=dict(required=True, type='str'),
+        ping_path=dict(required=False, type='str'),
+        ping_port=dict(required=True, type='int'),
+        interval=dict(required=True, type='int'),
+        timeout=dict(aliases=['response_timeout'], required=True, type='int'),
+        unhealthy_threshold=dict(required=True, type='int'),
+        healthy_threshold=dict(required=True, type='int'),
+    )
+
+    listeners_spec = dict(
+        load_balancer_port=dict(required=True, type='int'),
+        instance_port=dict(required=True, type='int'),
+        ssl_certificate_id=dict(required=False, type='str'),
+        protocol=dict(required=True, type='str'),
+        instance_protocol=dict(required=False, type='str'),
+        proxy_protocol=dict(required=False, type='bool'),
+    )
+
+    argument_spec = dict(
+        state=dict(required=True, choices=['present', 'absent']),
+        name=dict(required=True),
+        listeners=dict(type='list', elements='dict', options=listeners_spec),
+        purge_listeners=dict(default=True, type='bool'),
+        instance_ids=dict(type='list', elements='str'),
+        purge_instance_ids=dict(default=False, type='bool'),
+        zones=dict(type='list', elements='str'),
+        purge_zones=dict(default=False, type='bool'),
+        security_group_ids=dict(type='list', elements='str'),
+        security_group_names=dict(type='list', elements='str'),
+        health_check=dict(type='dict', options=healthcheck_spec),
+        subnets=dict(type='list', elements='str'),
+        purge_subnets=dict(default=False, type='bool'),
+        scheme=dict(choices=['internal', 'internet-facing']),
+        connection_draining_timeout=dict(type='int'),
+        idle_timeout=dict(type='int'),
+        cross_az_load_balancing=dict(type='bool'),
+        stickiness=dict(type='dict', options=stickiness_spec),
+        access_logs=dict(type='dict', options=access_log_spec),
+        wait=dict(default=False, type='bool'),
+        wait_timeout=dict(default=180, type='int'),
+        tags=dict(type='dict', aliases=['resource_tags']),
+        purge_tags=dict(default=True, type='bool'),
+    )
+
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        mutually_exclusive=[
+            ['security_group_ids', 'security_group_names'],
+            ['zones', 'subnets'],
+        ],
+        supports_check_mode=True,
+    )
+
+    wait_timeout = module.params['wait_timeout']
+    state = module.params['state']
+
+    if wait_timeout > 600:
+        module.fail_json(msg='wait_timeout maximum is 600 seconds')
+
+    elb_man = ElbManager(module)
+    elb_man.validate_params(state)
+
+    if state == 'present':
+        elb_man.ensure_ok()
+        # original boto style
+        elb = elb_man.get_info()
+        # boto3 style
+        lb = elb_man.get_load_balancer()
+        ec2_result = dict(elb=elb, load_balancer=lb)
+    elif state == 'absent':
+        elb_man.ensure_gone()
+        # original boto style
+        elb = elb_man.get_info()
+        ec2_result = dict(elb=elb)
+
+    module.exit_json(
+        changed=elb_man.changed,
+        **ec2_result,
+    )
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_policy.py b/ansible_collections/amazon/aws/plugins/modules/iam_policy.py
new file mode 100644
index 000000000..8eef40304
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/iam_policy.py
@@ -0,0 +1,351 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: iam_policy
+version_added: 5.0.0
+short_description: Manage inline IAM policies for users, groups, and roles
+description:
+  - Allows uploading or removing inline IAM policies for IAM users, groups or roles.
+  - To administer managed policies please see M(community.aws.iam_user), M(community.aws.iam_role),
+    M(community.aws.iam_group) and M(community.aws.iam_managed_policy).
+  - This module was originally added to C(community.aws) in release 1.0.0.
+options:
+  iam_type:
+    description:
+      - Type of IAM resource.
+    required: true
+    choices: [ "user", "group", "role"]
+    type: str
+  iam_name:
+    description:
+      - Name of IAM resource you wish to target for policy actions. In other words, the user name, group name or role name.
+    required: true
+    type: str
+  policy_name:
+    description:
+      - The name label for the policy to create or remove.
+    required: true
+    type: str
+  policy_json:
+    description:
+      - A properly JSON formatted policy as a string.
+ type: json + state: + description: + - Whether to create or delete the IAM policy. + choices: [ "present", "absent"] + default: present + type: str + skip_duplicates: + description: + - When I(skip_duplicates=true) the module looks for any policies that match the document you pass in. + If there is a match it will not make a new policy object with the same rules. + default: false + type: bool + +author: + - "Jonathan I. Davila (@defionscode)" + - "Dennis Podkovyrin (@sbj-ss)" +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.boto3 +''' + +EXAMPLES = ''' +# Advanced example, create two new groups and add a READ-ONLY policy to both +# groups. +- name: Create Two Groups, Mario and Luigi + community.aws.iam_group: + name: "{{ item }}" + state: present + loop: + - Mario + - Luigi + register: new_groups + +- name: Apply READ-ONLY policy to new groups that have been recently created + amazon.aws.iam_policy: + iam_type: group + iam_name: "{{ item.iam_group.group.group_name }}" + policy_name: "READ-ONLY" + policy_json: "{{ lookup('template', 'readonly.json.j2') }}" + state: present + loop: "{{ new_groups.results }}" + +# Create a new S3 policy with prefix per user +- name: Create S3 policy from template + amazon.aws.iam_policy: + iam_type: user + iam_name: "{{ item.user }}" + policy_name: "s3_limited_access_{{ item.prefix }}" + state: present + policy_json: "{{ lookup('template', 's3_policy.json.j2') }}" + loop: + - user: s3_user + prefix: s3_user_prefix + +''' +RETURN = ''' +policy_names: + description: A list of names of the inline policies embedded in the specified IAM resource (user, group, or role). + returned: always + type: list + elements: str +''' + +import json + +try: + from botocore.exceptions import BotoCoreError, ClientError +except ImportError: + pass + +from ansible.module_utils.six import string_types +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code + + +class PolicyError(Exception): + pass + + +class Policy: + + def __init__(self, client, name, policy_name, policy_json, skip_duplicates, state, check_mode): + self.client = client + self.name = name + self.policy_name = policy_name + self.policy_json = policy_json + self.skip_duplicates = skip_duplicates + self.state = state + self.check_mode = check_mode + self.changed = False + + self.original_policies = self.get_all_policies().copy() + self.updated_policies = {} + + @staticmethod + def _iam_type(): + return '' + + def _list(self, name): + return {} + + def list(self): + try: + return self._list(self.name).get('PolicyNames', []) + except is_boto3_error_code('AccessDenied'): + return [] + + def _get(self, name, policy_name): + return '{}' + + def get(self, policy_name): + try: + return self._get(self.name, policy_name)['PolicyDocument'] + except is_boto3_error_code('AccessDenied'): + return {} + + def _put(self, name, policy_name, policy_doc): + pass + + def put(self, policy_doc): + self.changed = True + + if self.check_mode: + return + + self._put(self.name, self.policy_name, json.dumps(policy_doc, sort_keys=True)) + + def _delete(self, name, policy_name): + pass + + def delete(self): + self.updated_policies = self.original_policies.copy() + + if self.policy_name not in self.list(): + self.changed = 
False + return + + self.changed = True + self.updated_policies.pop(self.policy_name, None) + + if self.check_mode: + return + + self._delete(self.name, self.policy_name) + + def get_policy_text(self): + try: + if self.policy_json is not None: + return self.get_policy_from_json() + except json.JSONDecodeError as e: + raise PolicyError('Failed to decode the policy as valid JSON: %s' % str(e)) + return None + + def get_policy_from_json(self): + if isinstance(self.policy_json, string_types): + pdoc = json.loads(self.policy_json) + else: + pdoc = self.policy_json + return pdoc + + def get_all_policies(self): + policies = {} + for pol in self.list(): + policies[pol] = self.get(pol) + return policies + + def create(self): + matching_policies = [] + policy_doc = self.get_policy_text() + policy_match = False + for pol in self.list(): + if not compare_policies(self.original_policies[pol], policy_doc): + matching_policies.append(pol) + policy_match = True + + self.updated_policies = self.original_policies.copy() + + if self.policy_name in matching_policies: + return + if self.skip_duplicates and policy_match: + return + + self.put(policy_doc) + self.updated_policies[self.policy_name] = policy_doc + + def run(self): + if self.state == 'present': + self.create() + elif self.state == 'absent': + self.delete() + return { + 'changed': self.changed, + self._iam_type() + '_name': self.name, + 'policies': self.list(), + 'policy_names': self.list(), + 'diff': dict( + before=self.original_policies, + after=self.updated_policies, + ), + } + + +class UserPolicy(Policy): + + @staticmethod + def _iam_type(): + return 'user' + + def _list(self, name): + return self.client.list_user_policies(aws_retry=True, UserName=name) + + def _get(self, name, policy_name): + return self.client.get_user_policy(aws_retry=True, UserName=name, PolicyName=policy_name) + + def _put(self, name, policy_name, policy_doc): + return self.client.put_user_policy(aws_retry=True, UserName=name, PolicyName=policy_name, PolicyDocument=policy_doc) + + def _delete(self, name, policy_name): + return self.client.delete_user_policy(aws_retry=True, UserName=name, PolicyName=policy_name) + + +class RolePolicy(Policy): + + @staticmethod + def _iam_type(): + return 'role' + + def _list(self, name): + return self.client.list_role_policies(aws_retry=True, RoleName=name) + + def _get(self, name, policy_name): + return self.client.get_role_policy(aws_retry=True, RoleName=name, PolicyName=policy_name) + + def _put(self, name, policy_name, policy_doc): + return self.client.put_role_policy(aws_retry=True, RoleName=name, PolicyName=policy_name, PolicyDocument=policy_doc) + + def _delete(self, name, policy_name): + return self.client.delete_role_policy(aws_retry=True, RoleName=name, PolicyName=policy_name) + + +class GroupPolicy(Policy): + + @staticmethod + def _iam_type(): + return 'group' + + def _list(self, name): + return self.client.list_group_policies(aws_retry=True, GroupName=name) + + def _get(self, name, policy_name): + return self.client.get_group_policy(aws_retry=True, GroupName=name, PolicyName=policy_name) + + def _put(self, name, policy_name, policy_doc): + return self.client.put_group_policy(aws_retry=True, GroupName=name, PolicyName=policy_name, PolicyDocument=policy_doc) + + def _delete(self, name, policy_name): + return self.client.delete_group_policy(aws_retry=True, GroupName=name, PolicyName=policy_name) + + +def main(): + argument_spec = dict( + iam_type=dict(required=True, choices=['user', 'group', 'role']), + state=dict(default='present', 
choices=['present', 'absent']),
+        iam_name=dict(required=True),
+        policy_name=dict(required=True),
+        policy_json=dict(type='json', default=None, required=False),
+        skip_duplicates=dict(type='bool', default=False, required=False)
+    )
+    required_if = [
+        ('state', 'present', ('policy_json',), True),
+    ]
+
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        required_if=required_if,
+        supports_check_mode=True
+    )
+
+    args = dict(
+        client=module.client('iam', retry_decorator=AWSRetry.jittered_backoff()),
+        name=module.params.get('iam_name'),
+        policy_name=module.params.get('policy_name'),
+        policy_json=module.params.get('policy_json'),
+        skip_duplicates=module.params.get('skip_duplicates'),
+        state=module.params.get('state'),
+        check_mode=module.check_mode,
+    )
+    iam_type = module.params.get('iam_type')
+
+    try:
+        if iam_type == 'user':
+            policy = UserPolicy(**args)
+        elif iam_type == 'role':
+            policy = RolePolicy(**args)
+        elif iam_type == 'group':
+            policy = GroupPolicy(**args)
+
+        module.deprecate("The 'policies' return key is deprecated and will be replaced by 'policy_names'. Both values are returned for now.",
+                         date='2024-08-01', collection_name='amazon.aws')
+
+        module.exit_json(**(policy.run()))
+    except (BotoCoreError, ClientError) as e:
+        module.fail_json_aws(e)
+    except PolicyError as e:
+        module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_policy_info.py b/ansible_collections/amazon/aws/plugins/modules/iam_policy_info.py
new file mode 100644
index 000000000..125f55e1f
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/iam_policy_info.py
@@ -0,0 +1,209 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: iam_policy_info
+version_added: 5.0.0
+short_description: Retrieve inline IAM policies for users, groups, and roles
+description:
+  - Supports fetching of inline IAM policies for IAM users, groups and roles.
+  - This module was originally added to C(community.aws) in release 1.0.0.
+options:
+  iam_type:
+    description:
+      - Type of IAM resource you wish to retrieve inline policies for.
+    required: true
+    choices: [ "user", "group", "role"]
+    type: str
+  iam_name:
+    description:
+      - Name of the IAM resource you wish to retrieve inline policies for. In other words, the user name, group name or role name.
+    required: true
+    type: str
+  policy_name:
+    description:
+      - Name of a specific IAM inline policy you wish to retrieve.
+ required: false + type: str +author: + - Mark Chappell (@tremble) +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.boto3 + +''' + +EXAMPLES = ''' +- name: Describe all inline IAM policies on an IAM User + amazon.aws.iam_policy_info: + iam_type: user + iam_name: example_user + +- name: Describe a specific inline policy on an IAM Role + amazon.aws.iam_policy_info: + iam_type: role + iam_name: example_role + policy_name: example_policy + +''' +RETURN = ''' +policies: + description: A list containing the matching IAM inline policy names and their data + returned: success + type: complex + contains: + policy_name: + description: The Name of the inline policy + returned: success + type: str + policy_document: + description: The JSON document representing the inline IAM policy + returned: success + type: list +policy_names: + description: A list of matching names of the IAM inline policies on the queried object + returned: success + type: list +all_policy_names: + description: A list of names of all of the IAM inline policies on the queried object + returned: success + type: list +''' + +try: + import botocore +except ImportError: + pass + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry + + +class Policy: + + def __init__(self, client, name, policy_name): + self.client = client + self.name = name + self.policy_name = policy_name + self.changed = False + + @staticmethod + def _iam_type(): + return '' + + def _list(self, name): + return {} + + def list(self): + return self._list(self.name).get('PolicyNames', []) + + def _get(self, name, policy_name): + return '{}' + + def get(self, policy_name): + return self._get(self.name, policy_name)['PolicyDocument'] + + def get_all(self): + policies = list() + for policy in self.list(): + policies.append({"policy_name": policy, "policy_document": self.get(policy)}) + return policies + + def run(self): + policy_list = self.list() + ret_val = { + 'changed': False, + self._iam_type() + '_name': self.name, + 'all_policy_names': policy_list + } + if self.policy_name is None: + ret_val.update(policies=self.get_all()) + ret_val.update(policy_names=policy_list) + elif self.policy_name in policy_list: + ret_val.update(policies=[{ + "policy_name": self.policy_name, + "policy_document": self.get(self.policy_name)}]) + ret_val.update(policy_names=[self.policy_name]) + return ret_val + + +class UserPolicy(Policy): + + @staticmethod + def _iam_type(): + return 'user' + + def _list(self, name): + return self.client.list_user_policies(aws_retry=True, UserName=name) + + def _get(self, name, policy_name): + return self.client.get_user_policy(aws_retry=True, UserName=name, PolicyName=policy_name) + + +class RolePolicy(Policy): + + @staticmethod + def _iam_type(): + return 'role' + + def _list(self, name): + return self.client.list_role_policies(aws_retry=True, RoleName=name) + + def _get(self, name, policy_name): + return self.client.get_role_policy(aws_retry=True, RoleName=name, PolicyName=policy_name) + + +class GroupPolicy(Policy): + + @staticmethod + def _iam_type(): + return 'group' + + def _list(self, name): + return self.client.list_group_policies(aws_retry=True, GroupName=name) + + def _get(self, name, policy_name): + return self.client.get_group_policy(aws_retry=True, GroupName=name, PolicyName=policy_name) + + +def main(): + 
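# Build a retry-wrapped IAM client, dispatch on iam_type to the matching
+    # Policy subclass, and return its run() output unchanged.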
+    argument_spec = dict(
+        iam_type=dict(required=True, choices=['user', 'group', 'role']),
+        iam_name=dict(required=True),
+        policy_name=dict(default=None, required=False),
+    )
+
+    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+    args = dict(
+        client=module.client('iam', retry_decorator=AWSRetry.jittered_backoff()),
+        name=module.params.get('iam_name'),
+        policy_name=module.params.get('policy_name'),
+    )
+    iam_type = module.params.get('iam_type')
+
+    try:
+        if iam_type == 'user':
+            policy = UserPolicy(**args)
+        elif iam_type == 'role':
+            policy = RolePolicy(**args)
+        elif iam_type == 'group':
+            policy = GroupPolicy(**args)
+
+        module.exit_json(**(policy.run()))
+    except is_boto3_error_code('NoSuchEntity') as e:
+        module.exit_json(changed=False, msg=e.response['Error']['Message'])
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_user.py b/ansible_collections/amazon/aws/plugins/modules/iam_user.py
new file mode 100644
index 000000000..a4e056c0e
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/iam_user.py
@@ -0,0 +1,581 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: iam_user
+version_added: 5.0.0
+short_description: Manage AWS IAM users
+description:
+  - A module to manage AWS IAM users.
+  - The module does not manage the groups that users belong to; group memberships can be managed using M(community.aws.iam_group).
+  - This module was originally added to C(community.aws) in release 1.0.0.
+author:
+  - Josh Souza (@joshsouza)
+options:
+  name:
+    description:
+      - The name of the user to create.
+    required: true
+    type: str
+  password:
+    description:
+      - The password to apply to the user.
+    required: false
+    type: str
+    version_added: 2.2.0
+    version_added_collection: community.aws
+  password_reset_required:
+    description:
+      - Defines if the user is required to set a new password after login.
+    required: false
+    type: bool
+    default: false
+    version_added: 3.1.0
+    version_added_collection: community.aws
+  update_password:
+    default: always
+    choices: ['always', 'on_create']
+    description:
+      - When to update user passwords.
+      - I(update_password=always) will ensure the password is set to I(password).
+      - I(update_password=on_create) will only set the password for newly created users.
+    type: str
+    version_added: 2.2.0
+    version_added_collection: community.aws
+  remove_password:
+    description:
+      - Option to delete user login passwords.
+      - This field is mutually exclusive with I(password).
+    type: 'bool'
+    version_added: 2.2.0
+    version_added_collection: community.aws
+  managed_policies:
+    description:
+      - A list of managed policy ARNs or friendly names to attach to the user.
+      - To embed an inline policy, use M(community.aws.iam_policy).
+    required: false
+    type: list
+    elements: str
+    default: []
+    aliases: ['managed_policy']
+  state:
+    description:
+      - Create or remove the IAM user.
+    required: true
+    choices: [ 'present', 'absent' ]
+    type: str
+  purge_policies:
+    description:
+      - When I(purge_policies=true), any managed policies not listed in I(managed_policies) will be detached.
+ required: false + default: false + type: bool + aliases: ['purge_policy', 'purge_managed_policies'] + wait: + description: + - When I(wait=True) the module will wait for up to I(wait_timeout) seconds + for IAM user creation before returning. + default: True + type: bool + version_added: 2.2.0 + version_added_collection: community.aws + wait_timeout: + description: + - How long (in seconds) to wait for creation / updates to complete. + default: 120 + type: int + version_added: 2.2.0 + version_added_collection: community.aws +notes: + - Support for I(tags) and I(purge_tags) was added in release 2.1.0. +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags + - amazon.aws.boto3 +''' + +EXAMPLES = r''' +# Note: These examples do not set authentication details, see the AWS Guide for details. +# Note: This module does not allow management of groups that users belong to. +# Groups should manage their membership directly using community.aws.iam_group, +# as users belong to them. + +- name: Create a user + amazon.aws.iam_user: + name: testuser1 + state: present + +- name: Create a user with a password + amazon.aws.iam_user: + name: testuser1 + password: SomeSecurePassword + state: present + +- name: Create a user and attach a managed policy using its ARN + amazon.aws.iam_user: + name: testuser1 + managed_policies: + - arn:aws:iam::aws:policy/AmazonSNSFullAccess + state: present + +- name: Remove all managed policies from an existing user with an empty list + amazon.aws.iam_user: + name: testuser1 + state: present + purge_policies: true + +- name: Create user with tags + amazon.aws.iam_user: + name: testuser1 + state: present + tags: + Env: Prod + +- name: Delete the user + amazon.aws.iam_user: + name: testuser1 + state: absent + +''' +RETURN = r''' +user: + description: dictionary containing all the user information + returned: success + type: complex + contains: + arn: + description: the Amazon Resource Name (ARN) specifying the user + type: str + sample: "arn:aws:iam::123456789012:user/testuser1" + create_date: + description: the date and time, in ISO 8601 date-time format, when the user was created + type: str + sample: "2017-02-08T04:36:28+00:00" + user_id: + description: the stable and unique string identifying the user + type: str + sample: "AGPA12345EXAMPLE54321" + user_name: + description: the friendly name that identifies the user + type: str + sample: "testuser1" + path: + description: the path to the user + type: str + sample: "/" + tags: + description: user tags + type: dict + returned: always + sample: {"Env": "Prod"} +''' + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags + + +def compare_attached_policies(current_attached_policies, new_attached_policies): + + # If new_attached_policies is None it means we want to remove all policies + if len(current_attached_policies) > 0 and new_attached_policies is None: + return False + + current_attached_policies_arn_list = [] + for 
policy in current_attached_policies: + current_attached_policies_arn_list.append(policy['PolicyArn']) + + if not set(current_attached_policies_arn_list).symmetric_difference(set(new_attached_policies)): + return True + else: + return False + + +def convert_friendly_names_to_arns(connection, module, policy_names): + + # List comprehension that looks for any policy in the 'policy_names' list + # that does not begin with 'arn'. If there aren't any, short circuit. + # If there are, translate friendly name to the full arn + if not any(not policy.startswith('arn:') for policy in policy_names if policy is not None): + return policy_names + allpolicies = {} + paginator = connection.get_paginator('list_policies') + policies = paginator.paginate().build_full_result()['Policies'] + + for policy in policies: + allpolicies[policy['PolicyName']] = policy['Arn'] + allpolicies[policy['Arn']] = policy['Arn'] + try: + return [allpolicies[policy] for policy in policy_names] + except KeyError as e: + module.fail_json(msg="Couldn't find policy: " + str(e)) + + +def wait_iam_exists(connection, module): + + user_name = module.params.get('name') + wait_timeout = module.params.get('wait_timeout') + + delay = min(wait_timeout, 5) + max_attempts = wait_timeout // delay + + try: + waiter = connection.get_waiter('user_exists') + waiter.wait( + WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts}, + UserName=user_name, + ) + except botocore.exceptions.WaiterError as e: + module.fail_json_aws(e, msg='Timeout while waiting on IAM user creation') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed while waiting on IAM user creation') + + +def create_or_update_login_profile(connection, module): + + # Apply new password / update password for the user + user_params = dict() + user_params['UserName'] = module.params.get('name') + user_params['Password'] = module.params.get('password') + user_params['PasswordResetRequired'] = module.params.get('password_reset_required') + retval = {} + + try: + retval = connection.update_login_profile(**user_params) + except is_boto3_error_code('NoSuchEntity'): + # Login profile does not yet exist - create it + try: + retval = connection.create_login_profile(**user_params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to create user login profile") + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Unable to update user login profile") + + return True, retval + + +def delete_login_profile(connection, module): + ''' + Deletes a users login profile. 
+ Parameters: + connection: IAM client + module: AWSModule + Returns: + (bool): True if login profile deleted, False if no login profile found to delete + ''' + user_params = dict() + user_params['UserName'] = module.params.get('name') + + # User does not have login profile - nothing to delete + if not user_has_login_profile(connection, module, user_params['UserName']): + return False + + if not module.check_mode: + try: + connection.delete_login_profile(**user_params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Unable to delete user login profile") + + return True + + +def create_or_update_user(connection, module): + + params = dict() + params['UserName'] = module.params.get('name') + managed_policies = module.params.get('managed_policies') + purge_policies = module.params.get('purge_policies') + + if module.params.get('tags') is not None: + params["Tags"] = ansible_dict_to_boto3_tag_list(module.params.get('tags')) + + changed = False + + if managed_policies: + managed_policies = convert_friendly_names_to_arns(connection, module, managed_policies) + + # Get user + user = get_user(connection, module, params['UserName']) + + # If user is None, create it + new_login_profile = False + if user is None: + # Check mode means we would create the user + if module.check_mode: + module.exit_json(changed=True) + + try: + connection.create_user(**params) + changed = True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to create user") + + # Wait for user to be fully available before continuing + if module.params.get('wait'): + wait_iam_exists(connection, module) + + if module.params.get('password') is not None: + login_profile_result, login_profile_data = create_or_update_login_profile(connection, module) + + if login_profile_data.get('LoginProfile', {}).get('PasswordResetRequired', False): + new_login_profile = True + else: + login_profile_result = None + update_result = update_user_tags(connection, module, params, user) + + if module.params['update_password'] == "always" and module.params.get('password') is not None: + # Can't compare passwords, so just return changed on check mode runs + if module.check_mode: + module.exit_json(changed=True) + login_profile_result, login_profile_data = create_or_update_login_profile(connection, module) + + if login_profile_data.get('LoginProfile', {}).get('PasswordResetRequired', False): + new_login_profile = True + + elif module.params.get('remove_password'): + login_profile_result = delete_login_profile(connection, module) + + changed = bool(update_result) or bool(login_profile_result) + + # Manage managed policies + current_attached_policies = get_attached_policy_list(connection, module, params['UserName']) + if not compare_attached_policies(current_attached_policies, managed_policies): + current_attached_policies_arn_list = [] + for policy in current_attached_policies: + current_attached_policies_arn_list.append(policy['PolicyArn']) + + # If managed_policies has a single empty element we want to remove all attached policies + if purge_policies: + # Detach policies not present + for policy_arn in list(set(current_attached_policies_arn_list) - set(managed_policies)): + changed = True + if not module.check_mode: + try: + connection.detach_user_policy(UserName=params['UserName'], PolicyArn=policy_arn) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + 
module.fail_json_aws(e, msg="Unable to detach policy {0} from user {1}".format( + policy_arn, params['UserName'])) + + # If there are policies to adjust that aren't in the current list, then things have changed + # Otherwise the only changes were in purging above + if set(managed_policies).difference(set(current_attached_policies_arn_list)): + changed = True + # If there are policies in managed_policies attach each policy + if managed_policies != [None] and not module.check_mode: + for policy_arn in managed_policies: + try: + connection.attach_user_policy(UserName=params['UserName'], PolicyArn=policy_arn) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to attach policy {0} to user {1}".format( + policy_arn, params['UserName'])) + + if module.check_mode: + module.exit_json(changed=changed) + + # Get the user again + user = get_user(connection, module, params['UserName']) + if changed and new_login_profile: + # `LoginProfile` is only returned on `create_login_profile` method + user['user']['password_reset_required'] = login_profile_data.get('LoginProfile', {}).get('PasswordResetRequired', False) + + module.exit_json(changed=changed, iam_user=user, user=user['user']) + + +def destroy_user(connection, module): + + user_name = module.params.get('name') + + user = get_user(connection, module, user_name) + # User is not present + if not user: + module.exit_json(changed=False) + + # Check mode means we would remove this user + if module.check_mode: + module.exit_json(changed=True) + + # Remove any attached policies otherwise deletion fails + try: + for policy in get_attached_policy_list(connection, module, user_name): + connection.detach_user_policy(UserName=user_name, PolicyArn=policy['PolicyArn']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to delete user {0}".format(user_name)) + + try: + # Remove user's access keys + access_keys = connection.list_access_keys(UserName=user_name)["AccessKeyMetadata"] + for access_key in access_keys: + connection.delete_access_key(UserName=user_name, AccessKeyId=access_key["AccessKeyId"]) + + # Remove user's login profile (console password) + delete_login_profile(connection, module) + + # Remove user's ssh public keys + ssh_public_keys = connection.list_ssh_public_keys(UserName=user_name)["SSHPublicKeys"] + for ssh_public_key in ssh_public_keys: + connection.delete_ssh_public_key(UserName=user_name, SSHPublicKeyId=ssh_public_key["SSHPublicKeyId"]) + + # Remove user's service specific credentials + service_credentials = connection.list_service_specific_credentials(UserName=user_name)["ServiceSpecificCredentials"] + for service_specific_credential in service_credentials: + connection.delete_service_specific_credential( + UserName=user_name, + ServiceSpecificCredentialId=service_specific_credential["ServiceSpecificCredentialId"] + ) + + # Remove user's signing certificates + signing_certificates = connection.list_signing_certificates(UserName=user_name)["Certificates"] + for signing_certificate in signing_certificates: + connection.delete_signing_certificate( + UserName=user_name, + CertificateId=signing_certificate["CertificateId"] + ) + + # Remove user's MFA devices + mfa_devices = connection.list_mfa_devices(UserName=user_name)["MFADevices"] + for mfa_device in mfa_devices: + connection.deactivate_mfa_device(UserName=user_name, SerialNumber=mfa_device["SerialNumber"]) + + # Remove user's inline policies + 
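# (inline policies must be deleted one at a time; IAM has no bulk call,
+        # and delete_user fails with a DeleteConflict while any remain)
+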
inline_policies = connection.list_user_policies(UserName=user_name)["PolicyNames"] + for policy_name in inline_policies: + connection.delete_user_policy(UserName=user_name, PolicyName=policy_name) + + # Remove user's group membership + user_groups = connection.list_groups_for_user(UserName=user_name)["Groups"] + for group in user_groups: + connection.remove_user_from_group(UserName=user_name, GroupName=group["GroupName"]) + + connection.delete_user(UserName=user_name) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to delete user {0}".format(user_name)) + + module.exit_json(changed=True) + + +def get_user(connection, module, name): + + params = dict() + params['UserName'] = name + + try: + user = connection.get_user(**params) + except is_boto3_error_code('NoSuchEntity'): + return None + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Unable to get user {0}".format(name)) + + tags = boto3_tag_list_to_ansible_dict(user['User'].pop('Tags', [])) + user = camel_dict_to_snake_dict(user) + user['user']['tags'] = tags + return user + + +def get_attached_policy_list(connection, module, name): + + try: + return connection.list_attached_user_policies(UserName=name)['AttachedPolicies'] + except is_boto3_error_code('NoSuchEntity'): + return None + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Unable to get policies for user {0}".format(name)) + + +def user_has_login_profile(connection, module, name): + ''' + Returns whether or not given user has a login profile. + Parameters: + connection: IAM client + module: AWSModule + name (str): Username of user + Returns: + (bool): True if user had login profile, False if not + ''' + try: + connection.get_login_profile(UserName=name) + except is_boto3_error_code('NoSuchEntity'): + return False + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Unable to get login profile for user {0}".format(name)) + return True + + +def update_user_tags(connection, module, params, user): + user_name = params['UserName'] + existing_tags = user['user']['tags'] + new_tags = params.get('Tags') + if new_tags is None: + return False + new_tags = boto3_tag_list_to_ansible_dict(new_tags) + + purge_tags = module.params.get('purge_tags') + + tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags) + + if not module.check_mode: + try: + if tags_to_remove: + connection.untag_user(UserName=user_name, TagKeys=tags_to_remove) + if tags_to_add: + connection.tag_user(UserName=user_name, Tags=ansible_dict_to_boto3_tag_list(tags_to_add)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Unable to set tags for user %s' % user_name) + + changed = bool(tags_to_add) or bool(tags_to_remove) + return changed + + +def main(): + + argument_spec = dict( + name=dict(required=True, type='str'), + password=dict(type='str', no_log=True), + password_reset_required=dict(type='bool', default=False, no_log=False), + update_password=dict(default='always', choices=['always', 'on_create'], no_log=False), + remove_password=dict(type='bool', no_log=False), + managed_policies=dict(default=[], type='list', aliases=['managed_policy'], elements='str'), + 
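# 'state' is required and has no default: callers must say explicitly
+        # whether the user should exist.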
+        state=dict(choices=['present', 'absent'], required=True),
+        purge_policies=dict(default=False, type='bool', aliases=['purge_policy', 'purge_managed_policies']),
+        tags=dict(type='dict', aliases=['resource_tags']),
+        purge_tags=dict(type='bool', default=True),
+        wait=dict(type='bool', default=True),
+        wait_timeout=dict(default=120, type='int'),
+    )
+
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        mutually_exclusive=[['password', 'remove_password']],
+    )
+
+    module.deprecate("The 'iam_user' return key is deprecated and will be replaced by 'user'. Both values are returned for now.",
+                     date='2024-05-01', collection_name='amazon.aws')
+
+    connection = module.client('iam')
+
+    state = module.params.get("state")
+
+    if state == 'present':
+        create_or_update_user(connection, module)
+    else:
+        destroy_user(connection, module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_user_info.py b/ansible_collections/amazon/aws/plugins/modules/iam_user_info.py
new file mode 100644
index 000000000..e9c95edca
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/iam_user_info.py
@@ -0,0 +1,199 @@
+#!/usr/bin/python
+
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: iam_user_info
+version_added: 5.0.0
+short_description: Gather facts about IAM users in AWS
+description:
+  - This module can be used to gather facts about IAM users in AWS.
+  - This module was originally added to C(community.aws) in release 1.0.0.
+author:
+  - Constantin Bugneac (@Constantin07)
+  - Abhijeet Kasurde (@Akasurde)
+options:
+  name:
+    description:
+      - The name of the IAM user to look for.
+    required: false
+    type: str
+  group:
+    description:
+      - The group name of the IAM user to look for. Mutually exclusive with C(path).
+    required: false
+    type: str
+  path:
+    description:
+      - The path to the IAM user. Mutually exclusive with C(group).
+      - If specified, all user names whose path starts with the provided value will be returned.
+    required: false
+    default: '/'
+    type: str
+extends_documentation_fragment:
+  - amazon.aws.aws
+  - amazon.aws.ec2
+  - amazon.aws.boto3
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+# Gather facts about "test" user.
+- name: Get IAM user info
+  amazon.aws.iam_user_info:
+    name: "test"
+
+# Gather facts about all users in the "dev" group.
+- name: Get IAM user info
+  amazon.aws.iam_user_info:
+    group: "dev"
+
+# Gather facts about all users with "/division_abc/subdivision_xyz/" path.
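+# (The path filter is a prefix match, so users in nested sub-paths are returned as well.)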
+- name: Get IAM user info
+  amazon.aws.iam_user_info:
+    path: "/division_abc/subdivision_xyz/"
+'''
+
+RETURN = r'''
+iam_users:
+  description: list of matching IAM users
+  returned: success
+  type: complex
+  contains:
+    arn:
+      description: the ARN of the user
+      returned: if user exists
+      type: str
+      sample: "arn:aws:iam::123456789012:user/dev/test_user"
+    create_date:
+      description: the datetime the user was created
+      returned: if user exists
+      type: str
+      sample: "2016-05-24T12:24:59+00:00"
+    password_last_used:
+      description: the last datetime the password was used by the user
+      returned: if password was used at least once
+      type: str
+      sample: "2016-05-25T13:39:11+00:00"
+    path:
+      description: the path to the user
+      returned: if user exists
+      type: str
+      sample: "/dev/"
+    user_id:
+      description: the unique user id
+      returned: if user exists
+      type: str
+      sample: "AIDUIOOCQKTUGI6QJLGH2"
+    user_name:
+      description: the user name
+      returned: if user exists
+      type: str
+      sample: "test_user"
+    tags:
+      description: User tags.
+      type: dict
+      returned: if user exists
+      sample: '{"Env": "Prod"}'
+'''
+
+try:
+    from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+    pass  # caught by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+
+@AWSRetry.exponential_backoff()
+def list_iam_users_with_backoff(client, operation, **kwargs):
+    paginator = client.get_paginator(operation)
+    return paginator.paginate(**kwargs).build_full_result()
+
+
+def describe_iam_user(user):
+    tags = boto3_tag_list_to_ansible_dict(user.pop('Tags', []))
+    user = camel_dict_to_snake_dict(user)
+    user['tags'] = tags
+    return user
+
+
+def list_iam_users(connection, module):
+
+    name = module.params.get('name')
+    group = module.params.get('group')
+    path = module.params.get('path')
+
+    params = dict()
+    iam_users = []
+
+    if not group and not path:
+        if name:
+            params['UserName'] = name
+        try:
+            iam_users.append(connection.get_user(**params)['User'])
+        except is_boto3_error_code('NoSuchEntity'):
+            pass
+        except (ClientError, BotoCoreError) as e:  # pylint: disable=duplicate-except
+            module.fail_json_aws(e, msg="Couldn't get IAM user info for user %s" % name)
+
+    if group:
+        params['GroupName'] = group
+        try:
+            iam_users = list_iam_users_with_backoff(connection, 'get_group', **params)['Users']
+        except is_boto3_error_code('NoSuchEntity'):
+            pass
+        except (ClientError, BotoCoreError) as e:  # pylint: disable=duplicate-except
+            module.fail_json_aws(e, msg="Couldn't get IAM user info for group %s" % group)
+        if name:
+            iam_users = [user for user in iam_users if user['UserName'] == name]
+
+    if path and not group:
+        params['PathPrefix'] = path
+        try:
+            iam_users = list_iam_users_with_backoff(connection, 'list_users', **params)['Users']
+        except is_boto3_error_code('NoSuchEntity'):
+            pass
+        except (ClientError, BotoCoreError) as e:  # pylint: disable=duplicate-except
+            module.fail_json_aws(e, msg="Couldn't get IAM user info for path %s" % path)
+        if name:
+            iam_users = [user for user in iam_users if user['UserName'] == name]
+
+    module.exit_json(iam_users=[describe_iam_user(user) for user in iam_users])
+
+
+def main():
+    argument_spec = dict(
+        name=dict(),
+        group=dict(),
+        path=dict(default='/')
+    )
+
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        mutually_exclusive=[
+            ['group', 'path']
+        ],
+        supports_check_mode=True
+    )
+
+    connection = module.client('iam')
+
+    list_iam_users(connection, module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/kms_key.py b/ansible_collections/amazon/aws/plugins/modules/kms_key.py
new file mode 100644
index 000000000..0cbaa9b05
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/kms_key.py
@@ -0,0 +1,1079 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: kms_key
+version_added: 5.0.0
+short_description: Perform various KMS key management tasks
+description:
+  - Manage role/user access to a KMS key.
+  - Not designed for encrypting/decrypting.
+  - Prior to release 5.0.0 this module was called C(community.aws.aws_kms).
+    The usage did not change.
+  - This module was originally added to C(community.aws) in release 1.0.0.
+options:
+  alias:
+    description:
+      - An alias for a key.
+      - For safety, even though KMS does not require keys to have an alias, this module expects all
+        new keys to be given an alias to make them easier to manage. Existing keys without an alias
+        may be referred to by I(key_id). Use M(amazon.aws.kms_key_info) to find key ids.
+      - Note that passing a I(key_id) and I(alias) will only cause a new alias to be added; an alias will never be renamed.
+      - The C(alias/) prefix is optional.
+      - Required if I(key_id) is not given.
+    required: false
+    aliases:
+      - key_alias
+    type: str
+  key_id:
+    description:
+      - Key ID or ARN of the key.
+      - One of I(alias) or I(key_id) is required.
+    required: false
+    aliases:
+      - key_arn
+    type: str
+  enable_key_rotation:
+    description:
+      - Whether the key should be automatically rotated every year.
+    required: false
+    type: bool
+  state:
+    description:
+      - Whether a key should be present or absent.
+      - Note that making an existing key C(absent) only schedules a key for deletion.
+      - Passing a key that is scheduled for deletion with I(state=present) will cancel key deletion.
+    required: False
+    choices:
+      - present
+      - absent
+    default: present
+    type: str
+  enabled:
+    description: Whether or not a key is enabled.
+    default: True
+    type: bool
+  description:
+    description:
+      - A description of the CMK.
+      - Use a description that helps you decide whether the CMK is appropriate for a task.
+    type: str
+  multi_region:
+    description:
+      - Whether to create a multi-Region primary key or not.
+    default: False
+    type: bool
+    version_added: 5.5.0
+  pending_window:
+    description:
+      - The number of days between requesting deletion of the CMK and when it will actually be deleted.
+      - Only used when I(state=absent) and the CMK has not yet been deleted.
+      - Valid values are between 7 and 30 (inclusive).
+      - 'See also: U(https://docs.aws.amazon.com/kms/latest/APIReference/API_ScheduleKeyDeletion.html#KMS-ScheduleKeyDeletion-request-PendingWindowInDays)'
+    type: int
+    aliases: ['deletion_delay']
+    version_added: 1.4.0
+    version_added_collection: community.aws
+  purge_grants:
+    description:
+      - Whether the I(grants) argument should cause grants not in the list to be removed.
+ default: False + type: bool + grants: + description: + - A list of grants to apply to the key. Each item must contain I(grantee_principal). + Each item can optionally contain I(retiring_principal), I(operations), I(constraints), + I(name). + - I(grantee_principal) and I(retiring_principal) must be ARNs + - 'For full documentation of suboptions see the boto3 documentation:' + - 'U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kms.html#KMS.Client.create_grant)' + type: list + elements: dict + default: [] + suboptions: + grantee_principal: + description: The full ARN of the principal being granted permissions. + required: true + type: str + retiring_principal: + description: The full ARN of the principal permitted to revoke/retire the grant. + type: str + operations: + type: list + elements: str + description: + - A list of operations that the grantee may perform using the CMK. + choices: ['Decrypt', 'Encrypt', 'GenerateDataKey', 'GenerateDataKeyWithoutPlaintext', 'ReEncryptFrom', 'ReEncryptTo', + 'CreateGrant', 'RetireGrant', 'DescribeKey', 'Verify', 'Sign'] + constraints: + description: + - Constraints is a dict containing C(encryption_context_subset) or C(encryption_context_equals), + either or both being a dict specifying an encryption context match. + See U(https://docs.aws.amazon.com/kms/latest/APIReference/API_GrantConstraints.html) or + U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kms.html#KMS.Client.create_grant) + type: dict + policy: + description: + - policy to apply to the KMS key. + - See U(https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) + type: json + key_spec: + aliases: + - customer_master_key_spec + description: + - Specifies the type of KMS key to create. + - The specification is not changeable once the key is created. + type: str + default: SYMMETRIC_DEFAULT + choices: ['SYMMETRIC_DEFAULT', 'RSA_2048', 'RSA_3072', 'RSA_4096', 'ECC_NIST_P256', 'ECC_NIST_P384', 'ECC_NIST_P521', 'ECC_SECG_P256K1'] + version_added: 2.1.0 + version_added_collection: community.aws + key_usage: + description: + - Determines the cryptographic operations for which you can use the KMS key. + - The usage is not changeable once the key is created. + type: str + default: ENCRYPT_DECRYPT + choices: ['ENCRYPT_DECRYPT', 'SIGN_VERIFY'] + version_added: 2.1.0 + version_added_collection: community.aws +author: + - Ted Timmons (@tedder) + - Will Thames (@willthames) + - Mark Chappell (@tremble) +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags + - amazon.aws.boto3 + +notes: + - There are known inconsistencies in the amount of time required for updates of KMS keys to be fully reflected on AWS. + This can cause issues when running duplicate tasks in succession or using the M(amazon.aws.kms_key_info) module to fetch key metadata + shortly after modifying keys. + For this reason, it is recommended to use the return data from this module (M(amazon.aws.kms_key)) to fetch a key's metadata. 
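+  - Grants are matched by their I(name); a named grant whose principals, operations or
+    constraints differ from the requested values is revoked and re-created rather than updated in place.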
+''' + +EXAMPLES = r''' +# Create a new KMS key +- amazon.aws.kms_key: + alias: mykey + tags: + Name: myKey + Purpose: protect_stuff + +# Create a new multi-region KMS key +- amazon.aws.kms_key: + alias: mykey + multi_region: true + tags: + Name: myKey + Purpose: protect_stuff + +# Update previous key with more tags +- amazon.aws.kms_key: + alias: mykey + tags: + Name: myKey + Purpose: protect_stuff + Owner: security_team + +# Update a known key with grants allowing an instance with the billing-prod IAM profile +# to decrypt data encrypted with the environment: production, application: billing +# encryption context +- amazon.aws.kms_key: + key_id: abcd1234-abcd-1234-5678-ef1234567890 + grants: + - name: billing_prod + grantee_principal: arn:aws:iam::123456789012:role/billing_prod + constraints: + encryption_context_equals: + environment: production + application: billing + operations: + - Decrypt + - RetireGrant + +- name: Update IAM policy on an existing KMS key + amazon.aws.kms_key: + alias: my-kms-key + policy: '{"Version": "2012-10-17", "Id": "my-kms-key-permissions", "Statement": [ { } ]}' + state: present + +- name: Example using lookup for policy json + amazon.aws.kms_key: + alias: my-kms-key + policy: "{{ lookup('template', 'kms_iam_policy_template.json.j2') }}" + state: present +''' + +RETURN = r''' +key_id: + description: ID of key. + type: str + returned: always + sample: abcd1234-abcd-1234-5678-ef1234567890 +key_arn: + description: ARN of key. + type: str + returned: always + sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890 +key_state: + description: + - The state of the key. + - Will be one of C('Creating'), C('Enabled'), C('Disabled'), C('PendingDeletion'), C('PendingImport'), + C('PendingReplicaDeletion'), C('Unavailable'), or C('Updating'). + type: str + returned: always + sample: PendingDeletion +key_usage: + description: The cryptographic operations for which you can use the key. + type: str + returned: always + sample: ENCRYPT_DECRYPT +origin: + description: The source of the key's key material. When this value is C(AWS_KMS), + AWS KMS created the key material. When this value is C(EXTERNAL), the + key material was imported or the CMK lacks key material. + type: str + returned: always + sample: AWS_KMS +aws_account_id: + description: The AWS Account ID that the key belongs to. + type: str + returned: always + sample: 1234567890123 +creation_date: + description: Date and time of creation of the key. + type: str + returned: always + sample: "2017-04-18T15:12:08.551000+10:00" +deletion_date: + description: Date and time after which KMS deletes this KMS key. + type: str + returned: when key_state is PendingDeletion + sample: "2017-04-18T15:12:08.551000+10:00" + version_added: 3.3.0 + version_added_collection: community.aws +description: + description: Description of the key. + type: str + returned: always + sample: "My Key for Protecting important stuff" +enabled: + description: Whether the key is enabled. True if I(key_state) is C(Enabled). + type: bool + returned: always + sample: false +enable_key_rotation: + description: Whether the automatic annual key rotation is enabled. Returns None if key rotation status can't be determined. + type: bool + returned: always + sample: false +aliases: + description: List of aliases associated with the key. + type: list + returned: always + sample: + - aws/acm + - aws/ebs +policies: + description: List of policy documents for the key. Empty when access is denied even if there are policies. 
+ type: list + returned: always + elements: str + sample: + Version: "2012-10-17" + Id: "auto-ebs-2" + Statement: + - Sid: "Allow access through EBS for all principals in the account that are authorized to use EBS" + Effect: "Allow" + Principal: + AWS: "*" + Action: + - "kms:Encrypt" + - "kms:Decrypt" + - "kms:ReEncrypt*" + - "kms:GenerateDataKey*" + - "kms:CreateGrant" + - "kms:DescribeKey" + Resource: "*" + Condition: + StringEquals: + kms:CallerAccount: "123456789012" + kms:ViaService: "ec2.ap-southeast-2.amazonaws.com" + - Sid: "Allow direct access to key metadata to the account" + Effect: "Allow" + Principal: + AWS: "arn:aws:iam::123456789012:root" + Action: + - "kms:Describe*" + - "kms:Get*" + - "kms:List*" + - "kms:RevokeGrant" + Resource: "*" +key_policies: + description: List of policy documents for the key. Empty when access is denied even if there are policies. + type: list + returned: always + elements: dict + sample: + Version: "2012-10-17" + Id: "auto-ebs-2" + Statement: + - Sid: "Allow access through EBS for all principals in the account that are authorized to use EBS" + Effect: "Allow" + Principal: + AWS: "*" + Action: + - "kms:Encrypt" + - "kms:Decrypt" + - "kms:ReEncrypt*" + - "kms:GenerateDataKey*" + - "kms:CreateGrant" + - "kms:DescribeKey" + Resource: "*" + Condition: + StringEquals: + kms:CallerAccount: "123456789012" + kms:ViaService: "ec2.ap-southeast-2.amazonaws.com" + - Sid: "Allow direct access to key metadata to the account" + Effect: "Allow" + Principal: + AWS: "arn:aws:iam::123456789012:root" + Action: + - "kms:Describe*" + - "kms:Get*" + - "kms:List*" + - "kms:RevokeGrant" + Resource: "*" + version_added: 3.3.0 + version_added_collection: community.aws +tags: + description: Dictionary of tags applied to the key. Empty when access is denied even if there are tags. + type: dict + returned: always + sample: + Name: myKey + Purpose: protecting_stuff +grants: + description: List of grants associated with a key. + type: list + elements: dict + returned: always + contains: + constraints: + description: Constraints on the encryption context that the grant allows. + See U(https://docs.aws.amazon.com/kms/latest/APIReference/API_GrantConstraints.html) for further details + type: dict + returned: always + sample: + encryption_context_equals: + "aws:lambda:_function_arn": "arn:aws:lambda:ap-southeast-2:123456789012:function:xyz" + creation_date: + description: Date of creation of the grant. + type: str + returned: always + sample: "2017-04-18T15:12:08+10:00" + grant_id: + description: The unique ID for the grant. + type: str + returned: always + sample: abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234 + grantee_principal: + description: The principal that receives the grant's permissions. + type: str + returned: always + sample: arn:aws:sts::123456789012:assumed-role/lambda_xyz/xyz + issuing_account: + description: The AWS account under which the grant was issued. + type: str + returned: always + sample: arn:aws:iam::123456789012:root + key_id: + description: The key ARN to which the grant applies. + type: str + returned: always + sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890 + name: + description: The friendly name that identifies the grant. + type: str + returned: always + sample: xyz + operations: + description: The list of operations permitted by the grant. + type: list + returned: always + sample: + - Decrypt + - RetireGrant + retiring_principal: + description: The principal that can retire the grant. 
+ type: str + returned: always + sample: arn:aws:sts::123456789012:assumed-role/lambda_xyz/xyz +changes_needed: + description: Grant types that would be changed/were changed. + type: dict + returned: always + sample: { "role": "add", "role grant": "add" } +had_invalid_entries: + description: Whether there are invalid (non-ARN) entries in the KMS entry. These don't count as a change, but will be removed if any changes are being made. + type: bool + returned: always +multi_region: + description: + - Indicates whether the CMK is a multi-Region C(True) or regional C(False) key. + - This value is True for multi-Region primary and replica CMKs and False for regional CMKs. + type: bool + version_added: 5.5.0 + returned: always + sample: False + + +''' + +# these mappings are used to go from simple labels to the actual 'Sid' values returned +# by get_policy. They seem to be magic values. +statement_label = { + 'role': 'Allow use of the key', + 'role grant': 'Allow attachment of persistent resources', + 'admin': 'Allow access for Key Administrators' +} + +import json + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies + + +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) +def get_iam_roles_with_backoff(connection): + paginator = connection.get_paginator('list_roles') + return paginator.paginate().build_full_result() + + +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) +def get_kms_keys_with_backoff(connection): + paginator = connection.get_paginator('list_keys') + return paginator.paginate().build_full_result() + + +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) +def get_kms_aliases_with_backoff(connection): + paginator = connection.get_paginator('list_aliases') + return paginator.paginate().build_full_result() + + +def get_kms_aliases_lookup(connection): + _aliases = dict() + for alias in get_kms_aliases_with_backoff(connection)['Aliases']: + # Not all aliases are actually associated with a key + if 'TargetKeyId' in alias: + # strip off leading 'alias/' and add it to key's aliases + if alias['TargetKeyId'] in _aliases: + _aliases[alias['TargetKeyId']].append(alias['AliasName'][6:]) + else: + _aliases[alias['TargetKeyId']] = [alias['AliasName'][6:]] + return _aliases + + +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) +def get_kms_tags_with_backoff(connection, key_id, **kwargs): + return connection.list_resource_tags(KeyId=key_id, **kwargs) + + +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) +def get_kms_grants_with_backoff(connection, key_id): + params = dict(KeyId=key_id) + paginator = connection.get_paginator('list_grants') + return paginator.paginate(**params).build_full_result() + + +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) +def get_kms_metadata_with_backoff(connection, 
key_id): + return connection.describe_key(KeyId=key_id) + + +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) +def list_key_policies_with_backoff(connection, key_id): + paginator = connection.get_paginator('list_key_policies') + return paginator.paginate(KeyId=key_id).build_full_result() + + +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) +def get_key_policy_with_backoff(connection, key_id, policy_name): + return connection.get_key_policy(KeyId=key_id, PolicyName=policy_name) + + +def get_kms_tags(connection, module, key_id): + # Handle pagination here as list_resource_tags does not have + # a paginator + kwargs = {} + tags = [] + more = True + while more: + try: + tag_response = get_kms_tags_with_backoff(connection, key_id, **kwargs) + tags.extend(tag_response['Tags']) + except is_boto3_error_code('AccessDeniedException'): + tag_response = {} + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to obtain key tags") + if tag_response.get('NextMarker'): + kwargs['Marker'] = tag_response['NextMarker'] + else: + more = False + return tags + + +def get_kms_policies(connection, module, key_id): + try: + policies = list_key_policies_with_backoff(connection, key_id)['PolicyNames'] + return [ + get_key_policy_with_backoff(connection, key_id, policy)['Policy'] + for policy in policies + ] + except is_boto3_error_code('AccessDeniedException'): + return [] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to obtain key policies") + + +def camel_to_snake_grant(grant): + '''camel_to_snake_grant snakifies everything except the encryption context ''' + constraints = grant.get('Constraints', {}) + result = camel_dict_to_snake_dict(grant) + if 'EncryptionContextEquals' in constraints: + result['constraints']['encryption_context_equals'] = constraints['EncryptionContextEquals'] + if 'EncryptionContextSubset' in constraints: + result['constraints']['encryption_context_subset'] = constraints['EncryptionContextSubset'] + return result + + +def get_key_details(connection, module, key_id): + try: + result = get_kms_metadata_with_backoff(connection, key_id)['KeyMetadata'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to obtain key metadata") + result['KeyArn'] = result.pop('Arn') + + try: + aliases = get_kms_aliases_lookup(connection) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to obtain aliases") + + try: + current_rotation_status = connection.get_key_rotation_status(KeyId=key_id) + result['enable_key_rotation'] = current_rotation_status.get('KeyRotationEnabled') + except is_boto3_error_code(['AccessDeniedException', 'UnsupportedOperationException']) as e: + result['enable_key_rotation'] = None + result['aliases'] = aliases.get(result['KeyId'], []) + + result = camel_dict_to_snake_dict(result) + + # grants and tags get snakified differently + try: + result['grants'] = [ + camel_to_snake_grant(grant) + for grant in get_kms_grants_with_backoff(connection, key_id)['Grants'] + ] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to obtain key grants") + tags = get_kms_tags(connection, module, key_id) + result['tags'] = boto3_tag_list_to_ansible_dict(tags, 'TagKey', 
'TagValue') + result['policies'] = get_kms_policies(connection, module, key_id) + result['key_policies'] = [json.loads(policy) for policy in result['policies']] + return result + + +def get_kms_facts(connection, module): + try: + keys = get_kms_keys_with_backoff(connection)['Keys'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to obtain keys") + + return [get_key_details(connection, module, key['KeyId']) for key in keys] + + +def convert_grant_params(grant, key): + grant_params = dict( + KeyId=key['key_arn'], GranteePrincipal=grant['grantee_principal'] + ) + if grant.get('operations'): + grant_params['Operations'] = grant['operations'] + if grant.get('retiring_principal'): + grant_params['RetiringPrincipal'] = grant['retiring_principal'] + if grant.get('name'): + grant_params['Name'] = grant['name'] + if grant.get('constraints'): + grant_params['Constraints'] = dict() + if grant['constraints'].get('encryption_context_subset'): + grant_params['Constraints']['EncryptionContextSubset'] = grant['constraints']['encryption_context_subset'] + if grant['constraints'].get('encryption_context_equals'): + grant_params['Constraints']['EncryptionContextEquals'] = grant['constraints']['encryption_context_equals'] + return grant_params + + +def different_grant(existing_grant, desired_grant): + if existing_grant.get('grantee_principal') != desired_grant.get('grantee_principal'): + return True + if existing_grant.get('retiring_principal') != desired_grant.get('retiring_principal'): + return True + if set(existing_grant.get('operations', [])) != set(desired_grant.get('operations')): + return True + if existing_grant.get('constraints') != desired_grant.get('constraints'): + return True + return False + + +def compare_grants(existing_grants, desired_grants, purge_grants=False): + existing_dict = dict((eg['name'], eg) for eg in existing_grants) + desired_dict = dict((dg['name'], dg) for dg in desired_grants) + to_add_keys = set(desired_dict.keys()) - set(existing_dict.keys()) + if purge_grants: + to_remove_keys = set(existing_dict.keys()) - set(desired_dict.keys()) + else: + to_remove_keys = set() + to_change_candidates = set(existing_dict.keys()) & set(desired_dict.keys()) + for candidate in to_change_candidates: + if different_grant(existing_dict[candidate], desired_dict[candidate]): + to_add_keys.add(candidate) + to_remove_keys.add(candidate) + + to_add = [] + to_remove = [] + for key in to_add_keys: + grant = desired_dict[key] + to_add.append(grant) + for key in to_remove_keys: + grant = existing_dict[key] + to_remove.append(grant) + return to_add, to_remove + + +def start_key_deletion(connection, module, key_metadata): + if key_metadata['KeyState'] == 'PendingDeletion': + return False + + if module.check_mode: + return True + + deletion_params = {'KeyId': key_metadata['Arn']} + if module.params.get('pending_window'): + deletion_params['PendingWindowInDays'] = module.params.get('pending_window') + + try: + connection.schedule_key_deletion(**deletion_params) + return True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to schedule key for deletion") + + +def cancel_key_deletion(connection, module, key): + key_id = key['key_arn'] + if key['key_state'] != 'PendingDeletion': + return False + + if module.check_mode: + return True + + try: + connection.cancel_key_deletion(KeyId=key_id) + # key is disabled after deletion cancellation + # set this so that 
ensure_enabled_disabled works correctly + key['key_state'] = 'Disabled' + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to cancel key deletion") + + return True + + +def ensure_enabled_disabled(connection, module, key, enabled): + desired_state = 'Enabled' + if not enabled: + desired_state = 'Disabled' + + if key['key_state'] == desired_state: + return False + + key_id = key['key_arn'] + if not module.check_mode: + if enabled: + try: + connection.enable_key(KeyId=key_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to enable key") + else: + try: + connection.disable_key(KeyId=key_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to disable key") + + return True + + +def update_alias(connection, module, key, alias): + alias = canonicalize_alias_name(alias) + + if alias is None: + return False + + key_id = key['key_arn'] + aliases = get_kms_aliases_with_backoff(connection)['Aliases'] + # We will only add new aliases, not rename existing ones + if alias in [_alias['AliasName'] for _alias in aliases]: + return False + + if not module.check_mode: + try: + connection.create_alias(TargetKeyId=key_id, AliasName=alias) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to create key alias") + + return True + + +def update_description(connection, module, key, description): + if description is None: + return False + if key['description'] == description: + return False + + key_id = key['key_arn'] + if not module.check_mode: + try: + connection.update_key_description(KeyId=key_id, Description=description) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to update key description") + + return True + + +def update_tags(connection, module, key, desired_tags, purge_tags): + if desired_tags is None: + return False + + to_add, to_remove = compare_aws_tags(key['tags'], desired_tags, purge_tags) + if not (bool(to_add) or bool(to_remove)): + return False + + key_id = key['key_arn'] + if not module.check_mode: + if to_remove: + try: + connection.untag_resource(KeyId=key_id, TagKeys=to_remove) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to remove tag") + if to_add: + try: + tags = ansible_dict_to_boto3_tag_list( + desired_tags, + tag_name_key_name='TagKey', + tag_value_key_name='TagValue', + ) + connection.tag_resource(KeyId=key_id, Tags=tags) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to add tag to key") + + return True + + +def update_policy(connection, module, key, policy): + if policy is None: + return False + try: + new_policy = json.loads(policy) + except ValueError as e: + module.fail_json_aws(e, msg="Unable to parse new policy as JSON") + + key_id = key['key_arn'] + try: + keyret = connection.get_key_policy(KeyId=key_id, PolicyName='default') + original_policy = json.loads(keyret['Policy']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError): + # If we can't fetch the current policy, assume we're making a change. + # This can occur if we have PutKeyPolicy without GetKeyPolicy. + original_policy = {} + + if not compare_policies(original_policy, new_policy):
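+ # compare_policies() returns True only when the two policy documents differ + # semantically (key ordering and formatting are ignored), so reaching this + # branch means the desired policy is already in place and no update is needed. +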
return False + + if not module.check_mode: + try: + connection.put_key_policy(KeyId=key_id, PolicyName='default', Policy=policy) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to update key policy") + + return True + + +def update_key_rotation(connection, module, key, enable_key_rotation): + if enable_key_rotation is None: + return False + key_id = key['key_arn'] + + try: + current_rotation_status = connection.get_key_rotation_status(KeyId=key_id) + if current_rotation_status.get('KeyRotationEnabled') == enable_key_rotation: + return False + except is_boto3_error_code('AccessDeniedException'): + pass + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Unable to get current key rotation status") + + if not module.check_mode: + try: + if enable_key_rotation: + connection.enable_key_rotation(KeyId=key_id) + else: + connection.disable_key_rotation(KeyId=key_id) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to enable/disable key rotation") + + return True + + +def update_grants(connection, module, key, desired_grants, purge_grants): + existing_grants = key['grants'] + + to_add, to_remove = compare_grants(existing_grants, desired_grants, purge_grants) + if not (bool(to_add) or bool(to_remove)): + return False + + key_id = key['key_arn'] + if not module.check_mode: + for grant in to_remove: + try: + connection.retire_grant(KeyId=key_id, GrantId=grant['grant_id']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to retire grant") + for grant in to_add: + grant_params = convert_grant_params(grant, key) + try: + connection.create_grant(**grant_params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to create grant") + + return True + + +def update_key(connection, module, key): + changed = False + + changed |= cancel_key_deletion(connection, module, key) + changed |= ensure_enabled_disabled(connection, module, key, module.params['enabled']) + changed |= update_alias(connection, module, key, module.params['alias']) + changed |= update_description(connection, module, key, module.params['description']) + changed |= update_tags(connection, module, key, module.params['tags'], module.params.get('purge_tags')) + changed |= update_policy(connection, module, key, module.params.get('policy')) + changed |= update_grants(connection, module, key, module.params.get('grants'), module.params.get('purge_grants')) + changed |= update_key_rotation(connection, module, key, module.params.get('enable_key_rotation')) + + # make results consistent with kms_facts before returning + result = get_key_details(connection, module, key['key_arn']) + result['changed'] = changed + return result + + +def create_key(connection, module): + key_usage = module.params.get('key_usage') + key_spec = module.params.get('key_spec') + multi_region = module.params.get('multi_region') + tags_list = ansible_dict_to_boto3_tag_list( + module.params['tags'] or {}, + # KMS doesn't use 'Key' and 'Value' as other APIs do. 
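+ # e.g. {'Name': 'my-key'} becomes [{'TagKey': 'Name', 'TagValue': 'my-key'}]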
+ tag_name_key_name='TagKey', + tag_value_key_name='TagValue', + ) + params = dict( + BypassPolicyLockoutSafetyCheck=False, + Tags=tags_list, + KeyUsage=key_usage, + CustomerMasterKeySpec=key_spec, + Origin='AWS_KMS', + MultiRegion=multi_region, + ) + + if module.check_mode: + return {'changed': True} + + if module.params.get('description'): + params['Description'] = module.params['description'] + if module.params.get('policy'): + params['Policy'] = module.params['policy'] + try: + result = connection.create_key(**params)['KeyMetadata'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to create initial key") + + key = get_key_details(connection, module, result['KeyId']) + update_alias(connection, module, key, module.params['alias']) + update_key_rotation(connection, module, key, module.params.get('enable_key_rotation')) + + ensure_enabled_disabled(connection, module, key, module.params.get('enabled')) + update_grants(connection, module, key, module.params.get('grants'), False) + + # make results consistent with kms_facts + result = get_key_details(connection, module, key['key_id']) + result['changed'] = True + return result + + +def delete_key(connection, module, key_metadata): + changed = False + + changed |= start_key_deletion(connection, module, key_metadata) + + result = get_key_details(connection, module, key_metadata['Arn']) + result['changed'] = changed + return result + + +def get_arn_from_role_name(iam, rolename): + ret = iam.get_role(RoleName=rolename) + if ret.get('Role') and ret['Role'].get('Arn'): + return ret['Role']['Arn'] + raise Exception('could not find arn for name {0}.'.format(rolename)) + + +def canonicalize_alias_name(alias): + if alias is None: + return None + if alias.startswith('alias/'): + return alias + return 'alias/' + alias + + +def fetch_key_metadata(connection, module, key_id, alias): + # Note - fetching a key's metadata is very inconsistent shortly after any sort of update to a key has occurred. + # Manual waiters, comparing expected key values against actual key values, and static sleeps + # have all been tried, but none of those approaches has solved the problem. + # Integration tests will wait for 10 seconds to combat this issue. + # See https://github.com/ansible-collections/community.aws/pull/1052. + + alias = canonicalize_alias_name(module.params.get('alias')) + + try: + # Fetch by key_id where possible + if key_id: + return get_kms_metadata_with_backoff(connection, key_id)['KeyMetadata'] + # Or try alias as a backup + return get_kms_metadata_with_backoff(connection, alias)['KeyMetadata'] + + except connection.exceptions.NotFoundException: + return None + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, "Failed to fetch key metadata.") + + +def validate_params(module, key_metadata): + # We can't create keys with a specific ID; if we can't access the key we'll have to fail + if ( + module.params.get('state') == 'present' + and module.params.get('key_id') + and not key_metadata + ): + module.fail_json( + msg='Could not find key with id {0} to update'.format( + module.params.get('key_id') + ) + ) + if ( + module.params.get('multi_region') + and key_metadata + and module.params.get('state') == 'present' + ): + module.fail_json( + msg='You cannot change the multi-region property on an existing key.'
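+ # MultiRegion can only be chosen when the key is created (create_key); + # KMS offers no API call to convert an existing key in place.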
+ ) + + +def main(): + argument_spec = dict( + alias=dict(aliases=['key_alias']), + pending_window=dict(aliases=['deletion_delay'], type='int'), + key_id=dict(aliases=['key_arn']), + description=dict(), + enabled=dict(type='bool', default=True), + multi_region=dict(type='bool', default=False), + tags=dict(type='dict', aliases=['resource_tags']), + purge_tags=dict(type='bool', default=True), + grants=dict(type='list', default=[], elements='dict'), + policy=dict(type='json'), + purge_grants=dict(type='bool', default=False), + state=dict(default='present', choices=['present', 'absent']), + enable_key_rotation=(dict(type='bool')), + key_spec=dict( + type='str', + default='SYMMETRIC_DEFAULT', + aliases=['customer_master_key_spec'], + choices=[ + 'SYMMETRIC_DEFAULT', + 'RSA_2048', + 'RSA_3072', + 'RSA_4096', + 'ECC_NIST_P256', + 'ECC_NIST_P384', + 'ECC_NIST_P521', + 'ECC_SECG_P256K1', + ], + ), + key_usage=dict( + type='str', + default='ENCRYPT_DECRYPT', + choices=['ENCRYPT_DECRYPT', 'SIGN_VERIFY'], + ), + ) + + module = AnsibleAWSModule( + supports_check_mode=True, + argument_spec=argument_spec, + required_one_of=[['alias', 'key_id']], + ) + + kms = module.client('kms') + + module.deprecate( + "The 'policies' return key is deprecated and will be replaced by 'key_policies'. Both values are returned for now.", + date='2024-05-01', + collection_name='amazon.aws', + ) + + key_metadata = fetch_key_metadata(kms, module, module.params.get('key_id'), module.params.get('alias')) + validate_params(module, key_metadata) + + if module.params.get('state') == 'absent': + if key_metadata is None: + module.exit_json(changed=False) + result = delete_key(kms, module, key_metadata) + module.exit_json(**result) + + if key_metadata: + key_details = get_key_details(kms, module, key_metadata['Arn']) + result = update_key(kms, module, key_details) + module.exit_json(**result) + + result = create_key(kms, module) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/kms_key_info.py b/ansible_collections/amazon/aws/plugins/modules/kms_key_info.py new file mode 100644 index 000000000..ba8f30a2f --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/kms_key_info.py @@ -0,0 +1,531 @@ +#!/usr/bin/python +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: kms_key_info +version_added: 5.0.0 +short_description: Gather information about AWS KMS keys +description: + - Gather information about AWS KMS keys including tags and grants. + - Prior to release 5.0.0 this module was called C(community.aws.aws_kms_info). + The usage did not change. + - This module was originally added to C(community.aws) in release 1.0.0. +author: + - "Will Thames (@willthames)" +options: + alias: + description: + - Alias for key. + - Mutually exclusive with I(key_id) and I(filters). + required: false + aliases: + - key_alias + type: str + version_added: 1.4.0 + version_added_collection: community.aws + key_id: + description: + - Key ID or ARN of the key. + - Mutually exclusive with I(alias) and I(filters). + required: false + aliases: + - key_arn + type: str + version_added: 1.4.0 + version_added_collection: community.aws + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. 
The filters aren't natively supported by boto3, but are supported to provide similar + functionality to other modules. Standard tag filters (C(tag-key), C(tag-value) and + C(tag:tagName)) are available, as are C(key-id) and C(alias). + - Mutually exclusive with I(alias) and I(key_id). + type: dict + pending_deletion: + description: Whether to get full details (tags, grants, etc.) of keys pending deletion. + default: False + type: bool +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.boto3 +''' + +EXAMPLES = r''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Gather information about all KMS keys +- amazon.aws.kms_key_info: + +# Gather information about all keys with a Name tag +- amazon.aws.kms_key_info: + filters: + tag-key: Name + +# Gather information about all keys with a specific name +- amazon.aws.kms_key_info: + filters: + "tag:Name": Example +''' + +RETURN = r''' +kms_keys: + description: List of keys. + type: complex + returned: always + contains: + key_id: + description: ID of key. + type: str + returned: always + sample: abcd1234-abcd-1234-5678-ef1234567890 + key_arn: + description: ARN of key. + type: str + returned: always + sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890 + key_state: + description: + - The state of the key. + - Will be one of C('Creating'), C('Enabled'), C('Disabled'), C('PendingDeletion'), C('PendingImport'), + C('PendingReplicaDeletion'), C('Unavailable'), or C('Updating'). + type: str + returned: always + sample: PendingDeletion + key_usage: + description: The cryptographic operations for which you can use the key. + type: str + returned: always + sample: ENCRYPT_DECRYPT + origin: + description: The source of the key's key material. When this value is C(AWS_KMS), + AWS KMS created the key material. When this value is C(EXTERNAL), the + key material was imported or the CMK lacks key material. + type: str + returned: always + sample: AWS_KMS + aws_account_id: + description: The AWS Account ID that the key belongs to. + type: str + returned: always + sample: 123456789012 + creation_date: + description: Date and time of creation of the key. + type: str + returned: always + sample: "2017-04-18T15:12:08.551000+10:00" + deletion_date: + description: Date and time after which KMS deletes this KMS key. + type: str + returned: when key_state is PendingDeletion + sample: "2017-04-18T15:12:08.551000+10:00" + version_added: 3.3.0 + version_added_collection: community.aws + description: + description: Description of the key. + type: str + returned: always + sample: "My Key for Protecting important stuff" + enabled: + description: Whether the key is enabled. True if I(key_state) is C(Enabled). + type: bool + returned: always + sample: false + enable_key_rotation: + description: Whether the automatic annual key rotation is enabled. Returns None if key rotation status can't be determined. + type: bool + returned: always + sample: false + aliases: + description: List of aliases associated with the key. + type: list + returned: always + sample: + - aws/acm + - aws/ebs + tags: + description: Dictionary of tags applied to the key. Empty when access is denied even if there are tags. + type: dict + returned: always + sample: + Name: myKey + Purpose: protecting_stuff + policies: + description: List of policy documents for the key. Empty when access is denied even if there are policies.
+ type: list + returned: always + elements: str + sample: + Version: "2012-10-17" + Id: "auto-ebs-2" + Statement: + - Sid: "Allow access through EBS for all principals in the account that are authorized to use EBS" + Effect: "Allow" + Principal: + AWS: "*" + Action: + - "kms:Encrypt" + - "kms:Decrypt" + - "kms:ReEncrypt*" + - "kms:GenerateDataKey*" + - "kms:CreateGrant" + - "kms:DescribeKey" + Resource: "*" + Condition: + StringEquals: + kms:CallerAccount: "123456789012" + kms:ViaService: "ec2.ap-southeast-2.amazonaws.com" + - Sid: "Allow direct access to key metadata to the account" + Effect: "Allow" + Principal: + AWS: "arn:aws:iam::123456789012:root" + Action: + - "kms:Describe*" + - "kms:Get*" + - "kms:List*" + - "kms:RevokeGrant" + Resource: "*" + key_policies: + description: List of policy documents for the key. Empty when access is denied even if there are policies. + type: list + returned: always + elements: dict + sample: + Version: "2012-10-17" + Id: "auto-ebs-2" + Statement: + - Sid: "Allow access through EBS for all principals in the account that are authorized to use EBS" + Effect: "Allow" + Principal: + AWS: "*" + Action: + - "kms:Encrypt" + - "kms:Decrypt" + - "kms:ReEncrypt*" + - "kms:GenerateDataKey*" + - "kms:CreateGrant" + - "kms:DescribeKey" + Resource: "*" + Condition: + StringEquals: + kms:CallerAccount: "123456789012" + kms:ViaService: "ec2.ap-southeast-2.amazonaws.com" + - Sid: "Allow direct access to key metadata to the account" + Effect: "Allow" + Principal: + AWS: "arn:aws:iam::123456789012:root" + Action: + - "kms:Describe*" + - "kms:Get*" + - "kms:List*" + - "kms:RevokeGrant" + Resource: "*" + version_added: 3.3.0 + version_added_collection: community.aws + grants: + description: List of grants associated with a key. + type: list + elements: dict + returned: always + contains: + constraints: + description: Constraints on the encryption context that the grant allows. + See U(https://docs.aws.amazon.com/kms/latest/APIReference/API_GrantConstraints.html) for further details + type: dict + returned: always + sample: + encryption_context_equals: + "aws:lambda:_function_arn": "arn:aws:lambda:ap-southeast-2:123456789012:function:xyz" + creation_date: + description: Date of creation of the grant. + type: str + returned: always + sample: "2017-04-18T15:12:08+10:00" + grant_id: + description: The unique ID for the grant. + type: str + returned: always + sample: abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234 + grantee_principal: + description: The principal that receives the grant's permissions. + type: str + returned: always + sample: arn:aws:sts::123456789012:assumed-role/lambda_xyz/xyz + issuing_account: + description: The AWS account under which the grant was issued. + type: str + returned: always + sample: arn:aws:iam::123456789012:root + key_id: + description: The key ARN to which the grant applies. + type: str + returned: always + sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890 + name: + description: The friendly name that identifies the grant. + type: str + returned: always + sample: xyz + operations: + description: The list of operations permitted by the grant. + type: list + returned: always + sample: + - Decrypt + - RetireGrant + retiring_principal: + description: The principal that can retire the grant. 
+ type: str + returned: always + sample: arn:aws:sts::123456789012:assumed-role/lambda_xyz/xyz +''' + +import json + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict + +# Caching lookup for aliases +_aliases = dict() + + +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) +def get_kms_keys_with_backoff(connection): + paginator = connection.get_paginator('list_keys') + return paginator.paginate().build_full_result() + + +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) +def get_kms_aliases_with_backoff(connection): + paginator = connection.get_paginator('list_aliases') + return paginator.paginate().build_full_result() + + +def get_kms_aliases_lookup(connection): + if not _aliases: + for alias in get_kms_aliases_with_backoff(connection)['Aliases']: + # Not all aliases are actually associated with a key + if 'TargetKeyId' in alias: + # strip off leading 'alias/' and add it to key's aliases + if alias['TargetKeyId'] in _aliases: + _aliases[alias['TargetKeyId']].append(alias['AliasName'][6:]) + else: + _aliases[alias['TargetKeyId']] = [alias['AliasName'][6:]] + return _aliases + + +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) +def get_kms_tags_with_backoff(connection, key_id, **kwargs): + return connection.list_resource_tags(KeyId=key_id, **kwargs) + + +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) +def get_kms_grants_with_backoff(connection, key_id, **kwargs): + params = dict(KeyId=key_id) + if kwargs.get('tokens'): + params['GrantTokens'] = kwargs['tokens'] + paginator = connection.get_paginator('list_grants') + return paginator.paginate(**params).build_full_result() + + +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) +def get_kms_metadata_with_backoff(connection, key_id): + return connection.describe_key(KeyId=key_id) + + +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) +def list_key_policies_with_backoff(connection, key_id): + paginator = connection.get_paginator('list_key_policies') + return paginator.paginate(KeyId=key_id).build_full_result() + + +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) +def get_key_policy_with_backoff(connection, key_id, policy_name): + return connection.get_key_policy(KeyId=key_id, PolicyName=policy_name) + + +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) +def get_enable_key_rotation_with_backoff(connection, key_id): + try: + current_rotation_status = connection.get_key_rotation_status(KeyId=key_id) + except is_boto3_error_code(['AccessDeniedException', 'UnsupportedOperationException']) as e: + return None + + return current_rotation_status.get('KeyRotationEnabled') + + +def canonicalize_alias_name(alias): + if alias is None: + return None + if alias.startswith('alias/'): + return alias + return 'alias/' + alias + + +def get_kms_tags(connection, module, key_id): + # Handle pagination here as list_resource_tags does not have + # a paginator + kwargs = {} + tags = [] + more = True + while more: + try: + tag_response = get_kms_tags_with_backoff(connection, key_id, **kwargs) + 
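# list_resource_tags returns at most one page per call, e.g. + # {'Tags': [{'TagKey': 'Name', 'TagValue': 'my-key'}], 'NextMarker': '...', 'Truncated': True}; + # the loop below follows NextMarker by hand since boto3 has no paginator for it. +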
tags.extend(tag_response['Tags']) + except is_boto3_error_code('AccessDeniedException'): + tag_response = {} + except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to obtain key tags") + if tag_response.get('NextMarker'): + kwargs['Marker'] = tag_response['NextMarker'] + else: + more = False + return tags + + +def get_kms_policies(connection, module, key_id): + try: + policies = list_key_policies_with_backoff(connection, key_id)['PolicyNames'] + return [get_key_policy_with_backoff(connection, key_id, policy)['Policy'] for + policy in policies] + except is_boto3_error_code('AccessDeniedException'): + return [] + except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to obtain key policies") + + +def key_matches_filter(key, filtr): + if filtr[0] == 'key-id': + return filtr[1] == key['key_id'] + if filtr[0] == 'tag-key': + return filtr[1] in key['tags'] + if filtr[0] == 'tag-value': + return filtr[1] in key['tags'].values() + if filtr[0] == 'alias': + return filtr[1] in key['aliases'] + if filtr[0].startswith('tag:'): + tag_key = filtr[0][4:] + if tag_key not in key['tags']: + return False + return key['tags'].get(tag_key) == filtr[1] + + +def key_matches_filters(key, filters): + if not filters: + return True + else: + return all(key_matches_filter(key, filtr) for filtr in filters.items()) + + +def get_key_details(connection, module, key_id, tokens=None): + if not tokens: + tokens = [] + try: + result = get_kms_metadata_with_backoff(connection, key_id)['KeyMetadata'] + # Make sure we have the canonical ARN, we might have been passed an alias + key_id = result['Arn'] + except is_boto3_error_code('NotFoundException'): + return None + except is_boto3_error_code('AccessDeniedException'): # pylint: disable=duplicate-except + module.warn('Permission denied fetching key metadata ({0})'.format(key_id)) + return None + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to obtain key metadata") + result['KeyArn'] = result.pop('Arn') + + try: + aliases = get_kms_aliases_lookup(connection) + except is_boto3_error_code('AccessDeniedException'): + module.warn('Permission denied fetching key aliases') + aliases = {} + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to obtain aliases") + # We can only get aliases for our own account, so we don't need the full ARN + result['aliases'] = aliases.get(result['KeyId'], []) + result['enable_key_rotation'] = get_enable_key_rotation_with_backoff(connection, key_id) + + if module.params.get('pending_deletion'): + return camel_dict_to_snake_dict(result) + + try: + result['grants'] = get_kms_grants_with_backoff(connection, key_id, tokens=tokens)['Grants'] + except is_boto3_error_code('AccessDeniedException'): + module.warn('Permission denied fetching key grants ({0})'.format(key_id)) + result['grants'] = [] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to obtain key grants") + + tags = get_kms_tags(connection, module, key_id) + + result = camel_dict_to_snake_dict(result) + result['tags'] = boto3_tag_list_to_ansible_dict(tags, 'TagKey', 'TagValue') + result['policies'] = get_kms_policies(connection, module, key_id) + 
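# 'policies' (deprecated, see the note in main()) carries the raw policy documents + # as JSON strings; 'key_policies' exposes the same documents parsed into dicts. +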
result['key_policies'] = [json.loads(policy) for policy in result['policies']] + return result + + +def get_kms_info(connection, module): + if module.params.get('key_id'): + key_id = module.params.get('key_id') + details = get_key_details(connection, module, key_id) + if details: + return [details] + return [] + elif module.params.get('alias'): + alias = canonicalize_alias_name(module.params.get('alias')) + details = get_key_details(connection, module, alias) + if details: + return [details] + return [] + else: + try: + keys = get_kms_keys_with_backoff(connection)['Keys'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to obtain keys") + return [get_key_details(connection, module, key['KeyId']) for key in keys] + + +def main(): + argument_spec = dict( + alias=dict(aliases=['key_alias']), + key_id=dict(aliases=['key_arn']), + filters=dict(type='dict'), + pending_deletion=dict(type='bool', default=False), + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, + mutually_exclusive=[['alias', 'filters', 'key_id']], + supports_check_mode=True) + + try: + connection = module.client('kms') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') + + module.deprecate("The 'policies' return key is deprecated and will be replaced by 'key_policies'. Both values are returned for now.", + date='2024-05-01', collection_name='amazon.aws') + + all_keys = get_kms_info(connection, module) + filtered_keys = [key for key in all_keys if key_matches_filters(key, module.params['filters'])] + ret_params = dict(kms_keys=filtered_keys) + + module.exit_json(**ret_params) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/lambda.py b/ansible_collections/amazon/aws/plugins/modules/lambda.py new file mode 100644 index 000000000..2c46a7ef5 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/lambda.py @@ -0,0 +1,911 @@ +#!/usr/bin/python +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: lambda +version_added: 5.0.0 +short_description: Manage AWS Lambda functions +description: + - Allows for the management of Lambda functions. + - This module was originally added to C(community.aws) in release 1.0.0. +options: + name: + description: + - The name you want to assign to the function you are uploading. Cannot be changed. + required: true + type: str + state: + description: + - Create or delete Lambda function. + default: present + choices: [ 'present', 'absent' ] + type: str + runtime: + description: + - The runtime environment for the Lambda function you are uploading. + - Required when creating a function. Uses parameters as described in boto3 docs. + - Required when I(state=present). + - For supported list of runtimes, see U(https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html). + type: str + role: + description: + - The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it executes your function to access any other Amazon Web Services (AWS) + resources. You may use the bare ARN if the role belongs to the same AWS account. + - Required when I(state=present). 
+ type: str + handler: + description: + - The function within your code that Lambda calls to begin execution. + type: str + zip_file: + description: + - A .zip file containing your deployment package. + - If I(state=present) then either I(zip_file) or I(s3_bucket) must be present. + aliases: [ 'src' ] + type: str + s3_bucket: + description: + - Amazon S3 bucket name where the .zip file containing your deployment package is stored. + - If I(state=present) then either I(zip_file) or I(s3_bucket) must be present. + - I(s3_bucket) and I(s3_key) are required together. + type: str + s3_key: + description: + - The Amazon S3 object (the deployment package) key name you want to upload. + - I(s3_bucket) and I(s3_key) are required together. + type: str + s3_object_version: + description: + - The Amazon S3 object (the deployment package) version you want to upload. + type: str + description: + description: + - A short, user-defined function description. Lambda does not use this value. Assign a meaningful description as you see fit. + type: str + default: '' + timeout: + description: + - The function maximum execution time in seconds after which Lambda should terminate the function. + default: 3 + type: int + memory_size: + description: + - The amount of memory, in MB, your Lambda function is given. + default: 128 + type: int + vpc_subnet_ids: + description: + - List of subnet IDs to run the Lambda function in. + - Use this option if you need to access resources in your VPC. Leave empty if you don't want to run the function in a VPC. + - If set, I(vpc_security_group_ids) must also be set. + type: list + elements: str + vpc_security_group_ids: + description: + - List of VPC security group IDs to associate with the Lambda function. + - Required when I(vpc_subnet_ids) is used. + type: list + elements: str + environment_variables: + description: + - A dictionary of environment variables the Lambda function is given. + type: dict + dead_letter_arn: + description: + - The parent object that contains the target Amazon Resource Name (ARN) of an Amazon SQS queue or Amazon SNS topic. + type: str + tracing_mode: + description: + - Set mode to 'Active' to sample and trace incoming requests with AWS X-Ray. Turned off (set to 'PassThrough') by default. + choices: ['Active', 'PassThrough'] + type: str + kms_key_arn: + description: + - The KMS key ARN used to encrypt the function's environment variables. + type: str + version_added: 3.3.0 + version_added_collection: community.aws + architecture: + description: + - The instruction set architecture that the function supports. + - Requires one of I(s3_bucket) or I(zip_file). + - Requires botocore >= 1.21.51. + type: str + choices: ['x86_64', 'arm64'] + aliases: ['architectures'] + version_added: 5.0.0 + layers: + description: + - A list of function layers to add to the function's execution environment. + - Specify each layer by its ARN, including the version. + suboptions: + layer_version_arn: + description: + - The ARN of the layer version. + - Mutually exclusive with I(layer_name) and I(version). + type: str + layer_name: + description: + - The name or Amazon Resource Name (ARN) of the layer. + - Mutually exclusive with I(layer_version_arn). + type: str + aliases: ['layer_arn'] + version: + description: + - The version number. + - Required when I(layer_name) is provided, ignored if not.
+ type: int + aliases: ['layer_version'] + type: list + elements: dict + version_added: 5.5.0 +author: + - 'Steyn Huizinga (@steynovich)' +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags + - amazon.aws.boto3 +''' + +EXAMPLES = r''' +# Create Lambda functions +- name: looped creation + amazon.aws.lambda: + name: '{{ item.name }}' + state: present + zip_file: '{{ item.zip_file }}' + runtime: 'python2.7' + role: 'arn:aws:iam::123456789012:role/lambda_basic_execution' + handler: 'hello_python.my_handler' + vpc_subnet_ids: + - subnet-123abcde + - subnet-edcba321 + vpc_security_group_ids: + - sg-123abcde + - sg-edcba321 + environment_variables: '{{ item.env_vars }}' + tags: + key1: 'value1' + loop: + - name: HelloWorld + zip_file: hello-code.zip + env_vars: + key1: "first" + key2: "second" + - name: ByeBye + zip_file: bye-code.zip + env_vars: + key1: "1" + key2: "2" + +# To remove previously added tags pass an empty dict +- name: remove tags + amazon.aws.lambda: + name: 'Lambda function' + state: present + zip_file: 'code.zip' + runtime: 'python2.7' + role: 'arn:aws:iam::123456789012:role/lambda_basic_execution' + handler: 'hello_python.my_handler' + tags: {} + +# Basic Lambda function deletion +- name: Delete Lambda functions HelloWorld and ByeBye + amazon.aws.lambda: + name: '{{ item }}' + state: absent + loop: + - HelloWorld + - ByeBye + +# Create Lambda functions with function layers +- name: looped creation + amazon.aws.lambda: + name: 'HelloWorld' + state: present + zip_file: 'hello-code.zip' + runtime: 'python2.7' + role: 'arn:aws:iam::123456789012:role/lambda_basic_execution' + handler: 'hello_python.my_handler' + layers: + - layer_version_arn: 'arn:aws:lambda:us-east-1:123456789012:layer:python27-env:7' +''' + +RETURN = r''' +code: + description: The lambda function's code returned by get_function in boto3. + returned: success + type: dict + contains: + location: + description: + - The presigned URL you can use to download the function's .zip file that you previously uploaded. + - The URL is valid for up to 10 minutes. + returned: success + type: str + sample: 'https://prod-04-2014-tasks.s3.us-east-1.amazonaws.com/snapshots/sample' + repository_type: + description: The repository from which you can download the function. + returned: success + type: str + sample: 'S3' +configuration: + description: the lambda function's configuration metadata returned by get_function in boto3 + returned: success + type: dict + contains: + architectures: + description: The architectures supported by the function. + returned: successful run where botocore >= 1.21.51 + type: list + elements: str + sample: ['arm64'] + code_sha256: + description: The SHA256 hash of the function's deployment package. + returned: success + type: str + sample: 'zOAGfF5JLFuzZoSNirUtOrQp+S341IOA3BcoXXoaIaU=' + code_size: + description: The size of the function's deployment package in bytes. + returned: success + type: int + sample: 123 + dead_letter_config: + description: The function's dead letter queue. + returned: when the function has a dead letter queue configured + type: dict + sample: { 'target_arn': arn:aws:lambda:us-east-1:123456789012:function:myFunction:1 } + contains: + target_arn: + description: The ARN of an SQS queue or SNS topic. + returned: when the function has a dead letter queue configured + type: str + sample: arn:aws:lambda:us-east-1:123456789012:function:myFunction:1 + description: + description: The function's description. 
+ returned: success + type: str + sample: 'My function' + environment: + description: The function's environment variables. + returned: when environment variables exist + type: dict + contains: + variables: + description: Environment variable key-value pairs. + returned: when environment variables exist + type: dict + sample: {'key': 'value'} + error: + description: Error message for environment variables that could not be applied. + returned: when there is an error applying environment variables + type: dict + contains: + error_code: + description: The error code. + returned: when there is an error applying environment variables + type: str + message: + description: The error message. + returned: when there is an error applying environment variables + type: str + function_arn: + description: The function's Amazon Resource Name (ARN). + returned: on success + type: str + sample: 'arn:aws:lambda:us-east-1:123456789012:function:myFunction:1' + function_name: + description: The function's name. + returned: on success + type: str + sample: 'myFunction' + handler: + description: The function Lambda calls to begin executing your function. + returned: on success + type: str + sample: 'index.handler' + last_modified: + description: The date and time that the function was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ssTZD). + returned: on success + type: str + sample: '2017-08-01T00:00:00.000+0000' + memory_size: + description: The memory allocated to the function. + returned: on success + type: int + sample: 128 + revision_id: + description: The latest updated revision of the function or alias. + returned: on success + type: str + sample: 'a2x9886d-d48a-4a0c-ab64-82abc005x80c' + role: + description: The function's execution role. + returned: on success + type: str + sample: 'arn:aws:iam::123456789012:role/lambda_basic_execution' + runtime: + description: The runtime environment for the Lambda function. + returned: on success + type: str + sample: 'nodejs6.10' + tracing_config: + description: The function's AWS X-Ray tracing configuration. + returned: on success + type: dict + sample: { 'mode': 'Active' } + contains: + mode: + description: The tracing mode. + returned: on success + type: str + sample: 'Active' + timeout: + description: The amount of time that Lambda allows a function to run before terminating it. + returned: on success + type: int + sample: 3 + version: + description: The version of the Lambda function. + returned: on success + type: str + sample: '1' + vpc_config: + description: The function's networking configuration. + returned: on success + type: dict + sample: { + 'security_group_ids': [], + 'subnet_ids': [], + 'vpc_id': '123' + } + layers: + description: The function's layers. + returned: on success + version_added: 5.5.0 + type: complex + contains: + arn: + description: The Amazon Resource Name (ARN) of the function layer. + returned: always + type: str + sample: 'arn:aws:lambda:us-east-1:123456789012:layer:python27-env:7' + code_size: + description: The size of the layer archive in bytes. + returned: always + type: int + signing_profile_version_arn: + description: The Amazon Resource Name (ARN) for a signing profile version. + returned: always + type: str + signing_job_arn: + description: The Amazon Resource Name (ARN) of a signing job.
+ returned: always + type: str +''' + +import base64 +import hashlib +import traceback +import re +from collections import Counter + +try: + from botocore.exceptions import ClientError, BotoCoreError, WaiterError +except ImportError: + pass # protected by AnsibleAWSModule + +from ansible.module_utils._text import to_native +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags + + +def get_account_info(module): + """return the account information (account id and partition) we are currently working on + + get_account_info tries to find out the account that we are working + on. It's not guaranteed that this will be easy, so we try in + several different ways. Giving either IAM or STS privileges to + the account should be enough to permit this. + """ + account_id = None + partition = None + try: + sts_client = module.client('sts', retry_decorator=AWSRetry.jittered_backoff()) + caller_id = sts_client.get_caller_identity(aws_retry=True) + account_id = caller_id.get('Account') + partition = caller_id.get('Arn').split(':')[1] + except (BotoCoreError, ClientError): + try: + iam_client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) + arn, partition, service, reg, account_id, resource = iam_client.get_user(aws_retry=True)['User']['Arn'].split(':') + except is_boto3_error_code('AccessDenied') as e: + try: + except_msg = to_native(e.message) + except AttributeError: + except_msg = to_native(e) + m = re.search(r"arn:(aws(-([a-z\-]+))?):iam::([0-9]{12,32}):\w+/", except_msg) + if m is None: + module.fail_json_aws(e, msg="getting account information") + account_id = m.group(4) + partition = m.group(1) + except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="getting account information") + + return account_id, partition + + +def get_current_function(connection, function_name, qualifier=None): + try: + if qualifier is not None: + return connection.get_function(FunctionName=function_name, Qualifier=qualifier, aws_retry=True) + return connection.get_function(FunctionName=function_name, aws_retry=True) + except is_boto3_error_code('ResourceNotFoundException'): + return None + + +def get_layer_version_arn(module, connection, layer_name, version_number): + try: + layer_versions = connection.list_layer_versions(LayerName=layer_name, aws_retry=True)['LayerVersions'] + for v in layer_versions: + if v["Version"] == version_number: + return v["LayerVersionArn"] + module.fail_json(msg='Unable to find version {0} from Lambda layer {1}'.format(version_number, layer_name)) + except is_boto3_error_code('ResourceNotFoundException'): + module.fail_json(msg='Lambda layer {0} not found'.format(layer_name)) + + +def sha256sum(filename): + # Lambda reports CodeSha256 as the base64-encoded SHA256 digest, so return + # the same representation rather than a hex digest. + hasher = hashlib.sha256() + with open(filename, 'rb') as f: + hasher.update(f.read()) + + code_hash = hasher.digest() + code_b64 = base64.b64encode(code_hash) + b64_digest = code_b64.decode('utf-8') + + return b64_digest + + +def set_tag(client, module, tags, function, purge_tags): + + if tags is None: + return False + + changed = False + arn = function['Configuration']['FunctionArn'] + + try: + current_tags = client.list_tags(Resource=arn,
aws_retry=True).get('Tags', {}) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to list tags") + + tags_to_add, tags_to_remove = compare_aws_tags(current_tags, tags, purge_tags=purge_tags) + + if not tags_to_remove and not tags_to_add: + return False + + if module.check_mode: + return True + + try: + if tags_to_remove: + client.untag_resource( + Resource=arn, + TagKeys=tags_to_remove, + aws_retry=True + ) + changed = True + + if tags_to_add: + client.tag_resource( + Resource=arn, + Tags=tags_to_add, + aws_retry=True + ) + changed = True + + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to tag resource {0}".format(arn)) + + return changed + + +def wait_for_lambda(client, module, name): + try: + client_active_waiter = client.get_waiter('function_active') + client_updated_waiter = client.get_waiter('function_updated') + client_active_waiter.wait(FunctionName=name) + client_updated_waiter.wait(FunctionName=name) + except WaiterError as e: + module.fail_json_aws(e, msg='Timeout while waiting on lambda to finish updating') + except (ClientError, BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed while waiting on lambda to finish updating') + + +def format_response(response): + tags = response.get("Tags", {}) + result = camel_dict_to_snake_dict(response) + # Lambda returns a dict rather than the usual boto3 list of dicts + result["tags"] = tags + return result + + +def _zip_args(zip_file, current_config, ignore_checksum): + if not zip_file: + return {} + + # If there's another change that needs to happen, we always re-upload the code + if not ignore_checksum: + local_checksum = sha256sum(zip_file) + remote_checksum = current_config.get('CodeSha256', '') + if local_checksum == remote_checksum: + return {} + + with open(zip_file, 'rb') as f: + zip_content = f.read() + return {'ZipFile': zip_content} + + +def _s3_args(s3_bucket, s3_key, s3_object_version): + if not s3_bucket: + return {} + if not s3_key: + return {} + + code = {'S3Bucket': s3_bucket, + 'S3Key': s3_key} + if s3_object_version: + code.update({'S3ObjectVersion': s3_object_version}) + + return code + + +def _code_args(module, current_config): + s3_bucket = module.params.get('s3_bucket') + s3_key = module.params.get('s3_key') + s3_object_version = module.params.get('s3_object_version') + zip_file = module.params.get('zip_file') + architectures = module.params.get('architecture') + checksum_match = False + + code_kwargs = {} + + if architectures and current_config.get('Architectures', None) != [architectures]: + module.warn('Arch Change') + code_kwargs.update({'Architectures': [architectures]}) + + try: + code_kwargs.update(_zip_args(zip_file, current_config, bool(code_kwargs))) + except IOError as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + + code_kwargs.update(_s3_args(s3_bucket, s3_key, s3_object_version)) + + if not code_kwargs: + return {} + + if not architectures and current_config.get('Architectures', None): + code_kwargs.update({'Architectures': current_config.get('Architectures', None)}) + + return code_kwargs + + +def main(): + argument_spec = dict( + name=dict(required=True), + state=dict(default='present', choices=['present', 'absent']), + runtime=dict(), + role=dict(), + handler=dict(), + zip_file=dict(aliases=['src']), + s3_bucket=dict(), + s3_key=dict(no_log=False), + s3_object_version=dict(), + description=dict(default=''), + timeout=dict(type='int', default=3), + memory_size=dict(type='int', default=128), + 
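# The two VPC options below must be supplied together; this is enforced by the + # required_together rule passed to AnsibleAWSModule further down. +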
vpc_subnet_ids=dict(type='list', elements='str'), + vpc_security_group_ids=dict(type='list', elements='str'), + environment_variables=dict(type='dict'), + dead_letter_arn=dict(), + kms_key_arn=dict(type='str', no_log=False), + tracing_mode=dict(choices=['Active', 'PassThrough']), + architecture=dict(choices=['x86_64', 'arm64'], type='str', aliases=['architectures']), + tags=dict(type='dict', aliases=['resource_tags']), + purge_tags=dict(type='bool', default=True), + layers=dict( + type='list', + elements='dict', + options=dict( + layer_version_arn=dict(type='str'), + layer_name=dict(type='str', aliases=['layer_arn']), + version=dict(type='int', aliases=['layer_version']), + ), + required_together=[['layer_name', 'version']], + required_one_of=[['layer_version_arn', 'layer_name']], + mutually_exclusive=[ + ['layer_name', 'layer_version_arn'], + ['version', 'layer_version_arn'] + ], + ), + ) + + mutually_exclusive = [['zip_file', 's3_key'], + ['zip_file', 's3_bucket'], + ['zip_file', 's3_object_version']] + + required_together = [['s3_key', 's3_bucket'], + ['vpc_subnet_ids', 'vpc_security_group_ids']] + + required_if = [ + ['state', 'present', ['runtime', 'handler', 'role']], + ['architecture', 'x86_64', ['zip_file', 's3_bucket'], True], + ['architecture', 'arm64', ['zip_file', 's3_bucket'], True], + ] + + module = AnsibleAWSModule(argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive, + required_together=required_together, + required_if=required_if) + + name = module.params.get('name') + state = module.params.get('state').lower() + runtime = module.params.get('runtime') + role = module.params.get('role') + handler = module.params.get('handler') + s3_bucket = module.params.get('s3_bucket') + s3_key = module.params.get('s3_key') + s3_object_version = module.params.get('s3_object_version') + zip_file = module.params.get('zip_file') + description = module.params.get('description') + timeout = module.params.get('timeout') + memory_size = module.params.get('memory_size') + vpc_subnet_ids = module.params.get('vpc_subnet_ids') + vpc_security_group_ids = module.params.get('vpc_security_group_ids') + environment_variables = module.params.get('environment_variables') + dead_letter_arn = module.params.get('dead_letter_arn') + tracing_mode = module.params.get('tracing_mode') + tags = module.params.get('tags') + purge_tags = module.params.get('purge_tags') + kms_key_arn = module.params.get('kms_key_arn') + architectures = module.params.get('architecture') + layers = [] + + check_mode = module.check_mode + changed = False + + if architectures: + module.require_botocore_at_least( + '1.21.51', reason='to configure the architectures that the function supports.') + + try: + client = module.client('lambda', retry_decorator=AWSRetry.jittered_backoff()) + except (ClientError, BotoCoreError) as e: + module.fail_json_aws(e, msg="Trying to connect to AWS") + + if state == 'present': + if re.match(r'^arn:aws(-([a-z\-]+))?:iam', role): + role_arn = role + else: + # get account ID and assemble ARN + account_id, partition = get_account_info(module) + role_arn = 'arn:{0}:iam::{1}:role/{2}'.format(partition, account_id, role) + + # create list of layer version arn + if module.params.get("layers"): + for layer in module.params.get("layers"): + layer_version_arn = layer.get("layer_version_arn") + if layer_version_arn is None: + layer_version_arn = get_layer_version_arn(module, client, layer.get("layer_name"), layer.get("version")) + layers.append(layer_version_arn) + + # Get 
function configuration if present, None otherwise + current_function = get_current_function(client, name) + + # Update existing Lambda function + if state == 'present' and current_function: + + # Get current state + current_config = current_function['Configuration'] + current_version = None + + # Update function configuration + func_kwargs = {'FunctionName': name} + + # Update configuration if needed + if role_arn and current_config['Role'] != role_arn: + func_kwargs.update({'Role': role_arn}) + if handler and current_config['Handler'] != handler: + func_kwargs.update({'Handler': handler}) + if description and current_config['Description'] != description: + func_kwargs.update({'Description': description}) + if timeout and current_config['Timeout'] != timeout: + func_kwargs.update({'Timeout': timeout}) + if memory_size and current_config['MemorySize'] != memory_size: + func_kwargs.update({'MemorySize': memory_size}) + if runtime and current_config['Runtime'] != runtime: + func_kwargs.update({'Runtime': runtime}) + if (environment_variables is not None) and (current_config.get( + 'Environment', {}).get('Variables', {}) != environment_variables): + func_kwargs.update({'Environment': {'Variables': environment_variables}}) + if dead_letter_arn is not None: + if current_config.get('DeadLetterConfig'): + if current_config['DeadLetterConfig']['TargetArn'] != dead_letter_arn: + func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}}) + else: + if dead_letter_arn != "": + func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}}) + if tracing_mode and (current_config.get('TracingConfig', {}).get('Mode', 'PassThrough') != tracing_mode): + func_kwargs.update({'TracingConfig': {'Mode': tracing_mode}}) + if kms_key_arn: + func_kwargs.update({'KMSKeyArn': kms_key_arn}) + + # If VPC configuration is desired + if vpc_subnet_ids: + + if 'VpcConfig' in current_config: + # Compare VPC config with current config + current_vpc_subnet_ids = current_config['VpcConfig']['SubnetIds'] + current_vpc_security_group_ids = current_config['VpcConfig']['SecurityGroupIds'] + + subnet_net_id_changed = sorted(vpc_subnet_ids) != sorted(current_vpc_subnet_ids) + vpc_security_group_ids_changed = sorted(vpc_security_group_ids) != sorted(current_vpc_security_group_ids) + + if 'VpcConfig' not in current_config or subnet_net_id_changed or vpc_security_group_ids_changed: + new_vpc_config = {'SubnetIds': vpc_subnet_ids, + 'SecurityGroupIds': vpc_security_group_ids} + func_kwargs.update({'VpcConfig': new_vpc_config}) + else: + # No VPC configuration is desired; ensure VPC config is empty when present in current config + if 'VpcConfig' in current_config and current_config['VpcConfig'].get('VpcId'): + func_kwargs.update({'VpcConfig': {'SubnetIds': [], 'SecurityGroupIds': []}}) + + # Check layers + if layers: + # compare two lists to see if the target layers are equal to the current + current_layers = current_config.get('Layers', []) + if Counter(layers) != Counter((f['Arn'] for f in current_layers)): + func_kwargs.update({'Layers': layers}) + + # Upload new configuration if configuration has changed + if len(func_kwargs) > 1: + if not check_mode: + wait_for_lambda(client, module, name) + + try: + if not check_mode: + response = client.update_function_configuration(aws_retry=True, **func_kwargs) + current_version = response['Version'] + changed = True + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Trying to update lambda configuration") + + # Tag Function + if tags is not None: +
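# set_tag() diffs the requested tags against the current list_tags() result via + # compare_aws_tags() and only calls tag_resource/untag_resource on a real change. +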
if set_tag(client, module, tags, current_function, purge_tags): + changed = True + + code_kwargs = _code_args(module, current_config) + if code_kwargs: + + # Update code configuration + code_kwargs.update({'FunctionName': name, 'Publish': True}) + + if not check_mode: + wait_for_lambda(client, module, name) + + try: + if not check_mode: + response = client.update_function_code(aws_retry=True, **code_kwargs) + current_version = response['Version'] + changed = True + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Trying to upload new code") + + # Describe function code and configuration + response = get_current_function(client, name, qualifier=current_version) + if not response: + module.fail_json(msg='Unable to get function information after updating') + response = format_response(response) + # We're done + module.exit_json(changed=changed, code_kwargs=code_kwargs, func_kwargs=func_kwargs, **response) + + # Function doesn't exist, create new Lambda function + elif state == 'present': + + func_kwargs = {'FunctionName': name, + 'Publish': True, + 'Runtime': runtime, + 'Role': role_arn, + 'Timeout': timeout, + 'MemorySize': memory_size, + } + + code = _code_args(module, {}) + if not code: + module.fail_json(msg='Either S3 object or path to zipfile required') + if 'Architectures' in code: + func_kwargs.update({'Architectures': code.pop('Architectures')}) + func_kwargs.update({'Code': code}) + + if description is not None: + func_kwargs.update({'Description': description}) + + if handler is not None: + func_kwargs.update({'Handler': handler}) + + if environment_variables: + func_kwargs.update({'Environment': {'Variables': environment_variables}}) + + if dead_letter_arn: + func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}}) + + if tracing_mode: + func_kwargs.update({'TracingConfig': {'Mode': tracing_mode}}) + + if kms_key_arn: + func_kwargs.update({'KMSKeyArn': kms_key_arn}) + + # If VPC configuration is given + if vpc_subnet_ids: + func_kwargs.update({'VpcConfig': {'SubnetIds': vpc_subnet_ids, + 'SecurityGroupIds': vpc_security_group_ids}}) + + # Layers + if layers: + func_kwargs.update({'Layers': layers}) + + # Tag Function + if tags: + func_kwargs.update({'Tags': tags}) + + # Function would have been created if not check mode + if check_mode: + module.exit_json(changed=True) + + # Finally try to create function + current_version = None + try: + response = client.create_function(aws_retry=True, **func_kwargs) + current_version = response['Version'] + changed = True + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Trying to create function") + + response = get_current_function(client, name, qualifier=current_version) + if not response: + module.fail_json(msg='Unable to get function information after creating') + response = format_response(response) + module.exit_json(changed=changed, **response) + + # Delete existing Lambda function + if state == 'absent' and current_function: + try: + if not check_mode: + client.delete_function(FunctionName=name, aws_retry=True) + changed = True + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Trying to delete Lambda function") + + module.exit_json(changed=changed) + + # Function already absent, do nothing + elif state == 'absent': + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/lambda_alias.py b/ansible_collections/amazon/aws/plugins/modules/lambda_alias.py new file mode
100644 index 000000000..e2dd776d6 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/lambda_alias.py @@ -0,0 +1,331 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: lambda_alias +version_added: 5.0.0 +short_description: Creates, updates or deletes AWS Lambda function aliases +description: + - This module allows the management of AWS Lambda function aliases via the Ansible + framework. It is idempotent and supports "Check" mode. Use module M(amazon.aws.lambda) to manage the lambda function + itself and M(amazon.aws.lambda_event) to manage event source mappings. + - This module was originally added to C(community.aws) in release 1.0.0. + +author: + - Pierre Jodouin (@pjodouin) + - Ryan Scott Brown (@ryansb) +options: + function_name: + description: + - The name of the Lambda function. + required: true + type: str + state: + description: + - Describes the desired state. + default: "present" + choices: ["present", "absent"] + type: str + name: + description: + - Name of the function alias. + required: true + aliases: ['alias_name'] + type: str + description: + description: + - A short, user-defined function alias description. + type: str + function_version: + description: + - Version associated with the Lambda function alias. + A value of 0 (or omitted parameter) sets the alias to the $LATEST version. + aliases: ['version'] + type: int + default: 0 +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.boto3 + +''' + +EXAMPLES = ''' +--- +# Simple example to create a lambda function and publish a version +- hosts: localhost + gather_facts: false + vars: + state: present + project_folder: /path/to/deployment/package + deployment_package: lambda.zip + account: 123456789012 + production_version: 5 + tasks: + - name: AWS Lambda Function + amazon.aws.lambda: + state: "{{ state | default('present') }}" + name: myLambdaFunction + publish: True + description: lambda function description + code_s3_bucket: package-bucket + code_s3_key: "lambda/{{ deployment_package }}" + local_path: "{{ project_folder }}/{{ deployment_package }}" + runtime: python2.7 + timeout: 5 + handler: lambda.handler + memory_size: 128 + role: "arn:aws:iam::{{ account }}:role/API2LambdaExecRole" + + - name: Get information + amazon.aws.lambda_info: + name: myLambdaFunction + register: lambda_info + - name: show results + ansible.builtin.debug: + msg: "{{ lambda_info['lambda_facts'] }}" + +# The following will set the Dev alias to the latest version ($LATEST) since version is omitted (or = 0) + - name: "alias 'Dev' for function {{ lambda_info.lambda_facts.FunctionName }} " + amazon.aws.lambda_alias: + state: "{{ state | default('present') }}" + function_name: "{{ lambda_info.lambda_facts.FunctionName }}" + name: Dev + description: Development is $LATEST version + +# The QA alias will only be created when a new version is published (i.e.
not = '$LATEST') + - name: "alias 'QA' for function {{ lambda_info.lambda_facts.FunctionName }} " + amazon.aws.lambda_alias: + state: "{{ state | default('present') }}" + function_name: "{{ lambda_info.lambda_facts.FunctionName }}" + name: QA + version: "{{ lambda_info.lambda_facts.Version }}" + description: "QA is version {{ lambda_info.lambda_facts.Version }}" + when: lambda_info.lambda_facts.Version != "$LATEST" + +# The Prod alias will have a fixed version based on a variable + - name: "alias 'Prod' for function {{ lambda_info.lambda_facts.FunctionName }} " + amazon.aws.lambda_alias: + state: "{{ state | default('present') }}" + function_name: "{{ lambda_info.lambda_facts.FunctionName }}" + name: Prod + version: "{{ production_version }}" + description: "Production is version {{ production_version }}" +''' + +RETURN = ''' +--- +alias_arn: + description: Full ARN of the function, including the alias + returned: success + type: str + sample: arn:aws:lambda:us-west-2:123456789012:function:myFunction:dev +description: + description: A short description of the alias + returned: success + type: str + sample: The development stage for my hot new app +function_version: + description: The qualifier that the alias refers to + returned: success + type: str + sample: $LATEST +name: + description: The name of the alias assigned + returned: success + type: str + sample: dev +revision_id: + description: A unique identifier that changes when you update the alias. + returned: success + type: str + sample: 12345678-1234-1234-1234-123456789abc +''' + +import re + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry + + +def set_api_params(module, module_params): + """ + Sets non-None module parameters to those expected by the boto3 API. + + :param module: + :param module_params: + :return: + """ + + api_params = dict() + + for param in module_params: + module_param = module.params.get(param, None) + if module_param: + api_params[param] = module_param + + return snake_dict_to_camel_dict(api_params, capitalize_first=True) + + +def validate_params(module): + """ + Performs basic parameter validation. + + :param module: AnsibleAWSModule reference + :return: + """ + + function_name = module.params['function_name'] + + # validate function name + if not re.search(r'^[\w\-:]+$', function_name): + module.fail_json( + msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name) + ) + if len(function_name) > 64: + module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name)) + + # if parameter 'function_version' is zero, set it to $LATEST, else convert it to a string + if module.params['function_version'] == 0: + module.params['function_version'] = '$LATEST' + else: + module.params['function_version'] = str(module.params['function_version']) + + return + + +def get_lambda_alias(module, client): + """ + Returns the lambda function alias if it exists. 
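+    Illustrative sketch (not part of the upstream module): given module
+    params such as {'function_name': 'myFunction', 'name': 'Dev'}, the
+    set_api_params() helper above produces the boto3-ready mapping:
+
+        set_api_params(module, ('function_name', 'name'))
+        # -> {'FunctionName': 'myFunction', 'Name': 'Dev'}
+
+    i.e. non-None snake_case options are camelized into API parameters.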
+ + :param module: AnsibleAWSModule + :param client: (wrapped) boto3 lambda client + :return: + """ + + # set API parameters + api_params = set_api_params(module, ('function_name', 'name')) + + # check if alias exists and get facts + try: + results = client.get_alias(aws_retry=True, **api_params) + except is_boto3_error_code('ResourceNotFoundException'): + results = None + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg='Error retrieving function alias') + + return results + + +def lambda_alias(module, client): + """ + Adds, updates or deletes lambda function aliases. + + :param module: AnsibleAWSModule + :param client: (wrapped) boto3 lambda client + :return dict: + """ + results = dict() + changed = False + current_state = 'absent' + state = module.params['state'] + + facts = get_lambda_alias(module, client) + if facts: + current_state = 'present' + + if state == 'present': + if current_state == 'present': + snake_facts = camel_dict_to_snake_dict(facts) + + # check if alias has changed -- only version and description can change + alias_params = ('function_version', 'description') + for param in alias_params: + if module.params.get(param) is None: + continue + if module.params.get(param) != snake_facts.get(param): + changed = True + break + + if changed: + api_params = set_api_params(module, ('function_name', 'name')) + api_params.update(set_api_params(module, alias_params)) + + if not module.check_mode: + try: + results = client.update_alias(aws_retry=True, **api_params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Error updating function alias') + + else: + # create new function alias + api_params = set_api_params(module, ('function_name', 'name', 'function_version', 'description')) + + try: + if not module.check_mode: + results = client.create_alias(aws_retry=True, **api_params) + changed = True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Error creating function alias') + + else: # state = 'absent' + if current_state == 'present': + # delete the function + api_params = set_api_params(module, ('function_name', 'name')) + + try: + if not module.check_mode: + results = client.delete_alias(aws_retry=True, **api_params) + changed = True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Error deleting function alias') + + return dict(changed=changed, **dict(results or facts or {})) + + +def main(): + """ + Main entry point. 
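+    Illustrative sketch (not part of the upstream module) of the update
+    decision taken in lambda_alias() above: a parameter is only compared
+    when it was supplied, so for example
+
+        desired     = {'function_version': '2', 'description': None}
+        snake_facts = {'function_version': '$LATEST', 'description': 'dev'}
+        # 'description' is None and is skipped; 'function_version'
+        # differs, so update_alias() is called with the merged params.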
+ + :return dict: ansible facts + """ + argument_spec = dict( + state=dict(required=False, default='present', choices=['present', 'absent']), + function_name=dict(required=True), + name=dict(required=True, aliases=['alias_name']), + function_version=dict(type='int', required=False, default=0, aliases=['version']), + description=dict(required=False, default=None), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[], + required_together=[], + ) + + client = module.client('lambda', retry_decorator=AWSRetry.jittered_backoff()) + + validate_params(module) + results = lambda_alias(module, client) + + module.exit_json(**camel_dict_to_snake_dict(results)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/lambda_event.py b/ansible_collections/amazon/aws/plugins/modules/lambda_event.py new file mode 100644 index 000000000..c6e63c4d8 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/lambda_event.py @@ -0,0 +1,457 @@ +#!/usr/bin/python +# (c) 2016, Pierre Jodouin +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: lambda_event +version_added: 5.0.0 +short_description: Creates, updates or deletes AWS Lambda function event mappings +description: + - This module allows the management of AWS Lambda function event source mappings such as DynamoDB and Kinesis stream + events via the Ansible framework. These event source mappings are relevant only in the AWS Lambda pull model, where + AWS Lambda invokes the function. + It is idempotent and supports "Check" mode. Use module M(amazon.aws.lambda) to manage the lambda + function itself and M(amazon.aws.lambda_alias) to manage function aliases. + - This module was originally added to C(community.aws) in release 1.0.0. + +author: + - Pierre Jodouin (@pjodouin) + - Ryan Brown (@ryansb) +options: + lambda_function_arn: + description: + - The name or ARN of the lambda function. + required: true + aliases: ['function_name', 'function_arn'] + type: str + state: + description: + - Describes the desired state. + default: "present" + choices: ["present", "absent"] + type: str + alias: + description: + - Name of the function alias. + - Mutually exclusive with I(version). + type: str + version: + description: + - Version of the Lambda function. + - Mutually exclusive with I(alias). + type: int + default: 0 + event_source: + description: + - Source of the event that triggers the lambda function. + - For DynamoDB and Kinesis events, select C(stream). + - For SQS queues, select C(sqs). + default: stream + choices: ['stream', 'sqs'] + type: str + source_params: + description: + - Sub-parameters required for event source. + suboptions: + source_arn: + description: + - The Amazon Resource Name (ARN) of the SQS queue, Kinesis stream or DynamoDB stream that is the event source. + type: str + required: true + enabled: + description: + - Indicates whether AWS Lambda should begin polling or reading from the event source. + default: true + type: bool + batch_size: + description: + - The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function. + default: 100 + type: int + starting_position: + description: + - The position in the stream where AWS Lambda should start reading. + - Required when I(event_source=stream).
+ choices: [TRIM_HORIZON,LATEST] + type: str + function_response_types: + description: + - (Streams and Amazon SQS) A list of current response type enums applied to the event source mapping. + type: list + elements: str + choices: [ReportBatchItemFailures] + version_added: 5.5.0 + required: true + type: dict +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.boto3 + +''' + +EXAMPLES = ''' +# Example that creates a lambda event notification for a DynamoDB stream +- name: DynamoDB stream event mapping + amazon.aws.lambda_event: + state: present + event_source: stream + function_name: "{{ function_name }}" + alias: Dev + source_params: + source_arn: arn:aws:dynamodb:us-east-1:123456789012:table/tableName/stream/2016-03-19T19:51:37.457 + enabled: True + batch_size: 100 + starting_position: TRIM_HORIZON + register: event + +# Example that creates a lambda event notification for a DynamoDB stream +- name: DynamoDB stream event mapping + amazon.aws.lambda_event: + state: present + event_source: stream + function_name: "{{ function_name }}" + source_params: + source_arn: arn:aws:dynamodb:us-east-1:123456789012:table/tableName/stream/2016-03-19T19:51:37.457 + enabled: True + batch_size: 100 + starting_position: LATEST + function_response_types: + - ReportBatchItemFailures + register: event + +- name: Show source event + ansible.builtin.debug: + var: event.lambda_stream_events +''' + +RETURN = ''' +--- +lambda_stream_events: + description: list of dictionaries returned by the API describing stream event mappings + returned: success + type: list +''' + +import re + +try: + from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info + + +# --------------------------------------------------------------------------------------------------- +# +# Helper Functions & classes +# +# --------------------------------------------------------------------------------------------------- + + +class AWSConnection: + """ + Create the connection object and client objects as required. 
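+    Illustrative usage (not part of the upstream module), assuming an
+    AnsibleAWSModule instance named 'module':
+
+        aws = AWSConnection(module, ['lambda'])
+        client = aws.client('lambda')   # boto3 client for the Lambda API
+        account = aws.account_id        # parsed from the IAM get_user() ARN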
+ """ + + def __init__(self, ansible_obj, resources, use_boto3=True): + + try: + self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=use_boto3) + + self.resource_client = dict() + if not resources: + resources = ['lambda'] + + resources.append('iam') + + for resource in resources: + aws_connect_kwargs.update(dict(region=self.region, + endpoint=self.endpoint, + conn_type='client', + resource=resource + )) + self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs) + + # if region is not provided, then get default profile/session region + if not self.region: + self.region = self.resource_client['lambda'].meta.region_name + + except (ClientError, ParamValidationError, MissingParametersError) as e: + ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e)) + + # set account ID + try: + self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4] + except (ClientError, ValueError, KeyError, IndexError): + self.account_id = '' + + def client(self, resource='lambda'): + return self.resource_client[resource] + + +def pc(key): + """ + Changes python key into Pascale case equivalent. For example, 'this_function_name' becomes 'ThisFunctionName'. + + :param key: + :return: + """ + + return "".join([token.capitalize() for token in key.split('_')]) + + +def ordered_obj(obj): + """ + Order object for comparison purposes + + :param obj: + :return: + """ + + if isinstance(obj, dict): + return sorted((k, ordered_obj(v)) for k, v in obj.items()) + if isinstance(obj, list): + return sorted(ordered_obj(x) for x in obj) + else: + return obj + + +def set_api_sub_params(params): + """ + Sets module sub-parameters to those expected by the boto3 API. + + :param params: + :return: + """ + + api_params = dict() + + for param in params.keys(): + param_value = params.get(param, None) + if param_value: + api_params[pc(param)] = param_value + + return api_params + + +def validate_params(module, aws): + """ + Performs basic parameter validation. + + :param module: + :param aws: + :return: + """ + + function_name = module.params['lambda_function_arn'] + + # validate function name + if not re.search(r'^[\w\-:]+$', function_name): + module.fail_json( + msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name) + ) + if len(function_name) > 64 and not function_name.startswith('arn:aws:lambda:'): + module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name)) + + elif len(function_name) > 140 and function_name.startswith('arn:aws:lambda:'): + module.fail_json(msg='ARN "{0}" exceeds 140 character limit'.format(function_name)) + + # check if 'function_name' needs to be expanded in full ARN format + if not module.params['lambda_function_arn'].startswith('arn:aws:lambda:'): + function_name = module.params['lambda_function_arn'] + module.params['lambda_function_arn'] = 'arn:aws:lambda:{0}:{1}:function:{2}'.format(aws.region, aws.account_id, function_name) + + qualifier = get_qualifier(module) + if qualifier: + function_arn = module.params['lambda_function_arn'] + module.params['lambda_function_arn'] = '{0}:{1}'.format(function_arn, qualifier) + + return + + +def get_qualifier(module): + """ + Returns the function qualifier as a version or alias or None. 
+ + :param module: + :return: + """ + + qualifier = None + if module.params['version'] > 0: + qualifier = str(module.params['version']) + elif module.params['alias']: + qualifier = str(module.params['alias']) + + return qualifier + + +# --------------------------------------------------------------------------------------------------- +# +# Lambda Event Handlers +# +# This section defines a lambda_event_X function where X is an AWS service capable of initiating +# the execution of a Lambda function (pull only). +# +# --------------------------------------------------------------------------------------------------- + +def lambda_event_stream(module, aws): + """ + Adds, updates or deletes lambda stream (DynamoDb, Kinesis) event notifications. + :param module: + :param aws: + :return: + """ + + client = aws.client('lambda') + facts = dict() + changed = False + current_state = 'absent' + state = module.params['state'] + + api_params = dict(FunctionName=module.params['lambda_function_arn']) + + # check if required sub-parameters are present and valid + source_params = module.params['source_params'] + + source_arn = source_params.get('source_arn') + if source_arn: + api_params.update(EventSourceArn=source_arn) + else: + module.fail_json(msg="Source parameter 'source_arn' is required for stream event notification.") + + # check if optional sub-parameters are valid, if present + batch_size = source_params.get('batch_size') + if batch_size: + try: + source_params['batch_size'] = int(batch_size) + except ValueError: + module.fail_json(msg="Source parameter 'batch_size' must be an integer, found: {0}".format(source_params['batch_size'])) + + # optional boolean value needs special treatment as not present does not imply False + source_param_enabled = module.boolean(source_params.get('enabled', 'True')) + + # check if event mapping exists + try: + facts = client.list_event_source_mappings(**api_params)['EventSourceMappings'] + if facts: + current_state = 'present' + except ClientError as e: + module.fail_json(msg='Error retrieving stream event notification configuration: {0}'.format(e)) + + if state == 'present': + if current_state == 'absent': + + starting_position = source_params.get('starting_position') + if starting_position: + api_params.update(StartingPosition=starting_position) + elif module.params.get('event_source') == 'sqs': + # starting position is not required for SQS + pass + else: + module.fail_json(msg="Source parameter 'starting_position' is required for stream event notification.") + + if source_arn: + api_params.update(Enabled=source_param_enabled) + if source_params.get('batch_size'): + api_params.update(BatchSize=source_params.get('batch_size')) + if source_params.get('function_response_types'): + api_params.update(FunctionResponseTypes=source_params.get('function_response_types')) + + try: + if not module.check_mode: + facts = client.create_event_source_mapping(**api_params) + changed = True + except (ClientError, ParamValidationError, MissingParametersError) as e: + module.fail_json(msg='Error creating stream source event mapping: {0}'.format(e)) + + else: + # current_state is 'present' + api_params = dict(FunctionName=module.params['lambda_function_arn']) + current_mapping = facts[0] + api_params.update(UUID=current_mapping['UUID']) + mapping_changed = False + + # check if anything changed + if source_params.get('batch_size') and source_params['batch_size'] != current_mapping['BatchSize']: + api_params.update(BatchSize=source_params['batch_size']) + mapping_changed = True + + if
source_param_enabled is not None: + if source_param_enabled: + if current_mapping['State'] not in ('Enabled', 'Enabling'): + api_params.update(Enabled=True) + mapping_changed = True + else: + if current_mapping['State'] not in ('Disabled', 'Disabling'): + api_params.update(Enabled=False) + mapping_changed = True + + if mapping_changed: + try: + if not module.check_mode: + facts = client.update_event_source_mapping(**api_params) + changed = True + except (ClientError, ParamValidationError, MissingParametersError) as e: + module.fail_json(msg='Error updating stream source event mapping: {0}'.format(e)) + + else: + if current_state == 'present': + # remove the stream event mapping + api_params = dict(UUID=facts[0]['UUID']) + + try: + if not module.check_mode: + facts = client.delete_event_source_mapping(**api_params) + changed = True + except (ClientError, ParamValidationError, MissingParametersError) as e: + module.fail_json(msg='Error removing stream source event mapping: {0}'.format(e)) + + return camel_dict_to_snake_dict(dict(changed=changed, events=facts)) + + +def main(): + """Produce a list of function suffixes which handle lambda events.""" + source_choices = ["stream", "sqs"] + + argument_spec = dict( + state=dict(required=False, default='present', choices=['present', 'absent']), + lambda_function_arn=dict(required=True, aliases=['function_name', 'function_arn']), + event_source=dict(required=False, default="stream", choices=source_choices), + source_params=dict(type='dict', required=True), + alias=dict(required=False, default=None), + version=dict(type='int', required=False, default=0), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[['alias', 'version']], + required_together=[], + ) + + aws = AWSConnection(module, ['lambda']) + + validate_params(module, aws) + + if module.params['event_source'].lower() in ('stream', 'sqs'): + results = lambda_event_stream(module, aws) + else: + module.fail_json(msg='Please select `stream` or `sqs` as the event type') + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/lambda_execute.py b/ansible_collections/amazon/aws/plugins/modules/lambda_execute.py new file mode 100644 index 000000000..68fff52b7 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/lambda_execute.py @@ -0,0 +1,285 @@ +#!/usr/bin/python +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: lambda_execute +version_added: 5.0.0 +short_description: Execute an AWS Lambda function +description: + - This module executes AWS Lambda functions, allowing synchronous and asynchronous + invocation. + - Prior to release 5.0.0 this module was called C(community.aws.execute_lambda). + The usage did not change. + - This module was originally added to C(community.aws) in release 1.0.0. +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.boto3 +author: + - "Ryan Scott Brown (@ryansb) " +notes: + - Async invocation will always return an empty C(output) key. + - Synchronous invocation may result in a function timeout, resulting in an + empty C(output) key. +options: + name: + description: + - The name of the function to be invoked. This can only be used for + invocations within the calling account. 
To invoke a function in another + account, use I(function_arn) to specify the full ARN. + type: str + function_arn: + description: + - The name of the function to be invoked + type: str + tail_log: + description: + - If I(tail_log=true), the result of the task will include the last 4 KB + of the CloudWatch log for the function execution. Log tailing only + works if you use synchronous invocation I(wait=true). This is usually + used for development or testing Lambdas. + type: bool + default: false + wait: + description: + - Whether to wait for the function results or not. If I(wait=no) + the task will not return any results. To wait for the Lambda function + to complete, set I(wait=true) and the result will be available in the + I(output) key. + type: bool + default: true + dry_run: + description: + - Do not *actually* invoke the function. A C(DryRun) call will check that + the caller has permissions to call the function, especially for + checking cross-account permissions. + type: bool + default: false + version_qualifier: + description: + - Which version/alias of the function to run. This defaults to the + C(LATEST) revision, but can be set to any existing version or alias. + See U(https://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html) + for details. + type: str + payload: + description: + - A dictionary in any form to be provided as input to the Lambda function. + default: {} + type: dict +''' + +EXAMPLES = ''' +- amazon.aws.lambda_execute: + name: test-function + # the payload is automatically serialized and sent to the function + payload: + foo: bar + value: 8 + register: response + +# Test that you have sufficient permissions to execute a Lambda function in +# another account +- amazon.aws.lambda_execute: + function_arn: arn:aws:lambda:us-east-1:123456789012:function/some-function + dry_run: true + +- amazon.aws.lambda_execute: + name: test-function + payload: + foo: bar + value: 8 + wait: true + tail_log: true + register: response + # the response will have a `logs` key that will contain a log (up to 4KB) of the function execution in Lambda + +# Pass the Lambda event payload as a json file. +- amazon.aws.lambda_execute: + name: test-function + payload: "{{ lookup('file','lambda_event.json') }}" + register: response + +- amazon.aws.lambda_execute: + name: test-function + version_qualifier: PRODUCTION +''' + +RETURN = ''' +result: + description: Resulting data structure from a successful task execution. + returned: success + type: dict + contains: + output: + description: Function output if wait=true and the function returns a value + returned: success + type: dict + sample: "{ 'output': 'something' }" + logs: + description: The last 4KB of the function logs. 
Only provided if I(tail_log) is C(true) + type: str + returned: if I(tail_log) == true + status: + description: C(StatusCode) of API call exit (200 for synchronous invokes, 202 for async) + type: int + sample: 200 + returned: always +''' + +import base64 +import json + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry + + +def main(): + argument_spec = dict( + name=dict(), + function_arn=dict(), + wait=dict(default=True, type='bool'), + tail_log=dict(default=False, type='bool'), + dry_run=dict(default=False, type='bool'), + version_qualifier=dict(), + payload=dict(default={}, type='dict'), + ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[ + ['name', 'function_arn'], + ], + required_one_of=[ + ('name', 'function_arn') + ], + ) + + name = module.params.get('name') + function_arn = module.params.get('function_arn') + await_return = module.params.get('wait') + dry_run = module.params.get('dry_run') + tail_log = module.params.get('tail_log') + version_qualifier = module.params.get('version_qualifier') + payload = module.params.get('payload') + + try: + client = module.client('lambda', retry_decorator=AWSRetry.jittered_backoff()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') + + invoke_params = {} + + if await_return: + # await response + invoke_params['InvocationType'] = 'RequestResponse' + else: + # fire and forget + invoke_params['InvocationType'] = 'Event' + if dry_run or module.check_mode: + # dry_run overrides invocation type + invoke_params['InvocationType'] = 'DryRun' + + if tail_log and await_return: + invoke_params['LogType'] = 'Tail' + elif tail_log and not await_return: + module.fail_json(msg="The `tail_log` parameter is only available if " + "the invocation waits for the function to complete. " + "Set `wait` to true or turn off `tail_log`.") + else: + invoke_params['LogType'] = 'None' + + if version_qualifier: + invoke_params['Qualifier'] = version_qualifier + + if payload: + invoke_params['Payload'] = json.dumps(payload) + + if function_arn: + invoke_params['FunctionName'] = function_arn + elif name: + invoke_params['FunctionName'] = name + + if module.check_mode: + module.exit_json(changed=True) + + try: + # wait on the function name or, when only an ARN was given, the ARN (the waiter accepts either) + wait_for_lambda(client, module, name or function_arn) + response = client.invoke(**invoke_params, aws_retry=True) + except is_boto3_error_code('ResourceNotFoundException') as nfe: + module.fail_json_aws(nfe, msg="Could not find Lambda to execute.
Make sure " + "the ARN is correct and your profile has " + "permissions to execute this function.") + except botocore.exceptions.ClientError as ce: # pylint: disable=duplicate-except + module.fail_json_aws(ce, msg="Client-side error when invoking Lambda, check inputs and specific error") + except botocore.exceptions.ParamValidationError as ve: # pylint: disable=duplicate-except + module.fail_json_aws(ve, msg="Parameters to `invoke` failed to validate") + except Exception as e: + module.fail_json_aws(e, msg="Unexpected failure while invoking Lambda function") + + results = { + 'logs': '', + 'status': response['StatusCode'], + 'output': '', + } + + if response.get('LogResult'): + try: + # logs are base64 encoded in the API response + results['logs'] = base64.b64decode(response.get('LogResult', '')) + except Exception as e: + module.fail_json_aws(e, msg="Failed while decoding logs") + + if invoke_params['InvocationType'] == 'RequestResponse': + try: + results['output'] = json.loads(response['Payload'].read().decode('utf8')) + except Exception as e: + module.fail_json_aws(e, msg="Failed while decoding function return value") + + if isinstance(results.get('output'), dict) and any( + [results['output'].get('stackTrace'), results['output'].get('errorMessage')]): + # AWS sends back stack traces and error messages when a function failed + # in a RequestResponse (synchronous) context. + template = ("Function executed, but there was an error in the Lambda function. " + "Message: {errmsg}, Type: {type}, Stack Trace: {trace}") + error_data = { + # format the stacktrace sent back as an array into a multiline string + 'trace': '\n'.join( + [' '.join([ + str(x) for x in line # cast line numbers to strings + ]) for line in results.get('output', {}).get('stackTrace', [])] + ), + 'errmsg': results['output'].get('errorMessage'), + 'type': results['output'].get('errorType') + } + module.fail_json(msg=template.format(**error_data), result=results) + + module.exit_json(changed=True, result=results) + + +def wait_for_lambda(client, module, name): + try: + client_active_waiter = client.get_waiter('function_active') + client_updated_waiter = client.get_waiter('function_updated') + client_active_waiter.wait(FunctionName=name) + client_updated_waiter.wait(FunctionName=name) + except botocore.exceptions.WaiterError as e: + module.fail_json_aws(e, msg='Timeout while waiting on lambda to be Active') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed while waiting on lambda to be Active') + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/lambda_info.py b/ansible_collections/amazon/aws/plugins/modules/lambda_info.py new file mode 100644 index 000000000..4584624d9 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/lambda_info.py @@ -0,0 +1,545 @@ +#!/usr/bin/python +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: lambda_info +version_added: 5.0.0 +short_description: Gathers AWS Lambda function details +description: + - Gathers various details related to Lambda functions, including aliases, versions and event source mappings. 
+ - Use module M(amazon.aws.lambda) to manage the lambda function itself, M(amazon.aws.lambda_alias) to manage function aliases, + M(amazon.aws.lambda_event) to manage lambda event source mappings, and M(amazon.aws.lambda_policy) to manage policy statements. + - This module was originally added to C(community.aws) in release 1.0.0. +options: + query: + description: + - Specifies the resource type for which to gather information. + - Defaults to C(all) when I(function_name) is specified. + - Defaults to C(config) when I(function_name) is NOT specified. + choices: [ "aliases", "all", "config", "mappings", "policy", "versions", "tags" ] + type: str + function_name: + description: + - The name of the lambda function for which information is requested. + aliases: [ "function", "name"] + type: str + event_source_arn: + description: + - When I(query=mappings), this is the Amazon Resource Name (ARN) of the Amazon Kinesis or DynamoDB stream. + type: str +author: + - Pierre Jodouin (@pjodouin) +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.boto3 +''' + +EXAMPLES = ''' +--- +# Simple example of listing all info for a function +- name: List all for a specific function + amazon.aws.lambda_info: + query: all + function_name: myFunction + register: my_function_details + +# List all versions of a function +- name: List function versions + amazon.aws.lambda_info: + query: versions + function_name: myFunction + register: my_function_versions + +# List all info for all functions +- name: List all functions + amazon.aws.lambda_info: + query: all + register: output + +- name: show Lambda information + ansible.builtin.debug: + msg: "{{ output['function'] }}" +''' + +RETURN = ''' +--- +function: + description: + - lambda function list. + - C(function) has been deprecated and will be removed in the next major release after 2025-01-01. + returned: success + type: dict +function.TheName: + description: + - lambda function information, including event, mapping, and version information. + - C(function) has been deprecated and will be removed in the next major release after 2025-01-01. + returned: success + type: dict +functions: + description: List of information for each lambda function matching the query. + returned: always + type: list + elements: dict + version_added: 4.1.0 + version_added_collection: community.aws + contains: + aliases: + description: The aliases associated with the function. + returned: when C(query) is I(aliases) or I(all) + type: list + elements: str + architectures: + description: The architectures supported by the function. + returned: successful run where botocore >= 1.21.51 + type: list + elements: str + sample: ['arm64'] + code_sha256: + description: The SHA256 hash of the function's deployment package. + returned: success + type: str + sample: 'zOAGfF5JLFuzZoSNirUtOrQp+S341IOA3BcoXXoaIaU=' + code_size: + description: The size of the function's deployment package in bytes. + returned: success + type: int + sample: 123 + dead_letter_config: + description: The function's dead letter queue. + returned: when the function has a dead letter queue configured + type: dict + sample: { 'target_arn': arn:aws:lambda:us-east-1:123456789012:function:myFunction:1 } + contains: + target_arn: + description: The ARN of an SQS queue or SNS topic. + returned: when the function has a dead letter queue configured + type: str + sample: arn:aws:lambda:us-east-1:123456789012:function:myFunction:1 + description: + description: The function's description.
+ returned: success + type: str + sample: 'My function' + environment: + description: The function's environment variables. + returned: when environment variables exist + type: dict + contains: + variables: + description: Environment variable key-value pairs. + returned: when environment variables exist + type: dict + sample: {'key': 'value'} + error: + description: Error message for environment variables that could not be applied. + returned: when there is an error applying environment variables + type: dict + contains: + error_code: + description: The error code. + returned: when there is an error applying environment variables + type: str + message: + description: The error message. + returned: when there is an error applying environment variables + type: str + function_arn: + description: The function's Amazon Resource Name (ARN). + returned: on success + type: str + sample: 'arn:aws:lambda:us-east-1:123456789012:function:myFunction:1' + function_name: + description: The function's name. + returned: on success + type: str + sample: 'myFunction' + handler: + description: The function Lambda calls to begin executing your function. + returned: on success + type: str + sample: 'index.handler' + last_modified: + description: The date and time that the function was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ssTZD). + returned: on success + type: str + sample: '2017-08-01T00:00:00.000+0000' + mappings: + description: List of configuration information for each event source mapping. + returned: when C(query) is I(all) or I(mappings) + type: list + elements: dict + contains: + uuid: + description: The AWS Lambda assigned opaque identifier for the mapping. + returned: on success + type: str + batch_size: + description: The largest number of records that AWS Lambda will retrieve from the event source at the time of invoking the function. + returned: on success + type: int + event_source_arn: + description: The ARN of the Amazon Kinesis or DynamoDB stream that is the source of events. + returned: on success + type: str + function_arn: + description: The Lambda function to invoke when AWS Lambda detects an event on the poll-based source. + returned: on success + type: str + last_modified: + description: The UTC time string indicating the last time the event mapping was updated. + returned: on success + type: str + last_processing_result: + description: The result of the last AWS Lambda invocation of your Lambda function. + returned: on success + type: str + state: + description: The state of the event source mapping. + returned: on success + type: str + state_transition_reason: + description: The reason the event source mapping is in its current state. + returned: on success + type: str + memory_size: + description: The memory allocated to the function. + returned: on success + type: int + sample: 128 + policy: + description: The policy associated with the function. + returned: when C(query) is I(all) or I(policy) + type: dict + revision_id: + description: The latest updated revision of the function or alias. + returned: on success + type: str + sample: 'a2x9886d-d48a-4a0c-ab64-82abc005x80c' + role: + description: The function's execution role. + returned: on success + type: str + sample: 'arn:aws:iam::123456789012:role/lambda_basic_execution' + runtime: + description: The runtime environment for the Lambda function. + returned: on success + type: str + sample: 'nodejs6.10' + tracing_config: + description: The function's AWS X-Ray tracing configuration.
+ returned: on success + type: dict + sample: { 'mode': 'Active' } + contains: + mode: + description: The tracing mode. + returned: on success + type: str + sample: 'Active' + timeout: + description: The amount of time that Lambda allows a function to run before terminating it. + returned: on success + type: int + sample: 3 + version: + description: The version of the Lambda function. + returned: on success + type: str + sample: '1' + versions: + description: List of Lambda function versions. + returned: when C(query) is I(all) or I(versions) + type: list + elements: dict + vpc_config: + description: The function's networking configuration. + returned: on success + type: dict + sample: { + 'security_group_ids': [], + 'subnet_ids': [], + 'vpc_id': '123' + } +''' +import json +import re + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry + + +@AWSRetry.jittered_backoff() +def _paginate(client, function, **params): + paginator = client.get_paginator(function) + return paginator.paginate(**params).build_full_result() + + +def alias_details(client, module, function_name): + """ + Returns list of aliases for a specified function. + + :param client: AWS API client reference (boto3) + :param module: Ansible module reference + :param function_name (str): Name of Lambda function to query + :return dict: + """ + + lambda_info = dict() + + try: + lambda_info.update(aliases=_paginate(client, 'list_aliases', FunctionName=function_name)['Aliases']) + except is_boto3_error_code('ResourceNotFoundException'): + lambda_info.update(aliases=[]) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Trying to get aliases") + + return camel_dict_to_snake_dict(lambda_info) + + +def list_functions(client, module): + """ + Returns queried facts for a specified function (or all functions). 
+ + :param client: AWS API client reference (boto3) + :param module: Ansible module reference + """ + + function_name = module.params.get('function_name') + if function_name: + # Function name is specified - retrieve info on that function + function_names = [function_name] + + else: + # Function name is not specified - retrieve all function names + all_function_info = _paginate(client, 'list_functions')['Functions'] + function_names = [function_info['FunctionName'] for function_info in all_function_info] + + query = module.params['query'] + functions = [] + + # keep returning deprecated response (dict of dicts) until removed + all_facts = {} + + for function_name in function_names: + function = {} + + # query = 'config' returns info such as FunctionName, FunctionArn, Description, etc + # these details should be returned regardless of the query + function.update(config_details(client, module, function_name)) + + if query in ['all', 'aliases']: + function.update(alias_details(client, module, function_name)) + + if query in ['all', 'policy']: + function.update(policy_details(client, module, function_name)) + + if query in ['all', 'versions']: + function.update(version_details(client, module, function_name)) + + if query in ['all', 'mappings']: + function.update(mapping_details(client, module, function_name)) + + if query in ['all', 'tags']: + function.update(tags_details(client, module, function_name)) + + all_facts[function['function_name']] = function + + # add current lambda to list of lambdas + functions.append(function) + + # return info + module.exit_json(function=all_facts, functions=functions, changed=False) + + +def config_details(client, module, function_name): + """ + Returns configuration details for a lambda function. + + :param client: AWS API client reference (boto3) + :param module: Ansible module reference + :param function_name (str): Name of Lambda function to query + :return dict: + """ + + lambda_info = dict() + + try: + lambda_info.update(client.get_function_configuration(aws_retry=True, FunctionName=function_name)) + except is_boto3_error_code('ResourceNotFoundException'): + lambda_info.update(function={}) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Trying to get {0} configuration".format(function_name)) + + if "Environment" in lambda_info and "Variables" in lambda_info["Environment"]: + env_vars = lambda_info["Environment"]["Variables"] + snaked_lambda_info = camel_dict_to_snake_dict(lambda_info) + snaked_lambda_info["environment"]["variables"] = env_vars + else: + snaked_lambda_info = camel_dict_to_snake_dict(lambda_info) + + return snaked_lambda_info + + +def mapping_details(client, module, function_name): + """ + Returns all lambda event source mappings. 
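+    Note on config_details() above (illustrative, not part of the upstream
+    module): the raw 'Variables' mapping is saved before
+    camel_dict_to_snake_dict() runs and re-attached afterwards, because
+    user-defined environment variable names must not be snake_cased:
+
+        {'Environment': {'Variables': {'DB_HOST': 'x'}}}
+        # -> {'environment': {'variables': {'DB_HOST': 'x'}}}  # keys kept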
+ + :param client: AWS API client reference (boto3) + :param module: Ansible module reference + :param function_name (str): Name of Lambda function to query + :return dict: + """ + + lambda_info = dict() + params = dict() + + params['FunctionName'] = function_name + + if module.params.get('event_source_arn'): + params['EventSourceArn'] = module.params.get('event_source_arn') + + try: + lambda_info.update(mappings=_paginate(client, 'list_event_source_mappings', **params)['EventSourceMappings']) + except is_boto3_error_code('ResourceNotFoundException'): + lambda_info.update(mappings=[]) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Trying to get source event mappings") + + return camel_dict_to_snake_dict(lambda_info) + + +def policy_details(client, module, function_name): + """ + Returns policy attached to a lambda function. + + :param client: AWS API client reference (boto3) + :param module: Ansible module reference + :param function_name (str): Name of Lambda function to query + :return dict: + """ + + lambda_info = dict() + + try: + # get_policy returns a JSON string so must convert to dict before reassigning to its key + lambda_info.update(policy=json.loads(client.get_policy(aws_retry=True, FunctionName=function_name)['Policy'])) + except is_boto3_error_code('ResourceNotFoundException'): + lambda_info.update(policy={}) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Trying to get {0} policy".format(function_name)) + + return camel_dict_to_snake_dict(lambda_info) + + +def version_details(client, module, function_name): + """ + Returns all lambda function versions. + + :param client: AWS API client reference (boto3) + :param module: Ansible module reference + :param function_name (str): Name of Lambda function to query + :return dict: + """ + + lambda_info = dict() + + try: + lambda_info.update(versions=_paginate(client, 'list_versions_by_function', FunctionName=function_name)['Versions']) + except is_boto3_error_code('ResourceNotFoundException'): + lambda_info.update(versions=[]) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Trying to get {0} versions".format(function_name)) + + return camel_dict_to_snake_dict(lambda_info) + + +def tags_details(client, module, function_name): + """ + Returns tag details for a lambda function. + + :param client: AWS API client reference (boto3) + :param module: Ansible module reference + :param function_name (str): Name of Lambda function to query + :return dict: + """ + + lambda_info = dict() + + try: + lambda_info.update(tags=client.get_function(aws_retry=True, FunctionName=function_name).get('Tags', {})) + except is_boto3_error_code('ResourceNotFoundException'): + lambda_info.update(tags={}) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Trying to get {0} tags".format(function_name)) + + return camel_dict_to_snake_dict(lambda_info) + + +def main(): + """ + Main entry point.
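+    Illustrative sketch (not part of the upstream module): get_policy()
+    returns the policy document as a JSON string, hence the json.loads()
+    call in policy_details() above:
+
+        raw = client.get_policy(FunctionName='myFunction')['Policy']
+        policy = json.loads(raw)   # dict with 'Version', 'Statement', ...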
+ + :return dict: ansible facts + """ + argument_spec = dict( + function_name=dict(required=False, default=None, aliases=['function', 'name']), + query=dict(required=False, choices=['aliases', 'all', 'config', 'mappings', 'policy', 'versions', 'tags'], default=None), + event_source_arn=dict(required=False, default=None), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[], + required_together=[] + ) + + # validate function_name if present + function_name = module.params['function_name'] + if function_name: + if not re.search(r"^[\w\-:]+$", function_name): + module.fail_json( + msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name) + ) + if len(function_name) > 64: + module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name)) + + # create default values for query if not specified. + # if function name exists, query should default to 'all'. + # if function name does not exist, query should default to 'config' to limit the runtime when listing all lambdas. + if not module.params.get('query'): + if function_name: + module.params['query'] = 'all' + else: + module.params['query'] = 'config' + + client = module.client('lambda', retry_decorator=AWSRetry.jittered_backoff()) + + # Deprecate previous return key of `function`, as it was a dict of dicts, as opposed to a list of dicts + module.deprecate( + "The returned key 'function', which returned a dictionary of dictionaries, is deprecated and will be replaced by 'functions'," + " which returns a list of dictionaries. Both keys are returned for now.", + date='2025-01-01', + collection_name='amazon.aws' + ) + + list_functions(client, module) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/lambda_layer.py b/ansible_collections/amazon/aws/plugins/modules/lambda_layer.py new file mode 100644 index 000000000..2813a45da --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/lambda_layer.py @@ -0,0 +1,368 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: lambda_layer +version_added: 5.5.0 +short_description: Creates an AWS Lambda layer or deletes an AWS Lambda layer version +description: + - Creates a Lambda layer from a ZIP archive. + Each time you call this module with the same layer name, a new version is created. + - Deletes a version of a Lambda layer. + +author: "Aubin Bikouo (@abikouo)" +options: + state: + description: + - Determines if a Lambda layer should be created or deleted. When set to C(present), a Lambda layer version will be + created. If set to C(absent), an existing Lambda layer version will be deleted. + type: str + default: present + choices: [ absent, present ] + name: + description: + - The name or Amazon Resource Name (ARN) of the Lambda layer. + type: str + required: true + aliases: + - layer_name + description: + description: + - The description of the version. + - Ignored when I(state=absent). + - Mutually exclusive with I(version). + type: str + content: + description: + - The function layer archive. + - Required when I(state=present). + - Ignored when I(state=absent).
+      - Mutually exclusive with I(version).
+    type: dict
+    suboptions:
+      s3_bucket:
+        description:
+          - The Amazon S3 bucket of the layer archive.
+        type: str
+      s3_key:
+        description:
+          - The Amazon S3 key of the layer archive.
+        type: str
+      s3_object_version:
+        description:
+          - For versioned objects, the version of the layer archive object to use.
+        type: str
+      zip_file:
+        description:
+          - Path to the base64-encoded file of the layer archive.
+        type: path
+  compatible_runtimes:
+    description:
+      - A list of compatible function runtimes.
+      - Ignored when I(state=absent).
+      - Mutually exclusive with I(version).
+    type: list
+    elements: str
+  license_info:
+    description:
+      - The layer's software license. It can be any of an SPDX license identifier,
+        the URL of a license hosted on the internet, or the full text of the license.
+      - Ignored when I(state=absent).
+      - Mutually exclusive with I(version).
+    type: str
+  compatible_architectures:
+    description:
+      - A list of compatible instruction set architectures. For example, x86_64.
+      - Mutually exclusive with I(version).
+    type: list
+    elements: str
+  version:
+    description:
+      - The version number of the layer to delete.
+      - Set to C(-1) to delete all versions for the specified layer name.
+      - Required when I(state=absent).
+      - Ignored when I(state=present).
+      - Mutually exclusive with I(description), I(content), I(compatible_runtimes),
+        I(license_info), I(compatible_architectures).
+    type: int
+extends_documentation_fragment:
+  - amazon.aws.aws
+  - amazon.aws.ec2
+  - amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+---
+# Create a new Python library layer version from a zip archive located in an S3 bucket
+- name: Create a new python library layer
+  amazon.aws.lambda_layer:
+    state: present
+    name: sample-layer
+    description: 'My Python layer'
+    content:
+      s3_bucket: 'lambda-layers-us-west-2-123456789012'
+      s3_key: 'python_layer.zip'
+    compatible_runtimes:
+      - python3.6
+      - python3.7
+    license_info: MIT
+    compatible_architectures:
+      - x86_64
+
+# Create a layer version from a zip in the local filesystem
+- name: Create a new layer from a zip in the local filesystem
+  amazon.aws.lambda_layer:
+    state: present
+    name: sample-layer
+    description: 'My Python layer'
+    content:
+      zip_file: 'python_layer.zip'
+    compatible_runtimes:
+      - python3.6
+      - python3.7
+    license_info: MIT
+    compatible_architectures:
+      - x86_64
+
+# Delete a layer version
+- name: Delete a layer version
+  amazon.aws.lambda_layer:
+    state: absent
+    name: sample-layer
+    version: 2
+
+# Delete all versions of test-layer
+- name: Delete all versions
+  amazon.aws.lambda_layer:
+    state: absent
+    name: test-layer
+    version: -1
+'''
+
+RETURN = '''
+layer_version:
+  description: Info about the layer version that was created or deleted.
+  returned: always
+  type: list
+  elements: dict
+  contains:
+    content:
+      description: Details about the layer version.
+      returned: I(state=present)
+      type: complex
+      contains:
+        location:
+          description: A link to the layer archive in Amazon S3 that is valid for 10 minutes.
+          returned: I(state=present)
+          type: str
+          sample: "https://awslambda-us-east-1-layers.s3.us-east-1.amazonaws.com/snapshots/123456789012/pylayer-9da91deffd3b4941b8baeeae5daeffe4"
+        code_sha256:
+          description: The SHA-256 hash of the layer archive.
+          returned: I(state=present)
+          type: str
+          sample: "VLluleJZ3HTwDrdYolSMrS+8iPwEkcoXXaegjXf+dmc="
+        code_size:
+          description: The size of the layer archive in bytes.
+          returned: I(state=present)
+          type: int
+          sample: 9473675
+        signing_profile_version_arn:
+          description: The Amazon Resource Name (ARN) for a signing profile version.
+          returned: When a signing profile is defined
+          type: str
+        signing_job_arn:
+          description: The Amazon Resource Name (ARN) of a signing job.
+          returned: When a signing profile is defined
+          type: str
+    layer_arn:
+      description: The ARN of the layer.
+      returned: if the layer version exists or has been created
+      type: str
+      sample: "arn:aws:lambda:eu-west-2:123456789012:layer:pylayer"
+    layer_version_arn:
+      description: The ARN of the layer version.
+      returned: if the layer version exists or has been created
+      type: str
+      sample: "arn:aws:lambda:eu-west-2:123456789012:layer:pylayer:2"
+    description:
+      description: The description of the version.
+      returned: I(state=present)
+      type: str
+    created_date:
+      description: The date that the layer version was created, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).
+      returned: if the layer version exists or has been created
+      type: str
+      sample: "2022-09-28T14:27:35.866+0000"
+    version:
+      description: The version number.
+      returned: if the layer version exists or has been created
+      type: int
+      sample: 1
+    compatible_runtimes:
+      description: A list of compatible runtimes.
+      returned: if it was defined for the layer version.
+      type: list
+      sample: ["python3.7"]
+    license_info:
+      description: The layer's software license.
+      returned: if it was defined for the layer version.
+      type: str
+      sample: "GPL-3.0-only"
+    compatible_architectures:
+      description: A list of compatible instruction set architectures.
+      returned: if it was defined for the layer version.
+      type: list
+'''
+
+try:
+    import botocore
+except ImportError:
+    pass  # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+@AWSRetry.jittered_backoff()
+def _list_layer_versions(client, **params):
+    paginator = client.get_paginator('list_layer_versions')
+    return paginator.paginate(**params).build_full_result()
+
+
+class LambdaLayerFailure(Exception):
+    def __init__(self, exc, msg):
+        self.exc = exc
+        self.msg = msg
+        super().__init__(self)
+
+
+def list_layer_versions(lambda_client, name):
+
+    try:
+        layer_versions = _list_layer_versions(lambda_client, LayerName=name)['LayerVersions']
+        return [camel_dict_to_snake_dict(layer) for layer in layer_versions]
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        raise LambdaLayerFailure(e, "Unable to list layer versions for name {0}".format(name))
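+
+# Illustrative note (editorial sketch, not part of the collection): because the
+# retry-wrapped helper above uses build_full_result(), every page of
+# list_layer_versions is drained and merged, so callers receive one dict shaped
+# like {'LayerVersions': [...all pages...]} instead of following NextMarker
+# themselves.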
+
+
+def create_layer_version(lambda_client, params, check_mode=False):
+    if check_mode:
+        return {"msg": "Create operation skipped - running in check mode", "changed": True}
+
+    opt = {"LayerName": params.get("name"), "Content": {}}
+    keys = [
+        ('description', 'Description'),
+        ('compatible_runtimes', 'CompatibleRuntimes'),
+        ('license_info', 'LicenseInfo'),
+        ('compatible_architectures', 'CompatibleArchitectures'),
+    ]
+    for k, d in keys:
+        if params.get(k) is not None:
+            opt[d] = params.get(k)
+
+    # Read zip file if any
+    zip_file = params["content"].get("zip_file")
+    if zip_file is not None:
+        with open(zip_file, "rb") as zf:
+            opt["Content"]["ZipFile"] = zf.read()
+    else:
+        opt["Content"]["S3Bucket"] = params["content"].get("s3_bucket")
+        opt["Content"]["S3Key"] = params["content"].get("s3_key")
+        if params["content"].get("s3_object_version") is not None:
+            opt["Content"]["S3ObjectVersion"] = params["content"].get("s3_object_version")
+
+    try:
+        layer_version = lambda_client.publish_layer_version(**opt)
+        layer_version.pop("ResponseMetadata", None)
+        return {"changed": True, "layer_versions": [camel_dict_to_snake_dict(layer_version)]}
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+        raise LambdaLayerFailure(e, "Failed to publish a new layer version (check that you have required permissions).")
+
+
+def delete_layer_version(lambda_client, params, check_mode=False):
+    name = params.get("name")
+    version = params.get("version")
+    layer_versions = list_layer_versions(lambda_client, name)
+    deleted_versions = []
+    changed = False
+    for layer in layer_versions:
+        if version == -1 or layer["version"] == version:
+            deleted_versions.append(layer)
+            changed = True
+            if not check_mode:
+                try:
+                    lambda_client.delete_layer_version(LayerName=name, VersionNumber=layer["version"])
+                except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+                    raise LambdaLayerFailure(e, "Failed to delete layer version LayerName={0}, VersionNumber={1}.".format(name, layer["version"]))
+    return {"changed": changed, "layer_versions": deleted_versions}
+
+
+def execute_module(module, lambda_client):
+
+    try:
+        state = module.params.get("state")
+        f_operation = create_layer_version
+        if state == "absent":
+            f_operation = delete_layer_version
+
+        module.exit_json(**f_operation(lambda_client, module.params, module.check_mode))
+    except LambdaLayerFailure as e:
+        module.fail_json_aws(e.exc, msg=e.msg)
+
+
+def main():
+    argument_spec = dict(
+        state=dict(type="str", choices=["present", "absent"], default="present"),
+        name=dict(type="str", required=True, aliases=["layer_name"]),
+        description=dict(type="str"),
+        content=dict(
+            type="dict",
+            options=dict(
+                s3_bucket=dict(type="str"),
+                s3_key=dict(type="str", no_log=False),
+                s3_object_version=dict(type="str"),
+                zip_file=dict(type="path"),
+            ),
+            required_together=[['s3_bucket', 's3_key']],
+            required_one_of=[['s3_bucket', 'zip_file']],
+            mutually_exclusive=[['s3_bucket', 'zip_file']],
+        ),
+        compatible_runtimes=dict(type="list", elements="str"),
+        license_info=dict(type="str"),
+        compatible_architectures=dict(type="list", elements="str"),
+        version=dict(type="int"),
+    )
+
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        required_if=[
+            ("state", "present", ["content"]),
+            ("state", "absent", ["version"]),
+        ],
+        mutually_exclusive=[
+            ['version', 'description'],
+            ['version', 'content'],
+            ['version', 'compatible_runtimes'],
+            ['version', 'license_info'],
+            ['version', 'compatible_architectures'],
+        ],
+        supports_check_mode=True,
+    )
+
+    lambda_client = module.client('lambda')
+    execute_module(module, lambda_client)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/lambda_layer_info.py b/ansible_collections/amazon/aws/plugins/modules/lambda_layer_info.py
new file mode 100644
index 000000000..ded4c9aab
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/lambda_layer_info.py
@@ -0,0 +1,221 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: lambda_layer_info
+version_added: 5.5.0
+short_description: List Lambda layers or Lambda layer versions
+description:
+  - This module is used to list the versions of a Lambda layer or all available Lambda layers.
+  - Lambda layer versions that have been deleted aren't listed.
+
+author: "Aubin Bikouo (@abikouo)"
+options:
+  name:
+    description:
+      - The name or Amazon Resource Name (ARN) of the Lambda layer.
+    type: str
+    aliases:
+      - layer_name
+  compatible_runtime:
+    description:
+      - A runtime identifier.
+      - Specify this option without I(name) to list only the latest version of each layer
+        that indicates it is compatible with that runtime.
+      - Specify this option with I(name) to list only the layer versions that indicate
+        they're compatible with that runtime.
+    type: str
+  compatible_architecture:
+    description:
+      - A compatible instruction set architecture.
+      - Specify this option without I(name) to list only the latest version of each layer
+        that is compatible with that instruction set architecture.
+      - Specify this option with I(name) to include only layer versions that are compatible with that architecture.
+    type: str
+extends_documentation_fragment:
+  - amazon.aws.aws
+  - amazon.aws.ec2
+  - amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+---
+# Display information about the versions for the layer named blank-java-lib
+- name: Retrieve layer versions
+  amazon.aws.lambda_layer_info:
+    name: blank-java-lib
+
+# Display information about the versions for the layer named blank-java-lib compatible with architecture x86_64
+- name: Retrieve layer versions
+  amazon.aws.lambda_layer_info:
+    name: blank-java-lib
+    compatible_architecture: x86_64
+
+# list latest versions of available layers
+- name: list latest versions for all layers
+  amazon.aws.lambda_layer_info:
+
+# list latest versions of available layers compatible with runtime python3.7
+- name: list latest versions for all layers
+  amazon.aws.lambda_layer_info:
+    compatible_runtime: python3.7
+'''
+
+RETURN = '''
+layers_versions:
+  description:
+    - The layer versions that exist.
+  returned: success
+  type: list
+  elements: dict
+  contains:
+    layer_arn:
+      description: The ARN of the layer.
+      returned: when C(name) is provided
+      type: str
+      sample: "arn:aws:lambda:eu-west-2:123456789012:layer:pylayer"
+    layer_version_arn:
+      description: The ARN of the layer version.
+      returned: if the layer version exists or has been created
+      type: str
+      sample: "arn:aws:lambda:eu-west-2:123456789012:layer:pylayer:2"
+    description:
+      description: The description of the version.
+      returned: if it was defined for the layer version.
+      type: str
+    created_date:
+      description: The date that the layer version was created, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).
+      returned: if the layer version exists or has been created
+      type: str
+      sample: "2022-09-28T14:27:35.866+0000"
+    version:
+      description: The version number.
+      returned: if the layer version exists or has been created
+      type: int
+      sample: 1
+    compatible_runtimes:
+      description: A list of compatible runtimes.
+      returned: if it was defined for the layer version.
+      type: list
+      sample: ["python3.7"]
+    license_info:
+      description: The layer's software license.
+      returned: if it was defined for the layer version.
+      type: str
+      sample: "GPL-3.0-only"
+    compatible_architectures:
+      description: A list of compatible instruction set architectures.
+      returned: if it was defined for the layer version.
+ type: list +''' + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry + + +@AWSRetry.jittered_backoff() +def _list_layer_versions(client, **params): + paginator = client.get_paginator('list_layer_versions') + return paginator.paginate(**params).build_full_result() + + +@AWSRetry.jittered_backoff() +def _list_layers(client, **params): + paginator = client.get_paginator('list_layers') + return paginator.paginate(**params).build_full_result() + + +class LambdaLayerInfoFailure(Exception): + def __init__(self, exc, msg): + self.exc = exc + self.msg = msg + super().__init__(self) + + +def list_layer_versions(lambda_client, name, compatible_runtime=None, compatible_architecture=None): + + params = {"LayerName": name} + if compatible_runtime: + params["CompatibleRuntime"] = compatible_runtime + if compatible_architecture: + params["CompatibleArchitecture"] = compatible_architecture + try: + layer_versions = _list_layer_versions(lambda_client, **params)['LayerVersions'] + return [camel_dict_to_snake_dict(layer) for layer in layer_versions] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + raise LambdaLayerInfoFailure(exc=e, msg="Unable to list layer versions for name {0}".format(name)) + + +def list_layers(lambda_client, compatible_runtime=None, compatible_architecture=None): + + params = {} + if compatible_runtime: + params["CompatibleRuntime"] = compatible_runtime + if compatible_architecture: + params["CompatibleArchitecture"] = compatible_architecture + try: + layers = _list_layers(lambda_client, **params)['Layers'] + layer_versions = [] + for item in layers: + layer = {key: value for key, value in item.items() if key != "LatestMatchingVersion"} + layer.update(item.get("LatestMatchingVersion")) + layer_versions.append(camel_dict_to_snake_dict(layer)) + return layer_versions + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + raise LambdaLayerInfoFailure(exc=e, msg="Unable to list layers {0}".format(params)) + + +def execute_module(module, lambda_client): + + params = {} + f_operation = list_layers + name = module.params.get("name") + if name is not None: + f_operation = list_layer_versions + params["name"] = name + compatible_runtime = module.params.get("compatible_runtime") + if compatible_runtime is not None: + params["compatible_runtime"] = compatible_runtime + compatible_architecture = module.params.get("compatible_architecture") + if compatible_architecture is not None: + params["compatible_architecture"] = compatible_architecture + + try: + result = f_operation(lambda_client, **params) + module.exit_json(changed=False, layers_versions=result) + except LambdaLayerInfoFailure as e: + module.fail_json_aws(exception=e.exc, msg=e.msg) + + +def main(): + argument_spec = dict( + name=dict(type="str", aliases=["layer_name"]), + compatible_runtime=dict(type="str"), + compatible_architecture=dict(type="str"), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + lambda_client = module.client('lambda') + execute_module(module, lambda_client) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/lambda_policy.py 
b/ansible_collections/amazon/aws/plugins/modules/lambda_policy.py
new file mode 100644
index 000000000..38fbef325
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/lambda_policy.py
@@ -0,0 +1,426 @@
+#!/usr/bin/python
+# Copyright (c) 2016, Pierre Jodouin
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: lambda_policy
+version_added: 5.0.0
+short_description: Creates, updates or deletes AWS Lambda policy statements
+description:
+  - This module allows the management of AWS Lambda policy statements.
+  - It is idempotent and supports "Check" mode.
+  - Use module M(amazon.aws.lambda) to manage the lambda function itself, M(amazon.aws.lambda_alias) to manage function aliases,
+    M(amazon.aws.lambda_event) to manage event source mappings such as Kinesis streams, M(community.aws.execute_lambda) to execute a
+    lambda function and M(amazon.aws.lambda_info) to gather information relating to one or more lambda functions.
+  - This module was originally added to C(community.aws) in release 1.0.0.
+author:
+  - Pierre Jodouin (@pjodouin)
+  - Michael De La Rue (@mikedlr)
+options:
+  function_name:
+    description:
+      - "Name of the Lambda function whose resource policy you are updating by adding a new permission."
+      - "You can specify a function name (for example, C(Thumbnail)) or you can specify the Amazon Resource Name (ARN) of the"
+      - "function (for example, C(arn:aws:lambda:us-west-2:account-id:function:ThumbNail)). AWS Lambda also allows you to"
+      - "specify a partial ARN (for example, C(account-id:Thumbnail)). Note that the length constraint applies only to the"
+      - "ARN. If you specify only the function name, it is limited to 64 characters in length."
+    required: true
+    aliases: ['lambda_function_arn', 'function_arn']
+    type: str
+
+  state:
+    description:
+      - Describes the desired state.
+    default: "present"
+    choices: ["present", "absent"]
+    type: str
+
+  alias:
+    description:
+      - Name of the function alias. Mutually exclusive with I(version).
+    type: str
+
+  version:
+    description:
+      - Version of the Lambda function. Mutually exclusive with I(alias).
+    type: int
+
+  statement_id:
+    description:
+      - A unique statement identifier.
+    required: true
+    aliases: ['sid']
+    type: str
+
+  action:
+    description:
+      - "The AWS Lambda action you want to allow in this statement. Each Lambda action is a string starting with
+        lambda: followed by the API name (see Operations). For example, C(lambda:CreateFunction). You can use a wildcard
+        (C(lambda:*)) to grant permission for all AWS Lambda actions."
+    required: true
+    type: str
+
+  principal:
+    description:
+      - "The principal who is getting this permission. It can be the Amazon S3 service principal (s3.amazonaws.com) if
+        you want Amazon S3 to invoke the function, an AWS account ID if you are granting cross-account permission, or
+        any valid AWS service principal such as sns.amazonaws.com. For example, you might want to allow a custom
+        application in another AWS account to push events to AWS Lambda by invoking your function."
+    required: true
+    type: str
+
+  source_arn:
+    description:
+      - This is optional; however, when granting Amazon S3 permission to invoke your function, you should specify this
+        field with the bucket Amazon Resource Name (ARN) as its value.
This ensures that only events generated from + the specified bucket can invoke the function. + type: str + + source_account: + description: + - The AWS account ID (without a hyphen) of the source owner. For example, if I(source_arn) identifies a bucket, + then this is the bucket owner's account ID. You can use this additional condition to ensure the bucket you + specify is owned by a specific account (it is possible the bucket owner deleted the bucket and some other AWS + account created the bucket). You can also use this condition to specify all sources (that is, you don't + specify the I(source_arn) ) owned by a specific account. + type: str + + event_source_token: + description: + - Token string representing source ARN or account. Mutually exclusive with I(source_arn) or I(source_account). + type: str + +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.boto3 + +''' + +EXAMPLES = ''' + +- name: Lambda S3 event notification + amazon.aws.lambda_policy: + state: present + function_name: functionName + alias: Dev + statement_id: lambda-s3-myBucket-create-data-log + action: lambda:InvokeFunction + principal: s3.amazonaws.com + source_arn: arn:aws:s3:eu-central-1:123456789012:bucketName + source_account: 123456789012 + register: lambda_policy_action + +- name: show results + ansible.builtin.debug: + var: lambda_policy_action +''' + +RETURN = ''' +--- +lambda_policy_action: + description: describes what action was taken + returned: success + type: str +''' + +import json +import re + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible.module_utils._text import to_native +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code + + +def pc(key): + """ + Changes python key into Pascal case equivalent. For example, 'this_function_name' becomes 'ThisFunctionName'. + + :param key: + :return: + """ + + return "".join([token.capitalize() for token in key.split('_')]) + + +def policy_equal(module, current_statement): + for param in ('action', 'principal', 'source_arn', 'source_account', 'event_source_token'): + if module.params.get(param) != current_statement.get(param): + return False + + return True + + +def set_api_params(module, module_params): + """ + Sets module parameters to those expected by the boto3 API. + + :param module: + :param module_params: + :return: + """ + + api_params = dict() + + for param in module_params: + module_param = module.params.get(param) + if module_param is not None: + api_params[pc(param)] = module_param + + return api_params + + +def validate_params(module): + """ + Performs parameter validation beyond the module framework's validation. + + :param module: + :return: + """ + + function_name = module.params['function_name'] + + # validate function name + if function_name.startswith('arn:'): + if not re.search(r'^[\w\-:]+$', function_name): + module.fail_json( + msg='ARN {0} is invalid. ARNs must contain only alphanumeric characters, hyphens and colons.'.format(function_name) + ) + if len(function_name) > 140: + module.fail_json(msg='ARN name "{0}" exceeds 140 character limit'.format(function_name)) + else: + if not re.search(r'^[\w\-]+$', function_name): + module.fail_json( + msg='Function name {0} is invalid. 
Names must contain only alphanumeric characters and hyphens.'.format(
+                    function_name)
+            )
+        if len(function_name) > 64:
+            module.fail_json(
+                msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
+
+
+def get_qualifier(module):
+    """
+    Returns the function qualifier as a version or alias or None.
+
+    :param module:
+    :return:
+    """
+
+    if module.params.get('version') is not None:
+        return to_native(module.params['version'])
+    elif module.params['alias']:
+        return to_native(module.params['alias'])
+
+    return None
+
+
+def extract_statement(policy, sid):
+    """return flattened single policy statement from a policy
+
+    If a policy statement is present in the policy extract it and
+    return it in a flattened form.  Otherwise return an empty
+    dictionary.
+    """
+    if 'Statement' not in policy:
+        return {}
+    policy_statement = {}
+    # Now that we have the policy, check if required permission statement is present and flatten to
+    # simple dictionary if found.
+    for statement in policy['Statement']:
+        if statement['Sid'] == sid:
+            policy_statement['action'] = statement['Action']
+            try:
+                policy_statement['principal'] = statement['Principal']['Service']
+            except KeyError:
+                pass
+            try:
+                policy_statement['principal'] = statement['Principal']['AWS']
+            except KeyError:
+                pass
+            try:
+                policy_statement['source_arn'] = statement['Condition']['ArnLike']['AWS:SourceArn']
+            except KeyError:
+                pass
+            try:
+                policy_statement['source_account'] = statement['Condition']['StringEquals']['AWS:SourceAccount']
+            except KeyError:
+                pass
+            try:
+                policy_statement['event_source_token'] = statement['Condition']['StringEquals']['lambda:EventSourceToken']
+            except KeyError:
+                pass
+            break
+
+    return policy_statement
+
+
+def get_policy_statement(module, client):
+    """Checks that the function policy exists and, if so, whether the statement ID is present or absent.
+
+    :param module:
+    :param client:
+    :return:
+    """
+    sid = module.params['statement_id']
+
+    # set API parameters
+    api_params = set_api_params(module, ('function_name', ))
+    qualifier = get_qualifier(module)
+    if qualifier:
+        api_params.update(Qualifier=qualifier)
+
+    policy_results = None
+    # check if function policy exists
+    try:
+        policy_results = client.get_policy(**api_params)
+    except is_boto3_error_code('ResourceNotFoundException'):
+        return {}
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg="retrieving function policy")
+
+    # get_policy returns a JSON string so must convert to dict before reassigning to its key
+    policy = json.loads(policy_results.get('Policy', '{}'))
+    return extract_statement(policy, sid)
+
+
+def add_policy_permission(module, client):
+    """
+    Adds a permission statement to the policy.
+
+    :param module:
+    :param client:
+    :return:
+    """
+
+    changed = False
+
+    # set API parameters
+    params = (
+        'function_name',
+        'statement_id',
+        'action',
+        'principal',
+        'source_arn',
+        'source_account',
+        'event_source_token')
+    api_params = set_api_params(module, params)
+    qualifier = get_qualifier(module)
+    if qualifier:
+        api_params.update(Qualifier=qualifier)
+
+    if not module.check_mode:
+        try:
+            client.add_permission(**api_params)
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, msg="adding permission to policy")
+        changed = True
+
+    return changed
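+
+# Illustrative note (editorial sketch, not part of the module): set_api_params()
+# only Pascal-cases the module parameters that are actually set, so a task that
+# supplies function_name, statement_id, action and principal produces kwargs like
+#
+#   {'FunctionName': 'myFunction', 'StatementId': 'mySid',
+#    'Action': 'lambda:InvokeFunction', 'Principal': 's3.amazonaws.com'}
+#
+# which are passed verbatim to client.add_permission() above.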
+
+
+def remove_policy_permission(module, client):
+    """
+    Removes a permission statement from the policy.
+
+    :param module:
+    :param client:
+    :return:
+    """
+
+    changed = False
+
+    # set API parameters
+    api_params = set_api_params(module, ('function_name', 'statement_id'))
+    qualifier = get_qualifier(module)
+    if qualifier:
+        api_params.update(Qualifier=qualifier)
+
+    try:
+        if not module.check_mode:
+            client.remove_permission(**api_params)
+            changed = True
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="removing permission from policy")
+
+    return changed
+
+
+def manage_state(module, lambda_client):
+    changed = False
+    current_state = 'absent'
+    state = module.params['state']
+    action_taken = 'none'
+
+    # check if the policy exists
+    current_policy_statement = get_policy_statement(module, lambda_client)
+    if current_policy_statement:
+        current_state = 'present'
+
+    if state == 'present':
+        if current_state == 'present' and not policy_equal(module, current_policy_statement):
+            remove_policy_permission(module, lambda_client)
+            changed = add_policy_permission(module, lambda_client)
+            action_taken = 'updated'
+        if not current_state == 'present':
+            changed = add_policy_permission(module, lambda_client)
+            action_taken = 'added'
+    elif current_state == 'present':
+        # remove the policy statement
+        changed = remove_policy_permission(module, lambda_client)
+        action_taken = 'deleted'
+
+    return dict(changed=changed, ansible_facts=dict(lambda_policy_action=action_taken))
+
+
+def setup_module_object():
+    argument_spec = dict(
+        state=dict(default='present', choices=['present', 'absent']),
+        function_name=dict(required=True, aliases=['lambda_function_arn', 'function_arn']),
+        statement_id=dict(required=True, aliases=['sid']),
+        alias=dict(),
+        version=dict(type='int'),
+        action=dict(required=True),
+        principal=dict(required=True),
+        source_arn=dict(),
+        source_account=dict(),
+        event_source_token=dict(no_log=False),
+    )
+
+    return AnsibleAWSModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        mutually_exclusive=[['alias', 'version'],
+                            ['event_source_token', 'source_arn'],
+                            ['event_source_token', 'source_account']],
+    )
+
+
+def main():
+    """
+    Main entry point.
+
+    :return dict: ansible facts
+    """
+
+    module = setup_module_object()
+    client = module.client('lambda')
+    validate_params(module)
+    results = manage_state(module, client)
+
+    module.exit_json(**results)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_cluster.py b/ansible_collections/amazon/aws/plugins/modules/rds_cluster.py
new file mode 100644
index 000000000..5eec23c88
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/rds_cluster.py
@@ -0,0 +1,1143 @@
+#!/usr/bin/python
+# Copyright (c) 2022 Ansible Project
+# Copyright (c) 2022 Alina Buzachis (@alinabuzachis)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: rds_cluster
+version_added: 5.0.0
+short_description: Create, modify, and delete RDS clusters
+description:
+  - Create, modify, and delete RDS clusters.
+  - This module was originally added to C(community.aws) in release 3.2.0.
+extends_documentation_fragment:
+  - amazon.aws.aws
+  - amazon.aws.ec2
+  - amazon.aws.tags
+  - amazon.aws.boto3
+author:
+  - Sloane Hertel (@s-hertel)
+  - Alina Buzachis (@alinabuzachis)
+options:
+  # General module options
+  state:
+    description: Whether the cluster should exist or not.
+ choices: ['present', 'absent'] + default: 'present' + type: str + creation_source: + description: Which source to use if creating from a template (an existing cluster, S3 bucket, or snapshot). + choices: ['snapshot', 's3', 'cluster'] + type: str + force_update_password: + description: + - Set to C(true) to update your cluster password with I(master_user_password). + - Since comparing passwords to determine if it needs to be updated is not possible this is set to C(false) by default to allow idempotence. + type: bool + default: false + promote: + description: Set to C(true) to promote a read replica cluster. + type: bool + default: false + purge_cloudwatch_logs_exports: + description: + - Whether or not to disable Cloudwatch logs enabled for the DB cluster that are not provided in I(enable_cloudwatch_logs_exports). + Set I(enable_cloudwatch_logs_exports) to an empty list to disable all. + type: bool + default: true + purge_security_groups: + description: + - Set to C(false) to retain any enabled security groups that aren't specified in the task and are associated with the cluster. + - Can be applied to I(vpc_security_group_ids) + type: bool + default: true + wait: + description: Whether to wait for the cluster to be available or deleted. + type: bool + default: true + # Options that have a corresponding boto3 parameter + apply_immediately: + description: + - A value that specifies whether modifying a cluster with I(new_db_cluster_identifier) and I(master_user_password) + should be applied as soon as possible, regardless of the I(preferred_maintenance_window) setting. If C(false), changes + are applied during the next maintenance window. + type: bool + default: false + availability_zones: + description: + - A list of EC2 Availability Zones that instances in the DB cluster can be created in. + May be used when creating a cluster or when restoring from S3 or a snapshot. + aliases: + - zones + - az + type: list + elements: str + backtrack_to: + description: + - The timestamp of the time to backtrack the DB cluster to in ISO 8601 format, such as "2017-07-08T18:00Z". + type: str + backtrack_window: + description: + - The target backtrack window, in seconds. To disable backtracking, set this value to C(0). + - If specified, this value must be set to a number from C(0) to C(259,200) (72 hours). + type: int + backup_retention_period: + description: + - The number of days for which automated backups are retained (must be within C(1) to C(35)). + May be used when creating a new cluster, when restoring from S3, or when modifying a cluster. + type: int + default: 1 + character_set_name: + description: + - The character set to associate with the DB cluster. + type: str + database_name: + description: + - The name for your database. If a name is not provided Amazon RDS will not create a database. + aliases: + - db_name + type: str + db_cluster_identifier: + description: + - The DB cluster (lowercase) identifier. The identifier must contain from 1 to 63 letters, numbers, or + hyphens and the first character must be a letter and may not end in a hyphen or contain consecutive hyphens. + aliases: + - cluster_id + - id + - cluster_name + type: str + required: true + db_cluster_parameter_group_name: + description: + - The name of the DB cluster parameter group to associate with this DB cluster. + If this argument is omitted when creating a cluster, the default DB cluster parameter group for the specified DB engine and version is used. 
+    type: str
+  db_subnet_group_name:
+    description:
+      - A DB subnet group to associate with this DB cluster if not using the default.
+    type: str
+  enable_cloudwatch_logs_exports:
+    description:
+      - A list of log types that need to be enabled for exporting to CloudWatch Logs.
+      - Engine aurora-mysql supports C(audit), C(error), C(general) and C(slowquery).
+      - Engine aurora-postgresql supports C(postgresql).
+    type: list
+    elements: str
+  deletion_protection:
+    description:
+      - A value that indicates whether the DB cluster has deletion protection enabled.
+        The database can't be deleted when deletion protection is enabled.
+        By default, deletion protection is disabled.
+    type: bool
+  global_cluster_identifier:
+    description:
+      - The global cluster ID of an Aurora cluster that becomes the primary cluster in the new global database cluster.
+    type: str
+  enable_http_endpoint:
+    description:
+      - A value that indicates whether to enable the HTTP endpoint for an Aurora Serverless DB cluster.
+        By default, the HTTP endpoint is disabled.
+    type: bool
+  copy_tags_to_snapshot:
+    description:
+      - Indicates whether to copy all tags from the DB cluster to snapshots of the DB cluster.
+        The default is not to copy them.
+    type: bool
+  domain:
+    description:
+      - The Active Directory directory ID to create the DB cluster in.
+    type: str
+  domain_iam_role_name:
+    description:
+      - Specify the name of the IAM role to be used when making API calls to the Directory Service.
+    type: str
+  enable_global_write_forwarding:
+    description:
+      - A value that indicates whether to enable this DB cluster to forward write operations to the primary cluster of an Aurora global database.
+        By default, write operations are not allowed on Aurora DB clusters that are secondary clusters in an Aurora global database.
+      - This value can only be set on Aurora DB clusters that are members of an Aurora global database.
+    type: bool
+  db_cluster_instance_class:
+    description:
+      - The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, for example C(db.m6gd.xlarge).
+      - Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines.
+      - For the full list of DB instance classes and availability for your engine, visit
+        U(https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html).
+      - This setting is required to create a Multi-AZ DB cluster.
+      - I(db_cluster_instance_class) requires botocore >= 1.23.44.
+    type: str
+    version_added: 5.5.0
+  enable_iam_database_authentication:
+    description:
+      - Enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts.
+        If this option is omitted when creating the cluster, Amazon RDS sets this to C(false).
+    type: bool
+  allocated_storage:
+    description:
+      - The amount of storage in gibibytes (GiB) to allocate to each DB instance in the Multi-AZ DB cluster.
+      - This setting is required to create a Multi-AZ DB cluster.
+      - I(allocated_storage) requires botocore >= 1.23.44.
+    type: int
+    version_added: 5.5.0
+  storage_type:
+    description:
+      - Specifies the storage type to be associated with the DB cluster.
+      - This setting is required to create a Multi-AZ DB cluster.
+      - When specified, a value for the I(iops) parameter is required.
+      - I(storage_type) requires botocore >= 1.23.44.
+      - Defaults to C(io1).
+    type: str
+    choices:
+      - io1
+    version_added: 5.5.0
+  iops:
+    description:
+      - The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for each DB instance in the Multi-AZ DB cluster.
+      - This setting is required to create a Multi-AZ DB cluster.
+      - Must be a multiple between .5 and 50 of the storage amount for the DB cluster.
+      - I(iops) requires botocore >= 1.23.44.
+    type: int
+    version_added: 5.5.0
+  engine:
+    description:
+      - The name of the database engine to be used for this DB cluster. This is required to create a cluster.
+      - The combination of I(engine) and I(engine_mode) may not be supported.
+      - "See AWS documentation for details:
+        L(Amazon RDS Documentation,https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBCluster.html)."
+      - When I(engine=mysql), I(allocated_storage), I(iops) and I(db_cluster_instance_class) must also be specified.
+      - When I(engine=postgres), I(allocated_storage), I(iops) and I(db_cluster_instance_class) must also be specified.
+      - Support for C(postgres) and C(mysql) was added in amazon.aws 5.5.0.
+    choices:
+      - aurora
+      - aurora-mysql
+      - aurora-postgresql
+      - mysql
+      - postgres
+    type: str
+  engine_mode:
+    description:
+      - The DB engine mode of the DB cluster. The combination of I(engine) and I(engine_mode) may not be supported.
+      - "See AWS documentation for details:
+        L(Amazon RDS Documentation,https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBCluster.html)."
+    choices:
+      - provisioned
+      - serverless
+      - parallelquery
+      - global
+      - multimaster
+    type: str
+    version_added: 5.5.0
+  engine_version:
+    description:
+      - The version number of the database engine to use.
+      - For Aurora MySQL that could be C(5.6.10a), C(5.7.12).
+      - Aurora PostgreSQL example, C(9.6.3).
+    type: str
+  final_snapshot_identifier:
+    description:
+      - The DB cluster snapshot identifier of the new DB cluster snapshot created when I(skip_final_snapshot=false).
+    type: str
+  force_backtrack:
+    description:
+      - A boolean to indicate if the DB cluster should be forced to backtrack when binary logging is enabled.
+        Otherwise, an error occurs when binary logging is enabled.
+    type: bool
+  kms_key_id:
+    description:
+      - The AWS KMS key identifier (the ARN, unless you are creating a cluster in the same account that owns the
+        KMS key, in which case the KMS key alias may be used).
+      - If I(replication_source_identifier) specifies an encrypted source, Amazon RDS will use the key used to encrypt the source.
+      - If I(storage_encrypted=true) and I(replication_source_identifier) is not provided, the default encryption key is used.
+    type: str
+  master_user_password:
+    description:
+      - An 8-41 character password for the master database user.
+      - The password can contain any printable ASCII character except C(/), C("), or C(@).
+      - To modify the password use I(force_update_password). Use I(apply_immediately) to change
+        the password immediately, otherwise it is updated during the next maintenance window.
+    aliases:
+      - password
+    type: str
+  master_username:
+    description:
+      - The name of the master user for the DB cluster. Must be 1-16 letters or numbers and begin with a letter.
+    aliases:
+      - username
+    type: str
+  new_db_cluster_identifier:
+    description:
+      - The new DB cluster (lowercase) identifier for the DB cluster when renaming a DB cluster.
+      - The identifier must contain from 1 to 63 letters, numbers, or hyphens and the first character must be a
+        letter and may not end in a hyphen or contain consecutive hyphens.
+      - Use I(apply_immediately) to rename immediately, otherwise it is updated during the next maintenance window.
+    aliases:
+      - new_cluster_id
+      - new_id
+      - new_cluster_name
+    type: str
+  option_group_name:
+    description:
+      - The option group to associate with the DB cluster.
+    type: str
+  port:
+    description:
+      - The port number on which the instances in the DB cluster accept connections. If not specified, Amazon RDS
+        defaults this to C(3306) if the I(engine) is C(aurora) and C(5432) if the I(engine) is C(aurora-postgresql).
+    type: int
+  preferred_backup_window:
+    description:
+      - The daily time range (in UTC) of at least 30 minutes, during which automated backups are created if automated backups are
+        enabled using I(backup_retention_period). The option must be in the format of "hh24:mi-hh24:mi" and not conflict with
+        I(preferred_maintenance_window).
+    aliases:
+      - backup_window
+    type: str
+  preferred_maintenance_window:
+    description:
+      - The weekly time range (in UTC) of at least 30 minutes, during which system maintenance can occur. The option must
+        be in the format "ddd:hh24:mi-ddd:hh24:mi" where ddd is one of Mon, Tue, Wed, Thu, Fri, Sat, Sun.
+    aliases:
+      - maintenance_window
+    type: str
+  replication_source_identifier:
+    description:
+      - The Amazon Resource Name (ARN) of the source DB instance or DB cluster if this DB cluster is created as a Read Replica.
+    aliases:
+      - replication_src_id
+    type: str
+  restore_to_time:
+    description:
+      - The UTC date and time to restore the DB cluster to. Must be in the format "2015-03-07T23:45:00Z".
+      - If this is not provided while restoring a cluster, I(use_latest_restorable_time) must be.
+        May not be specified if I(restore_type) is C(copy-on-write).
+    type: str
+  restore_type:
+    description:
+      - The type of restore to be performed. If not provided, Amazon RDS uses full-copy.
+    choices:
+      - full-copy
+      - copy-on-write
+    type: str
+  role_arn:
+    description:
+      - The Amazon Resource Name (ARN) of the IAM role to associate with the Aurora DB cluster, for example
+        "arn:aws:iam::123456789012:role/AuroraAccessRole".
+    type: str
+  s3_bucket_name:
+    description:
+      - The name of the Amazon S3 bucket that contains the data used to create the Amazon Aurora DB cluster.
+    type: str
+  s3_ingestion_role_arn:
+    description:
+      - The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that authorizes Amazon RDS to access
+        the Amazon S3 bucket on your behalf.
+    type: str
+  s3_prefix:
+    description:
+      - The prefix for all of the file names that contain the data used to create the Amazon Aurora DB cluster.
+      - If you do not specify a SourceS3Prefix value, then the Amazon Aurora DB cluster is created by using all of the files in the Amazon S3 bucket.
+    type: str
+  skip_final_snapshot:
+    description:
+      - Whether a final DB cluster snapshot is created before the DB cluster is deleted.
+      - If this is C(false), I(final_snapshot_identifier) must be provided.
+    type: bool
+    default: false
+  snapshot_identifier:
+    description:
+      - The identifier for the DB snapshot or DB cluster snapshot to restore from.
+      - You can use either the name or the ARN to specify a DB cluster snapshot. However, you can use only the ARN to specify a DB snapshot.
+    type: str
+  source_db_cluster_identifier:
+    description:
+      - The identifier of the source DB cluster from which to restore.
+    type: str
+  source_engine:
+    description:
+      - The identifier for the database engine that was backed up to create the files stored in the Amazon S3 bucket.
+    choices:
+      - mysql
+    type: str
+  source_engine_version:
+    description:
+      - The version of the database that the backup files were created from.
+    type: str
+  source_region:
+    description:
+      - The ID of the region that contains the source for the DB cluster.
+    type: str
+  storage_encrypted:
+    description:
+      - Whether the DB cluster is encrypted.
+    type: bool
+  use_earliest_time_on_point_in_time_unavailable:
+    description:
+      - If I(backtrack_to) is set to a timestamp earlier than the earliest backtrack time, this value backtracks the DB cluster to
+        the earliest possible backtrack time. Otherwise, an error occurs.
+    type: bool
+  use_latest_restorable_time:
+    description:
+      - Whether to restore the DB cluster to the latest restorable backup time. Only one of I(use_latest_restorable_time)
+        and I(restore_to_time) may be provided.
+    type: bool
+  vpc_security_group_ids:
+    description:
+      - A list of EC2 VPC security groups to associate with the DB cluster.
+    type: list
+    elements: str
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+- name: Create minimal aurora cluster in default VPC and default subnet group
+  amazon.aws.rds_cluster:
+    cluster_id: "{{ cluster_id }}"
+    engine: "aurora"
+    password: "{{ password }}"
+    username: "{{ username }}"
+
+- name: Add a new security group without purge
+  amazon.aws.rds_cluster:
+    id: "{{ cluster_id }}"
+    state: present
+    vpc_security_group_ids:
+      - sg-0be17ba10c9286b0b
+    purge_security_groups: false
+
+- name: Modify password
+  amazon.aws.rds_cluster:
+    id: "{{ cluster_id }}"
+    state: present
+    password: "{{ new_password }}"
+    force_update_password: true
+    apply_immediately: true
+
+- name: Rename the cluster
+  amazon.aws.rds_cluster:
+    engine: aurora
+    password: "{{ password }}"
+    username: "{{ username }}"
+    cluster_id: "cluster-{{ resource_prefix }}"
+    new_cluster_id: "cluster-{{ resource_prefix }}-renamed"
+    apply_immediately: true
+
+- name: Delete aurora cluster without creating a final snapshot
+  amazon.aws.rds_cluster:
+    engine: aurora
+    password: "{{ password }}"
+    username: "{{ username }}"
+    cluster_id: "{{ cluster_id }}"
+    skip_final_snapshot: True
+    tags:
+      Name: "cluster-{{ resource_prefix }}"
+      Created_By: "Ansible_rds_cluster_integration_test"
+    state: absent
+
+- name: Restore cluster from source snapshot
+  amazon.aws.rds_cluster:
+    engine: aurora
+    password: "{{ password }}"
+    username: "{{ username }}"
+    cluster_id: "cluster-{{ resource_prefix }}-restored"
+    snapshot_identifier: "cluster-{{ resource_prefix }}-snapshot"
+
+- name: Create an Aurora PostgreSQL cluster and attach an instance
+  amazon.aws.rds_cluster:
+    state: present
+    engine: aurora-postgresql
+    engine_mode: provisioned
+    cluster_id: '{{ cluster_id }}'
+    username: '{{ username }}'
+    password: '{{ password }}'
+
+- name: Attach a new instance to the cluster
+  amazon.aws.rds_instance:
+    id: '{{ instance_id }}'
+    cluster_id: '{{ cluster_id }}'
+    engine: aurora-postgresql
+    state: present
+    db_instance_class: 'db.t3.medium'
+'''
+
+RETURN = r'''
+activity_stream_status:
+  description: The status of the database activity stream.
+  returned: always
+  type: str
+  sample: stopped
+allocated_storage:
+  description:
+    - The allocated storage size in gigabytes. Since Aurora storage size is not fixed, this is
+      always 1 for Aurora database engines.
+ returned: always + type: int + sample: 1 +associated_roles: + description: + - A list of dictionaries of the AWS Identity and Access Management (IAM) roles that are associated + with the DB cluster. Each dictionary contains the role_arn and the status of the role. + returned: always + type: list + sample: [] +availability_zones: + description: The list of availability zones that instances in the DB cluster can be created in. + returned: always + type: list + sample: + - us-east-1c + - us-east-1a + - us-east-1e +backup_retention_period: + description: The number of days for which automatic DB snapshots are retained. + returned: always + type: int + sample: 1 +changed: + description: If the RDS cluster has changed. + returned: always + type: bool + sample: true +cluster_create_time: + description: The time in UTC when the DB cluster was created. + returned: always + type: str + sample: '2018-06-29T14:08:58.491000+00:00' +copy_tags_to_snapshot: + description: + - Specifies whether tags are copied from the DB cluster to snapshots of the DB cluster. + returned: always + type: bool + sample: false +cross_account_clone: + description: + - Specifies whether the DB cluster is a clone of a DB cluster owned by a different Amazon Web Services account. + returned: always + type: bool + sample: false +db_cluster_arn: + description: The Amazon Resource Name (ARN) for the DB cluster. + returned: always + type: str + sample: arn:aws:rds:us-east-1:123456789012:cluster:rds-cluster-demo +db_cluster_identifier: + description: The lowercase user-supplied DB cluster identifier. + returned: always + type: str + sample: rds-cluster-demo +db_cluster_members: + description: + - A list of dictionaries containing information about the instances in the cluster. + Each dictionary contains the db_instance_identifier, is_cluster_writer (bool), + db_cluster_parameter_group_status, and promotion_tier (int). + returned: always + type: list + sample: [] +db_cluster_parameter_group: + description: The parameter group associated with the DB cluster. + returned: always + type: str + sample: default.aurora5.6 +db_cluster_resource_id: + description: The AWS Region-unique, immutable identifier for the DB cluster. + returned: always + type: str + sample: cluster-D2MEQDN3BQNXDF74K6DQJTHASU +db_subnet_group: + description: The name of the subnet group associated with the DB Cluster. + returned: always + type: str + sample: default +deletion_protection: + description: + - Indicates if the DB cluster has deletion protection enabled. + The database can't be deleted when deletion protection is enabled. + returned: always + type: bool + sample: false +domain_memberships: + description: + - The Active Directory Domain membership records associated with the DB cluster. + returned: always + type: list + sample: [] +earliest_restorable_time: + description: The earliest time to which a database can be restored with point-in-time restore. + returned: always + type: str + sample: '2018-06-29T14:09:34.797000+00:00' +endpoint: + description: The connection endpoint for the primary instance of the DB cluster. + returned: always + type: str + sample: rds-cluster-demo.cluster-cvlrtwiennww.us-east-1.rds.amazonaws.com +engine: + description: The database engine of the DB cluster. + returned: always + type: str + sample: aurora +engine_mode: + description: The DB engine mode of the DB cluster. + returned: always + type: str + sample: provisioned +engine_version: + description: The database engine version. 
+  returned: always
+  type: str
+  sample: 5.6.10a
+hosted_zone_id:
+  description: The ID that Amazon Route 53 assigns when you create a hosted zone.
+  returned: always
+  type: str
+  sample: Z2R2ITUGPM61AM
+http_endpoint_enabled:
+  description:
+    - A value that indicates whether the HTTP endpoint for an Aurora Serverless DB cluster is enabled.
+  returned: always
+  type: bool
+  sample: false
+iam_database_authentication_enabled:
+  description: Whether IAM accounts may be mapped to database accounts.
+  returned: always
+  type: bool
+  sample: false
+latest_restorable_time:
+  description: The latest time to which a database can be restored with point-in-time restore.
+  returned: always
+  type: str
+  sample: '2018-06-29T14:09:34.797000+00:00'
+master_username:
+  description: The master username for the DB cluster.
+  returned: always
+  type: str
+  sample: username
+multi_az:
+  description: Whether the DB cluster has instances in multiple availability zones.
+  returned: always
+  type: bool
+  sample: false
+port:
+  description: The port that the database engine is listening on.
+  returned: always
+  type: int
+  sample: 3306
+preferred_backup_window:
+  description: The daily time range (in UTC) during which automated backups are created.
+  returned: always
+  type: str
+  sample: 10:18-10:48
+preferred_maintenance_window:
+  description: The UTC weekly time range during which system maintenance can occur.
+  returned: always
+  type: str
+  sample: tue:03:23-tue:03:53
+read_replica_identifiers:
+  description: A list of read replica ID strings associated with the DB cluster.
+  returned: always
+  type: list
+  sample: []
+reader_endpoint:
+  description: The reader endpoint for the DB cluster.
+  returned: always
+  type: str
+  sample: rds-cluster-demo.cluster-ro-cvlrtwiennww.us-east-1.rds.amazonaws.com
+status:
+  description: The status of the DB cluster.
+  returned: always
+  type: str
+  sample: available
+storage_encrypted:
+  description: Whether the DB cluster is storage encrypted.
+  returned: always
+  type: bool
+  sample: false
+tag_list:
+  description: A list of tags consisting of key-value pairs.
+  returned: always
+  type: list
+  elements: dict
+  sample: [
+    {
+      "key": "Created_By",
+      "value": "Ansible_rds_cluster_integration_test"
+    }
+  ]
+tags:
+  description: A dictionary of key value pairs.
+  returned: always
+  type: dict
+  sample: {
+    "Name": "rds-cluster-demo"
+  }
+vpc_security_groups:
+  description: A list of the DB cluster's security groups and their status.
+  returned: always
+  type: complex
+  contains:
+    status:
+      description: Status of the security group.
+      returned: always
+      type: str
+      sample: active
+    vpc_security_group_id:
+      description: Security group of the cluster.
+      returned: always
+      type: str
+      sample: sg-12345678
+'''
+
+
+try:
+    import botocore
+except ImportError:
+    pass  # caught by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.rds import wait_for_cluster_status
+from ansible_collections.amazon.aws.plugins.module_utils.rds import arg_spec_to_rds_params
+from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags
+from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags
+from ansible_collections.amazon.aws.plugins.module_utils.rds import call_method
+
+
+@AWSRetry.jittered_backoff(retries=10)
+def _describe_db_clusters(**params):
+    try:
+        paginator = client.get_paginator('describe_db_clusters')
+        return paginator.paginate(**params).build_full_result()['DBClusters'][0]
+    except is_boto3_error_code('DBClusterNotFoundFault'):
+        return {}
+
+
+def get_add_role_options(params_dict, cluster):
+    current_role_arns = [role['RoleArn'] for role in cluster.get('AssociatedRoles', [])]
+    role = params_dict['RoleArn']
+    if role is not None and role not in current_role_arns:
+        return {'RoleArn': role, 'DBClusterIdentifier': params_dict['DBClusterIdentifier']}
+    return {}
+
+
+def get_backtrack_options(params_dict):
+    options = ['BacktrackTo', 'DBClusterIdentifier', 'UseEarliestTimeOnPointInTimeUnavailable']
+    if params_dict['BacktrackTo'] is not None:
+        options = dict((k, params_dict[k]) for k in options if params_dict[k] is not None)
+        if 'ForceBacktrack' in params_dict:
+            options['Force'] = params_dict['ForceBacktrack']
+        return options
+    return {}
+
+
+def get_create_options(params_dict):
+    options = [
+        "AvailabilityZones",
+        "BacktrackWindow",
+        "BackupRetentionPeriod",
+        "PreferredBackupWindow",
+        "CharacterSetName",
+        "DBClusterIdentifier",
+        "DBClusterParameterGroupName",
+        "DBSubnetGroupName",
+        "DatabaseName",
+        "EnableCloudwatchLogsExports",
+        "EnableIAMDatabaseAuthentication",
+        "KmsKeyId",
+        "Engine",
+        "EngineMode",
+        "EngineVersion",
+        "PreferredMaintenanceWindow",
+        "MasterUserPassword",
+        "MasterUsername",
+        "OptionGroupName",
+        "Port",
+        "ReplicationSourceIdentifier",
+        "SourceRegion",
+        "StorageEncrypted",
+        "Tags",
+        "VpcSecurityGroupIds",
+        "ScalingConfiguration",
+        "DeletionProtection",
+        "EnableHttpEndpoint",
+        "CopyTagsToSnapshot",
+        "Domain",
+        "DomainIAMRoleName",
+        "EnableGlobalWriteForwarding",
+    ]
+
+    return dict((k, v) for k, v in params_dict.items() if k in options and v is not None)
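+
+# Illustrative note (editorial sketch, not part of the module): the get_*_options
+# helpers are plain filters over the camelized parameter dict, keeping only the
+# keys the corresponding boto3 call accepts and dropping None values, e.g.
+#
+#   get_create_options({'Engine': 'aurora', 'Port': None, 'SkipFinalSnapshot': True})
+#   # -> {'Engine': 'aurora'}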
dict((k, v) for k, v in params_dict.items() if k in options and v is not None) + if not force_update_password: + modify_options.pop('MasterUserPassword', None) + return modify_options + + +def get_delete_options(params_dict): + options = ['DBClusterIdentifier', 'FinalSnapshotIdentifier', 'SkipFinalSnapshot'] + return dict((k, params_dict[k]) for k in options if params_dict[k] is not None) + + +def get_restore_s3_options(params_dict): + options = [ + 'AvailabilityZones', 'BacktrackWindow', 'BackupRetentionPeriod', 'CharacterSetName', + 'DBClusterIdentifier', 'DBClusterParameterGroupName', 'DBSubnetGroupName', 'DatabaseName', + 'EnableCloudwatchLogsExports', 'EnableIAMDatabaseAuthentication', 'Engine', 'EngineVersion', + 'KmsKeyId', 'MasterUserPassword', 'MasterUsername', 'OptionGroupName', 'Port', + 'PreferredBackupWindow', 'PreferredMaintenanceWindow', 'S3BucketName', 'S3IngestionRoleArn', + 'S3Prefix', 'SourceEngine', 'SourceEngineVersion', 'StorageEncrypted', 'Tags', + 'VpcSecurityGroupIds', 'DeletionProtection', 'EnableHttpEndpoint', 'CopyTagsToSnapshot', + 'Domain', 'DomainIAMRoleName', + ] + + return dict((k, v) for k, v in params_dict.items() if k in options and v is not None) + + +def get_restore_snapshot_options(params_dict): + options = [ + 'AvailabilityZones', 'BacktrackWindow', 'DBClusterIdentifier', 'DBSubnetGroupName', + 'DatabaseName', 'EnableCloudwatchLogsExports', 'EnableIAMDatabaseAuthentication', + 'Engine', 'EngineVersion', 'KmsKeyId', 'OptionGroupName', 'Port', 'SnapshotIdentifier', + 'Tags', 'VpcSecurityGroupIds', 'DBClusterParameterGroupName', 'DeletionProtection', + 'CopyTagsToSnapshot', 'Domain', 'DomainIAMRoleName', + ] + return dict((k, v) for k, v in params_dict.items() if k in options and v is not None) + + +def get_restore_cluster_options(params_dict): + options = [ + 'BacktrackWindow', 'DBClusterIdentifier', 'DBSubnetGroupName', 'EnableCloudwatchLogsExports', + 'EnableIAMDatabaseAuthentication', 'KmsKeyId', 'OptionGroupName', 'Port', 'RestoreToTime', + 'RestoreType', 'SourceDBClusterIdentifier', 'Tags', 'UseLatestRestorableTime', + 'VpcSecurityGroupIds', 'DeletionProtection', 'CopyTagsToSnapshot', 'Domain', + 'DomainIAMRoleName', + ] + return dict((k, v) for k, v in params_dict.items() if k in options and v is not None) + + +def get_rds_method_attribute_name(cluster): + state = module.params['state'] + creation_source = module.params['creation_source'] + method_name = None + method_options_name = None + + if state == 'absent': + if cluster and cluster['Status'] not in ['deleting', 'deleted']: + method_name = 'delete_db_cluster' + method_options_name = 'get_delete_options' + else: + if cluster: + method_name = 'modify_db_cluster' + method_options_name = 'get_modify_options' + elif creation_source == 'snapshot': + method_name = 'restore_db_cluster_from_snapshot' + method_options_name = 'get_restore_snapshot_options' + elif creation_source == 's3': + method_name = 'restore_db_cluster_from_s3' + method_options_name = 'get_restore_s3_options' + elif creation_source == 'cluster': + method_name = 'restore_db_cluster_to_point_in_time' + method_options_name = 'get_restore_cluster_options' + else: + method_name = 'create_db_cluster' + method_options_name = 'get_create_options' + + return method_name, method_options_name + + +def add_role(params): + if not module.check_mode: + try: + client.add_role_to_db_cluster(**params) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg=f"Unable to add role 
{params['RoleArn']} to cluster {params['DBClusterIdentifier']}") + wait_for_cluster_status(client, module, params['DBClusterIdentifier'], 'cluster_available') + + +def backtrack_cluster(params): + if not module.check_mode: + try: + client.backtrack_db_cluster(**params) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg=f"Unable to backtrack cluster {params['DBClusterIdentifier']}") + wait_for_cluster_status(client, module, params['DBClusterIdentifier'], 'cluster_available') + + +def get_cluster(db_cluster_id): + try: + return _describe_db_clusters(DBClusterIdentifier=db_cluster_id) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Failed to describe DB clusters") + + +def changing_cluster_options(modify_params, current_cluster): + changing_params = {} + apply_immediately = modify_params.pop('ApplyImmediately') + db_cluster_id = modify_params.pop('DBClusterIdentifier') + + enable_cloudwatch_logs_export = modify_params.pop('EnableCloudwatchLogsExports', None) + if enable_cloudwatch_logs_export is not None: + desired_cloudwatch_logs_configuration = {'EnableLogTypes': [], 'DisableLogTypes': []} + provided_cloudwatch_logs = set(enable_cloudwatch_logs_export) + current_cloudwatch_logs_export = set(current_cluster['EnabledCloudwatchLogsExports']) + + desired_cloudwatch_logs_configuration['EnableLogTypes'] = list(provided_cloudwatch_logs.difference(current_cloudwatch_logs_export)) + if module.params['purge_cloudwatch_logs_exports']: + desired_cloudwatch_logs_configuration['DisableLogTypes'] = list(current_cloudwatch_logs_export.difference(provided_cloudwatch_logs)) + changing_params['CloudwatchLogsExportConfiguration'] = desired_cloudwatch_logs_configuration + + password = modify_params.pop('MasterUserPassword', None) + if password: + changing_params['MasterUserPassword'] = password + + new_cluster_id = modify_params.pop('NewDBClusterIdentifier', None) + if new_cluster_id and new_cluster_id != current_cluster['DBClusterIdentifier']: + changing_params['NewDBClusterIdentifier'] = new_cluster_id + + option_group = modify_params.pop('OptionGroupName', None) + if ( + option_group and option_group not in [g['DBClusterOptionGroupName'] for g in current_cluster['DBClusterOptionGroupMemberships']] + ): + changing_params['OptionGroupName'] = option_group + + vpc_sgs = modify_params.pop('VpcSecurityGroupIds', None) + if vpc_sgs: + desired_vpc_sgs = [] + provided_vpc_sgs = set(vpc_sgs) + current_vpc_sgs = set([sg['VpcSecurityGroupId'] for sg in current_cluster['VpcSecurityGroups']]) + if module.params['purge_security_groups']: + desired_vpc_sgs = vpc_sgs + else: + if provided_vpc_sgs - current_vpc_sgs: + desired_vpc_sgs = list(provided_vpc_sgs | current_vpc_sgs) + + if desired_vpc_sgs: + changing_params['VpcSecurityGroupIds'] = desired_vpc_sgs + + desired_db_cluster_parameter_group = modify_params.pop("DBClusterParameterGroupName", None) + if desired_db_cluster_parameter_group: + if desired_db_cluster_parameter_group != current_cluster["DBClusterParameterGroup"]: + changing_params["DBClusterParameterGroupName"] = desired_db_cluster_parameter_group + + for param in modify_params: + if modify_params[param] != current_cluster[param]: + changing_params[param] = modify_params[param] + + if changing_params: + changing_params['DBClusterIdentifier'] = db_cluster_id + if apply_immediately is not None: + changing_params['ApplyImmediately'] = apply_immediately + + return changing_params + + 
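+# Illustrative sketch (editor's note; the values below are hypothetical, not part of
+# the module's API): changing_cluster_options() diffs the desired modify parameters
+# against the current cluster description and keeps only genuine changes, so that
+# modify_db_cluster is never called with a no-op payload. For example:
+#
+#   current = {
+#       'DBClusterIdentifier': 'demo',
+#       'Port': 3306,
+#       'DBClusterParameterGroup': 'default.aurora5.6',
+#   }
+#   desired = {'ApplyImmediately': True, 'DBClusterIdentifier': 'demo', 'Port': 3307}
+#   changing_cluster_options(desired, current)
+#   # -> {'Port': 3307, 'DBClusterIdentifier': 'demo', 'ApplyImmediately': True}
+#
+# DBClusterIdentifier and ApplyImmediately are re-attached only when at least one
+# other option actually changed, which is what keeps the module idempotent.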
+def ensure_present(cluster, parameters, method_name, method_options_name): + changed = False + + if not cluster: + if parameters.get('Tags') is not None: + parameters['Tags'] = ansible_dict_to_boto3_tag_list(parameters['Tags']) + call_method(client, module, method_name, eval(method_options_name)(parameters)) + changed = True + else: + if get_backtrack_options(parameters): + backtrack_cluster(get_backtrack_options(parameters)) + changed = True + else: + modifiable_options = eval(method_options_name)(parameters, + force_update_password=module.params['force_update_password']) + modify_options = changing_cluster_options(modifiable_options, cluster) + if modify_options: + call_method(client, module, method_name, modify_options) + changed = True + if module.params['tags'] is not None: + existing_tags = get_tags(client, module, cluster['DBClusterArn']) + changed |= ensure_tags(client, module, cluster['DBClusterArn'], existing_tags, module.params['tags'], + module.params['purge_tags']) + + add_role_params = get_add_role_options(parameters, cluster) + if add_role_params: + add_role(add_role_params) + changed = True + + if module.params['promote'] and cluster.get('ReplicationSourceIdentifier'): + call_method(client, module, 'promote_read_replica_db_cluster', parameters={'DBClusterIdentifier': module.params['db_cluster_identifier']}) + changed = True + + return changed + + +def main(): + global module + global client + + arg_spec = dict( + state=dict(choices=['present', 'absent'], default='present'), + creation_source=dict(type='str', choices=['snapshot', 's3', 'cluster']), + force_update_password=dict(type='bool', default=False), + promote=dict(type='bool', default=False), + purge_cloudwatch_logs_exports=dict(type='bool', default=True), + purge_tags=dict(type='bool', default=True), + wait=dict(type='bool', default=True), + purge_security_groups=dict(type='bool', default=True), + ) + + parameter_options = dict( + apply_immediately=dict(type='bool', default=False), + availability_zones=dict(type='list', elements='str', aliases=['zones', 'az']), + backtrack_to=dict(), + backtrack_window=dict(type='int'), + backup_retention_period=dict(type='int', default=1), + character_set_name=dict(), + database_name=dict(aliases=['db_name']), + db_cluster_identifier=dict(required=True, aliases=['cluster_id', 'id', 'cluster_name']), + db_cluster_parameter_group_name=dict(), + db_subnet_group_name=dict(), + enable_cloudwatch_logs_exports=dict(type='list', elements='str'), + deletion_protection=dict(type='bool'), + global_cluster_identifier=dict(), + enable_http_endpoint=dict(type='bool'), + copy_tags_to_snapshot=dict(type='bool'), + domain=dict(), + domain_iam_role_name=dict(), + enable_global_write_forwarding=dict(type='bool'), + db_cluster_instance_class=dict(type="str"), + enable_iam_database_authentication=dict(type='bool'), + engine=dict(choices=["aurora", "aurora-mysql", "aurora-postgresql", "mysql", "postgres"]), + engine_mode=dict(choices=["provisioned", "serverless", "parallelquery", "global", "multimaster"]), + engine_version=dict(), + allocated_storage=dict(type="int"), + storage_type=dict(type="str", choices=["io1"]), + iops=dict(type="int"), + final_snapshot_identifier=dict(), + force_backtrack=dict(type='bool'), + kms_key_id=dict(), + master_user_password=dict(aliases=['password'], no_log=True), + master_username=dict(aliases=['username']), + new_db_cluster_identifier=dict(aliases=['new_cluster_id', 'new_id', 'new_cluster_name']), + option_group_name=dict(), +
port=dict(type='int'), + preferred_backup_window=dict(aliases=['backup_window']), + preferred_maintenance_window=dict(aliases=['maintenance_window']), + replication_source_identifier=dict(aliases=['replication_src_id']), + restore_to_time=dict(), + restore_type=dict(choices=['full-copy', 'copy-on-write']), + role_arn=dict(), + s3_bucket_name=dict(), + s3_ingestion_role_arn=dict(), + s3_prefix=dict(), + skip_final_snapshot=dict(type='bool', default=False), + snapshot_identifier=dict(), + source_db_cluster_identifier=dict(), + source_engine=dict(choices=['mysql']), + source_engine_version=dict(), + source_region=dict(), + storage_encrypted=dict(type='bool'), + tags=dict(type='dict', aliases=['resource_tags']), + use_earliest_time_on_point_in_time_unavailable=dict(type='bool'), + use_latest_restorable_time=dict(type='bool'), + vpc_security_group_ids=dict(type='list', elements='str'), + ) + arg_spec.update(parameter_options) + + module = AnsibleAWSModule( + argument_spec=arg_spec, + required_if=[ + ('creation_source', 'snapshot', ('snapshot_identifier', 'engine')), + ('creation_source', 's3', ( + 's3_bucket_name', 'engine', 'master_username', 'master_user_password', + 'source_engine', 'source_engine_version', 's3_ingestion_role_arn')), + ], + mutually_exclusive=[ + ('s3_bucket_name', 'source_db_cluster_identifier', 'snapshot_identifier'), + ('use_latest_restorable_time', 'restore_to_time'), + ], + supports_check_mode=True, + ) + + retry_decorator = AWSRetry.jittered_backoff(retries=10) + + try: + client = module.client('rds', retry_decorator=retry_decorator) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS.') + + if module.params.get("engine") and module.params["engine"] in ("mysql", "postgres"): + module.require_botocore_at_least("1.23.44", reason="to use mysql and postgres engines") + if module.params["state"] == "present": + if not ( + module.params.get("allocated_storage") + and module.params.get("iops") + and module.params.get("db_cluster_instance_class") + ): + module.fail_json( + f"When engine={module.params['engine']}, allocated_storage, iops and db_cluster_instance_class must be specified" + ) + else: + # Fall back to the default value + if not module.params.get("storage_type"): + module.params["storage_type"] = "io1" + + module.params['db_cluster_identifier'] = module.params['db_cluster_identifier'].lower() + cluster = get_cluster(module.params['db_cluster_identifier']) + + if module.params['new_db_cluster_identifier']: + module.params['new_db_cluster_identifier'] = module.params['new_db_cluster_identifier'].lower() + + if get_cluster(module.params['new_db_cluster_identifier']): + module.fail_json(f"A new cluster ID {module.params['new_db_cluster_identifier']} was provided but it already exists") + if not cluster: + module.fail_json(f"A new cluster ID {module.params['new_db_cluster_identifier']} was provided but the cluster to be renamed does not exist") + + if ( + module.params['state'] == 'absent' and module.params['skip_final_snapshot'] is False and + module.params['final_snapshot_identifier'] is None + ): + module.fail_json(msg='skip_final_snapshot is False but all of the following are missing: final_snapshot_identifier') + + parameters = arg_spec_to_rds_params(dict((k, module.params[k]) for k in module.params if k in parameter_options)) + + changed = False + method_name, method_options_name = get_rds_method_attribute_name(cluster) + + if method_name: + if method_name == 'delete_db_cluster': +
call_method(client, module, method_name, eval(method_options_name)(parameters)) + changed = True + else: + changed |= ensure_present(cluster, parameters, method_name, method_options_name) + + if not module.check_mode and module.params['new_db_cluster_identifier'] and module.params['apply_immediately']: + cluster_id = module.params['new_db_cluster_identifier'] + else: + cluster_id = module.params['db_cluster_identifier'] + + result = camel_dict_to_snake_dict(get_cluster(cluster_id)) + + if result: + result['tags'] = get_tags(client, module, result['db_cluster_arn']) + + module.exit_json(changed=changed, **result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_cluster_info.py b/ansible_collections/amazon/aws/plugins/modules/rds_cluster_info.py new file mode 100644 index 000000000..3135a4ce9 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/rds_cluster_info.py @@ -0,0 +1,309 @@ +#!/usr/bin/python +# Copyright (c) 2022 Ansible Project +# Copyright (c) 2022 Alina Buzachis (@alinabuzachis) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +module: rds_cluster_info +version_added: 5.0.0 +short_description: Obtain information about one or more RDS clusters +description: + - Obtain information about one or more RDS clusters. + - This module was originally added to C(community.aws) in release 3.2.0. +options: + db_cluster_identifier: + description: + - The user-supplied DB cluster identifier. + - If this parameter is specified, information from only the specific DB cluster is returned. + aliases: + - cluster_id + - id + - cluster_name + type: str + filters: + description: + - A filter that specifies one or more DB clusters to describe. + See U(https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBClusters.html). + type: dict +author: + - Alina Buzachis (@alinabuzachis) +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.boto3 + +''' + +EXAMPLES = r''' +- name: Get info of all existing DB clusters + amazon.aws.rds_cluster_info: + register: _result_cluster_info + +- name: Get info on a specific DB cluster + amazon.aws.rds_cluster_info: + cluster_id: "{{ cluster_id }}" + register: _result_cluster_info + +- name: Get info on all DB clusters with a specific engine + amazon.aws.rds_cluster_info: + filters: + engine: "aurora" + register: _result_cluster_info +''' + +RETURN = r''' +clusters: + description: List of RDS clusters. + returned: always + type: list + contains: + activity_stream_status: + description: The status of the database activity stream. + type: str + sample: stopped + allocated_storage: + description: + - The allocated storage size in gigabytes. Since aurora storage size is not fixed, this is + always 1 for aurora database engines. + type: int + sample: 1 + associated_roles: + description: + - A list of dictionaries of the AWS Identity and Access Management (IAM) roles that are associated + with the DB cluster. Each dictionary contains the role_arn and the status of the role. + type: list + sample: [] + availability_zones: + description: The list of availability zones that instances in the DB cluster can be created in. + type: list + sample: + - us-east-1c + - us-east-1a + - us-east-1e + backup_retention_period: + description: The number of days for which automatic DB snapshots are retained.
+ type: int + sample: 1 + cluster_create_time: + description: The time in UTC when the DB cluster was created. + type: str + sample: '2018-06-29T14:08:58.491000+00:00' + copy_tags_to_snapshot: + description: + - Specifies whether tags are copied from the DB cluster to snapshots of the DB cluster. + type: bool + sample: false + cross_account_clone: + description: + - Specifies whether the DB cluster is a clone of a DB cluster owned by a different Amazon Web Services account. + type: bool + sample: false + db_cluster_arn: + description: The Amazon Resource Name (ARN) for the DB cluster. + type: str + sample: arn:aws:rds:us-east-1:123456789012:cluster:rds-cluster-demo + db_cluster_identifier: + description: The lowercase user-supplied DB cluster identifier. + type: str + sample: rds-cluster-demo + db_cluster_members: + description: + - A list of dictionaries containing information about the instances in the cluster. + Each dictionary contains the I(db_instance_identifier), I(is_cluster_writer) (bool), + I(db_cluster_parameter_group_status), and I(promotion_tier) (int). + type: list + sample: [] + db_cluster_parameter_group: + description: The parameter group associated with the DB cluster. + type: str + sample: default.aurora5.6 + db_cluster_resource_id: + description: The AWS Region-unique, immutable identifier for the DB cluster. + type: str + sample: cluster-D2MEQDN3BQNXDF74K6DQJTHASU + db_subnet_group: + description: The name of the subnet group associated with the DB Cluster. + type: str + sample: default + deletion_protection: + description: + - Indicates if the DB cluster has deletion protection enabled. + The database can't be deleted when deletion protection is enabled. + type: bool + sample: false + domain_memberships: + description: + - The Active Directory Domain membership records associated with the DB cluster. + type: list + sample: [] + earliest_restorable_time: + description: The earliest time to which a database can be restored with point-in-time restore. + type: str + sample: '2018-06-29T14:09:34.797000+00:00' + endpoint: + description: The connection endpoint for the primary instance of the DB cluster. + type: str + sample: rds-cluster-demo.cluster-cvlrtwiennww.us-east-1.rds.amazonaws.com + engine: + description: The database engine of the DB cluster. + type: str + sample: aurora + engine_mode: + description: The DB engine mode of the DB cluster. + type: str + sample: provisioned + engine_version: + description: The database engine version. + type: str + sample: 5.6.10a + hosted_zone_id: + description: The ID that Amazon Route 53 assigns when you create a hosted zone. + type: str + sample: Z2R2ITUGPM61AM + http_endpoint_enabled: + description: + - A value that indicates whether the HTTP endpoint for an Aurora Serverless DB cluster is enabled. + type: bool + sample: false + iam_database_authentication_enabled: + description: Whether IAM accounts may be mapped to database accounts. + type: bool + sample: false + latest_restorable_time: + description: The latest time to which a database can be restored with point-in-time restore. + type: str + sample: '2018-06-29T14:09:34.797000+00:00' + master_username: + description: The master username for the DB cluster. + type: str + sample: username + multi_az: + description: Whether the DB cluster has instances in multiple availability zones. + type: bool + sample: false + port: + description: The port that the database engine is listening on. 
+ type: int + sample: 3306 + preferred_backup_window: + description: The daily time range (in UTC) during which automated backups are created if automated backups are enabled. + type: str + sample: 10:18-10:48 + preferred_maintenance_window: + description: The UTC weekly time range during which system maintenance can occur. + type: str + sample: tue:03:23-tue:03:53 + read_replica_identifiers: + description: A list of read replica ID strings associated with the DB cluster. + type: list + sample: [] + reader_endpoint: + description: The reader endpoint for the DB cluster. + type: str + sample: rds-cluster-demo.cluster-ro-cvlrtwiennww.us-east-1.rds.amazonaws.com + status: + description: The status of the DB cluster. + type: str + sample: available + storage_encrypted: + description: Whether the DB cluster is storage encrypted. + type: bool + sample: false + tag_list: + description: A list of tags consisting of key-value pairs. + type: list + elements: dict + sample: [ + { + "key": "Created_By", + "value": "Ansible_rds_cluster_integration_test" + } + ] + tags: + description: A dictionary of key value pairs. + type: dict + sample: { + "Name": "rds-cluster-demo" + } + vpc_security_groups: + description: A list of the DB cluster's security groups and their status. + type: complex + contains: + status: + description: Status of the security group. + type: str + sample: active + vpc_security_group_id: + description: Security group of the cluster. + type: str + sample: sg-12345678 +''' + + +try: + import botocore +except ImportError: + pass # handled by AnsibleAWSModule + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags + + +@AWSRetry.jittered_backoff(retries=10) +def _describe_db_clusters(client, **params): + try: + paginator = client.get_paginator('describe_db_clusters') + return paginator.paginate(**params).build_full_result()['DBClusters'] + except is_boto3_error_code('DBClusterNotFoundFault'): + return [] + + +def cluster_info(client, module): + cluster_id = module.params.get('db_cluster_identifier') + filters = module.params.get('filters') + + params = dict() + if cluster_id: + params['DBClusterIdentifier'] = cluster_id + if filters: + params['Filters'] = ansible_dict_to_boto3_filter_list(filters) + + try: + result = _describe_db_clusters(client, **params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, "Couldn't get RDS cluster information.") + + for cluster in result: + cluster['Tags'] = get_tags(client, module, cluster['DBClusterArn']) + + return dict(changed=False, clusters=[camel_dict_to_snake_dict(cluster, ignore_list=['Tags']) for cluster in result]) + + +def main(): + argument_spec = dict( + db_cluster_identifier=dict(aliases=['cluster_id', 'id', 'cluster_name']), + filters=dict(type='dict'), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + client = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e,
msg='Failed to connect to AWS.') + + module.exit_json(**cluster_info(client, module)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_cluster_snapshot.py b/ansible_collections/amazon/aws/plugins/modules/rds_cluster_snapshot.py new file mode 100644 index 000000000..ff712c438 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/rds_cluster_snapshot.py @@ -0,0 +1,374 @@ +#!/usr/bin/python +# Copyright (c) 2014 Ansible Project +# Copyright (c) 2021 Alina Buzachis (@alinabuzachis) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: rds_cluster_snapshot +version_added: 5.0.0 +short_description: Manage Amazon RDS snapshots of DB clusters +description: + - Create, modify and delete RDS snapshots of DB clusters. + - This module was originally added to C(community.aws) in release 4.0.0. +options: + state: + description: + - Specify the desired state of the snapshot. + default: present + choices: [ 'present', 'absent'] + type: str + db_cluster_snapshot_identifier: + description: + - The identifier of the DB cluster snapshot. + required: true + aliases: + - snapshot_id + - id + - snapshot_name + type: str + db_cluster_identifier: + description: + - The identifier of the DB cluster to create a snapshot for. + - Required when I(state=present). + aliases: + - cluster_id + - cluster_name + type: str + source_db_cluster_snapshot_identifier: + description: + - The identifier of the DB cluster snapshot to copy. + - If the source snapshot is in the same AWS region as the copy, specify the snapshot's identifier. + - If the source snapshot is in a different AWS region than the copy, specify the snapshot's ARN. + aliases: + - source_id + - source_snapshot_id + type: str + source_region: + description: + - The region that contains the snapshot to be copied. + type: str + copy_tags: + description: + - Whether to copy all tags from I(source_db_cluster_snapshot_identifier) to I(db_cluster_snapshot_identifier). + type: bool + default: False + wait: + description: + - Whether or not to wait for snapshot creation or deletion. + type: bool + default: false + wait_timeout: + description: + - How long before wait gives up, in seconds. + default: 300 + type: int +notes: + - Retrieving the information about a specific DB cluster snapshot or listing the DB cluster snapshots for a specific DB cluster + can be done using M(community.aws.rds_snapshot_info). +author: + - Alina Buzachis (@alinabuzachis) +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags + - amazon.aws.boto3 +''' + +EXAMPLES = r''' +- name: Create a DB cluster snapshot + amazon.aws.rds_cluster_snapshot: + db_cluster_identifier: "{{ cluster_id }}" + db_cluster_snapshot_identifier: new-cluster-snapshot + +- name: Delete a DB cluster snapshot + amazon.aws.rds_cluster_snapshot: + db_cluster_snapshot_identifier: new-cluster-snapshot + state: absent + +- name: Copy snapshot from a different region and copy its tags + amazon.aws.rds_cluster_snapshot: + id: new-database-snapshot-copy + region: us-east-1 + source_id: "{{ snapshot.db_snapshot_arn }}" + source_region: us-east-2 + copy_tags: true +''' + +RETURN = r''' +availability_zone: + description: Availability zone of the database from which the snapshot was created.
+ returned: always + type: str + sample: us-west-2a +db_cluster_snapshot_identifier: + description: Specifies the identifier for the DB cluster snapshot. + returned: always + type: str + sample: ansible-test-16638696-test-snapshot +db_cluster_identifier: + description: Specifies the DB cluster identifier of the DB cluster that this DB cluster snapshot was created from. + returned: always + type: str + sample: ansible-test-16638696 +snapshot_create_time: + description: Provides the time when the snapshot was taken, in Universal Coordinated Time (UTC). + returned: always + type: str + sample: '2019-06-15T10:46:23.776000+00:00' +engine: + description: Specifies the name of the database engine for this DB cluster snapshot. + returned: always + type: str + sample: "aurora" +engine_mode: + description: Provides the engine mode of the database engine for this DB cluster snapshot. + returned: always + type: str + sample: "provisioned" +allocated_storage: + description: Specifies the allocated storage size in gibibytes (GiB). + returned: always + type: int + sample: 20 +status: + description: Specifies the status of this DB cluster snapshot. + returned: always + type: str + sample: available +port: + description: Port on which the database is listening. + returned: always + type: int + sample: 3306 +vpc_id: + description: ID of the VPC in which the DB lives. + returned: always + type: str + sample: vpc-09ff232e222710ae0 +cluster_create_time: + description: Specifies the time when the DB cluster was created, in Universal Coordinated Time (UTC). + returned: always + type: str + sample: '2019-06-15T10:15:56.221000+00:00' +master_username: + description: Provides the master username for this DB cluster snapshot. + returned: always + type: str + sample: test +engine_version: + description: Version of the cluster from which the snapshot was created. + returned: always + type: str + sample: "5.6.mysql_aurora.1.22.5" +license_model: + description: Provides the license model information for this DB cluster snapshot. + returned: always + type: str + sample: general-public-license +snapshot_type: + description: How the snapshot was created (always manual for this module!). + returned: always + type: str + sample: manual +percent_progress: + description: Specifies the percentage of the estimated data that has been transferred. + returned: always + type: int + sample: 100 +storage_encrypted: + description: Specifies whether the DB cluster snapshot is encrypted. + returned: always + type: bool + sample: false +kms_key_id: + description: The Amazon Web Services KMS key identifier (the key ARN, key ID, alias ARN, or alias name for the KMS key). + returned: always + type: str +db_cluster_snapshot_arn: + description: Amazon Resource Name for the snapshot. + returned: always + type: str + sample: arn:aws:rds:us-west-2:123456789012:snapshot:ansible-test-16638696-test-snapshot +source_db_cluster_snapshot_arn: + description: If the DB cluster snapshot was copied from a source DB cluster snapshot, the ARN for the source DB cluster snapshot; otherwise, null. + returned: always + type: str + sample: null +iam_database_authentication_enabled: + description: Whether IAM database authentication is enabled. + returned: always + type: bool + sample: false +tag_list: + description: A list of tags. + returned: always + type: list + sample: [] +tags: + description: Tags applied to the snapshot.
+ returned: always + type: complex + contains: {} +''' + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags +from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags +from ansible_collections.amazon.aws.plugins.module_utils.rds import call_method +from ansible_collections.amazon.aws.plugins.module_utils.core import get_boto3_client_method_parameters +from ansible_collections.amazon.aws.plugins.module_utils.rds import get_rds_method_attribute +from ansible_collections.amazon.aws.plugins.module_utils.rds import arg_spec_to_rds_params + + +def get_snapshot(snapshot_id): + try: + snapshot = client.describe_db_cluster_snapshots(DBClusterSnapshotIdentifier=snapshot_id, aws_retry=True)["DBClusterSnapshots"][0] + snapshot["Tags"] = get_tags(client, module, snapshot["DBClusterSnapshotArn"]) + except is_boto3_error_code("DBClusterSnapshotNotFound"): + return {} + except is_boto3_error_code("DBClusterSnapshotNotFoundFault"): # pylint: disable=duplicate-except + return {} + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Couldn't get snapshot {0}".format(snapshot_id)) + return snapshot + + +def get_parameters(parameters, method_name): + if method_name == 'copy_db_cluster_snapshot': + parameters['TargetDBClusterSnapshotIdentifier'] = module.params['db_cluster_snapshot_identifier'] + + required_options = get_boto3_client_method_parameters(client, method_name, required=True) + if any(parameters.get(k) is None for k in required_options): + module.fail_json(msg='To {0}, the following parameters are required: {1}'.format( + get_rds_method_attribute(method_name, module).operation_description, required_options)) + options = get_boto3_client_method_parameters(client, method_name) + parameters = dict((k, v) for k, v in parameters.items() if k in options and v is not None) + + return parameters + + +def ensure_snapshot_absent(): + snapshot_name = module.params.get("db_cluster_snapshot_identifier") + params = {"DBClusterSnapshotIdentifier": snapshot_name} + changed = False + + snapshot = get_snapshot(snapshot_name) + if not snapshot: + module.exit_json(changed=changed) + elif snapshot and snapshot["Status"] != "deleting": + snapshot, changed = call_method(client, module, "delete_db_cluster_snapshot", params) + + module.exit_json(changed=changed) + + +def copy_snapshot(params): + changed = False + snapshot_id = module.params.get('db_cluster_snapshot_identifier') + snapshot = get_snapshot(snapshot_id) + + if not snapshot: + method_params = get_parameters(params, 'copy_db_cluster_snapshot') + if method_params.get('Tags'): + method_params['Tags'] = ansible_dict_to_boto3_tag_list(method_params['Tags']) + result, changed = call_method(client, module, 'copy_db_cluster_snapshot', method_params) + + return changed + + +def ensure_snapshot_present(params): + source_id = module.params.get('source_db_cluster_snapshot_identifier') + snapshot_name =
module.params.get("db_cluster_snapshot_identifier") + changed = False + + snapshot = get_snapshot(snapshot_name) + + # Copy snapshot + if source_id: + changed |= copy_snapshot(params) + + # Create snapshot + elif not snapshot: + changed |= create_snapshot(params) + + # Snapshot exists and we're not creating a copy - modify exising snapshot + else: + changed |= modify_snapshot() + + snapshot = get_snapshot(snapshot_name) + module.exit_json(changed=changed, **camel_dict_to_snake_dict(snapshot, ignore_list=['Tags'])) + + +def create_snapshot(params): + method_params = get_parameters(params, 'create_db_cluster_snapshot') + if method_params.get('Tags'): + method_params['Tags'] = ansible_dict_to_boto3_tag_list(method_params['Tags']) + snapshot, changed = call_method(client, module, 'create_db_cluster_snapshot', method_params) + + return changed + + +def modify_snapshot(): + # TODO - add other modifications aside from purely tags + changed = False + snapshot_id = module.params.get('db_cluster_snapshot_identifier') + snapshot = get_snapshot(snapshot_id) + + if module.params.get('tags'): + changed |= ensure_tags(client, module, snapshot['DBClusterSnapshotArn'], snapshot['Tags'], module.params['tags'], module.params['purge_tags']) + + return changed + + +def main(): + global client + global module + + argument_spec = dict( + state=dict(type='str', choices=['present', 'absent'], default='present'), + db_cluster_snapshot_identifier=dict(type='str', aliases=['id', 'snapshot_id', 'snapshot_name'], required=True), + db_cluster_identifier=dict(type='str', aliases=['cluster_id', 'cluster_name']), + source_db_cluster_snapshot_identifier=dict(type='str', aliases=['source_id', 'source_snapshot_id']), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='int', default=300), + tags=dict(type='dict', aliases=['resource_tags']), + purge_tags=dict(type='bool', default=True), + copy_tags=dict(type='bool', default=False), + source_region=dict(type='str'), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + retry_decorator = AWSRetry.jittered_backoff(retries=10) + try: + client = module.client('rds', retry_decorator=retry_decorator) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to connect to AWS.") + + state = module.params.get("state") + + if state == "absent": + ensure_snapshot_absent() + elif state == "present": + params = arg_spec_to_rds_params(dict((k, module.params[k]) for k in module.params if k in argument_spec)) + ensure_snapshot_present(params) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_instance.py b/ansible_collections/amazon/aws/plugins/modules/rds_instance.py new file mode 100644 index 000000000..f1eccea3b --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/rds_instance.py @@ -0,0 +1,1481 @@ +#!/usr/bin/python +# Copyright (c) 2018 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: rds_instance +version_added: 5.0.0 +short_description: Manage RDS instances +description: + - Create, modify, and delete RDS instances. + - This module was originally added to C(community.aws) in release 1.0.0. 
+extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags + - amazon.aws.boto3 +author: + - Sloane Hertel (@s-hertel) + +options: + # General module options + state: + description: + - Desired state of the RDS instance. + - I(state=rebooted) is not idempotent and will leave the DB instance in a running state + and start it prior to rebooting if it was stopped. I(state=present) will leave the DB instance in its current running/stopped state + (running if creating the DB instance). + - I(state=running) and I(state=started) are synonyms, as are I(state=rebooted) and I(state=restarted). + choices: ['present', 'absent', 'terminated', 'running', 'started', 'stopped', 'rebooted', 'restarted'] + default: 'present' + type: str + creation_source: + description: Which source to use if restoring from a template (an existing instance, S3 bucket, or snapshot). + choices: ['snapshot', 's3', 'instance'] + type: str + force_update_password: + description: + - Set to C(True) to update your instance password with I(master_user_password). Since comparing passwords to determine + if it needs to be updated is not possible, this is set to C(False) by default to allow idempotence. + type: bool + default: False + purge_cloudwatch_logs_exports: + description: Set to False to retain any enabled cloudwatch logs that aren't specified in the task and are associated with the instance. + type: bool + default: True + read_replica: + description: + - Set to C(False) to promote a read replica instance or C(True) to create one. When creating a read replica C(creation_source) should + be set to 'instance' or not provided. C(source_db_instance_identifier) must be provided with this option. + type: bool + wait: + description: + - Whether to wait for the instance to be available, stopped, or deleted. At a later time a I(wait_timeout) option may be added. + Following each API call to create/modify/delete the instance a waiter is used with a 60 second delay 30 times until the instance reaches + the expected state (available/stopped/deleted). The total task time may also be influenced by AWSRetry which helps stabilize if the + instance is in an invalid state to operate on to begin with (such as if you try to stop it when it is in the process of rebooting). + If setting this to False task retries and delays may make your playbook execution better handle timeouts for major modifications. + type: bool + default: True + + # Options that have a corresponding boto3 parameter + allocated_storage: + description: + - The amount of storage (in gibibytes) to allocate for the DB instance. + type: int + allow_major_version_upgrade: + description: + - Whether to allow major version upgrades. + type: bool + apply_immediately: + description: + - A value that specifies whether modifying an instance with I(new_db_instance_identifier) and I(master_user_password) + should be applied as soon as possible, regardless of the I(preferred_maintenance_window) setting. If false, changes + are applied during the next maintenance window. + type: bool + default: False + auto_minor_version_upgrade: + description: + - Whether minor version upgrades are applied automatically to the DB instance during the maintenance window. + type: bool + availability_zone: + description: + - The EC2 Availability Zone that the DB instance is created in. + May be used when creating an instance or when restoring from S3 or a snapshot. Mutually exclusive with I(multi_az).
+ aliases: + - az + - zone + type: str + backup_retention_period: + description: + - The number of days for which automated backups are retained. + - When set to C(0), automated backups will be disabled. (Not applicable if the DB instance is a source to read replicas) + - May be used when creating a new instance, when restoring from S3, or when modifying an instance. + type: int + ca_certificate_identifier: + description: + - The identifier of the CA certificate for the DB instance. + type: str + character_set_name: + description: + - The character set to associate with the DB instance. + type: str + copy_tags_to_snapshot: + description: + - Whether or not to copy all tags from the DB instance to snapshots of the instance. When initially creating + a DB instance the RDS API defaults this to false if unspecified. + type: bool + db_cluster_identifier: + description: + - The DB cluster (lowercase) identifier to add the aurora DB instance to. The identifier must contain from 1 to + 63 letters, numbers, or hyphens and the first character must be a letter and may not end in a hyphen or + contain consecutive hyphens. + aliases: + - cluster_id + type: str + db_instance_class: + description: + - The compute and memory capacity of the DB instance, for example db.t2.micro. + aliases: + - class + - instance_type + type: str + db_instance_identifier: + description: + - The DB instance (lowercase) identifier. The identifier must contain from 1 to 63 letters, numbers, or + hyphens and the first character must be a letter and may not end in a hyphen or contain consecutive hyphens. + aliases: + - instance_id + - id + required: True + type: str + db_name: + description: + - The name for your database. If a name is not provided Amazon RDS will not create a database. + type: str + db_parameter_group_name: + description: + - The name of the DB parameter group to associate with this DB instance. When creating the DB instance if this + argument is omitted the default DBParameterGroup for the specified engine is used. + type: str + db_security_groups: + description: + - (EC2-Classic platform) A list of DB security groups to associate with this DB instance. + type: list + elements: str + db_snapshot_identifier: + description: + - The identifier or ARN of the DB snapshot to restore from when using I(creation_source=snapshot). + type: str + aliases: + - snapshot_identifier + - snapshot_id + db_subnet_group_name: + description: + - The DB subnet group name to use for the DB instance. + aliases: + - subnet_group + type: str + deletion_protection: + description: + - A value that indicates whether the DB instance has deletion protection enabled. + The database can't be deleted when deletion protection is enabled. + By default, deletion protection is disabled. + type: bool + version_added: 3.3.0 + version_added_collection: community.aws + domain: + description: + - The Active Directory Domain to restore the instance in. + type: str + domain_iam_role_name: + description: + - The name of the IAM role to be used when making API calls to the Directory Service. + type: str + enable_cloudwatch_logs_exports: + description: + - A list of log types that need to be enabled for exporting to CloudWatch Logs. + aliases: + - cloudwatch_log_exports + type: list + elements: str + enable_iam_database_authentication: + description: + - Enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. + If this option is omitted when creating the instance, Amazon RDS sets this to False. 
+ type: bool + enable_performance_insights: + description: + - Whether to enable Performance Insights for the DB instance. + type: bool + engine: + description: + - The name of the database engine to be used for this DB instance. This is required to create an instance. + choices: ['aurora', 'aurora-mysql', 'aurora-postgresql', 'mariadb', 'mysql', 'oracle-ee', 'oracle-ee-cdb', + 'oracle-se2', 'oracle-se2-cdb', 'postgres', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web'] + type: str + engine_version: + description: + - The version number of the database engine to use. For Aurora MySQL that could be 5.6.10a or 5.7.12; + for Aurora PostgreSQL, 9.6.3. + type: str + final_db_snapshot_identifier: + description: + - The DB instance snapshot identifier of the new DB instance snapshot created when I(skip_final_snapshot) is false. + aliases: + - final_snapshot_identifier + type: str + force_failover: + description: + - Set to C(True) to conduct the reboot through a MultiAZ failover. + type: bool + iam_roles: + description: + - List of Amazon Web Services Identity and Access Management (IAM) roles to associate with DB instance. + type: list + elements: dict + suboptions: + feature_name: + description: + - The name of the feature associated with the IAM role. + type: str + required: true + role_arn: + description: + - The ARN of the IAM role to associate with the DB instance. + type: str + required: true + version_added: 3.3.0 + version_added_collection: community.aws + iops: + description: + - The Provisioned IOPS (I/O operations per second) value. Only applicable when I(storage_type) is set to C(io1). + type: int + kms_key_id: + description: + - The ARN of the AWS KMS key identifier for an encrypted DB instance. If you are creating a DB instance with the + same AWS account that owns the KMS encryption key used to encrypt the new DB instance, then you can use the KMS key + alias instead of the ARN for the KMS encryption key. + - If I(storage_encrypted) is true and this option is not provided, the default encryption key is used. + type: str + license_model: + description: + - The license model for the DB instance. + - Valid values include license-included, bring-your-own-license, and general-public-license. + - This option can also be omitted to default to an accepted value. + type: str + master_user_password: + description: + - An 8-41 character password for the master database user. The password can contain any printable ASCII character + except "/", '"', or "@". To modify the password use I(force_update_password). Use I(apply_immediately) to change + the password immediately, otherwise it is updated during the next maintenance window. + aliases: + - password + type: str + master_username: + description: + - The name of the master user for the DB instance. Must be 1-16 letters or numbers and begin with a letter. + aliases: + - username + type: str + max_allocated_storage: + description: + - The upper limit to which Amazon RDS can automatically scale the storage of the DB instance. + type: int + monitoring_interval: + description: + - The interval, in seconds, when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting + metrics, specify 0. Amazon RDS defaults this to 0 if omitted when initially creating a DB instance. + type: int + monitoring_role_arn: + description: + - The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs.
+ type: str + multi_az: + description: + - Specifies if the DB instance is a Multi-AZ deployment. Mutually exclusive with I(availability_zone). + type: bool + new_db_instance_identifier: + description: + - The new DB instance (lowercase) identifier for the DB instance when renaming a DB instance. The identifier must contain + from 1 to 63 letters, numbers, or hyphens and the first character must be a letter and may not end in a hyphen or + contain consecutive hyphens. Use I(apply_immediately) to rename immediately, otherwise it is updated during the + next maintenance window. + aliases: + - new_instance_id + - new_id + type: str + option_group_name: + description: + - The option group to associate with the DB instance. + type: str + performance_insights_kms_key_id: + description: + - The AWS KMS key identifier (ARN, name, or alias) for encryption of Performance Insights data. + type: str + performance_insights_retention_period: + description: + - The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731. + type: int + port: + description: + - The port number on which the instances accept connections. + type: int + preferred_backup_window: + description: + - The daily time range (in UTC) of at least 30 minutes, during which automated backups are created if automated backups are + enabled using I(backup_retention_period). The option must be in the format of "hh24:mi-hh24:mi" and not conflict with + I(preferred_maintenance_window). + aliases: + - backup_window + type: str + preferred_maintenance_window: + description: + - The weekly time range (in UTC) of at least 30 minutes, during which system maintenance can occur. The option must + be in the format "ddd:hh24:mi-ddd:hh24:mi" where ddd is one of Mon, Tue, Wed, Thu, Fri, Sat, Sun. + aliases: + - maintenance_window + type: str + processor_features: + description: + - A dictionary of Name, Value pairs to indicate the number of CPU cores and the number of threads per core for the + DB instance class of the DB instance. Names are threadsPerCore and coreCount. + Set this option to an empty dictionary to use the default processor features. + suboptions: + threadsPerCore: + description: The number of threads per core. + coreCount: + description: The number of CPU cores. + type: dict + promotion_tier: + description: + - An integer that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of + the existing primary instance. + type: int + publicly_accessible: + description: + - Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with + a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal + instance with a DNS name that resolves to a private IP address. + type: bool + purge_iam_roles: + description: + - Set to C(True) to remove any IAM roles that aren't specified in the task and are associated with the instance. + type: bool + default: False + version_added: 3.3.0 + version_added_collection: community.aws + restore_time: + description: + - If using I(creation_source=instance) this indicates the UTC date and time to restore from the source instance. + For example, "2009-09-07T23:45:00Z". + - May alternatively set I(use_latest_restorable_time=True). + - Only one of I(use_latest_restorable_time) and I(restore_time) may be provided.
+ type: str + s3_bucket_name: + description: + - The name of the Amazon S3 bucket that contains the data used to create the Amazon DB instance. + type: str + s3_ingestion_role_arn: + description: + - The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that authorizes Amazon RDS to access + the Amazon S3 bucket on your behalf. + type: str + s3_prefix: + description: + - The prefix for all of the file names that contain the data used to create the Amazon DB instance. If you do not + specify a SourceS3Prefix value, then the Amazon DB instance is created by using all of the files in the Amazon S3 bucket. + type: str + skip_final_snapshot: + description: + - Whether a final DB instance snapshot is created before the DB instance is deleted. If this is false, I(final_db_snapshot_identifier) + must be provided. + type: bool + default: false + source_db_instance_identifier: + description: + - The identifier or ARN of the source DB instance from which to restore when creating a read replica or spinning up a point-in-time + DB instance using I(creation_source=instance). If the source DB is not in the same region this should be an ARN. + type: str + source_engine: + description: + - The identifier for the database engine that was backed up to create the files stored in the Amazon S3 bucket. + choices: + - mysql + type: str + source_engine_version: + description: + - The version of the database that the backup files were created from. + type: str + source_region: + description: + - The region of the DB instance from which the replica is created. + type: str + storage_encrypted: + description: + - Whether the DB instance is encrypted. + type: bool + storage_type: + description: + - The storage type to be associated with the DB instance. I(storage_type) does not apply to Aurora DB instances. + choices: + - standard + - gp2 + - gp3 + - io1 + type: str + storage_throughput: + description: + - The storage throughput when the I(storage_type) is C(gp3). + - When the allocated storage is below 400 GB, the storage throughput will always be 125 MB/s. + - When the allocated storage is larger than or equal to 400 GB, the throughput starts at 500 MB/s. + - Requires boto3 >= 1.26.0. + type: int + version_added: 5.2.0 + tde_credential_arn: + description: + - The ARN from the key store with which to associate the instance for Transparent Data Encryption. This is + supported by Oracle or SQL Server DB instances and may be used in conjunction with C(storage_encrypted) + though it might slightly affect the performance of your database. + aliases: + - transparent_data_encryption_arn + type: str + tde_credential_password: + description: + - The password for the given ARN from the key store in order to access the device. + aliases: + - transparent_data_encryption_password + type: str + timezone: + description: + - The time zone of the DB instance. + type: str + use_latest_restorable_time: + description: + - Whether to restore the DB instance to the latest restorable backup time. + - Only one of I(use_latest_restorable_time) and I(restore_time) may be provided. + type: bool + aliases: + - restore_from_latest + vpc_security_group_ids: + description: + - A list of EC2 VPC security groups to associate with the DB instance. + type: list + elements: str + purge_security_groups: + description: + - Set to False to retain any enabled security groups that aren't specified in the task and are associated with the instance.
+ - Can be applied to I(vpc_security_group_ids) and I(db_security_groups) + type: bool + default: True + version_added: 1.5.0 + version_added_collection: community.aws +''' + +EXAMPLES = r''' +# Note: These examples do not set authentication details, see the AWS Guide for details. +- name: create minimal aurora instance in default VPC and default subnet group + amazon.aws.rds_instance: + engine: aurora + db_instance_identifier: ansible-test-aurora-db-instance + instance_type: db.t2.small + password: "{{ password }}" + username: "{{ username }}" + cluster_id: ansible-test-cluster # This cluster must exist - see rds_cluster to manage it + +- name: Create a DB instance using the default AWS KMS encryption key + amazon.aws.rds_instance: + id: test-encrypted-db + state: present + engine: mariadb + storage_encrypted: True + db_instance_class: db.t2.medium + username: "{{ username }}" + password: "{{ password }}" + allocated_storage: "{{ allocated_storage }}" + +- name: remove the DB instance without a final snapshot + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: True + +- name: remove the DB instance with a final snapshot + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: absent + final_snapshot_identifier: "{{ snapshot_id }}" + +- name: Add a new security group without purge + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + vpc_security_group_ids: + - sg-0be17ba10c9286b0b + purge_security_groups: false + register: result + +# Add IAM role to db instance +- name: Create IAM policy + community.aws.iam_managed_policy: + policy_name: "my-policy" + policy: "{{ lookup('file','files/policy.json') }}" + state: present + register: iam_policy + +- name: Create IAM role + community.aws.iam_role: + assume_role_policy_document: "{{ lookup('file','files/assume_policy.json') }}" + name: "my-role" + state: present + managed_policy: "{{ iam_policy.policy.arn }}" + register: iam_role + +- name: Create DB instance with added IAM role + amazon.aws.rds_instance: + id: "my-instance-id" + state: present + engine: postgres + engine_version: 14.2 + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: db.m6g.large + allocated_storage: "{{ allocated_storage }}" + iam_roles: + - role_arn: "{{ iam_role.arn }}" + feature_name: 's3Export' + +- name: Remove IAM role from DB instance + amazon.aws.rds_instance: + id: "my-instance-id" + state: present + purge_iam_roles: true + +# Restore DB instance from snapshot +- name: Create a snapshot and wait until completion + amazon.aws.rds_instance_snapshot: + instance_id: 'my-instance-id' + snapshot_id: 'my-new-snapshot' + state: present + wait: true + register: snapshot + +- name: Restore DB from snapshot + amazon.aws.rds_instance: + id: 'my-restored-db' + creation_source: snapshot + snapshot_identifier: 'my-new-snapshot' + engine: mariadb + state: present + register: restored_db +''' + +RETURN = r''' +allocated_storage: + description: The allocated storage size in gigabytes. This is always 1 for aurora database engines. + returned: always + type: int + sample: 20 +associated_roles: + description: The list of currently associated roles. + returned: always + type: list + sample: [] +auto_minor_version_upgrade: + description: Whether minor engine upgrades are applied automatically to the DB instance during the maintenance window. + returned: always + type: bool + sample: true +availability_zone: + description: The availability zone for the DB instance. 
+'''
+
+RETURN = r'''
+allocated_storage:
+  description: The allocated storage size in gigabytes. This is always 1 for Aurora database engines.
+  returned: always
+  type: int
+  sample: 20
+associated_roles:
+  description: The list of currently associated roles.
+  returned: always
+  type: list
+  sample: []
+auto_minor_version_upgrade:
+  description: Whether minor engine upgrades are applied automatically to the DB instance during the maintenance window.
+  returned: always
+  type: bool
+  sample: true
+availability_zone:
+  description: The availability zone for the DB instance.
+  returned: always
+  type: str
+  sample: us-east-1f
+backup_retention_period:
+  description: The number of days for which automated backups are retained.
+  returned: always
+  type: int
+  sample: 1
+ca_certificate_identifier:
+  description: The identifier of the CA certificate for the DB instance.
+  returned: always
+  type: str
+  sample: rds-ca-2015
+copy_tags_to_snapshot:
+  description: Whether tags are copied from the DB instance to snapshots of the DB instance.
+  returned: always
+  type: bool
+  sample: false
+db_instance_arn:
+  description: The Amazon Resource Name (ARN) for the DB instance.
+  returned: always
+  type: str
+  sample: arn:aws:rds:us-east-1:123456789012:db:ansible-test
+db_instance_class:
+  description: The name of the compute and memory capacity class of the DB instance.
+  returned: always
+  type: str
+  sample: db.m4.large
+db_instance_identifier:
+  description: The identifier of the DB instance.
+  returned: always
+  type: str
+  sample: ansible-test
+db_instance_port:
+  description: The port that the DB instance listens on.
+  returned: always
+  type: int
+  sample: 0
+db_instance_status:
+  description: The current state of this database.
+  returned: always
+  type: str
+  sample: stopped
+db_parameter_groups:
+  description: The list of DB parameter groups applied to this DB instance.
+  returned: always
+  type: complex
+  contains:
+    db_parameter_group_name:
+      description: The name of the DB parameter group.
+      returned: always
+      type: str
+      sample: default.mariadb10.0
+    parameter_apply_status:
+      description: The status of parameter updates.
+      returned: always
+      type: str
+      sample: in-sync
+db_security_groups:
+  description: A list of DB security groups associated with this DB instance.
+  returned: always
+  type: list
+  sample: []
+db_subnet_group:
+  description: The subnet group associated with the DB instance.
+  returned: always
+  type: complex
+  contains:
+    db_subnet_group_description:
+      description: The description of the DB subnet group.
+      returned: always
+      type: str
+      sample: default
+    db_subnet_group_name:
+      description: The name of the DB subnet group.
+      returned: always
+      type: str
+      sample: default
+    subnet_group_status:
+      description: The status of the DB subnet group.
+      returned: always
+      type: str
+      sample: Complete
+    subnets:
+      description: A list of Subnet elements.
+      returned: always
+      type: complex
+      contains:
+        subnet_availability_zone:
+          description: The availability zone of the subnet.
+          returned: always
+          type: complex
+          contains:
+            name:
+              description: The name of the Availability Zone.
+              returned: always
+              type: str
+              sample: us-east-1c
+        subnet_identifier:
+          description: The ID of the subnet.
+          returned: always
+          type: str
+          sample: subnet-12345678
+        subnet_status:
+          description: The status of the subnet.
+          returned: always
+          type: str
+          sample: Active
+    vpc_id:
+      description: The VpcId of the DB subnet group.
+      returned: always
+      type: str
+      sample: vpc-12345678
+dbi_resource_id:
+  description: The AWS Region-unique, immutable identifier for the DB instance.
+  returned: always
+  type: str
+  sample: db-UHV3QRNWX4KB6GALCIGRML6QFA
+deletion_protection:
+  description: C(True) if the DB instance has deletion protection enabled, C(False) if not.
+  returned: always
+  type: bool
+  sample: False
+  version_added: 3.3.0
+  version_added_collection: community.aws
+domain_memberships:
+  description: The Active Directory Domain membership records associated with the DB instance.
+  returned: always
+  type: list
+  sample: []
+endpoint:
+  description: The connection endpoint.
+  returned: always
+  type: complex
+  contains:
+    address:
+      description: The DNS address of the DB instance.
+      returned: always
+      type: str
+      sample: ansible-test.cvlrtwiennww.us-east-1.rds.amazonaws.com
+    hosted_zone_id:
+      description: The ID that Amazon Route 53 assigns when you create a hosted zone.
+      returned: always
+      type: str
+      sample: ZTR2ITUGPA61AM
+    port:
+      description: The port that the database engine is listening on.
+      returned: always
+      type: int
+      sample: 3306
+engine:
+  description: The database engine.
+  returned: always
+  type: str
+  sample: mariadb
+engine_version:
+  description: The database engine version.
+  returned: always
+  type: str
+  sample: 10.0.35
+iam_database_authentication_enabled:
+  description: Whether mapping of AWS Identity and Access Management (IAM) accounts to database accounts is enabled.
+  returned: always
+  type: bool
+  sample: false
+instance_create_time:
+  description: The date and time the DB instance was created.
+  returned: always
+  type: str
+  sample: '2018-07-04T16:48:35.332000+00:00'
+kms_key_id:
+  description: The AWS KMS key identifier for the encrypted DB instance when storage_encrypted is true.
+  returned: When storage_encrypted is true
+  type: str
+  sample: arn:aws:kms:us-east-1:123456789012:key/70c45553-ad2e-4a85-9f14-cfeb47555c33
+latest_restorable_time:
+  description: The latest time to which a database can be restored with point-in-time restore.
+  returned: always
+  type: str
+  sample: '2018-07-04T16:50:50.642000+00:00'
+license_model:
+  description: The license model information for this DB instance.
+  returned: always
+  type: str
+  sample: general-public-license
+master_username:
+  description: The master username for the DB instance.
+  returned: always
+  type: str
+  sample: test
+max_allocated_storage:
+  description: The upper limit to which Amazon RDS can automatically scale the storage of the DB instance.
+  returned: When max allocated storage is present.
+  type: int
+  sample: 100
+monitoring_interval:
+  description:
+    - The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance.
+      0 means collecting Enhanced Monitoring metrics is disabled.
+  returned: always
+  type: int
+  sample: 0
+multi_az:
+  description: Whether the DB instance is a Multi-AZ deployment.
+  returned: always
+  type: bool
+  sample: false
+option_group_memberships:
+  description: The list of option group memberships for this DB instance.
+  returned: always
+  type: complex
+  contains:
+    option_group_name:
+      description: The name of the option group that the instance belongs to.
+      returned: always
+      type: str
+      sample: default:mariadb-10-0
+    status:
+      description: The status of the DB instance's option group membership.
+      returned: always
+      type: str
+      sample: in-sync
+pending_modified_values:
+  description: The changes to the DB instance that are pending.
+  returned: always
+  type: complex
+  contains: {}
+performance_insights_enabled:
+  description: True if Performance Insights is enabled for the DB instance, false otherwise.
+  returned: always
+  type: bool
+  sample: false
+preferred_backup_window:
+  description: The daily time range during which automated backups are created if automated backups are enabled.
+  returned: always
+  type: str
+  sample: 07:01-07:31
+preferred_maintenance_window:
+  description: The weekly time range (in UTC) during which system maintenance can occur.
+  returned: always
+  type: str
+  sample: sun:09:31-sun:10:01
+publicly_accessible:
+  description:
+    - True for an Internet-facing instance with a publicly resolvable DNS name, False to indicate an
+      internal instance with a DNS name that resolves to a private IP address.
+  returned: always
+  type: bool
+  sample: true
+read_replica_db_instance_identifiers:
+  description: Identifiers of the Read Replicas associated with this DB instance.
+  returned: always
+  type: list
+  sample: []
+storage_encrypted:
+  description: Whether the DB instance is encrypted.
+  returned: always
+  type: bool
+  sample: false
+storage_type:
+  description: The storage type to be associated with the DB instance.
+  returned: always
+  type: str
+  sample: standard
+tags:
+  description: A dictionary of tags associated with the DB instance.
+  returned: always
+  type: complex
+  contains: {}
+vpc_security_groups:
+  description: A list of VPC security group elements that the DB instance belongs to.
+  returned: always
+  type: complex
+  contains:
+    status:
+      description: The status of the VPC security group.
+      returned: always
+      type: str
+      sample: active
+    vpc_security_group_id:
+      description: The ID of the VPC security group.
+      returned: always
+      type: str
+      sample: sg-12345678
+'''
+
+from time import sleep
+
+try:
+    import botocore
+except ImportError:
+    pass  # caught by AnsibleAWSModule
+
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.six import string_types
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message
+from ansible_collections.amazon.aws.plugins.module_utils.core import get_boto3_client_method_parameters
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.rds import arg_spec_to_rds_params
+from ansible_collections.amazon.aws.plugins.module_utils.rds import call_method
+from ansible_collections.amazon.aws.plugins.module_utils.rds import compare_iam_roles
+from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags
+from ansible_collections.amazon.aws.plugins.module_utils.rds import get_final_identifier
+from ansible_collections.amazon.aws.plugins.module_utils.rds import get_rds_method_attribute
+from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags
+from ansible_collections.amazon.aws.plugins.module_utils.rds import update_iam_roles
+
+
+valid_engines = ['aurora', 'aurora-mysql', 'aurora-postgresql', 'mariadb', 'mysql', 'oracle-ee', 'oracle-ee-cdb',
+                 'oracle-se2', 'oracle-se2-cdb', 'postgres', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web']
+
+valid_engines_iam_roles = ['aurora-postgresql', 'oracle-ee', 'oracle-ee-cdb', 'oracle-se2', 'oracle-se2-cdb',
+                           'postgres', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web']
+
+
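+# Maps the requested state and creation source to the boto3 RDS client method
+# that needs to be called; returns None when no API call is required (for
+# example, deleting an instance that is already in a 'deleting' state).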
+def get_rds_method_attribute_name(instance, state, creation_source, read_replica):
+    method_name = None
+    if state in ('absent', 'terminated'):
+        if instance and instance['DBInstanceStatus'] not in ['deleting', 'deleted']:
+            method_name = 'delete_db_instance'
+    else:
+        if instance:
+            method_name = 'modify_db_instance'
+        elif read_replica is True:
+            method_name = 'create_db_instance_read_replica'
+        elif creation_source == 'snapshot':
+            method_name = 'restore_db_instance_from_db_snapshot'
+        elif creation_source == 's3':
+            method_name = 'restore_db_instance_from_s3'
+        elif creation_source == 'instance':
+            method_name = 'restore_db_instance_to_point_in_time'
+        else:
+            method_name = 'create_db_instance'
+    return method_name
+
+
+def get_instance(client, module, db_instance_id):
+    try:
+        for i in range(3):
+            try:
+                instance = client.describe_db_instances(DBInstanceIdentifier=db_instance_id)['DBInstances'][0]
+                instance['Tags'] = get_tags(client, module, instance['DBInstanceArn'])
+                if instance.get('ProcessorFeatures'):
+                    instance['ProcessorFeatures'] = dict((feature['Name'], feature['Value']) for feature in instance['ProcessorFeatures'])
+                if instance.get('PendingModifiedValues', {}).get('ProcessorFeatures'):
+                    instance['PendingModifiedValues']['ProcessorFeatures'] = dict(
+                        (feature['Name'], feature['Value'])
+                        for feature in instance['PendingModifiedValues']['ProcessorFeatures']
+                    )
+                break
+            except is_boto3_error_code('DBInstanceNotFound'):
+                sleep(3)
+        else:
+            instance = {}
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg='Failed to describe DB instances')
+    return instance
+
+
+def get_final_snapshot(client, module, snapshot_identifier):
+    try:
+        snapshots = AWSRetry.jittered_backoff()(client.describe_db_snapshots)(DBSnapshotIdentifier=snapshot_identifier)
+        if len(snapshots.get('DBSnapshots', [])) == 1:
+            return snapshots['DBSnapshots'][0]
+        return {}
+    except is_boto3_error_code('DBSnapshotNotFound'):  # May not be using wait: True
+        return {}
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg='Failed to retrieve information about the final snapshot')
+
+
+def get_parameters(client, module, parameters, method_name):
+    if method_name == 'restore_db_instance_to_point_in_time':
+        parameters['TargetDBInstanceIdentifier'] = module.params['db_instance_identifier']
+
+    required_options = get_boto3_client_method_parameters(client, method_name, required=True)
+    if any(parameters.get(k) is None for k in required_options):
+        module.fail_json(msg='To {0}, the following parameters are required: {1}'.format(
+            get_rds_method_attribute(method_name, module).operation_description, required_options))
+    options = get_boto3_client_method_parameters(client, method_name)
+    parameters = dict((k, v) for k, v in parameters.items() if k in options and v is not None)
+
+    if parameters.get('ProcessorFeatures') is not None:
+        parameters['ProcessorFeatures'] = [{'Name': k, 'Value': to_text(v)} for k, v in parameters['ProcessorFeatures'].items()]
+
+    # If this parameter is an empty list it can only be used with modify_db_instance (as the parameter UseDefaultProcessorFeatures)
+    if parameters.get('ProcessorFeatures') == [] and method_name != 'modify_db_instance':
+        parameters.pop('ProcessorFeatures')
+
+    if method_name in ['create_db_instance', 'create_db_instance_read_replica', 'restore_db_instance_from_db_snapshot']:
+        if parameters.get('Tags'):
+            parameters['Tags'] = ansible_dict_to_boto3_tag_list(parameters['Tags'])
+
+    if method_name == 'modify_db_instance':
+        parameters = get_options_with_changing_values(client, module, parameters)
+
+    return parameters
+
+
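+# Only keeps options whose desired values differ from the instance's current
+# (or pending) configuration, so modify_db_instance is called with a minimal
+# set of genuinely changing parameters.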
+def get_options_with_changing_values(client, module, parameters):
+    instance_id = module.params['db_instance_identifier']
+    purge_cloudwatch_logs = module.params['purge_cloudwatch_logs_exports']
+    force_update_password = module.params['force_update_password']
+    port = module.params['port']
+    apply_immediately = parameters.pop('ApplyImmediately', None)
+    cloudwatch_logs_enabled = module.params['enable_cloudwatch_logs_exports']
+    purge_security_groups = module.params['purge_security_groups']
+
+    if port:
+        parameters['DBPortNumber'] = port
+    if not force_update_password:
+        parameters.pop('MasterUserPassword', None)
+    if cloudwatch_logs_enabled:
+        parameters['CloudwatchLogsExportConfiguration'] = cloudwatch_logs_enabled
+    if not module.params['storage_type']:
+        parameters.pop('Iops', None)
+
+    instance = get_instance(client, module, instance_id)
+    updated_parameters = get_changing_options_with_inconsistent_keys(parameters, instance, purge_cloudwatch_logs, purge_security_groups)
+    updated_parameters.update(get_changing_options_with_consistent_keys(parameters, instance))
+    parameters = updated_parameters
+
+    if instance.get('StorageType') == 'io1':
+        # Bundle Iops and AllocatedStorage while updating an io1 RDS instance
+        current_iops = instance.get('PendingModifiedValues', {}).get('Iops', instance['Iops'])
+        current_allocated_storage = instance.get('PendingModifiedValues', {}).get('AllocatedStorage', instance['AllocatedStorage'])
+        new_iops = module.params.get('iops')
+        new_allocated_storage = module.params.get('allocated_storage')
+
+        if current_iops != new_iops or current_allocated_storage != new_allocated_storage:
+            parameters['AllocatedStorage'] = new_allocated_storage
+            parameters['Iops'] = new_iops
+
+    if instance.get('StorageType') == 'gp3':
+        if module.boto3_at_least('1.26.0'):
+            GP3_THROUGHPUT = True
+            current_storage_throughput = instance.get('PendingModifiedValues', {}).get('StorageThroughput', instance['StorageThroughput'])
+            new_storage_throughput = module.params.get('storage_throughput') or current_storage_throughput
+            if new_storage_throughput != current_storage_throughput:
+                parameters['StorageThroughput'] = new_storage_throughput
+        else:
+            GP3_THROUGHPUT = False
+            module.warn('gp3 volumes require boto3 >= 1.26.0. storage_throughput will be ignored.')
+
+        current_iops = instance.get('PendingModifiedValues', {}).get('Iops', instance['Iops'])
+        # When changing from gp2 to gp3, the iops parameter may be omitted
+        new_iops = module.params.get('iops') or current_iops
+
+        new_allocated_storage = module.params.get('allocated_storage')
+        current_allocated_storage = instance.get('PendingModifiedValues', {}).get('AllocatedStorage', instance['AllocatedStorage'])
+
+        if new_allocated_storage:
+            if current_allocated_storage != new_allocated_storage:
+                parameters["AllocatedStorage"] = new_allocated_storage
+
+            if new_allocated_storage >= 400:
+                if new_iops < 12000:
+                    module.fail_json(
+                        msg="IOPS must be at least 12000 when the allocated storage is larger than or equal to 400 GB."
+                    )
+
+                # Check GP3_THROUGHPUT first: new_storage_throughput is only
+                # defined when boto3 supports gp3 storage throughput.
+                if GP3_THROUGHPUT and new_storage_throughput < 500:
+                    module.fail_json(
+                        msg="Storage Throughput must be at least 500 when the allocated storage is larger than or equal to 400 GB."
+                    )
+
+        if current_iops != new_iops:
+            parameters["Iops"] = new_iops
+            # AllocatedStorage must always be specified when changing Iops
+            parameters["AllocatedStorage"] = new_allocated_storage
+
+    if parameters.get('NewDBInstanceIdentifier') and instance.get('PendingModifiedValues', {}).get('DBInstanceIdentifier'):
+        if parameters['NewDBInstanceIdentifier'] == instance['PendingModifiedValues']['DBInstanceIdentifier'] and not apply_immediately:
+            parameters.pop('NewDBInstanceIdentifier')
+
+    if parameters:
+        parameters['DBInstanceIdentifier'] = instance_id
+        if apply_immediately is not None:
+            parameters['ApplyImmediately'] = apply_immediately
+
+    return parameters
+
+
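+# describe_db_instances reports several attributes under different keys than
+# modify_db_instance accepts; collect the current values under the modify-style
+# keys so they can be compared directly against the requested parameters.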
+def get_current_attributes_with_inconsistent_keys(instance):
+    options = {}
+    if instance.get('PendingModifiedValues', {}).get('PendingCloudwatchLogsExports', {}).get('LogTypesToEnable', []):
+        current_enabled = instance['PendingModifiedValues']['PendingCloudwatchLogsExports']['LogTypesToEnable']
+        current_disabled = instance['PendingModifiedValues']['PendingCloudwatchLogsExports']['LogTypesToDisable']
+        options['CloudwatchLogsExportConfiguration'] = {'LogTypesToEnable': current_enabled, 'LogTypesToDisable': current_disabled}
+    else:
+        options['CloudwatchLogsExportConfiguration'] = {'LogTypesToEnable': instance.get('EnabledCloudwatchLogsExports', []), 'LogTypesToDisable': []}
+    if instance.get('PendingModifiedValues', {}).get('Port'):
+        options['DBPortNumber'] = instance['PendingModifiedValues']['Port']
+    else:
+        options['DBPortNumber'] = instance['Endpoint']['Port']
+    if instance.get('PendingModifiedValues', {}).get('DBSubnetGroupName'):
+        options['DBSubnetGroupName'] = instance['PendingModifiedValues']['DBSubnetGroupName']
+    else:
+        options['DBSubnetGroupName'] = instance['DBSubnetGroup']['DBSubnetGroupName']
+    if instance.get('PendingModifiedValues', {}).get('ProcessorFeatures'):
+        options['ProcessorFeatures'] = instance['PendingModifiedValues']['ProcessorFeatures']
+    else:
+        options['ProcessorFeatures'] = instance.get('ProcessorFeatures', {})
+    options['OptionGroupName'] = [g['OptionGroupName'] for g in instance['OptionGroupMemberships']]
+    options['DBSecurityGroups'] = [sg['DBSecurityGroupName'] for sg in instance['DBSecurityGroups'] if sg['Status'] in ['adding', 'active']]
+    options['VpcSecurityGroupIds'] = [sg['VpcSecurityGroupId'] for sg in instance['VpcSecurityGroups'] if sg['Status'] in ['adding', 'active']]
+    options['DBParameterGroupName'] = [parameter_group['DBParameterGroupName'] for parameter_group in instance['DBParameterGroups']]
+    options['EnableIAMDatabaseAuthentication'] = instance['IAMDatabaseAuthenticationEnabled']
+    # PerformanceInsightsEnabled is not returned by older RDS instances
+    options['EnablePerformanceInsights'] = instance.get('PerformanceInsightsEnabled', False)
+    options['NewDBInstanceIdentifier'] = instance['DBInstanceIdentifier']
+
+    # Neither of these is returned via describe_db_instances, so if either is specified during a check_mode run, changed=True
+    options['AllowMajorVersionUpgrade'] = None
+    options['MasterUserPassword'] = None
+
+    return options
+
+
+def get_changing_options_with_inconsistent_keys(modify_params, instance, purge_cloudwatch_logs, purge_security_groups):
+    changing_params = {}
+    current_options = get_current_attributes_with_inconsistent_keys(instance)
+    for option in current_options:
+        current_option = current_options[option]
+        desired_option = modify_params.pop(option, None)
+        if desired_option is None:
+            continue
+
+        # TODO: allow other purge_option module parameters rather
than just checking for things to add + if isinstance(current_option, list): + if isinstance(desired_option, list): + if ( + set(desired_option) < set(current_option) and + option in ('DBSecurityGroups', 'VpcSecurityGroupIds',) and purge_security_groups + ): + changing_params[option] = desired_option + elif set(desired_option) <= set(current_option): + continue + elif isinstance(desired_option, string_types): + if desired_option in current_option: + continue + + # Current option and desired option are the same - continue loop + if option != 'ProcessorFeatures' and current_option == desired_option: + continue + + if option == 'ProcessorFeatures' and current_option == boto3_tag_list_to_ansible_dict(desired_option, 'Name', 'Value'): + continue + + # Current option and desired option are different - add to changing_params list + if option == 'ProcessorFeatures' and desired_option == []: + changing_params['UseDefaultProcessorFeatures'] = True + elif option == 'CloudwatchLogsExportConfiguration': + current_option = set(current_option.get('LogTypesToEnable', [])) + desired_option = set(desired_option) + format_option = {'EnableLogTypes': [], 'DisableLogTypes': []} + format_option['EnableLogTypes'] = list(desired_option.difference(current_option)) + if purge_cloudwatch_logs: + format_option['DisableLogTypes'] = list(current_option.difference(desired_option)) + if format_option['EnableLogTypes'] or format_option['DisableLogTypes']: + changing_params[option] = format_option + elif option in ('DBSecurityGroups', 'VpcSecurityGroupIds',): + if purge_security_groups: + changing_params[option] = desired_option + else: + changing_params[option] = list(set(current_option) | set(desired_option)) + else: + changing_params[option] = desired_option + + return changing_params + + +def get_changing_options_with_consistent_keys(modify_params, instance): + changing_params = {} + + for param in modify_params: + current_option = instance.get('PendingModifiedValues', {}).get(param, None) + if current_option is None: + current_option = instance.get(param, None) + if modify_params[param] != current_option: + changing_params[param] = modify_params[param] + + return changing_params + + +def validate_options(client, module, instance): + state = module.params['state'] + skip_final_snapshot = module.params['skip_final_snapshot'] + snapshot_id = module.params['final_db_snapshot_identifier'] + modified_id = module.params['new_db_instance_identifier'] + engine = module.params['engine'] + tde_options = bool(module.params['tde_credential_password'] or module.params['tde_credential_arn']) + read_replica = module.params['read_replica'] + creation_source = module.params['creation_source'] + source_instance = module.params['source_db_instance_identifier'] + if module.params['source_region'] is not None: + same_region = bool(module.params['source_region'] == module.params['region']) + else: + same_region = True + + if modified_id: + modified_instance = get_instance(client, module, modified_id) + else: + modified_instance = {} + + if modified_id and instance and modified_instance: + module.fail_json(msg='A new instance ID {0} was provided but it already exists'.format(modified_id)) + if modified_id and not instance and modified_instance: + module.fail_json(msg='A new instance ID {0} was provided but the instance to be renamed does not exist'.format(modified_id)) + if state in ('absent', 'terminated') and instance and not skip_final_snapshot and snapshot_id is None: + module.fail_json(msg='skip_final_snapshot is false but all of the 
following are missing: final_db_snapshot_identifier') + if engine is not None and not (engine.startswith('mysql') or engine.startswith('oracle')) and tde_options: + module.fail_json(msg='TDE is available for MySQL and Oracle DB instances') + if read_replica is True and not instance and creation_source not in [None, 'instance']: + module.fail_json(msg='Cannot create a read replica from {0}. You must use a source DB instance'.format(creation_source)) + if read_replica is True and not instance and not source_instance: + module.fail_json(msg='read_replica is true and the instance does not exist yet but all of the following are missing: source_db_instance_identifier') + + +def update_instance(client, module, instance, instance_id): + changed = False + + # Get newly created DB instance + if not instance: + instance = get_instance(client, module, instance_id) + + # Check tagging/promoting/rebooting/starting/stopping instance + changed |= ensure_tags( + client, module, instance['DBInstanceArn'], instance['Tags'], module.params['tags'], module.params['purge_tags'] + ) + changed |= promote_replication_instance(client, module, instance, module.params['read_replica']) + changed |= update_instance_state(client, module, instance, module.params['state']) + + return changed + + +def promote_replication_instance(client, module, instance, read_replica): + changed = False + if read_replica is False: + # 'StatusInfos' only exists when the instance is a read replica + # See https://awscli.amazonaws.com/v2/documentation/api/latest/reference/rds/describe-db-instances.html + if bool(instance.get('StatusInfos')): + try: + result, changed = call_method(client, module, method_name='promote_read_replica', + parameters={'DBInstanceIdentifier': instance['DBInstanceIdentifier']}) + except is_boto3_error_message('DB Instance is not a read replica'): + pass + return changed + + +def ensure_iam_roles(client, module, instance_id): + ''' + Ensure specified IAM roles are associated with DB instance + + Parameters: + client: RDS client + module: AWSModule + instance_id: DB's instance ID + + Returns: + changed (bool): True if changes were successfully made to DB instance's IAM roles; False if not + ''' + instance = camel_dict_to_snake_dict(get_instance(client, module, instance_id), ignore_list=['Tags', 'ProcessorFeatures']) + + # Ensure engine type supports associating IAM roles + engine = instance.get('engine') + if engine not in valid_engines_iam_roles: + module.fail_json(msg='DB engine {0} is not valid for adding IAM roles. 
Valid engines are {1}'.format(engine, valid_engines_iam_roles)) + + changed = False + purge_iam_roles = module.params.get('purge_iam_roles') + target_roles = module.params.get('iam_roles') if module.params.get('iam_roles') else [] + existing_roles = instance.get('associated_roles', []) + roles_to_add, roles_to_remove = compare_iam_roles(existing_roles, target_roles, purge_iam_roles) + if bool(roles_to_add or roles_to_remove): + changed = True + # Don't update on check_mode + if module.check_mode: + module.exit_json(changed=changed, **instance) + else: + update_iam_roles(client, module, instance_id, roles_to_add, roles_to_remove) + return changed + + +def update_instance_state(client, module, instance, state): + changed = False + if state in ['rebooted', 'restarted']: + changed |= reboot_running_db_instance(client, module, instance) + if state in ['started', 'running', 'stopped']: + changed |= start_or_stop_instance(client, module, instance, state) + return changed + + +def reboot_running_db_instance(client, module, instance): + parameters = {'DBInstanceIdentifier': instance['DBInstanceIdentifier']} + if instance['DBInstanceStatus'] in ['stopped', 'stopping']: + call_method(client, module, 'start_db_instance', parameters) + if module.params.get('force_failover') is not None: + parameters['ForceFailover'] = module.params['force_failover'] + results, changed = call_method(client, module, 'reboot_db_instance', parameters) + return changed + + +def start_or_stop_instance(client, module, instance, state): + changed = False + parameters = {'DBInstanceIdentifier': instance['DBInstanceIdentifier']} + if state == 'stopped' and instance['DBInstanceStatus'] not in ['stopping', 'stopped']: + if module.params['db_snapshot_identifier']: + parameters['DBSnapshotIdentifier'] = module.params['db_snapshot_identifier'] + result, changed = call_method(client, module, 'stop_db_instance', parameters) + elif state == 'started' and instance['DBInstanceStatus'] not in ['available', 'starting', 'restarting']: + result, changed = call_method(client, module, 'start_db_instance', parameters) + return changed + + +def main(): + arg_spec = dict( + state=dict(choices=['present', 'absent', 'terminated', 'running', 'started', 'stopped', 'rebooted', 'restarted'], default='present'), + creation_source=dict(choices=['snapshot', 's3', 'instance']), + force_update_password=dict(type='bool', default=False, no_log=False), + purge_cloudwatch_logs_exports=dict(type='bool', default=True), + purge_iam_roles=dict(type='bool', default=False), + purge_tags=dict(type='bool', default=True), + read_replica=dict(type='bool'), + wait=dict(type='bool', default=True), + purge_security_groups=dict(type='bool', default=True), + ) + + parameter_options = dict( + allocated_storage=dict(type='int'), + allow_major_version_upgrade=dict(type='bool'), + apply_immediately=dict(type='bool', default=False), + auto_minor_version_upgrade=dict(type='bool'), + availability_zone=dict(aliases=['az', 'zone']), + backup_retention_period=dict(type='int'), + ca_certificate_identifier=dict(), + character_set_name=dict(), + copy_tags_to_snapshot=dict(type='bool'), + db_cluster_identifier=dict(aliases=['cluster_id']), + db_instance_class=dict(aliases=['class', 'instance_type']), + db_instance_identifier=dict(required=True, aliases=['instance_id', 'id']), + db_name=dict(), + db_parameter_group_name=dict(), + db_security_groups=dict(type='list', elements='str'), + db_snapshot_identifier=dict(type='str', aliases=['snapshot_identifier', 'snapshot_id']), + 
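+        # Note: db_snapshot_identifier doubles as the restore source when
+        # creation_source=snapshot and as the name of the snapshot created
+        # when stopping the instance with state=stopped.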
+        db_subnet_group_name=dict(aliases=['subnet_group']),
+        deletion_protection=dict(type='bool'),
+        domain=dict(),
+        domain_iam_role_name=dict(),
+        enable_cloudwatch_logs_exports=dict(type='list', aliases=['cloudwatch_log_exports'], elements='str'),
+        enable_iam_database_authentication=dict(type='bool'),
+        enable_performance_insights=dict(type='bool'),
+        engine=dict(type='str', choices=valid_engines),
+        engine_version=dict(),
+        final_db_snapshot_identifier=dict(aliases=['final_snapshot_identifier']),
+        force_failover=dict(type='bool'),
+        iam_roles=dict(type='list', elements='dict'),
+        iops=dict(type='int'),
+        kms_key_id=dict(),
+        license_model=dict(),
+        master_user_password=dict(aliases=['password'], no_log=True),
+        master_username=dict(aliases=['username']),
+        max_allocated_storage=dict(type='int'),
+        monitoring_interval=dict(type='int'),
+        monitoring_role_arn=dict(),
+        multi_az=dict(type='bool'),
+        new_db_instance_identifier=dict(aliases=['new_instance_id', 'new_id']),
+        option_group_name=dict(),
+        performance_insights_kms_key_id=dict(),
+        performance_insights_retention_period=dict(type="int"),
+        port=dict(type="int"),
+        preferred_backup_window=dict(aliases=["backup_window"]),
+        preferred_maintenance_window=dict(aliases=["maintenance_window"]),
+        processor_features=dict(type="dict"),
+        promotion_tier=dict(type='int'),
+        publicly_accessible=dict(type="bool"),
+        restore_time=dict(),
+        s3_bucket_name=dict(),
+        s3_ingestion_role_arn=dict(),
+        s3_prefix=dict(),
+        skip_final_snapshot=dict(type='bool', default=False),
+        source_db_instance_identifier=dict(),
+        source_engine=dict(choices=['mysql']),
+        source_engine_version=dict(),
+        source_region=dict(),
+        storage_encrypted=dict(type='bool'),
+        storage_type=dict(choices=['standard', 'gp2', 'gp3', 'io1']),
+        storage_throughput=dict(type='int'),
+        tags=dict(type='dict', aliases=['resource_tags']),
+        tde_credential_arn=dict(aliases=['transparent_data_encryption_arn']),
+        tde_credential_password=dict(no_log=True, aliases=['transparent_data_encryption_password']),
+        timezone=dict(),
+        use_latest_restorable_time=dict(type='bool', aliases=['restore_from_latest']),
+        vpc_security_group_ids=dict(type='list', elements='str')
+    )
+    arg_spec.update(parameter_options)
+
+    required_if = [
+        ('engine', 'aurora', ('db_cluster_identifier',)),
+        ('engine', 'aurora-mysql', ('db_cluster_identifier',)),
+        ('engine', 'aurora-postgresql', ('db_cluster_identifier',)),
+        ('storage_type', 'io1', ('iops', 'allocated_storage')),
+        ('creation_source', 'snapshot', ('db_snapshot_identifier', 'engine')),
+        ('creation_source', 's3', (
+            's3_bucket_name', 'engine', 'master_username', 'master_user_password',
+            'source_engine', 'source_engine_version', 's3_ingestion_role_arn')),
+    ]
+    mutually_exclusive = [
+        ('s3_bucket_name', 'source_db_instance_identifier', 'db_snapshot_identifier'),
+        ('use_latest_restorable_time', 'restore_time'),
+        ('availability_zone', 'multi_az'),
+    ]
+
+    module = AnsibleAWSModule(
+        argument_spec=arg_spec,
+        required_if=required_if,
+        mutually_exclusive=mutually_exclusive,
+        supports_check_mode=True
+    )
+
+    # Sanitize instance identifiers
+    module.params['db_instance_identifier'] = module.params['db_instance_identifier'].lower()
+    if module.params['new_db_instance_identifier']:
+        module.params['new_db_instance_identifier'] = module.params['new_db_instance_identifier'].lower()
+
+    # Sanitize processor features
+    if module.params['processor_features'] is not None:
+        module.params['processor_features'] = dict((k, to_text(v)) for k, v in
+                                                   module.params['processor_features'].items())
+
+    # Ensure dates are in lowercase
+    if module.params['preferred_maintenance_window']:
+        module.params['preferred_maintenance_window'] = module.params['preferred_maintenance_window'].lower()
+
+    # Warn when allow_major_version_upgrade is specified in check_mode:
+    # describe_db_instances never returns this value, so in check mode the module
+    # will always report changed=True. In non-check mode runs, changed will return
+    # the correct value, so there is no need to warn there.
+    # see: amazon.aws.module_utils.rds.handle_errors.
+    if module.params.get('allow_major_version_upgrade') and module.check_mode:
+        module.warn('allow_major_version_upgrade is not returned when describing db instances, so changed will always be `True` on check mode runs.')
+
+    client = module.client('rds')
+    changed = False
+    state = module.params['state']
+    instance_id = module.params['db_instance_identifier']
+    instance = get_instance(client, module, instance_id)
+    validate_options(client, module, instance)
+    method_name = get_rds_method_attribute_name(instance, state, module.params['creation_source'], module.params['read_replica'])
+
+    if method_name:
+
+        # Exit on create/delete if check_mode
+        if module.check_mode and method_name in ['create_db_instance', 'delete_db_instance']:
+            module.exit_json(changed=True, **camel_dict_to_snake_dict(instance, ignore_list=['Tags', 'ProcessorFeatures']))
+
+        raw_parameters = arg_spec_to_rds_params(dict((k, module.params[k]) for k in module.params if k in parameter_options))
+        parameters_to_modify = get_parameters(client, module, raw_parameters, method_name)
+
+        if parameters_to_modify:
+            # Exit on check_mode when there are parameters to modify
+            if module.check_mode:
+                module.exit_json(changed=True, **camel_dict_to_snake_dict(instance, ignore_list=['Tags', 'ProcessorFeatures']))
+            result, changed = call_method(client, module, method_name, parameters_to_modify)
+
+        instance_id = get_final_identifier(method_name, module)
+
+    if state != 'absent':
+        # Check tagging/promoting/rebooting/starting/stopping instance
+        if not module.check_mode or instance:
+            changed |= update_instance(client, module, instance, instance_id)
+
+        # Check IAM roles
+        if module.params.get('iam_roles') or module.params.get('purge_iam_roles'):
+            changed |= ensure_iam_roles(client, module, instance_id)
+
+    if changed:
+        instance = get_instance(client, module, instance_id)
+        if state != 'absent' and (instance or not module.check_mode):
+            for attempt_to_wait in range(0, 10):
+                instance = get_instance(client, module, instance_id)
+                if instance:
+                    break
+                else:
+                    sleep(5)
+
+    if state == 'absent' and changed and not module.params['skip_final_snapshot']:
+        instance.update(FinalSnapshot=get_final_snapshot(client, module, module.params['final_db_snapshot_identifier']))
+
+    pending_processor_features = None
+    if instance.get('PendingModifiedValues', {}).get('ProcessorFeatures'):
+        pending_processor_features = instance['PendingModifiedValues'].pop('ProcessorFeatures')
+    instance = camel_dict_to_snake_dict(instance, ignore_list=['Tags', 'ProcessorFeatures'])
+    if pending_processor_features is not None:
+        instance['pending_modified_values']['processor_features'] = pending_processor_features
+
+    module.exit_json(changed=changed, **instance)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_instance_info.py b/ansible_collections/amazon/aws/plugins/modules/rds_instance_info.py
new file mode 100644
index 000000000..6996b6115
---
/dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/rds_instance_info.py @@ -0,0 +1,424 @@ +#!/usr/bin/python +# Copyright (c) 2017, 2018 Michael De La Rue +# Copyright (c) 2017, 2018 Will Thames +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: rds_instance_info +version_added: 5.0.0 +short_description: obtain information about one or more RDS instances +description: + - Obtain information about one or more RDS instances. + - This module was originally added to C(community.aws) in release 1.0.0. +options: + db_instance_identifier: + description: + - The RDS instance's unique identifier. + required: false + aliases: + - id + type: str + filters: + description: + - A filter that specifies one or more DB instances to describe. + See U(https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBInstances.html) + type: dict +author: + - "Will Thames (@willthames)" + - "Michael De La Rue (@mikedlr)" +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.boto3 + +''' + +EXAMPLES = ''' +- name: Get information about an instance + amazon.aws.rds_instance_info: + db_instance_identifier: new-database + register: new_database_info + +- name: Get all RDS instances + amazon.aws.rds_instance_info: +''' + +RETURN = ''' +instances: + description: List of RDS instances + returned: always + type: complex + contains: + allocated_storage: + description: Gigabytes of storage allocated to the database + returned: always + type: int + sample: 10 + auto_minor_version_upgrade: + description: Whether minor version upgrades happen automatically + returned: always + type: bool + sample: true + availability_zone: + description: Availability Zone in which the database resides + returned: always + type: str + sample: us-west-2b + backup_retention_period: + description: Days for which backups are retained + returned: always + type: int + sample: 7 + ca_certificate_identifier: + description: ID for the CA certificate + returned: always + type: str + sample: rds-ca-2015 + copy_tags_to_snapshot: + description: Whether DB tags should be copied to the snapshot + returned: always + type: bool + sample: false + db_instance_arn: + description: ARN of the database instance + returned: always + type: str + sample: arn:aws:rds:us-west-2:123456789012:db:helloworld-rds + db_instance_class: + description: Instance class of the database instance + returned: always + type: str + sample: db.t2.small + db_instance_identifier: + description: Database instance identifier + returned: always + type: str + sample: helloworld-rds + db_instance_port: + description: Port used by the database instance + returned: always + type: int + sample: 0 + db_instance_status: + description: Status of the database instance + returned: always + type: str + sample: available + db_name: + description: Name of the database + returned: always + type: str + sample: management + db_parameter_groups: + description: List of database parameter groups + returned: always + type: complex + contains: + db_parameter_group_name: + description: Name of the database parameter group + returned: always + type: str + sample: psql-pg-helloworld + parameter_apply_status: + description: Whether the parameter group has been applied + returned: always + type: str + sample: in-sync + db_security_groups: + description: List of 
security groups used by the database instance + returned: always + type: list + sample: [] + db_subnet_group: + description: list of subnet groups + returned: always + type: complex + contains: + db_subnet_group_description: + description: Description of the DB subnet group + returned: always + type: str + sample: My database subnet group + db_subnet_group_name: + description: Name of the database subnet group + returned: always + type: str + sample: my-subnet-group + subnet_group_status: + description: Subnet group status + returned: always + type: str + sample: Complete + subnets: + description: List of subnets in the subnet group + returned: always + type: complex + contains: + subnet_availability_zone: + description: Availability zone of the subnet + returned: always + type: complex + contains: + name: + description: Name of the availability zone + returned: always + type: str + sample: us-west-2c + subnet_identifier: + description: Subnet ID + returned: always + type: str + sample: subnet-abcd1234 + subnet_status: + description: Subnet status + returned: always + type: str + sample: Active + vpc_id: + description: VPC id of the subnet group + returned: always + type: str + sample: vpc-abcd1234 + dbi_resource_id: + description: AWS Region-unique, immutable identifier for the DB instance + returned: always + type: str + sample: db-AAAAAAAAAAAAAAAAAAAAAAAAAA + deletion_protection: + description: C(True) if the DB instance has deletion protection enabled, C(False) if not. + returned: always + type: bool + sample: False + version_added: 3.3.0 + version_added_collection: community.aws + domain_memberships: + description: List of domain memberships + returned: always + type: list + sample: [] + endpoint: + description: Database endpoint + returned: always + type: complex + contains: + address: + description: Database endpoint address + returned: always + type: str + sample: helloworld-rds.ctrqpe3so1sf.us-west-2.rds.amazonaws.com + hosted_zone_id: + description: Route53 hosted zone ID + returned: always + type: str + sample: Z1PABCD0000000 + port: + description: Database endpoint port + returned: always + type: int + sample: 5432 + engine: + description: Database engine + returned: always + type: str + sample: postgres + engine_version: + description: Database engine version + returned: always + type: str + sample: 9.5.10 + iam_database_authentication_enabled: + description: Whether database authentication through IAM is enabled + returned: always + type: bool + sample: false + instance_create_time: + description: Date and time the instance was created + returned: always + type: str + sample: '2017-10-10T04:00:07.434000+00:00' + iops: + description: The Provisioned IOPS value for the DB instance. 
+ returned: always + type: int + sample: 1000 + kms_key_id: + description: KMS Key ID + returned: always + type: str + sample: arn:aws:kms:us-west-2:123456789012:key/abcd1234-0000-abcd-1111-0123456789ab + latest_restorable_time: + description: Latest time to which a database can be restored with point-in-time restore + returned: always + type: str + sample: '2018-05-17T00:03:56+00:00' + license_model: + description: License model + returned: always + type: str + sample: postgresql-license + master_username: + description: Database master username + returned: always + type: str + sample: dbadmin + monitoring_interval: + description: Interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance + returned: always + type: int + sample: 0 + multi_az: + description: Whether Multi-AZ is on + returned: always + type: bool + sample: false + option_group_memberships: + description: List of option groups + returned: always + type: complex + contains: + option_group_name: + description: Option group name + returned: always + type: str + sample: default:postgres-9-5 + status: + description: Status of option group + returned: always + type: str + sample: in-sync + pending_modified_values: + description: Modified values pending application + returned: always + type: complex + contains: {} + performance_insights_enabled: + description: Whether performance insights are enabled + returned: always + type: bool + sample: false + preferred_backup_window: + description: Preferred backup window + returned: always + type: str + sample: 04:00-05:00 + preferred_maintenance_window: + description: Preferred maintenance window + returned: always + type: str + sample: mon:05:00-mon:05:30 + publicly_accessible: + description: Whether the DB is publicly accessible + returned: always + type: bool + sample: false + read_replica_db_instance_identifiers: + description: List of database instance read replicas + returned: always + type: list + sample: [] + storage_encrypted: + description: Whether the storage is encrypted + returned: always + type: bool + sample: true + storage_type: + description: Storage type of the Database instance + returned: always + type: str + sample: gp2 + tags: + description: Tags used by the database instance + returned: always + type: complex + contains: {} + vpc_security_groups: + description: List of VPC security groups + returned: always + type: complex + contains: + status: + description: Status of the VPC security group + returned: always + type: str + sample: active + vpc_security_group_id: + description: VPC Security Group ID + returned: always + type: str + sample: sg-abcd1234 +''' + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, + boto3_tag_list_to_ansible_dict, + AWSRetry, + camel_dict_to_snake_dict, + ) + + +try: + import botocore +except ImportError: + pass # handled by AnsibleAWSModule + + +@AWSRetry.jittered_backoff() +def _describe_db_instances(conn, **params): + paginator = conn.get_paginator('describe_db_instances') + try: + results = paginator.paginate(**params).build_full_result()['DBInstances'] + except is_boto3_error_code('DBInstanceNotFound'): + results = [] + + return results + + +def instance_info(module, conn): + instance_name = module.params.get('db_instance_identifier') + filters = module.params.get('filters') + + params = dict() + if instance_name: + 
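+        # An explicit identifier restricts results to a single instance; any
+        # filters supplied alongside it further narrow what is returned.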
+        params['DBInstanceIdentifier'] = instance_name
+    if filters:
+        params['Filters'] = ansible_dict_to_boto3_filter_list(filters)
+
+    try:
+        results = _describe_db_instances(conn, **params)
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, "Couldn't get instance information")
+
+    for instance in results:
+        try:
+            instance['Tags'] = boto3_tag_list_to_ansible_dict(conn.list_tags_for_resource(ResourceName=instance['DBInstanceArn'],
+                                                                                          aws_retry=True)['TagList'])
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, "Couldn't get tags for instance %s" % instance['DBInstanceIdentifier'])
+
+    return dict(changed=False, instances=[camel_dict_to_snake_dict(instance, ignore_list=['Tags']) for instance in results])
+
+
+def main():
+    argument_spec = dict(
+        db_instance_identifier=dict(aliases=['id']),
+        filters=dict(type='dict')
+    )
+
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+
+    conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+
+    module.exit_json(**instance_info(module, conn))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_instance_snapshot.py b/ansible_collections/amazon/aws/plugins/modules/rds_instance_snapshot.py
new file mode 100644
index 000000000..0f779d8db
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/rds_instance_snapshot.py
@@ -0,0 +1,386 @@
+#!/usr/bin/python
+# Copyright (c) 2014 Ansible Project
+# Copyright (c) 2017, 2018, 2019 Will Thames
+# Copyright (c) 2017, 2018 Michael De La Rue
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: rds_instance_snapshot
+version_added: 5.0.0
+short_description: Manage Amazon RDS instance snapshots
+description:
+  - Creates or deletes RDS snapshots.
+  - This module was originally added to C(community.aws) in release 1.0.0.
+options:
+  state:
+    description:
+      - Specify the desired state of the snapshot.
+    default: present
+    choices: [ 'present', 'absent']
+    type: str
+  db_snapshot_identifier:
+    description:
+      - The snapshot to manage.
+    required: true
+    aliases:
+      - id
+      - snapshot_id
+    type: str
+  db_instance_identifier:
+    description:
+      - Database instance identifier. Required when creating a snapshot.
+    aliases:
+      - instance_id
+    type: str
+  source_db_snapshot_identifier:
+    description:
+      - The identifier of the source DB snapshot.
+      - Required when copying a snapshot.
+      - If the source snapshot is in the same AWS Region as the copy, specify the snapshot's identifier.
+      - If the source snapshot is in a different AWS Region from the copy, specify the snapshot's ARN.
+    aliases:
+      - source_id
+      - source_snapshot_id
+    type: str
+    version_added: 3.3.0
+    version_added_collection: community.aws
+  source_region:
+    description:
+      - The region that contains the snapshot to be copied.
+    type: str
+    version_added: 3.3.0
+    version_added_collection: community.aws
+  copy_tags:
+    description:
+      - Whether to copy all tags from I(source_db_snapshot_identifier) to I(db_snapshot_identifier).
+    type: bool
+    default: False
+    version_added: 3.3.0
+    version_added_collection: community.aws
+  wait:
+    description:
+      - Whether or not to wait for snapshot creation or deletion.
+    type: bool
+    default: False
+  wait_timeout:
+    description:
+      - How long before wait gives up, in seconds.
+    default: 300
+    type: int
+author:
+  - "Will Thames (@willthames)"
+  - "Michael De La Rue (@mikedlr)"
+  - "Alina Buzachis (@alinabuzachis)"
+  - "Joseph Torcasso (@jatorcasso)"
+extends_documentation_fragment:
+  - amazon.aws.aws
+  - amazon.aws.ec2
+  - amazon.aws.tags
+  - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+- name: Create snapshot
+  amazon.aws.rds_instance_snapshot:
+    db_instance_identifier: new-database
+    db_snapshot_identifier: new-database-snapshot
+  register: snapshot
+
+- name: Copy snapshot from a different region and copy its tags
+  amazon.aws.rds_instance_snapshot:
+    id: new-database-snapshot-copy
+    region: us-east-1
+    source_id: "{{ snapshot.db_snapshot_arn }}"
+    source_region: us-east-2
+    copy_tags: true
+
+- name: Delete snapshot
+  amazon.aws.rds_instance_snapshot:
+    db_snapshot_identifier: new-database-snapshot
+    state: absent
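+
+# A sketch of creating a tagged snapshot and waiting until it is available;
+# the instance name and tag values below are illustrative.
+- name: Create snapshot with tags and wait until it is available
+  amazon.aws.rds_instance_snapshot:
+    db_instance_identifier: new-database
+    db_snapshot_identifier: new-database-snapshot-tagged
+    tags:
+      Environment: test
+    wait: true
+  register: tagged_snapshot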
+'''
+
+RETURN = r'''
+allocated_storage:
+  description: How much storage is allocated in GB.
+  returned: always
+  type: int
+  sample: 20
+availability_zone:
+  description: Availability zone of the database from which the snapshot was created.
+  returned: always
+  type: str
+  sample: us-west-2a
+db_instance_identifier:
+  description: Database from which the snapshot was created.
+  returned: always
+  type: str
+  sample: ansible-test-16638696
+db_snapshot_arn:
+  description: Amazon Resource Name for the snapshot.
+  returned: always
+  type: str
+  sample: arn:aws:rds:us-west-2:123456789012:snapshot:ansible-test-16638696-test-snapshot
+db_snapshot_identifier:
+  description: Name of the snapshot.
+  returned: always
+  type: str
+  sample: ansible-test-16638696-test-snapshot
+dbi_resource_id:
+  description: The identifier for the source DB instance, which can't be changed and which is unique to an AWS Region.
+  returned: always
+  type: str
+  sample: db-MM4P2U35RQRAMWD3QDOXWPZP4U
+encrypted:
+  description: Whether the snapshot is encrypted.
+  returned: always
+  type: bool
+  sample: false
+engine:
+  description: Engine of the database from which the snapshot was created.
+  returned: always
+  type: str
+  sample: mariadb
+engine_version:
+  description: Version of the database from which the snapshot was created.
+  returned: always
+  type: str
+  sample: 10.2.21
+iam_database_authentication_enabled:
+  description: Whether IAM database authentication is enabled.
+  returned: always
+  type: bool
+  sample: false
+instance_create_time:
+  description: Creation time of the instance from which the snapshot was created.
+  returned: always
+  type: str
+  sample: '2019-06-15T10:15:56.221000+00:00'
+license_model:
+  description: License model of the database.
+  returned: always
+  type: str
+  sample: general-public-license
+master_username:
+  description: Master username of the database.
+  returned: always
+  type: str
+  sample: test
+option_group_name:
+  description: Option group of the database.
+  returned: always
+  type: str
+  sample: default:mariadb-10-2
+percent_progress:
+  description: How much progress has been made taking the snapshot. Will be 100 for an available snapshot.
+  returned: always
+  type: int
+  sample: 100
+port:
+  description: Port on which the database is listening.
+  returned: always
+  type: int
+  sample: 3306
+processor_features:
+  description: List of processor features of the database.
+  returned: always
+  type: list
+  sample: []
+source_db_snapshot_identifier:
+  description: The DB snapshot ARN that the DB snapshot was copied from.
+  returned: when snapshot is a copy
+  type: str
+  sample: arn:aws:rds:us-west-2:123456789012:snapshot:ansible-test-16638696-test-snapshot-source
+  version_added: 3.3.0
+  version_added_collection: community.aws
+snapshot_create_time:
+  description: Creation time of the snapshot.
+  returned: always
+  type: str
+  sample: '2019-06-15T10:46:23.776000+00:00'
+snapshot_type:
+  description: How the snapshot was created (always manual for this module!).
+  returned: always
+  type: str
+  sample: manual
+status:
+  description: Status of the snapshot.
+  returned: always
+  type: str
+  sample: available
+storage_type:
+  description: Storage type of the database.
+  returned: always
+  type: str
+  sample: gp2
+tags:
+  description: Tags applied to the snapshot.
+  returned: always
+  type: complex
+  contains: {}
+vpc_id:
+  description: ID of the VPC in which the DB lives.
+  returned: always
+  type: str
+  sample: vpc-09ff232e222710ae0
+'''
+
+try:
+    import botocore
+except ImportError:
+    pass  # protected by AnsibleAWSModule
+
+# import module snippets
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import get_boto3_client_method_parameters
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.rds import arg_spec_to_rds_params
+from ansible_collections.amazon.aws.plugins.module_utils.rds import call_method
+from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags
+from ansible_collections.amazon.aws.plugins.module_utils.rds import get_rds_method_attribute
+from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags
+
+
+def get_snapshot(snapshot_id):
+    try:
+        snapshot = client.describe_db_snapshots(DBSnapshotIdentifier=snapshot_id)['DBSnapshots'][0]
+        snapshot['Tags'] = get_tags(client, module, snapshot['DBSnapshotArn'])
+    except is_boto3_error_code("DBSnapshotNotFound"):
+        return {}
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg="Couldn't get snapshot {0}".format(snapshot_id))
+    return snapshot
+
+
+def get_parameters(parameters, method_name):
+    if method_name == 'copy_db_snapshot':
+        parameters['TargetDBSnapshotIdentifier'] = module.params['db_snapshot_identifier']
+
+    required_options = get_boto3_client_method_parameters(client, method_name, required=True)
+    if any(parameters.get(k) is None for k in required_options):
+        module.fail_json(msg='To {0}, the following parameters are required: {1}'.format(
+            get_rds_method_attribute(method_name, module).operation_description, required_options))
+    options = get_boto3_client_method_parameters(client, method_name)
+    parameters = dict((k, v) for k, v in parameters.items() if k in options and v is not None)
+
+    return parameters
+
+
+def ensure_snapshot_absent():
+    snapshot_name = module.params.get("db_snapshot_identifier")
+    params = {"DBSnapshotIdentifier": snapshot_name}
+    changed = False
+
+    snapshot = get_snapshot(snapshot_name)
+    if not snapshot:
+        module.exit_json(changed=changed)
+    elif snapshot and snapshot["Status"] != "deleting":
+        snapshot, changed = call_method(client,
module, "delete_db_snapshot", params) + + module.exit_json(changed=changed) + + +def ensure_snapshot_present(params): + source_id = module.params.get('source_db_snapshot_identifier') + snapshot_name = module.params.get('db_snapshot_identifier') + changed = False + snapshot = get_snapshot(snapshot_name) + + # Copy snapshot + if source_id: + changed |= copy_snapshot(params) + + # Create snapshot + elif not snapshot: + changed |= create_snapshot(params) + + # Snapshot exists and we're not creating a copy - modify exising snapshot + else: + changed |= modify_snapshot() + + snapshot = get_snapshot(snapshot_name) + module.exit_json(changed=changed, **camel_dict_to_snake_dict(snapshot, ignore_list=['Tags'])) + + +def create_snapshot(params): + method_params = get_parameters(params, 'create_db_snapshot') + if method_params.get('Tags'): + method_params['Tags'] = ansible_dict_to_boto3_tag_list(method_params['Tags']) + snapshot, changed = call_method(client, module, 'create_db_snapshot', method_params) + + return changed + + +def copy_snapshot(params): + changed = False + snapshot_id = module.params.get('db_snapshot_identifier') + snapshot = get_snapshot(snapshot_id) + + if not snapshot: + method_params = get_parameters(params, 'copy_db_snapshot') + if method_params.get('Tags'): + method_params['Tags'] = ansible_dict_to_boto3_tag_list(method_params['Tags']) + result, changed = call_method(client, module, 'copy_db_snapshot', method_params) + + return changed + + +def modify_snapshot(): + # TODO - add other modifications aside from purely tags + changed = False + snapshot_id = module.params.get('db_snapshot_identifier') + snapshot = get_snapshot(snapshot_id) + + if module.params.get('tags'): + changed |= ensure_tags(client, module, snapshot['DBSnapshotArn'], snapshot['Tags'], module.params['tags'], module.params['purge_tags']) + + return changed + + +def main(): + global client + global module + + argument_spec = dict( + state=dict(choices=['present', 'absent'], default='present'), + db_snapshot_identifier=dict(aliases=['id', 'snapshot_id'], required=True), + db_instance_identifier=dict(aliases=['instance_id']), + source_db_snapshot_identifier=dict(aliases=['source_id', 'source_snapshot_id']), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='int', default=300), + tags=dict(type='dict', aliases=['resource_tags']), + purge_tags=dict(type='bool', default=True), + copy_tags=dict(type='bool', default=False), + source_region=dict(type='str'), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + retry_decorator = AWSRetry.jittered_backoff(retries=10) + try: + client = module.client('rds', retry_decorator=retry_decorator) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to connect to AWS.") + + state = module.params.get("state") + if state == 'absent': + ensure_snapshot_absent() + + elif state == 'present': + params = arg_spec_to_rds_params(dict((k, module.params[k]) for k in module.params if k in argument_spec)) + ensure_snapshot_present(params) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_option_group.py b/ansible_collections/amazon/aws/plugins/modules/rds_option_group.py new file mode 100644 index 000000000..846581b85 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/rds_option_group.py @@ -0,0 +1,667 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see 
COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +module: rds_option_group +short_description: Manages the creation, modification, deletion of RDS option groups +version_added: 5.0.0 +description: + - Manages the creation, modification, deletion of RDS option groups. + - This module was originally added to C(community.aws) in release 2.1.0. +author: + - "Nick Aslanidis (@naslanidis)" + - "Will Thames (@willthames)" + - "Alina Buzachis (@alinabuzachis)" +options: + state: + description: + - Specifies whether the option group should be C(present) or C(absent). + required: true + choices: [ 'present', 'absent' ] + type: str + option_group_name: + description: + - Specifies the name of the option group to be created. + required: true + type: str + engine_name: + description: + - Specifies the name of the engine that this option group should be associated with. + type: str + major_engine_version: + description: + - Specifies the major version of the engine that this option group should be associated with. + type: str + option_group_description: + description: + - The description of the option group. + type: str + apply_immediately: + description: + - Indicates whether the changes should be applied immediately, or during the next maintenance window. + required: false + type: bool + default: false + options: + description: + - Options in this list are added to the option group. + - If already present, the specified configuration is used to update the existing configuration. + - If none are supplied, any existing options are removed. + type: list + elements: dict + suboptions: + option_name: + description: The configuration of options to include in a group. + required: false + type: str + port: + description: The optional port for the option. + required: false + type: int + option_version: + description: The version for the option. + required: false + type: str + option_settings: + description: The option settings to include in an option group. + required: false + type: list + elements: dict + suboptions: + name: + description: The name of the option that has settings that you can set. + required: false + type: str + value: + description: The current value of the option setting. + required: false + type: str + default_value: + description: The default value of the option setting. + required: false + type: str + description: + description: The description of the option setting. + required: false + type: str + apply_type: + description: The DB engine specific parameter type. + required: false + type: str + data_type: + description: The data type of the option setting. + required: false + type: str + allowed_values: + description: The allowed values of the option setting. + required: false + type: str + is_modifiable: + description: A Boolean value that, when C(true), indicates the option setting can be modified from the default. + required: false + type: bool + is_collection: + description: Indicates if the option setting is part of a collection. + required: false + type: bool + db_security_group_memberships: + description: A list of C(DBSecurityGroupMembership) name strings used for this option. + required: false + type: list + elements: str + vpc_security_group_memberships: + description: A list of C(VpcSecurityGroupMembership) name strings used for this option. 
+ required: false + type: list + elements: str + wait: + description: Whether to wait for the cluster to be available or deleted. + type: bool + default: True +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags + - amazon.aws.boto3 +''' + +EXAMPLES = r''' +# Create an RDS Mysql Option group +- name: Create an RDS Mysql option group + amazon.aws.rds_option_group: + state: present + option_group_name: test-mysql-option-group + engine_name: mysql + major_engine_version: 5.6 + option_group_description: test mysql option group + apply_immediately: true + options: + - option_name: MEMCACHED + port: 11211 + vpc_security_group_memberships: + - "sg-d188c123" + option_settings: + - name: MAX_SIMULTANEOUS_CONNECTIONS + value: "20" + - name: CHUNK_SIZE_GROWTH_FACTOR + value: "1.25" + register: new_rds_mysql_option_group + +# Remove currently configured options for an option group by removing options argument +- name: Create an RDS Mysql option group + amazon.aws.rds_option_group: + state: present + option_group_name: test-mysql-option-group + engine_name: mysql + major_engine_version: 5.6 + option_group_description: test mysql option group + apply_immediately: true + register: rds_mysql_option_group + +- name: Create an RDS Mysql option group using tags + amazon.aws.rds_option_group: + state: present + option_group_name: test-mysql-option-group + engine_name: mysql + major_engine_version: 5.6 + option_group_description: test mysql option group + apply_immediately: true + tags: + Tag1: tag1 + Tag2: tag2 + register: rds_mysql_option_group + +# Delete an RDS Mysql Option group +- name: Delete an RDS Mysql option group + amazon.aws.rds_option_group: + state: absent + option_group_name: test-mysql-option-group + register: deleted_rds_mysql_option_group +''' + +RETURN = r''' +allows_vpc_and_non_vpc_instance_memberships: + description: Indicates whether this option group can be applied to both VPC and non-VPC instances. + returned: always + type: bool + sample: false +changed: + description: If the Option Group has changed. + type: bool + returned: always + sample: true +engine_name: + description: Indicates the name of the engine that this option group can be applied to. + returned: always + type: str + sample: "mysql" +major_engine_version: + description: Indicates the major engine version associated with this option group. + returned: always + type: str + sample: "5.6" +option_group_arn: + description: The Amazon Resource Name (ARN) for the option group. + returned: always + type: str + sample: "arn:aws:rds:ap-southeast-2:123456789012:og:ansible-test-option-group" +option_group_description: + description: Provides a description of the option group. + returned: always + type: str + sample: "test mysql option group" +option_group_name: + description: Specifies the name of the option group. + returned: always + type: str + sample: "test-mysql-option-group" +options: + description: Indicates what options are available in the option group. + returned: always + type: list + elements: dict + contains: + db_security_group_memberships: + description: If the option requires access to a port, then this DB security group allows access to the port. + returned: always + type: list + elements: dict + contains: + status: + description: The status of the DB security group. + returned: always + type: str + sample: "available" + db_security_group_name: + description: The name of the DB security group. 
+ returned: always + type: str + sample: "mydbsecuritygroup" + option_description: + description: The description of the option. + returned: always + type: str + sample: "Innodb Memcached for MySQL" + option_name: + description: The name of the option. + returned: always + type: str + sample: "MEMCACHED" + option_settings: + description: The name of the option. + returned: always + type: list + contains: + allowed_values: + description: The allowed values of the option setting. + returned: always + type: str + sample: "1-2048" + apply_type: + description: The DB engine specific parameter type. + returned: always + type: str + sample: "STATIC" + data_type: + description: The data type of the option setting. + returned: always + type: str + sample: "INTEGER" + default_value: + description: The default value of the option setting. + returned: always + type: str + sample: "1024" + description: + description: The description of the option setting. + returned: always + type: str + sample: "Verbose level for memcached." + is_collection: + description: Indicates if the option setting is part of a collection. + returned: always + type: bool + sample: true + is_modifiable: + description: A Boolean value that, when true, indicates the option setting can be modified from the default. + returned: always + type: bool + sample: true + name: + description: The name of the option that has settings that you can set. + returned: always + type: str + sample: "INNODB_API_ENABLE_MDL" + value: + description: The current value of the option setting. + returned: always + type: str + sample: "0" + permanent: + description: Indicate if this option is permanent. + returned: always + type: bool + sample: true + persistent: + description: Indicate if this option is persistent. + returned: always + type: bool + sample: true + port: + description: If required, the port configured for this option to use. + returned: always + type: int + sample: 11211 + vpc_security_group_memberships: + description: If the option requires access to a port, then this VPC security group allows access to the port. + returned: always + type: list + elements: dict + contains: + status: + description: The status of the VPC security group. + returned: always + type: str + sample: "available" + vpc_security_group_id: + description: The name of the VPC security group. + returned: always + type: str + sample: "sg-0cd636a23ae76e9a4" +vpc_id: + description: If present, this option group can only be applied to instances that are in the VPC indicated by this field. + returned: always + type: str + sample: "vpc-bf07e9d6" +tags: + description: The tags associated the Internet Gateway. 
+ type: dict + returned: always + sample: { + "Ansible": "Test" + } +''' + + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + + +@AWSRetry.jittered_backoff(retries=10) +def _describe_option_groups(client, **params): + try: + paginator = client.get_paginator('describe_option_groups') + return paginator.paginate(**params).build_full_result()['OptionGroupsList'][0] + except is_boto3_error_code('OptionGroupNotFoundFault'): + return {} + + +def get_option_group(client, module): + params = dict() + params['OptionGroupName'] = module.params.get('option_group_name') + + try: + result = camel_dict_to_snake_dict(_describe_option_groups(client, **params)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't describe option groups.") + + if result: + result['tags'] = get_tags(client, module, result['option_group_arn']) + + return result + + +def create_option_group_options(client, module): + changed = True + params = dict() + params['OptionGroupName'] = module.params.get('option_group_name') + options_to_include = module.params.get('options') + params['OptionsToInclude'] = snake_dict_to_camel_dict(options_to_include, capitalize_first=True) + + if module.params.get('apply_immediately'): + params['ApplyImmediately'] = module.params.get('apply_immediately') + + if module.check_mode: + return changed + + try: + client.modify_option_group(aws_retry=True, **params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to update Option Group.") + + return changed + + +def remove_option_group_options(client, module, options_to_remove): + changed = True + params = dict() + params['OptionGroupName'] = module.params.get('option_group_name') + params['OptionsToRemove'] = options_to_remove + + if module.params.get('apply_immediately'): + params['ApplyImmediately'] = module.params.get('apply_immediately') + + if module.check_mode: + return changed + + try: + client.modify_option_group(aws_retry=True, **params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e) + + return changed + + +def create_option_group(client, module): + changed = True + params = dict() + params['OptionGroupName'] = module.params.get('option_group_name') + params['EngineName'] = module.params.get('engine_name') + params['MajorEngineVersion'] = str(module.params.get('major_engine_version')) + params['OptionGroupDescription'] = module.params.get('option_group_description') + + if module.params.get('tags'): + params['Tags'] = ansible_dict_to_boto3_tag_list(module.params.get('tags')) + else: + params['Tags'] = list() + + if 
module.check_mode: + return changed + try: + client.create_option_group(aws_retry=True, **params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Unable to create Option Group.') + + return changed + + +def match_option_group_options(client, module): + requires_update = False + new_options = module.params.get('options') + + # Get existing option groups and compare to our new options spec + current_option = get_option_group(client, module) + + if current_option['options'] == [] and new_options: + requires_update = True + else: + for option in current_option['options']: + for setting_name in new_options: + if setting_name['option_name'] == option['option_name']: + + # Security groups need to be handled separately due to different keys on request and what is + # returned by the API + if any( + name in option.keys() - ['option_settings', 'vpc_security_group_memberships'] and + setting_name[name] != option[name] + for name in setting_name + ): + requires_update = True + + if any( + name in option and name == 'vpc_security_group_memberships' + for name in setting_name + ): + current_sg = set(sg['vpc_security_group_id'] for sg in option['vpc_security_group_memberships']) + new_sg = set(setting_name['vpc_security_group_memberships']) + if current_sg != new_sg: + requires_update = True + + if any( + new_option_setting['name'] == current_option_setting['name'] and + new_option_setting['value'] != current_option_setting['value'] + for new_option_setting in setting_name['option_settings'] + for current_option_setting in option['option_settings'] + ): + requires_update = True + else: + requires_update = True + + return requires_update + + +def compare_option_group(client, module): + to_be_added = None + to_be_removed = None + current_option = get_option_group(client, module) + new_options = module.params.get('options') + new_settings = set([item['option_name'] for item in new_options]) + old_settings = set([item['option_name'] for item in current_option['options']]) + + if new_settings != old_settings: + to_be_added = list(new_settings - old_settings) + to_be_removed = list(old_settings - new_settings) + + return to_be_added, to_be_removed + + +def setup_option_group(client, module): + results = [] + changed = False + to_be_added = None + to_be_removed = None + + # Check if there is an existing options group + existing_option_group = get_option_group(client, module) + + if existing_option_group: + results = existing_option_group + + # Check tagging + changed |= update_tags(client, module, existing_option_group) + + if module.params.get('options'): + # Check if existing options require updating + update_required = match_option_group_options(client, module) + + # Check if there are options to be added or removed + if update_required: + to_be_added, to_be_removed = compare_option_group(client, module) + + if to_be_added or update_required: + changed |= create_option_group_options(client, module) + + if to_be_removed: + changed |= remove_option_group_options(client, module, to_be_removed) + + # If changed, get updated version of option group + if changed: + results = get_option_group(client, module) + else: + # No options were supplied. 
If options exist, remove them + current_option_group = get_option_group(client, module) + + if current_option_group['options'] != []: + # Here we would call our remove options function + options_to_remove = [] + + for option in current_option_group['options']: + options_to_remove.append(option['option_name']) + + changed |= remove_option_group_options(client, module, options_to_remove) + + # If changed, get updated version of option group + if changed: + results = get_option_group(client, module) + else: + changed = create_option_group(client, module) + + if module.params.get('options'): + changed = create_option_group_options(client, module) + + results = get_option_group(client, module) + + return changed, results + + +def remove_option_group(client, module): + changed = False + params = dict() + params['OptionGroupName'] = module.params.get('option_group_name') + + # Check if there is an existing options group + existing_option_group = get_option_group(client, module) + + if existing_option_group: + + if module.check_mode: + return True, {} + + changed = True + try: + client.delete_option_group(aws_retry=True, **params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to delete option group.") + + return changed, {} + + +def update_tags(client, module, option_group): + if module.params.get('tags') is None: + return False + + try: + existing_tags = client.list_tags_for_resource(aws_retry=True, ResourceName=option_group['option_group_arn'])['TagList'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't obtain option group tags.") + + to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(existing_tags), + module.params['tags'], module.params['purge_tags']) + changed = bool(to_update or to_delete) + + if to_update: + try: + if module.check_mode: + return changed + client.add_tags_to_resource(aws_retry=True, ResourceName=option_group['option_group_arn'], + Tags=ansible_dict_to_boto3_tag_list(to_update)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't add tags to option group.") + if to_delete: + try: + if module.check_mode: + return changed + client.remove_tags_from_resource(aws_retry=True, ResourceName=option_group['option_group_arn'], + TagKeys=to_delete) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't remove tags from option group.") + + return changed + + +def main(): + argument_spec = dict( + option_group_name=dict(required=True, type='str'), + engine_name=dict(type='str'), + major_engine_version=dict(type='str'), + option_group_description=dict(type='str'), + options=dict(required=False, type='list', elements='dict'), + apply_immediately=dict(type='bool', default=False), + state=dict(required=True, choices=['present', 'absent']), + tags=dict(required=False, type='dict', aliases=['resource_tags']), + purge_tags=dict(type='bool', default=True), + wait=dict(type='bool', default=True), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=[['state', 'present', ['engine_name', 'major_engine_version', 'option_group_description']]], + ) + + try: + client = module.client('rds', retry_decorator=AWSRetry.jittered_backoff()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, 
msg='Failed to connect to AWS.')
+
+    state = module.params.get('state')
+
+    if state == 'present':
+        changed, results = setup_option_group(client, module)
+    else:
+        changed, results = remove_option_group(client, module)
+
+    module.exit_json(changed=changed, **results)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_option_group_info.py b/ansible_collections/amazon/aws/plugins/modules/rds_option_group_info.py
new file mode 100644
index 000000000..532ef5c12
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/rds_option_group_info.py
@@ -0,0 +1,327 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: rds_option_group_info
+short_description: Gather information about RDS option groups
+version_added: 5.0.0
+description:
+  - Gather information about RDS option groups.
+  - This module was originally added to C(community.aws) in release 2.1.0.
+author: "Alina Buzachis (@alinabuzachis)"
+options:
+  option_group_name:
+    description:
+      - The name of the option group to describe.
+      - Can't be supplied together with I(engine_name) or I(major_engine_version).
+    default: ''
+    required: false
+    type: str
+  marker:
+    description:
+      - If this parameter is specified, the response includes only records beyond the marker, up to the value specified by I(max_records).
+      - Allowed values are between C(20) and C(100).
+    required: false
+    type: str
+  max_records:
+    description:
+      - The maximum number of records to include in the response.
+    type: int
+    default: 100
+    required: false
+  engine_name:
+    description: Filters the list of option groups to only include groups associated with a specific database engine.
+    type: str
+    default: ''
+    required: false
+  major_engine_version:
+    description:
+      - Filters the list of option groups to only include groups associated with a specific database engine version.
+      - If specified, then I(engine_name) must also be specified.
+    type: str
+    default: ''
+    required: false
+extends_documentation_fragment:
+  - amazon.aws.aws
+  - amazon.aws.ec2
+  - amazon.aws.boto3
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: List an option group
+  amazon.aws.rds_option_group_info:
+    option_group_name: test-mysql-option-group
+  register: option_group
+
+- name: List all the option groups
+  amazon.aws.rds_option_group_info:
+    region: ap-southeast-2
+    profile: production
+  register: option_group
+'''
+
+RETURN = r'''
+changed:
+  description: True if listing the RDS option group succeeds.
+  type: bool
+  returned: always
+  sample: false
+option_groups_list:
+  description: The available RDS option groups.
+  returned: always
+  type: complex
+  contains:
+    allows_vpc_and_non_vpc_instance_memberships:
+      description: Indicates whether this option group can be applied to both VPC and non-VPC instances.
+      returned: always
+      type: bool
+      sample: false
+    engine_name:
+      description: Indicates the name of the engine that this option group can be applied to.
+      returned: always
+      type: str
+      sample: "mysql"
+    major_engine_version:
+      description: Indicates the major engine version associated with this option group.
+      returned: always
+      type: str
+      sample: "5.6"
+    option_group_arn:
+      description: The Amazon Resource Name (ARN) for the option group.
+      returned: always
+      type: str
+      sample: "arn:aws:rds:ap-southeast-2:123456789012:og:ansible-test-option-group"
+    option_group_description:
+      description: Provides a description of the option group.
+      returned: always
+      type: str
+      sample: "test mysql option group"
+    option_group_name:
+      description: Specifies the name of the option group.
+      returned: always
+      type: str
+      sample: "test-mysql-option-group"
+    options:
+      description: Indicates what options are available in the option group.
+      returned: always
+      type: complex
+      contains:
+        db_security_group_memberships:
+          description: If the option requires access to a port, then this DB security group allows access to the port.
+          returned: always
+          type: complex
+          sample: list
+          elements: dict
+          contains:
+            status:
+              description: The status of the DB security group.
+              returned: always
+              type: str
+              sample: "available"
+            db_security_group_name:
+              description: The name of the DB security group.
+              returned: always
+              type: str
+              sample: "mydbsecuritygroup"
+        option_description:
+          description: The description of the option.
+          returned: always
+          type: str
+          sample: "Innodb Memcached for MySQL"
+        option_name:
+          description: The name of the option.
+          returned: always
+          type: str
+          sample: "MEMCACHED"
+        option_settings:
+          description: The settings for the option.
+          returned: always
+          type: complex
+          contains:
+            allowed_values:
+              description: The allowed values of the option setting.
+              returned: always
+              type: str
+              sample: "1-2048"
+            apply_type:
+              description: The DB engine specific parameter type.
+              returned: always
+              type: str
+              sample: "STATIC"
+            data_type:
+              description: The data type of the option setting.
+              returned: always
+              type: str
+              sample: "INTEGER"
+            default_value:
+              description: The default value of the option setting.
+              returned: always
+              type: str
+              sample: "1024"
+            description:
+              description: The description of the option setting.
+              returned: always
+              type: str
+              sample: "Verbose level for memcached."
+            is_collection:
+              description: Indicates if the option setting is part of a collection.
+              returned: always
+              type: bool
+              sample: true
+            is_modifiable:
+              description: A Boolean value that, when true, indicates the option setting can be modified from the default.
+              returned: always
+              type: bool
+              sample: true
+            name:
+              description: The name of the option that has settings that you can set.
+              returned: always
+              type: str
+              sample: "INNODB_API_ENABLE_MDL"
+            value:
+              description: The current value of the option setting.
+              returned: always
+              type: str
+              sample: "0"
+        permanent:
+          description: Indicate if this option is permanent.
+          returned: always
+          type: bool
+          sample: true
+        persistent:
+          description: Indicate if this option is persistent.
+          returned: always
+          type: bool
+          sample: true
+        port:
+          description: If required, the port configured for this option to use.
+          returned: always
+          type: int
+          sample: 11211
+        vpc_security_group_memberships:
+          description: If the option requires access to a port, then this VPC security group allows access to the port.
+          returned: always
+          type: list
+          elements: dict
+          contains:
+            status:
+              description: The status of the VPC security group.
+              returned: always
+              type: str
+              sample: "available"
+            vpc_security_group_id:
+              description: The ID of the VPC security group.
+              returned: always
+              type: str
+              sample: "sg-0cd636a23ae76e9a4"
+    vpc_id:
+      description: If present, this option group can only be applied to instances that are in the VPC indicated by this field.
+      returned: always
+      type: str
+      sample: "vpc-bf07e9d6"
+    tags:
+      description: The tags associated with the option group.
+      type: dict
+      returned: always
+      sample: {
+          "Ansible": "Test"
+      }
+
+'''
+
+try:
+    import botocore
+except ImportError:
+    pass  # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags
+
+
+@AWSRetry.jittered_backoff(retries=10)
+def _describe_option_groups(client, **params):
+    try:
+        paginator = client.get_paginator('describe_option_groups')
+        return paginator.paginate(**params).build_full_result()
+    except is_boto3_error_code('OptionGroupNotFoundFault'):
+        return {}
+
+
+def list_option_groups(client, module):
+    option_groups = list()
+    params = dict()
+    params['OptionGroupName'] = module.params.get('option_group_name')
+
+    if module.params.get('marker'):
+        params['Marker'] = module.params.get('marker')
+        if int(params['Marker']) < 20 or int(params['Marker']) > 100:
+            module.fail_json(msg="marker must be between 20 and 100")
+
+    if module.params.get('max_records'):
+        params['MaxRecords'] = module.params.get('max_records')
+        if params['MaxRecords'] > 100:
+            module.fail_json(msg="The maximum number of records to include in the response is 100.")
+
+    params['EngineName'] = module.params.get('engine_name')
+    params['MajorEngineVersion'] = module.params.get('major_engine_version')
+
+    try:
+        result = _describe_option_groups(client, **params)
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Couldn't describe option groups.")
+
+    for option_group in result['OptionGroupsList']:
+        # Turn the boto3 result into ansible_friendly_snaked_names
+        converted_option_group = camel_dict_to_snake_dict(option_group)
+        converted_option_group['tags'] = get_tags(client, module, converted_option_group['option_group_arn'])
+        option_groups.append(converted_option_group)
+
+    return option_groups
+
+
+def main():
+    argument_spec = dict(
+        option_group_name=dict(default='', type='str'),
+        marker=dict(type='str'),
+        max_records=dict(type='int', default=100),
+        engine_name=dict(type='str', default=''),
+        major_engine_version=dict(type='str', default=''),
+    )
+
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        mutually_exclusive=[
+            ['option_group_name', 'engine_name'],
+            ['option_group_name', 'major_engine_version'],
+        ],
+        required_together=[
+            ['engine_name', 'major_engine_version'],
+        ],
+    )
+
+    # Validate Requirements
+    try:
+        connection = module.client('rds', retry_decorator=AWSRetry.jittered_backoff())
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+    results = list_option_groups(connection, module)
+
+    module.exit_json(result=results)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_param_group.py b/ansible_collections/amazon/aws/plugins/modules/rds_param_group.py
new file mode 100644
index 000000000..0bb42e0af
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/rds_param_group.py
@@ -0,0 +1,341 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rds_param_group
+version_added: 5.0.0
+short_description: manage RDS parameter groups
+description:
+  - Creates, modifies, and deletes RDS parameter groups.
+  - This module was originally added to C(community.aws) in release 1.0.0.
+options:
+  state:
+    description:
+      - Specifies whether the group should be present or absent.
+    required: true
+    choices: [ 'present' , 'absent' ]
+    type: str
+  name:
+    description:
+      - Database parameter group identifier.
+    required: true
+    type: str
+  description:
+    description:
+      - Database parameter group description. Only set when a new group is added.
+    type: str
+  engine:
+    description:
+      - The type of database for this group.
+      - Please use the following command to get a list of all supported db engines and their respective versions.
+      - '# aws rds describe-db-engine-versions --query "DBEngineVersions[].DBParameterGroupFamily"'
+      - Required for I(state=present).
+    type: str
+  immediate:
+    description:
+      - Whether to apply the changes immediately, or after the next reboot of any associated instances.
+    aliases:
+      - apply_immediately
+    type: bool
+  params:
+    description:
+      - Map of parameter names and values. Numeric values may be represented as K for kilo (1024), M for mega (1024^2), G for giga (1024^3),
+        or T for tera (1024^4), and these values will be expanded into the appropriate number before being set in the parameter group.
+    aliases: [parameters]
+    type: dict
+author:
+  - "Scott Anderson (@tastychutney)"
+  - "Will Thames (@willthames)"
+extends_documentation_fragment:
+  - amazon.aws.aws
+  - amazon.aws.ec2
+  - amazon.aws.tags
+  - amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+- name: Add or change a parameter group, in this case setting auto_increment_increment to 42 * 1024
+  amazon.aws.rds_param_group:
+    state: present
+    name: norwegian-blue
+    description: 'My Fancy Ex Parrot Group'
+    engine: 'mysql5.6'
+    params:
+      auto_increment_increment: "42K"
+    tags:
+      Environment: production
+      Application: parrot
+
+- name: Remove a parameter group
+  amazon.aws.rds_param_group:
+    state: absent
+    name: norwegian-blue
+'''
+
+RETURN = '''
+db_parameter_group_name:
+  description: Name of DB parameter group
+  type: str
+  returned: when state is present
+db_parameter_group_family:
+  description: DB parameter group family that this DB parameter group is compatible with.
+ type: str + returned: when state is present +db_parameter_group_arn: + description: ARN of the DB parameter group + type: str + returned: when state is present +description: + description: description of the DB parameter group + type: str + returned: when state is present +errors: + description: list of errors from attempting to modify parameters that are not modifiable + type: list + returned: when state is present +tags: + description: dictionary of tags + type: dict + returned: when state is present +''' + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE +from ansible.module_utils.six import string_types +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags + +INT_MODIFIERS = { + 'K': 1024, + 'M': pow(1024, 2), + 'G': pow(1024, 3), + 'T': pow(1024, 4), +} + + +@AWSRetry.jittered_backoff() +def _describe_db_parameters(connection, **params): + try: + paginator = connection.get_paginator('describe_db_parameters') + return paginator.paginate(**params).build_full_result() + except is_boto3_error_code('DBParameterGroupNotFound'): + return None + + +def convert_parameter(param, value): + """ + Allows setting parameters with 10M = 10* 1024 * 1024 and so on. + """ + converted_value = value + + if param['DataType'] == 'integer': + if isinstance(value, string_types): + try: + for modifier in INT_MODIFIERS.keys(): + if value.endswith(modifier): + converted_value = int(value[:-1]) * INT_MODIFIERS[modifier] + except ValueError: + # may be based on a variable (ie. {foo*3/4}) so + # just pass it on through to the AWS SDK + pass + elif isinstance(value, bool): + converted_value = 1 if value else 0 + + elif param['DataType'] == 'boolean': + if isinstance(value, string_types): + converted_value = value in BOOLEANS_TRUE + # convert True/False to 1/0 + converted_value = 1 if converted_value else 0 + return str(converted_value) + + +def update_parameters(module, connection): + groupname = module.params['name'] + desired = module.params['params'] + apply_method = 'immediate' if module.params['immediate'] else 'pending-reboot' + errors = [] + modify_list = [] + existing = {} + try: + _existing = _describe_db_parameters(connection, DBParameterGroupName=groupname) + if _existing: + existing = _existing['Parameters'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to describe existing parameter groups") + lookup = dict((param['ParameterName'], param) for param in existing) + for param_key, param_value in desired.items(): + if param_key not in lookup: + errors.append("Parameter %s is not an available parameter for the %s engine" % + (param_key, module.params.get('engine'))) + else: + converted_value = convert_parameter(lookup[param_key], param_value) + # engine-default parameters do not have a ParameterValue, so we'll always override those. 
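+            # Descriptive note on the comparison below: .get('ParameterValue') returns
+            # None when the key is absent (the engine-default entries mentioned above),
+            # so any desired value for such a parameter is treated as a change; values
+            # whose parameters are flagged as non-modifiable are reported via errors
+            # instead of being queued for modification.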
+ if converted_value != lookup[param_key].get('ParameterValue'): + if lookup[param_key]['IsModifiable']: + modify_list.append(dict(ParameterValue=converted_value, ParameterName=param_key, ApplyMethod=apply_method)) + else: + errors.append("Parameter %s is not modifiable" % param_key) + + # modify_db_parameters takes at most 20 parameters + if modify_list and not module.check_mode: + try: + from itertools import izip_longest as zip_longest # python 2 + except ImportError: + from itertools import zip_longest # python 3 + for modify_slice in zip_longest(*[iter(modify_list)] * 20, fillvalue=None): + non_empty_slice = [item for item in modify_slice if item] + try: + connection.modify_db_parameter_group(aws_retry=True, DBParameterGroupName=groupname, Parameters=non_empty_slice) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't update parameters") + return True, errors + return False, errors + + +def update_tags(module, connection, group, tags): + if tags is None: + return False + changed = False + + existing_tags = connection.list_tags_for_resource(aws_retry=True, ResourceName=group['DBParameterGroupArn'])['TagList'] + to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(existing_tags), + tags, module.params['purge_tags']) + + if module.check_mode: + if not to_update and not to_delete: + return False + else: + return True + + if to_update: + try: + connection.add_tags_to_resource(aws_retry=True, ResourceName=group['DBParameterGroupArn'], + Tags=ansible_dict_to_boto3_tag_list(to_update)) + changed = True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't add tags to parameter group") + if to_delete: + try: + connection.remove_tags_from_resource(aws_retry=True, ResourceName=group['DBParameterGroupArn'], + TagKeys=to_delete) + changed = True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't remove tags from parameter group") + return changed + + +def ensure_present(module, connection): + groupname = module.params['name'] + tags = module.params.get('tags') + changed = False + errors = [] + try: + response = connection.describe_db_parameter_groups(aws_retry=True, DBParameterGroupName=groupname) + except is_boto3_error_code('DBParameterGroupNotFound'): + response = None + except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Couldn't access parameter group information") + if not response: + params = dict(DBParameterGroupName=groupname, + DBParameterGroupFamily=module.params['engine'], + Description=module.params['description']) + if tags: + params['Tags'] = ansible_dict_to_boto3_tag_list(tags) + if not module.check_mode: + try: + response = connection.create_db_parameter_group(aws_retry=True, **params) + changed = True + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't create parameter group") + else: + group = response['DBParameterGroups'][0] + if tags: + changed = update_tags(module, connection, group, tags) + + if module.params.get('params'): + params_changed, errors = update_parameters(module, connection) + changed = changed or params_changed + + try: + response = connection.describe_db_parameter_groups(aws_retry=True, DBParameterGroupName=groupname) + group = camel_dict_to_snake_dict(response['DBParameterGroups'][0]) + except 
is_boto3_error_code('DBParameterGroupNotFound'):
+        module.exit_json(changed=True, errors=errors)
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg="Couldn't obtain parameter group information")
+    try:
+        tags = connection.list_tags_for_resource(aws_retry=True, ResourceName=group['db_parameter_group_arn'])['TagList']
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Couldn't obtain parameter group tags")
+    group['tags'] = boto3_tag_list_to_ansible_dict(tags)
+
+    module.exit_json(changed=changed, errors=errors, **group)
+
+
+def ensure_absent(module, connection):
+    group = module.params['name']
+    try:
+        response = connection.describe_db_parameter_groups(DBParameterGroupName=group)
+    except is_boto3_error_code('DBParameterGroupNotFound'):
+        module.exit_json(changed=False)
+    except botocore.exceptions.ClientError as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg="Couldn't access parameter group information")
+
+    if response and module.check_mode:
+        module.exit_json(changed=True)
+
+    try:
+        response = connection.delete_db_parameter_group(aws_retry=True, DBParameterGroupName=group)
+        module.exit_json(changed=True)
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Couldn't delete parameter group")
+
+
+def main():
+    argument_spec = dict(
+        state=dict(required=True, choices=['present', 'absent']),
+        name=dict(required=True),
+        engine=dict(),
+        description=dict(),
+        params=dict(aliases=['parameters'], type='dict'),
+        immediate=dict(type='bool', aliases=['apply_immediately']),
+        tags=dict(type='dict', aliases=['resource_tags']),
+        purge_tags=dict(type='bool', default=True),
+    )
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        required_if=[['state', 'present', ['description', 'engine']]],
+        supports_check_mode=True
+    )
+
+    try:
+        conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff())
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+    state = module.params.get('state')
+    if state == 'present':
+        ensure_present(module, conn)
+    if state == 'absent':
+        ensure_absent(module, conn)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_snapshot_info.py b/ansible_collections/amazon/aws/plugins/modules/rds_snapshot_info.py
new file mode 100644
index 000000000..a9c69ce95
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/rds_snapshot_info.py
@@ -0,0 +1,389 @@
+#!/usr/bin/python
+# Copyright (c) 2014-2017 Ansible Project
+# Copyright (c) 2017, 2018 Will Thames
+# Copyright (c) 2017, 2018 Michael De La Rue
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rds_snapshot_info
+version_added: 5.0.0
+short_description: obtain information about one or more RDS snapshots
+description:
+  - Obtain information about one or more RDS snapshots. These can be for unclustered snapshots or snapshots of clustered DBs (Aurora).
+  - Aurora snapshot information may be obtained if no identifier parameters are passed or if one of the cluster parameters is passed.
+  - This module was originally added to C(community.aws) in release 1.0.0.
+options:
+  db_snapshot_identifier:
+    description:
+      - Name of an RDS (unclustered) snapshot.
+      - Mutually exclusive with I(db_instance_identifier), I(db_cluster_identifier), I(db_cluster_snapshot_identifier)
+    required: false
+    aliases:
+      - snapshot_name
+    type: str
+  db_instance_identifier:
+    description:
+      - RDS instance name for which to find snapshots.
+      - Mutually exclusive with I(db_snapshot_identifier), I(db_cluster_identifier), I(db_cluster_snapshot_identifier)
+    required: false
+    type: str
+  db_cluster_identifier:
+    description:
+      - RDS cluster name for which to find snapshots.
+      - Mutually exclusive with I(db_snapshot_identifier), I(db_instance_identifier), I(db_cluster_snapshot_identifier)
+    required: false
+    type: str
+  db_cluster_snapshot_identifier:
+    description:
+      - Name of an RDS cluster snapshot.
+      - Mutually exclusive with I(db_instance_identifier), I(db_snapshot_identifier), I(db_cluster_identifier)
+    required: false
+    type: str
+  snapshot_type:
+    description:
+      - Type of snapshot to find.
+      - By default both automated and manual snapshots will be returned.
+    required: false
+    choices: ['automated', 'manual', 'shared', 'public']
+    type: str
+author:
+  - "Will Thames (@willthames)"
+extends_documentation_fragment:
+  - amazon.aws.aws
+  - amazon.aws.ec2
+  - amazon.aws.boto3
+
+'''
+
+EXAMPLES = '''
+- name: Get information about a snapshot
+  amazon.aws.rds_snapshot_info:
+    db_snapshot_identifier: snapshot_name
+  register: new_database_info
+
+- name: Get all RDS snapshots for an RDS instance
+  amazon.aws.rds_snapshot_info:
+    db_instance_identifier: helloworld-rds-master
+'''
+
+RETURN = '''
+snapshots:
+  description: List of non-clustered snapshots
+  returned: When cluster parameters are not passed
+  type: complex
+  contains:
+    allocated_storage:
+      description: How many gigabytes of storage are allocated
+      returned: always
+      type: int
+      sample: 10
+    availability_zone:
+      description: The availability zone of the database from which the snapshot was taken
+      returned: always
+      type: str
+      sample: us-west-2b
+    db_instance_identifier:
+      description: Database instance identifier
+      returned: always
+      type: str
+      sample: hello-world-rds
+    db_snapshot_arn:
+      description: Snapshot ARN
+      returned: always
+      type: str
+      sample: arn:aws:rds:us-west-2:123456789012:snapshot:rds:hello-world-rds-us1-2018-05-16-04-03
+    db_snapshot_identifier:
+      description: Snapshot name
+      returned: always
+      type: str
+      sample: rds:hello-world-rds-us1-2018-05-16-04-03
+    encrypted:
+      description: Whether the snapshot was encrypted
+      returned: always
+      type: bool
+      sample: true
+    engine:
+      description: Database engine
+      returned: always
+      type: str
+      sample: postgres
+    engine_version:
+      description: Database engine version
+      returned: always
+      type: str
+      sample: 9.5.10
+    iam_database_authentication_enabled:
+      description: Whether database authentication through IAM is enabled
+      returned: always
+      type: bool
+      sample: false
+    instance_create_time:
+      description: Time the Instance was created
+      returned: always
+      type: str
+      sample: '2017-10-10T04:00:07.434000+00:00'
+    kms_key_id:
+      description: ID of the KMS Key encrypting the snapshot
+      returned: always
+      type: str
+      sample: arn:aws:kms:us-west-2:123456789012:key/abcd1234-1234-aaaa-0000-1234567890ab
+    license_model:
+      description: License model
+      returned: always
+      type: str
+      sample: postgresql-license
+    master_username:
+      description: Database master username
+      returned: always
+
type: str + sample: dbadmin + option_group_name: + description: Database option group name + returned: always + type: str + sample: default:postgres-9-5 + percent_progress: + description: Percent progress of snapshot + returned: always + type: int + sample: 100 + snapshot_create_time: + description: Time snapshot was created + returned: always + type: str + sample: '2018-05-16T04:03:33.871000+00:00' + snapshot_type: + description: Type of snapshot + returned: always + type: str + sample: automated + status: + description: Status of snapshot + returned: always + type: str + sample: available + storage_type: + description: Storage type of underlying DB + returned: always + type: str + sample: gp2 + tags: + description: Snapshot tags + returned: when snapshot is not shared + type: complex + contains: {} + vpc_id: + description: ID of VPC containing the DB + returned: always + type: str + sample: vpc-abcd1234 +cluster_snapshots: + description: List of cluster snapshots + returned: always + type: complex + contains: + allocated_storage: + description: How many gigabytes of storage are allocated + returned: always + type: int + sample: 1 + availability_zones: + description: The availability zones of the database from which the snapshot was taken + returned: always + type: list + sample: + - ca-central-1a + - ca-central-1b + cluster_create_time: + description: Date and time the cluster was created + returned: always + type: str + sample: '2018-05-17T00:13:40.223000+00:00' + db_cluster_identifier: + description: Database cluster identifier + returned: always + type: str + sample: test-aurora-cluster + db_cluster_snapshot_arn: + description: ARN of the database snapshot + returned: always + type: str + sample: arn:aws:rds:ca-central-1:123456789012:cluster-snapshot:test-aurora-snapshot + db_cluster_snapshot_identifier: + description: Snapshot identifier + returned: always + type: str + sample: test-aurora-snapshot + engine: + description: Database engine + returned: always + type: str + sample: aurora + engine_version: + description: Database engine version + returned: always + type: str + sample: 5.6.10a + iam_database_authentication_enabled: + description: Whether database authentication through IAM is enabled + returned: always + type: bool + sample: false + kms_key_id: + description: ID of the KMS Key encrypting the snapshot + returned: always + type: str + sample: arn:aws:kms:ca-central-1:123456789012:key/abcd1234-abcd-1111-aaaa-0123456789ab + license_model: + description: License model + returned: always + type: str + sample: aurora + master_username: + description: Database master username + returned: always + type: str + sample: shertel + percent_progress: + description: Percent progress of snapshot + returned: always + type: int + sample: 0 + port: + description: Database port + returned: always + type: int + sample: 0 + snapshot_create_time: + description: Date and time when the snapshot was created + returned: always + type: str + sample: '2018-05-17T00:23:23.731000+00:00' + snapshot_type: + description: Type of snapshot + returned: always + type: str + sample: manual + status: + description: Status of snapshot + returned: always + type: str + sample: creating + storage_encrypted: + description: Whether the snapshot is encrypted + returned: always + type: bool + sample: true + tags: + description: Tags of the snapshot + returned: when snapshot is not shared + type: complex + contains: {} + vpc_id: + description: VPC of the database + returned: always + type: str + sample: vpc-abcd1234 
+''' + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + + +def common_snapshot_info(module, conn, method, prefix, params): + paginator = conn.get_paginator(method) + try: + results = paginator.paginate(**params).build_full_result()['%ss' % prefix] + except is_boto3_error_code('%sNotFound' % prefix): + results = [] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, "trying to get snapshot information") + + for snapshot in results: + try: + if snapshot['SnapshotType'] != 'shared': + snapshot['Tags'] = boto3_tag_list_to_ansible_dict(conn.list_tags_for_resource(ResourceName=snapshot['%sArn' % prefix], + aws_retry=True)['TagList']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, "Couldn't get tags for snapshot %s" % snapshot['%sIdentifier' % prefix]) + + return [camel_dict_to_snake_dict(snapshot, ignore_list=['Tags']) for snapshot in results] + + +def cluster_snapshot_info(module, conn): + snapshot_name = module.params.get('db_cluster_snapshot_identifier') + snapshot_type = module.params.get('snapshot_type') + instance_name = module.params.get('db_cluster_identifier') + + params = dict() + if snapshot_name: + params['DBClusterSnapshotIdentifier'] = snapshot_name + if instance_name: + params['DBClusterIdentifier'] = instance_name + if snapshot_type: + params['SnapshotType'] = snapshot_type + if snapshot_type == 'public': + params['IncludePublic'] = True + elif snapshot_type == 'shared': + params['IncludeShared'] = True + + return common_snapshot_info(module, conn, 'describe_db_cluster_snapshots', 'DBClusterSnapshot', params) + + +def standalone_snapshot_info(module, conn): + snapshot_name = module.params.get('db_snapshot_identifier') + snapshot_type = module.params.get('snapshot_type') + instance_name = module.params.get('db_instance_identifier') + + params = dict() + if snapshot_name: + params['DBSnapshotIdentifier'] = snapshot_name + if instance_name: + params['DBInstanceIdentifier'] = instance_name + if snapshot_type: + params['SnapshotType'] = snapshot_type + if snapshot_type == 'public': + params['IncludePublic'] = True + elif snapshot_type == 'shared': + params['IncludeShared'] = True + + return common_snapshot_info(module, conn, 'describe_db_snapshots', 'DBSnapshot', params) + + +def main(): + argument_spec = dict( + db_snapshot_identifier=dict(aliases=['snapshot_name']), + db_instance_identifier=dict(), + db_cluster_identifier=dict(), + db_cluster_snapshot_identifier=dict(), + snapshot_type=dict(choices=['automated', 'manual', 'shared', 'public']) + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[['db_snapshot_identifier', 'db_instance_identifier', 'db_cluster_identifier', 'db_cluster_snapshot_identifier']] + ) + + conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10)) + results = dict() + if not module.params['db_cluster_identifier'] and not module.params['db_cluster_snapshot_identifier']: + results['snapshots'] = standalone_snapshot_info(module, conn) + if not module.params['db_snapshot_identifier'] and not module.params['db_instance_identifier']: + 
+        results['cluster_snapshots'] = cluster_snapshot_info(module, conn)
+
+    module.exit_json(changed=False, **results)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_subnet_group.py b/ansible_collections/amazon/aws/plugins/modules/rds_subnet_group.py
new file mode 100644
index 000000000..4aae74acd
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/rds_subnet_group.py
@@ -0,0 +1,374 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: rds_subnet_group
+version_added: 5.0.0
+short_description: manage RDS database subnet groups
+description:
+  - Creates, modifies, and deletes RDS database subnet groups.
+  - This module was originally added to C(community.aws) in release 1.0.0.
+options:
+  state:
+    description:
+      - Specifies whether the subnet group should be present or absent.
+    required: true
+    choices: [ 'present' , 'absent' ]
+    type: str
+  name:
+    description:
+      - Database subnet group identifier.
+    required: true
+    type: str
+  description:
+    description:
+      - Database subnet group description.
+      - Required when I(state=present).
+    type: str
+  subnets:
+    description:
+      - List of subnet IDs that make up the database subnet group.
+      - Required when I(state=present).
+    type: list
+    elements: str
+notes:
+  - Support for I(tags) and I(purge_tags) was added in release 3.2.0.
+author:
+  - "Scott Anderson (@tastychutney)"
+  - "Alina Buzachis (@alinabuzachis)"
+extends_documentation_fragment:
+  - amazon.aws.aws
+  - amazon.aws.ec2
+  - amazon.aws.tags
+  - amazon.aws.boto3
+
+'''
+
+EXAMPLES = r'''
+- name: Add or change a subnet group
+  amazon.aws.rds_subnet_group:
+    state: present
+    name: norwegian-blue
+    description: My Fancy Ex Parrot Subnet Group
+    subnets:
+      - subnet-aaaaaaaa
+      - subnet-bbbbbbbb
+
+- name: Add or change a subnet group and associate tags
+  amazon.aws.rds_subnet_group:
+    state: present
+    name: norwegian-blue
+    description: My Fancy Ex Parrot Subnet Group
+    subnets:
+      - subnet-aaaaaaaa
+      - subnet-bbbbbbbb
+    tags:
+      tag1: Tag1
+      tag2: Tag2
+
+- name: Remove a subnet group
+  amazon.aws.rds_subnet_group:
+    state: absent
+    name: norwegian-blue
+'''
+
+RETURN = r'''
+changed:
+  description: Whether or not the subnet group was changed.
+  type: bool
+  returned: always
+  sample: false
+subnet_group:
+  description: Dictionary of DB subnet group values
+  returned: I(state=present)
+  type: complex
+  contains:
+    name:
+      description: The name of the DB subnet group (maintained for backward compatibility)
+      returned: I(state=present)
+      type: str
+      sample: "ansible-test-mbp-13950442"
+    db_subnet_group_name:
+      description: The name of the DB subnet group
+      returned: I(state=present)
+      type: str
+      sample: "ansible-test-mbp-13950442"
+    description:
+      description: The description of the DB subnet group (maintained for backward compatibility)
+      returned: I(state=present)
+      type: str
+      sample: "Simple description."
+    db_subnet_group_description:
+      description: The description of the DB subnet group
+      returned: I(state=present)
+      type: str
+      sample: "Simple description."
+ vpc_id: + description: The VpcId of the DB subnet group + returned: I(state=present) + type: str + sample: "vpc-0acb0ba033ff2119c" + subnet_ids: + description: Contains a list of Subnet IDs + returned: I(state=present) + type: list + sample: + "subnet-08c94870f4480797e" + subnets: + description: Contains a list of Subnet elements (@see https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/rds.html#RDS.Client.describe_db_subnet_groups) # noqa + returned: I(state=present) + type: list + contains: + subnet_availability_zone: + description: Contains Availability Zone information. + returned: I(state=present) + type: dict + version_added: 3.2.0 + version_added_collection: community.aws + sample: + name: "eu-north-1b" + subnet_identifier: + description: The identifier of the subnet. + returned: I(state=present) + type: str + version_added: 3.2.0 + version_added_collection: community.aws + sample: "subnet-08c94870f4480797e" + subnet_outpost: + description: This value specifies the Outpost. + returned: I(state=present) + type: dict + version_added: 3.2.0 + version_added_collection: community.aws + sample: {} + subnet_status: + description: The status of the subnet. + returned: I(state=present) + type: str + version_added: 3.2.0 + version_added_collection: community.aws + sample: "Active" + status: + description: The status of the DB subnet group (maintained for backward compatibility) + returned: I(state=present) + type: str + sample: "Complete" + subnet_group_status: + description: The status of the DB subnet group + returned: I(state=present) + type: str + sample: "Complete" + db_subnet_group_arn: + description: The ARN of the DB subnet group + returned: I(state=present) + type: str + sample: "arn:aws:rds:eu-north-1:123456789012:subgrp:ansible-test-13950442" + tags: + description: The tags associated with the subnet group + returned: I(state=present) + type: dict + version_added: 3.2.0 + version_added_collection: community.aws + sample: + tag1: Tag1 + tag2: Tag2 +''' + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags +from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags + + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + + +def create_result(changed, subnet_group=None): + if subnet_group is None: + return dict( + changed=changed + ) + result_subnet_group = dict(subnet_group) + result_subnet_group['name'] = result_subnet_group.get( + 'db_subnet_group_name') + result_subnet_group['description'] = result_subnet_group.get( + 'db_subnet_group_description') + result_subnet_group['status'] = result_subnet_group.get( + 'subnet_group_status') + result_subnet_group['subnet_ids'] = create_subnet_list( + subnet_group.get('subnets')) + return dict( + changed=changed, + subnet_group=result_subnet_group + ) + + +@AWSRetry.jittered_backoff() +def _describe_db_subnet_groups_with_backoff(client, **kwargs): + paginator = client.get_paginator('describe_db_subnet_groups') + return paginator.paginate(**kwargs).build_full_result() + + +def get_subnet_group(client, 
module): + params = dict() + params['DBSubnetGroupName'] = module.params.get('name').lower() + + try: + _result = _describe_db_subnet_groups_with_backoff(client, **params) + except is_boto3_error_code('DBSubnetGroupNotFoundFault'): + return None + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Couldn't describe subnet groups.") + + if _result: + result = camel_dict_to_snake_dict(_result['DBSubnetGroups'][0]) + result['tags'] = get_tags(client, module, result['db_subnet_group_arn']) + + return result + + +def create_subnet_list(subnets): + r''' + Construct a list of subnet ids from a list of subnets dicts returned by boto3. + Parameters: + subnets (list): A list of subnets definitions. + @see https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/rds.html#RDS.Client.describe_db_subnet_groups + Returns: + (list): List of subnet ids (str) + ''' + subnets_ids = [] + for subnet in subnets: + subnets_ids.append(subnet.get('subnet_identifier')) + return subnets_ids + + +def main(): + argument_spec = dict( + state=dict(required=True, choices=['present', 'absent']), + name=dict(required=True), + description=dict(required=False), + subnets=dict(required=False, type='list', elements='str'), + tags=dict(required=False, type='dict', aliases=['resource_tags']), + purge_tags=dict(type='bool', default=True), + ) + required_if = [('state', 'present', ['description', 'subnets'])] + + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=required_if, + supports_check_mode=True + ) + + state = module.params.get('state') + group_name = module.params.get('name').lower() + group_description = module.params.get('description') + group_subnets = module.params.get('subnets') or [] + + try: + connection = module.client('rds', retry_decorator=AWSRetry.jittered_backoff()) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, 'Failed to instantiate AWS connection.') + + # Default. + changed = None + result = create_result(False) + tags_update = False + subnet_update = False + + if module.params.get("tags") is not None: + _tags = ansible_dict_to_boto3_tag_list(module.params.get("tags")) + else: + _tags = list() + + matching_groups = get_subnet_group(connection, module) + + if state == 'present': + if matching_groups: + # We have one or more subnets at this point. + + # Check if there is any tags update + tags_update = ensure_tags( + connection, + module, + matching_groups['db_subnet_group_arn'], + matching_groups['tags'], + module.params.get("tags"), + module.params['purge_tags'] + ) + + # Sort the subnet groups before we compare them + existing_subnets = create_subnet_list(matching_groups['subnets']) + existing_subnets.sort() + group_subnets.sort() + + # See if anything changed. + if ( + matching_groups['db_subnet_group_name'] != group_name or + matching_groups['db_subnet_group_description'] != group_description or + existing_subnets != group_subnets + ): + if not module.check_mode: + # Modify existing group. 
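+                    # modify_db_subnet_group takes the full desired state (name,
+                    # description, and subnet list), not a delta.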
+                    try:
+                        connection.modify_db_subnet_group(
+                            aws_retry=True,
+                            DBSubnetGroupName=group_name,
+                            DBSubnetGroupDescription=group_description,
+                            SubnetIds=group_subnets
+                        )
+                    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+                        module.fail_json_aws(e, 'Failed to update a subnet group.')
+                subnet_update = True
+        else:
+            if not module.check_mode:
+                try:
+                    connection.create_db_subnet_group(
+                        aws_retry=True,
+                        DBSubnetGroupName=group_name,
+                        DBSubnetGroupDescription=group_description,
+                        SubnetIds=group_subnets,
+                        Tags=_tags
+                    )
+                except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+                    module.fail_json_aws(e, 'Failed to create a new subnet group.')
+            subnet_update = True
+    elif state == 'absent':
+        if not module.check_mode:
+            try:
+                connection.delete_db_subnet_group(aws_retry=True, DBSubnetGroupName=group_name)
+            except is_boto3_error_code('DBSubnetGroupNotFoundFault'):
+                module.exit_json(**result)
+            except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
+                module.fail_json_aws(e, 'Failed to delete a subnet group.')
+        else:
+            subnet_group = get_subnet_group(connection, module)
+            if subnet_group:
+                subnet_update = True
+            result = create_result(subnet_update, subnet_group)
+            module.exit_json(**result)
+
+        subnet_update = True
+
+    subnet_group = get_subnet_group(connection, module)
+    changed = tags_update or subnet_update
+    result = create_result(changed, subnet_group)
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/route53.py b/ansible_collections/amazon/aws/plugins/modules/route53.py
new file mode 100644
index 000000000..3ac321763
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/route53.py
@@ -0,0 +1,797 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: route53
+version_added: 5.0.0
+short_description: add or delete entries in Amazon's Route 53 DNS service
+description:
+  - Creates and deletes DNS records in Amazon's Route 53 service.
+  - This module was originally added to C(community.aws) in release 1.0.0.
+options:
+  state:
+    description:
+      - Specifies the state of the resource record.
+    required: true
+    aliases: [ 'command' ]
+    choices: [ 'present', 'absent', 'get', 'create', 'delete' ]
+    type: str
+  zone:
+    description:
+      - The DNS zone to modify.
+      - This is a required parameter if I(hosted_zone_id) is not supplied.
+    type: str
+  hosted_zone_id:
+    description:
+      - The Hosted Zone ID of the DNS zone to modify.
+      - This is a required parameter if I(zone) is not supplied.
+    type: str
+  record:
+    description:
+      - The full DNS record to create or delete.
+    required: true
+    type: str
+  ttl:
+    description:
+      - The TTL, in seconds, to give the new record.
+      - Mutually exclusive with I(alias).
+    default: 3600
+    type: int
+  type:
+    description:
+      - The type of DNS record to create.
+    required: true
+    choices: [ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'CAA', 'NS', 'SOA' ]
+    type: str
+  alias:
+    description:
+      - Indicates if this is an alias record.
+      - Mutually exclusive with I(ttl).
+      - Defaults to C(false).
+    type: bool
+  alias_hosted_zone_id:
+    description:
+      - The hosted zone identifier.
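+      - Required when I(alias) is set. For an Elastic Load Balancer alias this is the
+        canonical hosted zone ID of the load balancer (see the examples below).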
+    type: str
+  alias_evaluate_target_health:
+    description:
+      - Whether or not to evaluate an alias target's health. Useful for aliases to Elastic Load Balancers.
+    type: bool
+    default: false
+  value:
+    description:
+      - The new value when creating a DNS record. YAML lists or multiple comma-spaced values are allowed for non-alias records.
+    type: list
+    elements: str
+  overwrite:
+    description:
+      - Whether an existing record should be overwritten on create if values do not match.
+    type: bool
+  retry_interval:
+    description:
+      - In the case that Route 53 is still servicing a prior request, this module will wait and try again after this many seconds.
+        If you have many domain names, the default of C(500) seconds may be too long.
+    default: 500
+    type: int
+  private_zone:
+    description:
+      - If set to C(true), the private zone matching the requested name within the domain will be used if there are both public and private zones.
+      - The default is to use the public zone.
+    type: bool
+    default: false
+  identifier:
+    description:
+      - Must be specified for weighted, latency-based, and failover resource record sets only.
+        An identifier that differentiates among multiple resource record sets that have the same combination of DNS name and type.
+    type: str
+  weight:
+    description:
+      - Weighted resource record sets only. Among resource record sets that
+        have the same combination of DNS name and type, a value that
+        determines what portion of traffic for the current resource record set
+        is routed to the associated location.
+      - Mutually exclusive with I(region) and I(failover).
+    type: int
+  region:
+    description:
+      - Latency-based resource record sets only. Among resource record sets
+        that have the same combination of DNS name and type, a value that
+        determines which region this should be associated with for
+        latency-based routing.
+      - Mutually exclusive with I(weight) and I(failover).
+    type: str
+  geo_location:
+    description:
+      - Allows you to control how Amazon Route 53 responds to DNS queries based on the geographic origin of the query.
+      - Two geolocation resource record sets that specify the same geographic location cannot be created.
+      - Non-geolocation resource record sets that have the same values for the Name and Type elements as geolocation
+        resource record sets cannot be created.
+    suboptions:
+      continent_code:
+        description:
+          - The two-letter code for the continent.
+          - Specifying I(continent_code) with either I(country_code) or I(subdivision_code) returns an InvalidInput error.
+        type: str
+      country_code:
+        description:
+          - The two-letter code for a country.
+          - Amazon Route 53 uses the two-letter country codes that are specified in ISO standard 3166-1 alpha-2.
+        type: str
+      subdivision_code:
+        description:
+          - The two-letter code for a state of the United States.
+          - To specify I(subdivision_code), I(country_code) must be set to C(US).
+        type: str
+    type: dict
+    version_added: 3.3.0
+    version_added_collection: community.aws
+  health_check:
+    description:
+      - Health check to associate with this record.
+    type: str
+  failover:
+    description:
+      - Failover resource record sets only. Whether this is the primary or
+        secondary resource record set. Allowed values are PRIMARY and SECONDARY.
+      - Mutually exclusive with I(weight) and I(region).
+    type: str
+    choices: ['SECONDARY', 'PRIMARY']
+  vpc_id:
+    description:
+      - "When used in conjunction with private_zone: true, this will only modify records in the private hosted zone attached to this VPC."
+      - This allows you to have multiple private hosted zones, all with the same name, attached to different VPCs.
+    type: str
+  wait:
+    description:
+      - Wait until the changes have been replicated to all Amazon Route 53 DNS servers.
+    type: bool
+    default: false
+  wait_timeout:
+    description:
+      - How long to wait for the changes to be replicated, in seconds.
+    default: 300
+    type: int
+author:
+  - Bruce Pennypacker (@bpennypacker)
+  - Mike Buzzetti (@jimbydamonk)
+extends_documentation_fragment:
+  - amazon.aws.aws
+  - amazon.aws.boto3
+'''
+
+RETURN = r'''
+nameservers:
+  description: Nameservers associated with the zone.
+  returned: when state is 'get'
+  type: list
+  sample:
+    - ns-1036.awsdns-00.org.
+    - ns-516.awsdns-00.net.
+    - ns-1504.awsdns-00.co.uk.
+    - ns-1.awsdns-00.com.
+set:
+  description: Info specific to the resource record.
+  returned: when state is 'get'
+  type: complex
+  contains:
+    alias:
+      description: Whether this is an alias.
+      returned: always
+      type: bool
+      sample: false
+    failover:
+      description: Whether this is the primary or secondary resource record set.
+      returned: always
+      type: str
+      sample: PRIMARY
+    geo_location:
+      description: Geographic location based on which Route 53 responds to DNS queries.
+      returned: when configured
+      type: dict
+      sample: { continent_code: "NA", country_code: "US", subdivision_code: "CA" }
+      version_added: 3.3.0
+      version_added_collection: community.aws
+    health_check:
+      description: Health check associated with this record.
+      returned: always
+      type: str
+    identifier:
+      description: An identifier that differentiates among multiple resource record sets that have the same combination of DNS name and type.
+      returned: always
+      type: str
+    record:
+      description: Domain name for the record set.
+      returned: always
+      type: str
+      sample: new.foo.com.
+    region:
+      description: Which region this should be associated with for latency-based routing.
+      returned: always
+      type: str
+      sample: us-west-2
+    ttl:
+      description: Resource record cache TTL.
+      returned: always
+      type: str
+      sample: '3600'
+    type:
+      description: Resource record set type.
+      returned: always
+      type: str
+      sample: A
+    value:
+      description: Record value.
+      returned: always
+      type: str
+      sample: 52.43.18.27
+    values:
+      description: Record values.
+      returned: always
+      type: list
+      sample:
+        - 52.43.18.27
+    weight:
+      description: Weight of the record.
+      returned: always
+      type: str
+      sample: '3'
+    zone:
+      description: Zone this record set belongs to.
+      returned: always
+      type: str
+      sample: foo.bar.com.
+'''
+
+EXAMPLES = r'''
+- name: Add new.foo.com as an A record with 3 IPs and wait until the changes have been replicated
+  amazon.aws.route53:
+    state: present
+    zone: foo.com
+    record: new.foo.com
+    type: A
+    ttl: 7200
+    value: 1.1.1.1,2.2.2.2,3.3.3.3
+    wait: true
+- name: Update new.foo.com as an A record with a list of 3 IPs and wait until the changes have been replicated
+  amazon.aws.route53:
+    state: present
+    zone: foo.com
+    record: new.foo.com
+    type: A
+    ttl: 7200
+    value:
+      - 1.1.1.1
+      - 2.2.2.2
+      - 3.3.3.3
+    wait: true
+- name: Retrieve the details for new.foo.com
+  amazon.aws.route53:
+    state: get
+    zone: foo.com
+    record: new.foo.com
+    type: A
+  register: rec
+- name: Delete new.foo.com A record using the results from the get command
+  amazon.aws.route53:
+    state: absent
+    zone: foo.com
+    record: "{{ rec.set.record }}"
+    ttl: "{{ rec.set.ttl }}"
+    type: "{{ rec.set.type }}"
+    value: "{{ rec.set.value }}"
+# Add an AAAA record. Note that because there are colons in the value,
+# the IPv6 address must be quoted. Also shows using the old form command=create.
+- name: Add an AAAA record
+  amazon.aws.route53:
+    command: create
+    zone: foo.com
+    record: localhost.foo.com
+    type: AAAA
+    ttl: 7200
+    value: "::1"
+# For more information on SRV records see:
+# https://en.wikipedia.org/wiki/SRV_record
+- name: Add a SRV record with multiple fields for a service on port 22222
+  amazon.aws.route53:
+    state: present
+    zone: foo.com
+    record: "_example-service._tcp.foo.com"
+    type: SRV
+    value: "0 0 22222 host1.foo.com,0 0 22222 host2.foo.com"
+# Note that TXT and SPF records must be surrounded
+# by quotes when sent to Route 53:
+- name: Add a TXT record.
+  amazon.aws.route53:
+    state: present
+    zone: foo.com
+    record: localhost.foo.com
+    type: TXT
+    ttl: 7200
+    value: '"bar"'
+- name: Add an alias record that points to an Amazon ELB
+  amazon.aws.route53:
+    state: present
+    zone: foo.com
+    record: elb.foo.com
+    type: A
+    value: "{{ elb_dns_name }}"
+    alias: True
+    alias_hosted_zone_id: "{{ elb_zone_id }}"
+- name: Retrieve the details for elb.foo.com
+  amazon.aws.route53:
+    state: get
+    zone: foo.com
+    record: elb.foo.com
+    type: A
+  register: rec
+- name: Delete an alias record using the results from the get command
+  amazon.aws.route53:
+    state: absent
+    zone: foo.com
+    record: "{{ rec.set.record }}"
+    ttl: "{{ rec.set.ttl }}"
+    type: "{{ rec.set.type }}"
+    value: "{{ rec.set.value }}"
+    alias: True
+    alias_hosted_zone_id: "{{ rec.set.alias_hosted_zone_id }}"
+- name: Add an alias record that points to an Amazon ELB and evaluates its health
+  amazon.aws.route53:
+    state: present
+    zone: foo.com
+    record: elb.foo.com
+    type: A
+    value: "{{ elb_dns_name }}"
+    alias: True
+    alias_hosted_zone_id: "{{ elb_zone_id }}"
+    alias_evaluate_target_health: True
+- name: Add an AAAA record with Hosted Zone ID
+  amazon.aws.route53:
+    state: present
+    zone: foo.com
+    hosted_zone_id: Z2AABBCCDDEEFF
+    record: localhost.foo.com
+    type: AAAA
+    ttl: 7200
+    value: "::1"
+- name: Use a routing policy to distribute traffic
+  amazon.aws.route53:
+    state: present
+    zone: foo.com
+    record: www.foo.com
+    type: CNAME
+    value: host1.foo.com
+    ttl: 30
+    # Routing policy
+    identifier: "host1@www"
+    weight: 100
+    health_check: "d994b780-3150-49fd-9205-356abdd42e75"
+- name: Add a CAA record (RFC 6844)
+  amazon.aws.route53:
+    state: present
+    zone: example.com
+    record: example.com
+    type: CAA
+    value:
+      - 0 issue "ca.example.net"
+      - 0 issuewild ";"
+      - 0 iodef "mailto:security@example.com"
+- name: Create a record with geo_location - country_code
+  amazon.aws.route53:
+    state: present
+    zone: '{{ zone_one }}'
+    record: 'geo-test.{{ zone_one }}'
+    identifier: "geohost@www"
+    type: A
+    value: 1.1.1.1
+    ttl: 30
+    geo_location:
+      country_code: US
+- name: Create a record with geo_location - subdivision code
+  amazon.aws.route53:
+    state: present
+    zone: '{{ zone_one }}'
+    record: 'geo-test.{{ zone_one }}'
+    identifier: "geohost@www"
+    type: A
+    value: 1.1.1.1
+    ttl: 30
+    geo_location:
+      country_code: US
+      subdivision_code: TX
+'''
+
+from operator import itemgetter
+
+try:
+    import botocore
+except ImportError:
+    pass  # Handled by AnsibleAWSModule
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message
+from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+
+MAX_AWS_RETRIES = 10  # How many retries to perform when an API call is failing
+WAIT_RETRY = 5  # how many seconds to wait between propagation status polls
+
+
+@AWSRetry.jittered_backoff(retries=MAX_AWS_RETRIES)
+def _list_record_sets(route53, **kwargs):
+    paginator = route53.get_paginator('list_resource_record_sets')
+    return paginator.paginate(**kwargs).build_full_result()['ResourceRecordSets']
+
+
+@AWSRetry.jittered_backoff(retries=MAX_AWS_RETRIES)
+def _list_hosted_zones(route53, **kwargs):
+    paginator = route53.get_paginator('list_hosted_zones')
+    return paginator.paginate(**kwargs).build_full_result()['HostedZones']
+
+
+def get_record(route53, zone_id, record_name, record_type, record_identifier):
+    record_sets_results = _list_record_sets(route53, HostedZoneId=zone_id)
+
+    for record_set in record_sets_results:
+        record_set['Name'] = record_set['Name'].encode().decode('unicode_escape')
+        # If the record name and type is not equal, move to the next record
+        if (record_name.lower(), record_type) != (record_set['Name'].lower(), record_set['Type']):
+            continue
+
+        if record_identifier and record_identifier != record_set.get("SetIdentifier"):
+            continue
+
+        return record_set
+
+    return None
+
+
+def get_zone_id_by_name(route53, module, zone_name, want_private, want_vpc_id):
+    """Finds the ID of a zone matching the given name, privacy setting and (optionally) VPC"""
+    hosted_zones_results = _list_hosted_zones(route53)
+
+    for zone in hosted_zones_results:
+        # only save this zone id if the private status of the zone matches
+        # the private_zone_in boolean specified in the params
+        private_zone = module.boolean(zone['Config'].get('PrivateZone', False))
+        zone_id = zone['Id'].replace("/hostedzone/", "")
+
+        if private_zone == want_private and zone['Name'] == zone_name:
+            if want_vpc_id:
+                # NOTE: These details aren't available in other boto3 methods, hence the necessary
+                # extra API call
+                hosted_zone = route53.get_hosted_zone(aws_retry=True, Id=zone_id)
+                if want_vpc_id in [v['VPCId'] for v in hosted_zone['VPCs']]:
+                    return zone_id
+            else:
+                return zone_id
+    return None
+
+
+def format_record(record_in, zone_in, zone_id):
+    """
+    Formats a record in a way that's consistent with the pre-boto3 migration values
+    as well as returning the 'normal' boto3 style values
+    """
+    if not record_in:
+        return None
+
+    record = dict(record_in)
+    record['zone'] = zone_in
+    record['hosted_zone_id'] = zone_id
+
+    record['type'] = record_in.get('Type', None)
+    record['record'] = record_in.get('Name').encode().decode('unicode_escape')
+    record['ttl'] = record_in.get('TTL', None)
+    record['identifier'] = record_in.get('SetIdentifier', None)
+    record['weight'] = record_in.get('Weight', None)
+    record['region'] = record_in.get('Region', None)
+    record['failover'] = record_in.get('Failover', None)
+    record['health_check'] = record_in.get('HealthCheckId', None)
+
+    if record['ttl']:
+        record['ttl'] = str(record['ttl'])
+    if record['weight']:
+        record['weight'] = str(record['weight'])
+    if record['region']:
+        record['region'] = str(record['region'])
+
+    if record_in.get('AliasTarget'):
+        record['alias'] = True
+        record['value'] = record_in['AliasTarget'].get('DNSName')
+        record['values'] = [record_in['AliasTarget'].get('DNSName')]
+        record['alias_hosted_zone_id'] = record_in['AliasTarget'].get('HostedZoneId')
+        record['alias_evaluate_target_health'] = record_in['AliasTarget'].get('EvaluateTargetHealth')
+    else:
+        record['alias'] = False
+        records = [r.get('Value') for r in record_in.get('ResourceRecords')]
+        record['value'] = ','.join(sorted(records))
+        record['values'] = sorted(records)
+
+    return record
+
+
+def get_hosted_zone_nameservers(route53, zone_id):
+    hosted_zone_name = route53.get_hosted_zone(aws_retry=True, Id=zone_id)['HostedZone']['Name']
+    resource_records_sets = _list_record_sets(route53, HostedZoneId=zone_id)
+
+    nameservers_records = list(
+        filter(lambda record: record['Name'] == hosted_zone_name and record['Type'] == 'NS', resource_records_sets)
+    )[0]['ResourceRecords']
+
+    return [ns_record['Value'] for ns_record in nameservers_records]
+
+
+def main():
+    argument_spec = dict(
+        state=dict(type='str', required=True, choices=['absent', 'create', 'delete', 'get', 'present'], aliases=['command']),
+        zone=dict(type='str'),
+        hosted_zone_id=dict(type='str'),
+        record=dict(type='str', required=True),
+        ttl=dict(type='int', default=3600),
+        type=dict(type='str', required=True, choices=['A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SOA', 'SPF', 'SRV', 'TXT']),
+        alias=dict(type='bool'),
+        alias_hosted_zone_id=dict(type='str'),
+        alias_evaluate_target_health=dict(type='bool', default=False),
+        value=dict(type='list', elements='str'),
+        overwrite=dict(type='bool'),
+        retry_interval=dict(type='int', default=500),
+        private_zone=dict(type='bool', default=False),
+        identifier=dict(type='str'),
+        weight=dict(type='int'),
+        region=dict(type='str'),
+        geo_location=dict(type='dict',
+                          options=dict(
+                              continent_code=dict(type="str"),
+                              country_code=dict(type="str"),
+                              subdivision_code=dict(type="str")),
+                          required=False),
+        health_check=dict(type='str'),
+        failover=dict(type='str', choices=['PRIMARY', 'SECONDARY']),
+        vpc_id=dict(type='str'),
+        wait=dict(type='bool', default=False),
+        wait_timeout=dict(type='int', default=300),
+    )
+
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        required_one_of=[['zone', 'hosted_zone_id']],
+        # If alias is True then you must specify alias_hosted_zone as well
+        required_together=[['alias', 'alias_hosted_zone_id']],
+        # state=present or state=create requires a value
+        required_if=(
+            ('state', 'present', ['value']),
+            ('state', 'create', ['value']),
+        ),
+        # failover, region and weight are mutually exclusive
+        mutually_exclusive=[
+            ('failover', 'region', 'weight'),
+            ('alias', 'ttl'),
+        ],
+        # failover, region, weight and geo_location require identifier
+        required_by=dict(
+            failover=('identifier',),
+            region=('identifier',),
+            weight=('identifier',),
+            geo_location=('identifier',),
+        ),
+    )
+
+    if module.params['state'] in ('present', 'create'):
+        command_in = 'create'
+    elif module.params['state'] in ('absent', 'delete'):
+        command_in = 'delete'
+    elif module.params['state'] == 'get':
+        command_in = 'get'
+
+    zone_in = (module.params.get('zone') or '').lower()
+    hosted_zone_id_in = module.params.get('hosted_zone_id')
+    ttl_in = module.params.get('ttl')
+    record_in = module.params.get('record').lower()
+    type_in = module.params.get('type')
+    value_in = module.params.get('value') or []
+    alias_in = module.params.get('alias')
+    alias_hosted_zone_id_in = module.params.get('alias_hosted_zone_id')
+    alias_evaluate_target_health_in = module.params.get('alias_evaluate_target_health')
+    retry_interval_in = module.params.get('retry_interval')
+
+    if module.params['vpc_id'] is not None:
+        private_zone_in = True
+    else:
+        private_zone_in = module.params.get('private_zone')
+
+    identifier_in = module.params.get('identifier')
+    weight_in = module.params.get('weight')
+    region_in = module.params.get('region')
+    health_check_in = module.params.get('health_check')
+    failover_in = module.params.get('failover')
+    vpc_id_in = module.params.get('vpc_id')
+    wait_in = module.params.get('wait')
+    wait_timeout_in = module.params.get('wait_timeout')
+    geo_location = module.params.get('geo_location')
+
+    if zone_in[-1:] != '.':
+        zone_in += "."
+
+    if record_in[-1:] != '.':
+        record_in += "."
+
+    if command_in == 'create' or command_in == 'delete':
+        if alias_in and len(value_in) != 1:
+            module.fail_json(msg="parameter 'value' must contain a single dns name for alias records")
+        if (weight_in is None and region_in is None and failover_in is None and geo_location is None) and identifier_in is not None:
+            module.fail_json(msg="You have specified identifier which makes sense only if you specify one of: weight, region, geo_location or failover.")
+
+    retry_decorator = AWSRetry.jittered_backoff(
+        retries=MAX_AWS_RETRIES,
+        delay=retry_interval_in,
+        catch_extra_error_codes=['PriorRequestNotComplete'],
+        max_delay=max(60, retry_interval_in),
+    )
+
+    # connect to the route53 endpoint
+    try:
+        route53 = module.client('route53', retry_decorator=retry_decorator)
+    except botocore.exceptions.HTTPClientError as e:
+        module.fail_json_aws(e, msg='Failed to connect to AWS')
+
+    # Find the named zone ID
+    zone_id = hosted_zone_id_in or get_zone_id_by_name(route53, module, zone_in, private_zone_in, vpc_id_in)
+
+    # Verify that the requested zone is already defined in Route53
+    if zone_id is None:
+        errmsg = "Zone %s does not exist in Route53" % (zone_in or hosted_zone_id_in)
+        module.fail_json(msg=errmsg)
+
+    aws_record = get_record(route53, zone_id, record_in, type_in, identifier_in)
+
+    resource_record_set = scrub_none_parameters({
+        'Name': record_in,
+        'Type': type_in,
+        'Weight': weight_in,
+        'Region': region_in,
+        'Failover': failover_in,
+        'TTL': ttl_in,
+        'ResourceRecords': [dict(Value=value) for value in value_in],
+        'HealthCheckId': health_check_in,
+        'SetIdentifier': identifier_in,
+    })
+
+    if geo_location:
+        continent_code = geo_location.get('continent_code')
+        country_code = geo_location.get('country_code')
+        subdivision_code = geo_location.get('subdivision_code')
+
+        if continent_code and (country_code or subdivision_code):
+            module.fail_json(changed=False, msg='While using geo_location, continent_code is mutually exclusive with country_code and subdivision_code.')
+
+        if not any([continent_code, country_code, subdivision_code]):
+            module.fail_json(changed=False, msg='To use geo_location please specify either continent_code, country_code, or subdivision_code.')
+
+        if geo_location.get('subdivision_code') and geo_location.get('country_code').lower() != 'us':
+            module.fail_json(changed=False, msg='To use subdivision_code, you must specify country_code as US.')
+
+        # Build geo_location suboptions specification
+        resource_record_set['GeoLocation'] = {}
+        if continent_code:
+            resource_record_set['GeoLocation']['ContinentCode'] = continent_code
+        if country_code:
+            resource_record_set['GeoLocation']['CountryCode'] = country_code
+        if subdivision_code:
+            resource_record_set['GeoLocation']['SubdivisionCode'] = subdivision_code
+
+    if command_in == 'delete' and aws_record is not None:
+        resource_record_set['TTL'] = aws_record.get('TTL')
+        if not resource_record_set['ResourceRecords']:
+            resource_record_set['ResourceRecords'] = aws_record.get('ResourceRecords')
+
+    if alias_in:
+        resource_record_set['AliasTarget'] = dict(
+            HostedZoneId=alias_hosted_zone_id_in,
+            DNSName=value_in[0],
+            EvaluateTargetHealth=alias_evaluate_target_health_in
+        )
+        if 'ResourceRecords' in resource_record_set:
+            del resource_record_set['ResourceRecords']
+        if 'TTL' in resource_record_set:
+            del resource_record_set['TTL']
+
+    # On CAA records order doesn't matter
+    if type_in == 'CAA':
+        resource_record_set['ResourceRecords'] = sorted(resource_record_set['ResourceRecords'], key=itemgetter('Value'))
+        if aws_record:
+            aws_record['ResourceRecords'] = sorted(aws_record['ResourceRecords'], key=itemgetter('Value'))
+
+    if command_in == 'create' and aws_record == resource_record_set:
+        rr_sets = [camel_dict_to_snake_dict(resource_record_set)]
+        module.exit_json(changed=False, resource_records_sets=rr_sets)
+
+    if command_in == 'get':
+        if type_in == 'NS':
+            ns = aws_record.get('values', []) if aws_record else []
+        else:
+            # Retrieve name servers associated with the zone.
+            ns = get_hosted_zone_nameservers(route53, zone_id)
+
+        formatted_aws = format_record(aws_record, zone_in, zone_id)
+
+        if formatted_aws is None:
+            # record does not exist
+            module.exit_json(changed=False, set=[], nameservers=ns, resource_record_sets=[])
+
+        rr_sets = [camel_dict_to_snake_dict(aws_record)]
+        module.exit_json(changed=False, set=formatted_aws, nameservers=ns, resource_record_sets=rr_sets)
+
+    if command_in == 'delete' and not aws_record:
+        module.exit_json(changed=False)
+
+    if command_in == 'create' or command_in == 'delete':
+        if command_in == 'create' and aws_record:
+            if not module.params['overwrite']:
+                module.fail_json(msg="Record already exists with different value. Set 'overwrite' to replace it")
+            command = 'UPSERT'
+        else:
+            command = command_in.upper()
+
+        if not module.check_mode:
+            try:
+                change_resource_record_sets = route53.change_resource_record_sets(
+                    aws_retry=True,
+                    HostedZoneId=zone_id,
+                    ChangeBatch=dict(
+                        Changes=[
+                            dict(
+                                Action=command,
+                                ResourceRecordSet=resource_record_set
+                            )
+                        ]
+                    )
+                )
+
+                if wait_in:
+                    waiter = get_waiter(route53, 'resource_record_sets_changed')
+                    waiter.wait(
+                        Id=change_resource_record_sets['ChangeInfo']['Id'],
+                        WaiterConfig=dict(
+                            Delay=WAIT_RETRY,
+                            MaxAttempts=wait_timeout_in // WAIT_RETRY,
+                        )
+                    )
+            except is_boto3_error_message('but it already exists'):
+                module.exit_json(changed=False)
+            except botocore.exceptions.WaiterError as e:
+                module.fail_json_aws(e, msg='Timeout waiting for resource records changes to be applied')
+            except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
+                module.fail_json_aws(e, msg='Failed to update records')
+            except Exception as e:
+                module.fail_json(msg='Unhandled exception. (%s)' % to_native(e))
+
+        rr_sets = [camel_dict_to_snake_dict(resource_record_set)]
+        formatted_aws = format_record(aws_record, zone_in, zone_id)
+        formatted_record = format_record(resource_record_set, zone_in, zone_id)
+
+        module.exit_json(
+            changed=True,
+            diff=dict(
+                before=formatted_aws,
+                after=formatted_record if command_in != 'delete' else {},
+                resource_record_sets=rr_sets,
+            ),
+        )
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/route53_health_check.py b/ansible_collections/amazon/aws/plugins/modules/route53_health_check.py
new file mode 100644
index 000000000..1528be9ae
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/route53_health_check.py
@@ -0,0 +1,672 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: route53_health_check
+version_added: 5.0.0
+short_description: Manage health checks in Amazon's Route 53 DNS service
+description:
+  - Creates and deletes DNS health checks in Amazon's Route 53 service.
+  - Only the port, resource_path, string_match, and request_interval are
+    considered when updating existing health checks.
+  - This module was originally added to C(community.aws) in release 1.0.0.
+options:
+  state:
+    description:
+      - Specifies the action to take.
+    choices: [ 'present', 'absent' ]
+    type: str
+    default: 'present'
+  disabled:
+    description:
+      - Stops Route 53 from performing health checks.
+      - See the AWS documentation for more details on the exact implications.
+        U(https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/health-checks-creating-values.html)
+      - Defaults to C(true) when creating a new health check.
+    type: bool
+    version_added: 2.1.0
+    version_added_collection: community.aws
+  ip_address:
+    description:
+      - IP address of the end-point to check. Either this or I(fqdn) has to be provided.
+      - IP addresses must be publicly routable.
+    type: str
+  port:
+    description:
+      - The port on the endpoint on which you want Amazon Route 53 to perform
+        health checks. Required for TCP checks.
+    type: int
+  type:
+    description:
+      - The type of health check that you want to create, which indicates how
+        Amazon Route 53 determines whether an endpoint is healthy.
+      - Once the health check is created, its type cannot be changed.
+    choices: [ 'HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP' ]
+    type: str
+  resource_path:
+    description:
+      - The path that you want Amazon Route 53 to request when performing
+        health checks. The path can be any value for which your endpoint will
+        return an HTTP status code of 2xx or 3xx when the endpoint is healthy,
+        for example the file /docs/route53-health-check.html.
+      - Mutually exclusive with I(type='TCP').
+      - The path must begin with a /.
+      - Maximum 255 characters.
+    type: str
+  fqdn:
+    description:
+      - Domain name of the endpoint to check. Either this or I(ip_address) has
+        to be provided. When both are given the I(fqdn) is used in the C(Host:)
+        header of the HTTP request.
+    type: str
+  string_match:
+    description:
+      - If the check type is HTTP_STR_MATCH or HTTPS_STR_MATCH, the string
+        that you want Amazon Route 53 to search for in the response body from
+        the specified resource. If the string appears in the first 5120 bytes
+        of the response body, Amazon Route 53 considers the resource healthy.
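+      - Maximum 255 characters; the module rejects longer values.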
+    type: str
+  request_interval:
+    description:
+      - The number of seconds between the time that Amazon Route 53 gets a
+        response from your endpoint and the time that it sends the next
+        health-check request.
+    default: 30
+    choices: [ 10, 30 ]
+    type: int
+  failure_threshold:
+    description:
+      - The number of consecutive health checks that an endpoint must pass or
+        fail for Amazon Route 53 to change the current status of the endpoint
+        from unhealthy to healthy or vice versa.
+      - Will default to C(3) if not specified on creation.
+    choices: [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ]
+    type: int
+  health_check_name:
+    description:
+      - Name of the health check.
+      - Used together with I(use_unique_names) to set/make use of I(health_check_name) as a unique identifier.
+    type: str
+    required: False
+    aliases: ['name']
+    version_added: 4.1.0
+    version_added_collection: community.aws
+  use_unique_names:
+    description:
+      - Used together with I(health_check_name) to set/make use of I(health_check_name) as a unique identifier.
+    type: bool
+    required: False
+    version_added: 4.1.0
+    version_added_collection: community.aws
+  health_check_id:
+    description:
+      - ID of the health check to be updated or deleted.
+      - If provided, a health check can be updated or deleted based on the ID as unique identifier.
+    type: str
+    required: False
+    aliases: ['id']
+    version_added: 4.1.0
+    version_added_collection: community.aws
+  measure_latency:
+    description:
+      - Whether to enable latency graphs to monitor the latency between health checkers in multiple Amazon Web Services regions and your endpoint.
+      - The value of I(measure_latency) is immutable and cannot be modified after creating a health check.
+        See U(https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/monitoring-health-check-latency.html)
+    type: bool
+    required: False
+    version_added: 5.4.0
+author:
+  - "zimbatm (@zimbatm)"
+notes:
+  - Support for I(tags) and I(purge_tags) was added in release 2.1.0.
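+  - When I(use_unique_names) is set, the module stores I(health_check_name) in a C(Name) tag
+    on the health check and uses that tag to find existing checks.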
+extends_documentation_fragment:
+  - amazon.aws.aws
+  - amazon.aws.ec2
+  - amazon.aws.tags
+  - amazon.aws.boto3
+'''
+
+EXAMPLES = '''
+- name: Create a health-check for host1.example.com and use it in record
+  amazon.aws.route53_health_check:
+    state: present
+    fqdn: host1.example.com
+    type: HTTP_STR_MATCH
+    resource_path: /
+    string_match: "Hello"
+    request_interval: 10
+    failure_threshold: 2
+  register: my_health_check
+
+- amazon.aws.route53:
+    command: create
+    zone: "example.com"
+    type: CNAME
+    record: "www.example.com"
+    value: host1.example.com
+    ttl: 30
+    # Routing policy
+    identifier: "host1@www"
+    weight: 100
+    health_check: "{{ my_health_check.health_check.id }}"
+
+- name: create a simple health check with health_check_name as unique identifier
+  amazon.aws.route53_health_check:
+    state: present
+    health_check_name: ansible
+    fqdn: ansible.com
+    port: 443
+    type: HTTPS
+    use_unique_names: true
+
+- name: create an HTTPS health check with latency graphs enabled
+  amazon.aws.route53_health_check:
+    state: present
+    health_check_name: ansible
+    fqdn: ansible.com
+    port: 443
+    type: HTTPS
+    use_unique_names: true
+    measure_latency: true
+
+- name: Delete health-check
+  amazon.aws.route53_health_check:
+    state: absent
+    fqdn: host1.example.com
+
+- name: Update Health check by ID - update ip_address
+  amazon.aws.route53_health_check:
+    id: 12345678-abcd-abcd-abcd-0fxxxxxxxxxx
+    ip_address: 1.2.3.4
+
+- name: Update Health check by ID - update port
+  amazon.aws.route53_health_check:
+    id: 12345678-abcd-abcd-abcd-0fxxxxxxxxxx
+    port: 8080
+
+- name: Delete Health check by ID
+  amazon.aws.route53_health_check:
+    state: absent
+    id: 12345678-abcd-abcd-abcd-0fxxxxxxxxxx
+
+'''
+
+RETURN = r'''
+health_check:
+  description: Information about the health check.
+  returned: success
+  type: dict
+  contains:
+    action:
+      description: The action performed by the module.
+      type: str
+      returned: When a change is or would be made.
+      sample: 'updated'
+    id:
+      description: The Unique ID assigned by AWS to the health check.
+      type: str
+      returned: When the health check exists.
+      sample: 50ec8a13-9623-4c66-9834-dd8c5aedc9ba
+    health_check_version:
+      description: The version number of the health check.
+      type: int
+      returned: When the health check exists.
+      sample: 14
+    health_check_config:
+      description:
+        - Detailed information about the health check.
+        - May contain additional values from Route 53 health check
+          features not yet supported by this module.
+      type: dict
+      returned: When the health check exists.
+      contains:
+        type:
+          description: The type of the health check.
+          type: str
+          returned: When the health check exists.
+          sample: 'HTTPS_STR_MATCH'
+        failure_threshold:
+          description:
+            - The number of consecutive health checks that an endpoint must pass or fail for Amazon Route 53 to
+              change the current status of the endpoint from unhealthy to healthy or vice versa.
+          type: int
+          returned: When the health check exists.
+          sample: 3
+        fully_qualified_domain_name:
+          description: The FQDN configured for the health check to test.
+          type: str
+          returned: When the health check exists and an FQDN is configured.
+          sample: 'host1.example.com'
+        ip_address:
+          description: The IPv4 or IPv6 IP address of the endpoint to be queried.
+          type: str
+          returned: When the health check exists and a specific IP address is configured.
+          sample: ''
+        port:
+          description: The port on the endpoint that the health check will query.
+          type: int
+          returned: When the health check exists.
+          sample: 443
+        request_interval:
+          description: The number of seconds between health check queries.
+          type: int
+          returned: When the health check exists.
+          sample: 30
+        resource_path:
+          description: The URI path to query when performing an HTTP/HTTPS based health check.
+          type: str
+          returned: When the health check exists and a resource path has been configured.
+          sample: '/healthz'
+        search_string:
+          description: A string that must be present in the response for a health check to be considered successful.
+          type: str
+          returned: When the health check exists and a search string has been configured.
+          sample: 'ALIVE'
+        disabled:
+          description: Whether the health check has been disabled or not.
+          type: bool
+          returned: When the health check exists.
+          sample: false
+    tags:
+      description: A dictionary representing the tags on the health check.
+      type: dict
+      returned: When the health check exists.
+      sample: '{"my_key": "my_value"}'
+'''
+
+import uuid
+
+try:
+    import botocore
+except ImportError:
+    pass  # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.route53 import get_tags
+from ansible_collections.amazon.aws.plugins.module_utils.route53 import manage_tags
+
+
+def _list_health_checks(**params):
+    try:
+        results = client.list_health_checks(aws_retry=True, **params)
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg='Failed to list health checks')
+    return results
+
+
+def find_health_check(ip_addr, fqdn, hc_type, request_interval, port):
+    """Searches for health checks that have the exact same set of immutable values"""
+
+    # In lieu of an Id we perform matches against the following values:
+    # - ip_addr
+    # - fqdn
+    # - type (immutable)
+    # - request_interval
+    # - port
+
+    # Because the list API provides no 'filter' mechanism, using a paginator
+    # would result in (on average) double the number of API calls and can
+    # get really slow.
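+    # (list_health_checks only supports Marker-based paging, which the loop
+    # below drives by hand.)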
+    # Additionally, we can't properly wrap the paginator, so retrying means
+    # starting from scratch with a paginator
+    results = _list_health_checks()
+    while True:
+        for check in results.get('HealthChecks'):
+            config = check.get('HealthCheckConfig')
+            if (
+                config.get('IPAddress', None) == ip_addr and
+                config.get('FullyQualifiedDomainName', None) == fqdn and
+                config.get('Type') == hc_type and
+                config.get('RequestInterval') == request_interval and
+                config.get('Port', None) == port
+            ):
+                return check
+
+        if results.get('IsTruncated', False):
+            results = _list_health_checks(Marker=results.get('NextMarker'))
+        else:
+            return None
+
+
+def get_existing_checks_with_name():
+    results = _list_health_checks()
+    health_checks_with_name = {}
+    while True:
+        for check in results.get('HealthChecks'):
+            if 'Name' in describe_health_check(check['Id'])['tags']:
+                check_name = describe_health_check(check['Id'])['tags']['Name']
+                health_checks_with_name[check_name] = check
+        if results.get('IsTruncated', False):
+            results = _list_health_checks(Marker=results.get('NextMarker'))
+        else:
+            return health_checks_with_name
+
+
+def delete_health_check(check_id):
+    if not check_id:
+        return False, None
+
+    if module.check_mode:
+        return True, 'delete'
+
+    try:
+        client.delete_health_check(
+            aws_retry=True,
+            HealthCheckId=check_id,
+        )
+    except is_boto3_error_code('NoSuchHealthCheck'):
+        # Handle the deletion race condition as cleanly as possible
+        return False, None
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg='Failed to delete health check')
+
+    return True, 'delete'
+
+
+def create_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_in):
+
+    # In general, if a request is repeated with the same CallerRef it won't
+    # result in a duplicate check appearing. This means we can safely use our
+    # retry decorators
+    caller_ref = str(uuid.uuid4())
+    missing_args = []
+
+    health_check = dict(
+        Type=type_in,
+        RequestInterval=request_interval_in,
+        Port=port_in,
+    )
+    if module.params.get('disabled') is not None:
+        health_check['Disabled'] = module.params.get('disabled')
+    if ip_addr_in:
+        health_check['IPAddress'] = ip_addr_in
+    if fqdn_in:
+        health_check['FullyQualifiedDomainName'] = fqdn_in
+
+    if type_in in ['HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH']:
+        resource_path = module.params.get('resource_path')
+        # if not resource_path:
+        #     missing_args.append('resource_path')
+        if resource_path:
+            health_check['ResourcePath'] = resource_path
+    if type_in in ['HTTP_STR_MATCH', 'HTTPS_STR_MATCH']:
+        string_match = module.params.get('string_match')
+        if not string_match:
+            missing_args.append('string_match')
+        health_check['SearchString'] = module.params.get('string_match')
+
+    failure_threshold = module.params.get('failure_threshold')
+    if not failure_threshold:
+        failure_threshold = 3
+    health_check['FailureThreshold'] = failure_threshold
+
+    if module.params.get('measure_latency') is not None:
+        health_check['MeasureLatency'] = module.params.get('measure_latency')
+
+    if missing_args:
+        module.fail_json(msg='missing required arguments for creation: {0}'.format(
+            ', '.join(missing_args)),
+        )
+
+    if module.check_mode:
+        return True, 'create', None
+
+    try:
+        result = client.create_health_check(
+            aws_retry=True,
+            CallerReference=caller_ref,
+            HealthCheckConfig=health_check,
+        )
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+        module.fail_json_aws(e, msg='Failed to create health check.', health_check=health_check)
+
+    check_id = result.get('HealthCheck').get('Id')
+    return True, 'create', check_id
+
+
+def update_health_check(existing_check):
+    # It's possible to update the following parameters:
+    # - ResourcePath
+    # - SearchString
+    # - FailureThreshold
+    # - Disabled
+    # - IPAddress
+    # - Port
+    # - FullyQualifiedDomainName
+
+    changes = dict()
+    existing_config = existing_check.get('HealthCheckConfig')
+
+    resource_path = module.params.get('resource_path', None)
+    if resource_path and resource_path != existing_config.get('ResourcePath'):
+        changes['ResourcePath'] = resource_path
+
+    search_string = module.params.get('string_match', None)
+    if search_string and search_string != existing_config.get('SearchString'):
+        changes['SearchString'] = search_string
+
+    failure_threshold = module.params.get('failure_threshold', None)
+    if failure_threshold and failure_threshold != existing_config.get('FailureThreshold'):
+        changes['FailureThreshold'] = failure_threshold
+
+    disabled = module.params.get('disabled', None)
+    if disabled is not None and disabled != existing_config.get('Disabled'):
+        changes['Disabled'] = module.params.get('disabled')
+
+    # If updating based on Health Check ID or health_check_name, we can update
+    if module.params.get('health_check_id') or module.params.get('use_unique_names'):
+        ip_address = module.params.get('ip_address', None)
+        if ip_address is not None and ip_address != existing_config.get('IPAddress'):
+            changes['IPAddress'] = module.params.get('ip_address')
+
+        port = module.params.get('port', None)
+        if port is not None and port != existing_config.get('Port'):
+            changes['Port'] = module.params.get('port')
+
+        fqdn = module.params.get('fqdn', None)
+        if fqdn is not None and fqdn != existing_config.get('FullyQualifiedDomainName'):
+            changes['FullyQualifiedDomainName'] = module.params.get('fqdn')
+
+    # No changes...
+    if not changes:
+        return False, None
+    if module.check_mode:
+        return True, 'update'
+
+    check_id = existing_check.get('Id')
+    # This makes sure we're starting from the version we think we are...
+    version_id = existing_check.get('HealthCheckVersion', 1)
+    try:
+        client.update_health_check(
+            HealthCheckId=check_id,
+            HealthCheckVersion=version_id,
+            **changes,
+        )
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+        module.fail_json_aws(e, msg='Failed to update health check.', id=check_id)
+
+    return True, 'update'
+
+
+def describe_health_check(id):
+    if not id:
+        return dict()
+
+    try:
+        result = client.get_health_check(
+            aws_retry=True,
+            HealthCheckId=id,
+        )
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+        module.fail_json_aws(e, msg='Failed to get health check.', id=id)
+
+    health_check = result.get('HealthCheck', {})
+    health_check = camel_dict_to_snake_dict(health_check)
+    tags = get_tags(module, client, 'healthcheck', id)
+    health_check['tags'] = tags
+    return health_check
+
+
+def main():
+    argument_spec = dict(
+        state=dict(choices=['present', 'absent'], default='present'),
+        disabled=dict(type='bool'),
+        ip_address=dict(),
+        port=dict(type='int'),
+        type=dict(choices=['HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP']),
+        resource_path=dict(),
+        fqdn=dict(),
+        string_match=dict(),
+        request_interval=dict(type='int', choices=[10, 30], default=30),
+        failure_threshold=dict(type='int', choices=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
+        tags=dict(type='dict', aliases=['resource_tags']),
+        purge_tags=dict(type='bool', default=True),
+        health_check_id=dict(type='str', aliases=['id'], required=False),
+        health_check_name=dict(type='str', aliases=['name'], required=False),
+        use_unique_names=dict(type='bool', required=False),
+        measure_latency=dict(type='bool', required=False),
+    )
+
+    args_one_of = [
+        ['ip_address', 'fqdn', 'health_check_id'],
+    ]
+
+    args_if = [
+        ['type', 'TCP', ('port',)],
+    ]
+
+    args_required_together = [
+        ['use_unique_names', 'health_check_name'],
+    ]
+
+    args_mutually_exclusive = [
+        ['health_check_id', 'health_check_name']
+    ]
+
+    global module
+    global client
+
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        required_one_of=args_one_of,
+        required_if=args_if,
+        required_together=args_required_together,
+        mutually_exclusive=args_mutually_exclusive,
+        supports_check_mode=True,
+    )
+
+    if not module.params.get('health_check_id') and not module.params.get('type'):
+        module.fail_json(msg="parameter 'type' is required if not updating or deleting health check by ID.")
+
+    state_in = module.params.get('state')
+    ip_addr_in = module.params.get('ip_address')
+    port_in = module.params.get('port')
+    type_in = module.params.get('type')
+    resource_path_in = module.params.get('resource_path')
+    fqdn_in = module.params.get('fqdn')
+    string_match_in = module.params.get('string_match')
+    request_interval_in = module.params.get('request_interval')
+    failure_threshold_in = module.params.get('failure_threshold')
+    health_check_name = module.params.get('health_check_name')
+    tags = module.params.get('tags')
+
+    # Default port
+    if port_in is None:
+        if type_in in ['HTTP', 'HTTP_STR_MATCH']:
+            port_in = 80
+        elif type_in in ['HTTPS', 'HTTPS_STR_MATCH']:
+            port_in = 443
+
+    if string_match_in:
+        if type_in not in ['HTTP_STR_MATCH', 'HTTPS_STR_MATCH']:
+            module.fail_json(msg="parameter 'string_match' is only for the HTTP(S)_STR_MATCH types")
types") + if len(string_match_in) > 255: + module.fail_json(msg="parameter 'string_match' is limited to 255 characters max") + + client = module.client('route53', retry_decorator=AWSRetry.jittered_backoff()) + + changed = False + action = None + check_id = None + + if module.params.get('use_unique_names') or module.params.get('health_check_id'): + module.deprecate( + 'The health_check_name is currently non required parameter.' + ' This behavior will change and health_check_name ' + ' will change to required=True and use_unique_names will change to default=True in release 6.0.0.', + version='6.0.0', collection_name='amazon.aws') + + # If update or delete Health Check based on ID + update_delete_by_id = False + if module.params.get('health_check_id'): + update_delete_by_id = True + id_to_update_delete = module.params.get('health_check_id') + try: + existing_check = client.get_health_check(HealthCheckId=id_to_update_delete)['HealthCheck'] + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.exit_json(changed=False, msg='The specified health check with ID: {0} does not exist'.format(id_to_update_delete)) + else: + existing_check = find_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_in) + if existing_check: + check_id = existing_check.get('Id') + + # Delete Health Check + if state_in == 'absent': + if update_delete_by_id: + changed, action = delete_health_check(id_to_update_delete) + else: + changed, action = delete_health_check(check_id) + check_id = None + + # Create Health Check + elif state_in == 'present': + if existing_check is None and not module.params.get('use_unique_names') and not update_delete_by_id: + changed, action, check_id = create_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_in) + + # Update Health Check + else: + # If health_check_name is a unique identifier + if module.params.get('use_unique_names'): + existing_checks_with_name = get_existing_checks_with_name() + # update the health_check if another health check with same name exists + if health_check_name in existing_checks_with_name: + changed, action = update_health_check(existing_checks_with_name[health_check_name]) + else: + # create a new health_check if another health check with same name does not exists + changed, action, check_id = create_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_in) + # Add tag to add name to health check + if check_id: + if not tags: + tags = {} + tags['Name'] = health_check_name + + else: + if update_delete_by_id: + changed, action = update_health_check(existing_check) + else: + changed, action = update_health_check(existing_check) + + if check_id: + changed |= manage_tags(module, client, 'healthcheck', check_id, + tags, module.params.get('purge_tags')) + + health_check = describe_health_check(id=check_id) + health_check['action'] = action + module.exit_json( + changed=changed, + health_check=health_check, + ) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/route53_info.py b/ansible_collections/amazon/aws/plugins/modules/route53_info.py new file mode 100644 index 000000000..0342aef6f --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/route53_info.py @@ -0,0 +1,874 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = 
r''' +module: route53_info +short_description: Retrieves Route53 details using AWS methods +version_added: 5.0.0 +description: + - Gets various details related to Route53 zones, record sets or health checks. + - This module was originally added to C(community.aws) in release 1.0.0. +options: + query: + description: + - Specifies the query action to take. + required: True + choices: [ + 'change', + 'checker_ip_range', + 'health_check', + 'hosted_zone', + 'record_sets', + 'reusable_delegation_set', + ] + type: str + change_id: + description: + - The ID of the change batch request. + - The value that you specify here is the value that + ChangeResourceRecordSets returned in the Id element + when you submitted the request. + - Required if I(query=change). + required: false + type: str + hosted_zone_id: + description: + - The Hosted Zone ID of the DNS zone. + - Required if I(query) is set to I(hosted_zone) and I(hosted_zone_method) is set to I(details). + - Required if I(query) is set to I(record_sets). + required: false + type: str + max_items: + description: + - Maximum number of items to return for various get/list requests. + required: false + type: int + next_marker: + description: + - "Some requests such as list_command: hosted_zones will return a maximum + number of entries - e.g. 100 or the number specified by I(max_items). + If the number of entries exceeds this maximum another request can be sent + using the NextMarker entry from the first response to get the next page + of results." + required: false + type: str + delegation_set_id: + description: + - The DNS Zone delegation set ID. + required: false + type: str + start_record_name: + description: + - "The first name in the lexicographic ordering of domain names that you want + the list_command: record_sets to start listing from." + required: false + type: str + type: + description: + - The type of DNS record. + required: false + choices: [ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'CAA', 'NS', 'NAPTR', 'SOA', 'DS' ] + type: str + dns_name: + description: + - The first name in the lexicographic ordering of domain names that you want + the list_command to start listing from. + required: false + type: str + resource_id: + description: + - The ID(s) of the specified resource(s). + - Required if I(query=health_check) and I(health_check_method=tags). + - Required if I(query=hosted_zone) and I(hosted_zone_method=tags). + required: false + aliases: ['resource_ids'] + type: list + elements: str + health_check_id: + description: + - The ID of the health check. + - Required if I(query) is set to I(health_check) and + I(health_check_method) is set to I(details), I(status) or I(failure_reason). + required: false + type: str + hosted_zone_method: + description: + - "This is used in conjunction with query: hosted_zone. + It allows for listing details, counts or tags of various + hosted zone details." + required: false + choices: [ + 'details', + 'list', + 'list_by_name', + 'count', + 'tags', + ] + default: 'list' + type: str + health_check_method: + description: + - "This is used in conjunction with query: health_check. + It allows for listing details, counts or tags of various + health check details."
+ required: false + choices: [ + 'list', + 'details', + 'status', + 'failure_reason', + 'count', + 'tags', + ] + default: 'list' + type: str +author: + - Karen Cheng (@Etherdaemon) +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.boto3 + +''' + +EXAMPLES = r''' +# Simple example of listing all hosted zones +- name: List all hosted zones + amazon.aws.route53_info: + query: hosted_zone + register: hosted_zones + +# Getting a count of hosted zones +- name: Return a count of all hosted zones + amazon.aws.route53_info: + query: hosted_zone + hosted_zone_method: count + register: hosted_zone_count + +- name: List the first 20 resource record sets in a given hosted zone + amazon.aws.route53_info: + profile: account_name + query: record_sets + hosted_zone_id: ZZZ1111112222 + max_items: 20 + register: record_sets + +- name: List first 20 health checks + amazon.aws.route53_info: + query: health_check + health_check_method: list + max_items: 20 + register: health_checks + +- name: Get health check last failure_reason + amazon.aws.route53_info: + query: health_check + health_check_method: failure_reason + health_check_id: 00000000-1111-2222-3333-12345678abcd + register: health_check_failure_reason + +- name: Retrieve reusable delegation set details + amazon.aws.route53_info: + query: reusable_delegation_set + delegation_set_id: delegation id + register: delegation_sets + +- name: Setup of example for using next_marker + amazon.aws.route53_info: + query: hosted_zone + max_items: 1 + register: first_info + +- name: Example for using next_marker + amazon.aws.route53_info: + query: hosted_zone + next_marker: "{{ first_info.NextMarker }}" + max_items: 1 + when: "'NextMarker' in first_info" + +- name: Retrieve host entries starting with host1.workshop.test.io + block: + - name: Grab zone id + amazon.aws.route53_zone: + zone: "test.io" + register: AWSINFO + + - name: Grab Route53 record information + amazon.aws.route53_info: + type: A + query: record_sets + hosted_zone_id: "{{ AWSINFO.zone_id }}" + start_record_name: "host1.workshop.test.io" + register: RECORDS +''' + +RETURN = r''' +resource_record_sets: + description: A list of resource record sets returned by list_resource_record_sets in boto3. + returned: when I(query=record_sets) + type: list + elements: dict + contains: + name: + description: The name of a record in the specified hosted zone. + type: str + sample: 'www.example.com' + type: + description: The DNS record type. + type: str + sample: 'A' + ttl: + description: The resource record cache time to live (TTL), in seconds. + type: int + sample: 60 + set_identifier: + description: An identifier that differentiates among multiple resource record sets that have the same combination of name and type. + type: str + sample: 'abcd' + resource_records: + description: Information about the resource records. + type: list + elements: dict + contains: + value: + description: The current or new DNS record value. + type: str + sample: 'ns-12.awsdns-34.com.' + geo_location: + description: The specified geographic location for which Route53 responds based on location. + type: dict + elements: str + contains: + continent_code: + description: The two-letter code for the continent. + type: str + sample: 'NA' + country_code: + description: The two-letter code for a country.
+ type: str + sample: 'US' + subdivision_code: + description: The two-letter code for a state of the United States. + type: str + sample: 'NY' + version_added: 4.0.0 + version_added_collection: community.aws +hosted_zones: + description: A list of hosted zones returned by list_hosted_zones in boto3. + returned: when I(query=hosted_zone) + type: list + elements: dict + contains: + id: + description: The ID of the hosted zone assigned by Amazon Route53 to the hosted zone at creation time. + type: str + sample: '/hostedzone/Z01234567AB1234567890' + name: + description: The name of the domain. + type: str + sample: 'example.io' + resource_record_set_count: + description: The number of resource record sets in the hosted zone. + type: int + sample: 3 + caller_reference: + description: The value specified for CallerReference at the time of hosted zone creation. + type: str + sample: '01d0db12-x0x9-12a3-1234-0z000z00zz0z' + config: + description: A dict that contains Comment and PrivateZone elements. + type: dict + contains: + comment: + description: Any comments included about the hosted zone. + type: str + sample: 'HostedZone created by Route53 Registrar' + private_zone: + description: A value that indicates whether this is a private hosted zone or not. + type: bool + sample: false + version_added: 4.0.0 + version_added_collection: community.aws +health_checks: + description: A list of Route53 health checks returned by list_health_checks in boto3. + type: list + elements: dict + returned: when I(query=health_check) + contains: + id: + description: The identifier that Amazon Route53 assigned to the health check at the time of creation. + type: str + sample: '12345cdc-2cc4-1234-bed2-123456abc1a2' + health_check_version: + description: The version of the health check. + type: str + sample: 1 + caller_reference: + description: A unique string that you specified when you created the health check. + type: str + sample: '01d0db12-x0x9-12a3-1234-0z000z00zz0z' + health_check_config: + description: A dict that contains detailed information about one health check. + type: dict + contains: + disabled: + description: Whether Route53 should stop performing health checks on an endpoint. + type: bool + sample: false + enable_sni: + description: Whether Route53 should send the value of FullyQualifiedDomainName to the endpoint in the client_hello message during TLS negotiation. + type: bool + sample: true + failure_threshold: + description: The number of consecutive health checks that an endpoint must pass/fail for Route53 to change the current status of the endpoint. + type: int + sample: 3 + fully_qualified_domain_name: + description: The fully qualified DNS name of the endpoint on which Route53 performs health checks. + type: str + sample: 'hello' + inverted: + description: Whether Route53 should invert the status of a health check. + type: bool + sample: false + ip_address: + description: The IPv4/IPv6 IP address of the endpoint that Route53 should perform health checks on. + type: str + sample: 192.0.2.44 + measure_latency: + description: Whether Route53 should measure latency between health checkers in multiple AWS regions and the endpoint. + type: bool + sample: false + port: + description: The port of the endpoint that Route53 should perform health checks on. + type: int + sample: 80 + request_interval: + description: The number of seconds between the time that Route53 gets a response from the endpoint and the next health check request.
+ type: int + sample: 30 + resource_path: + description: The path that Route53 requests when performing health checks. + type: str + sample: '/welcome.html' + search_string: + description: The string that Route53 uses to search for in the response body from the specified resource. + type: str + sample: 'test-string-to-match' + type: + description: The type of the health check. + type: str + sample: HTTPS + version_added: 4.0.0 + version_added_collection: community.aws +checker_ip_ranges: + description: A list of IP ranges in CIDR format for Amazon Route 53 health checkers. + returned: when I(query=checker_ip_range) + type: list + elements: str + version_added: 4.1.0 + version_added_collection: community.aws +delegation_sets: + description: A list of dicts that contains information about the reusable delegation set. + returned: when I(query=reusable_delegation_set) + type: list + elements: dict + version_added: 4.1.0 + version_added_collection: community.aws +health_check: + description: A dict of Route53 health check details returned by get_health_check in boto3. + type: dict + returned: when I(query=health_check) and I(health_check_method=details) + contains: + id: + description: The identifier that Amazon Route53 assigned to the health check at the time of creation. + type: str + sample: '12345cdc-2cc4-1234-bed2-123456abc1a2' + health_check_version: + description: The version of the health check. + type: str + sample: 1 + caller_reference: + description: A unique string that you specified when you created the health check. + type: str + sample: '01d0db12-x0x9-12a3-1234-0z000z00zz0z' + health_check_config: + description: A dict that contains detailed information about one health check. + type: dict + contains: + disabled: + description: Whether Route53 should stop performing health checks on an endpoint. + type: bool + sample: false + enable_sni: + description: Whether Route53 should send the value of FullyQualifiedDomainName to the endpoint in the client_hello message during TLS negotiation. + type: bool + sample: true + failure_threshold: + description: The number of consecutive health checks that an endpoint must pass/fail for Route53 to change the current status of the endpoint. + type: int + sample: 3 + fully_qualified_domain_name: + description: The fully qualified DNS name of the endpoint on which Route53 performs health checks. + type: str + sample: 'hello' + inverted: + description: Whether Route53 should invert the status of a health check. + type: bool + sample: false + ip_address: + description: The IPv4/IPv6 IP address of the endpoint that Route53 should perform health checks on. + type: str + sample: 192.0.2.44 + measure_latency: + description: Whether Route53 should measure latency between health checkers in multiple AWS regions and the endpoint. + type: bool + sample: false + port: + description: The port of the endpoint that Route53 should perform health checks on. + type: int + sample: 80 + request_interval: + description: The number of seconds between the time that Route53 gets a response from the endpoint and the next health check request.
+ type: str + sample: HTTPS + version_added: 4.1.0 + version_added_collection: community.aws +health_check_observations: + description: A dict of Route53 health check details returned by get_health_check_status and get_health_check_last_failure_reason in boto3. + type: list + elements: dict + returned: when I(query=health_check) and I(health_check_method=status) or I(health_check_method=failure_reason) + contains: + ip_address: + description: The IP address of the Amazon Route 53 health checker that provided the failure reason in StatusReport. + type: str + sample: '12.345.67.89' + region: + description: The region of the Amazon Route 53 health checker that provided the status in StatusReport. + type: str + sample: 'us-west-1' + status_report: + description: A complex type that contains the last failure reason and the time of the failed health check. + type: dict + contains: + checked_time: + description: The date and time that the health checker performed the health check in ISO 8601 format and Coordinated Universal Time (UTC). + type: str + sample: '2023-03-08T23:10:08.452000+00:00' + status: + description: A description of the status of the health check endpoint as reported by one of the Amazon Route 53 health checkers. + type: str + sample: 'Failure: Resolved IP: 12.345.67.89. The connection was closed by the endpoint.' + version_added: 5.4.0 +ResourceRecordSets: + description: A deprecated CamelCased list of resource record sets returned by list_resource_record_sets in boto3. \ + This list contains same elements/parameters as it's snake_cased version mentioned above. \ + This field is deprecated and will be removed in 6.0.0 version release. + returned: when I(query=record_sets) + type: list + elements: dict +HostedZones: + description: A deprecated CamelCased list of hosted zones returned by list_hosted_zones in boto3. \ + This list contains same elements/parameters as it's snake_cased version mentioned above. \ + This field is deprecated and will be removed in 6.0.0 version release. + returned: when I(query=hosted_zone) + type: list + elements: dict +HealthChecks: + description: A deprecated CamelCased list of Route53 health checks returned by list_health_checks in boto3. \ + This list contains same elements/parameters as it's snake_cased version mentioned above. \ + This field is deprecated and will be removed in 6.0.0 version release. + type: list + elements: dict + returned: when I(query=health_check) +CheckerIpRanges: + description: A deprecated CamelCased list of IP ranges in CIDR format for Amazon Route 53 health checkers.\ + This list contains same elements/parameters as it's snake_cased version mentioned abobe. \ + This field is deprecated and will be removed in 6.0.0 version release. + type: list + elements: str + returned: when I(query=checker_ip_range) +DelegationSets: + description: A deprecated CamelCased list of dicts that contains information about the reusable delegation set. \ + This list contains same elements/parameters as it's snake_cased version mentioned above. \ + This field is deprecated and will be removed in 6.0.0 version release. + type: list + elements: dict + returned: when I(query=reusable_delegation_set) +HealthCheck: + description: A deprecated CamelCased dict of Route53 health check details returned by get_health_check in boto3. \ + This dict contains same elements/parameters as it's snake_cased version mentioned above. \ + This field is deprecated and will be removed in 6.0.0 version release. 
+ type: dict + returned: when I(query=health_check) and I(health_check_method=details) +''' + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils._text import to_native + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict + + +# Split out paginator to allow for the backoff decorator to function +@AWSRetry.jittered_backoff() +def _paginated_result(paginator_name, **params): + paginator = client.get_paginator(paginator_name) + return paginator.paginate(**params).build_full_result() + + +def get_hosted_zone(): + params = dict() + + if module.params.get('hosted_zone_id'): + params['Id'] = module.params.get('hosted_zone_id') + else: + module.fail_json(msg="Hosted Zone Id is required") + + return client.get_hosted_zone(**params) + + +def reusable_delegation_set_details(): + params = dict() + + if not module.params.get('delegation_set_id'): + if module.params.get('max_items'): + params['MaxItems'] = str(module.params.get('max_items')) + + if module.params.get('next_marker'): + params['Marker'] = module.params.get('next_marker') + + results = client.list_reusable_delegation_sets(**params) + else: + params['DelegationSetId'] = module.params.get('delegation_set_id') + results = client.get_reusable_delegation_set(**params) + + results['delegation_sets'] = results['DelegationSets'] + module.deprecate("The 'CamelCase' return values with key 'DelegationSets' are deprecated and \ + will be replaced by 'snake_case' return values with key 'delegation_sets'. \ + Both case values are returned for now.", + date='2025-01-01', collection_name='amazon.aws') + + return results + + +def list_hosted_zones(): + params = dict() + + # Set PaginationConfig with max_items + if module.params.get('max_items'): + params['PaginationConfig'] = dict( + MaxItems=module.params.get('max_items') + ) + + if module.params.get('next_marker'): + params['Marker'] = module.params.get('next_marker') + + if module.params.get('delegation_set_id'): + params['DelegationSetId'] = module.params.get('delegation_set_id') + + zones = _paginated_result('list_hosted_zones', **params)['HostedZones'] + snaked_zones = [camel_dict_to_snake_dict(zone) for zone in zones] + + module.deprecate("The 'CamelCase' return values with key 'HostedZones' and 'list' are deprecated and \ + will be replaced by 'snake_case' return values with key 'hosted_zones'.
\ + Both case values are returned for now.", + date='2025-01-01', collection_name='amazon.aws') + + return { + "HostedZones": zones, + "list": zones, + "hosted_zones": snaked_zones, + } + + +def list_hosted_zones_by_name(): + params = dict() + + if module.params.get('hosted_zone_id'): + params['HostedZoneId'] = module.params.get('hosted_zone_id') + + if module.params.get('dns_name'): + params['DNSName'] = module.params.get('dns_name') + + if module.params.get('max_items'): + params['MaxItems'] = str(module.params.get('max_items')) + + return client.list_hosted_zones_by_name(**params) + + +def change_details(): + params = dict() + + if module.params.get('change_id'): + params['Id'] = module.params.get('change_id') + else: + module.fail_json(msg="change_id is required") + + results = client.get_change(**params) + return results + + +def checker_ip_range_details(): + results = client.get_checker_ip_ranges() + results['checker_ip_ranges'] = results['CheckerIpRanges'] + module.deprecate("The 'CamelCase' return values with key 'CheckerIpRanges' are deprecated and \ + will be replaced by 'snake_case' return values with key 'checker_ip_ranges'. \ + Both case values are returned for now.", + date='2025-01-01', collection_name='amazon.aws') + + return results + + +def get_count(): + if module.params.get('query') == 'health_check': + results = client.get_health_check_count() + else: + results = client.get_hosted_zone_count() + + return results + + +def get_health_check(): + params = dict() + results = dict() + + if not module.params.get('health_check_id'): + module.fail_json(msg="health_check_id is required") + else: + params['HealthCheckId'] = module.params.get('health_check_id') + + if module.params.get('health_check_method') == 'details': + results = client.get_health_check(**params) + results["health_check"] = camel_dict_to_snake_dict(results["HealthCheck"]) + module.deprecate( + "The 'CamelCase' return values with key 'HealthCheck' are deprecated \ + and will be replaced by 'snake_case' return values with key 'health_check'.
\ + Both case values are returned for now.", + date="2025-01-01", + collection_name="amazon.aws", + ) + + elif module.params.get('health_check_method') == 'failure_reason': + response = client.get_health_check_last_failure_reason(**params) + results["health_check_observations"] = [ + camel_dict_to_snake_dict(health_check) for health_check in response["HealthCheckObservations"] + ] + + elif module.params.get('health_check_method') == 'status': + response = client.get_health_check_status(**params) + results["health_check_observations"] = [ + camel_dict_to_snake_dict(health_check) for health_check in response["HealthCheckObservations"] + ] + + return results + + +def get_resource_tags(): + params = dict() + + if module.params.get('resource_id'): + params['ResourceIds'] = module.params.get('resource_id') + else: + module.fail_json(msg="resource_id or resource_ids is required") + + if module.params.get('query') == 'health_check': + params['ResourceType'] = 'healthcheck' + else: + params['ResourceType'] = 'hostedzone' + + return client.list_tags_for_resources(**params) + + +def list_health_checks(): + params = dict() + + if module.params.get('next_marker'): + params['Marker'] = module.params.get('next_marker') + + # Set PaginationConfig with max_items + if module.params.get('max_items'): + params['PaginationConfig'] = dict( + MaxItems=module.params.get('max_items') + ) + + health_checks = _paginated_result('list_health_checks', **params)['HealthChecks'] + snaked_health_checks = [camel_dict_to_snake_dict(health_check) for health_check in health_checks] + + module.deprecate("The 'CamelCase' return values with key 'HealthChecks' and 'list' are deprecated and \ + will be replaced by 'snake_case' return values with key 'health_checks'. \ + Both case values are returned for now.", + date='2025-01-01', collection_name='amazon.aws') + + return { + "HealthChecks": health_checks, + "list": health_checks, + "health_checks": snaked_health_checks, + } + + +def record_sets_details(): + params = dict() + + if module.params.get('hosted_zone_id'): + params['HostedZoneId'] = module.params.get('hosted_zone_id') + else: + module.fail_json(msg="Hosted Zone Id is required") + + if module.params.get('start_record_name'): + params['StartRecordName'] = module.params.get('start_record_name') + + # Check that both params are set if type is applied + if module.params.get('type') and not module.params.get('start_record_name'): + module.fail_json(msg="start_record_name must be specified if type is set") + + if module.params.get('type'): + params['StartRecordType'] = module.params.get('type') + + # Set PaginationConfig with max_items + if module.params.get('max_items'): + params['PaginationConfig'] = dict( + MaxItems=module.params.get('max_items') + ) + + record_sets = _paginated_result('list_resource_record_sets', **params)['ResourceRecordSets'] + snaked_record_sets = [camel_dict_to_snake_dict(record_set) for record_set in record_sets] + + module.deprecate("The 'CamelCase' return values with key 'ResourceRecordSets' and 'list' are deprecated and \ + will be replaced by 'snake_case' return values with key 'resource_record_sets'. 
\ + Both case values are returned for now.", + date='2025-01-01', collection_name='amazon.aws') + + return { + "ResourceRecordSets": record_sets, + "list": record_sets, + "resource_record_sets": snaked_record_sets, + } + + +def health_check_details(): + health_check_invocations = { + 'list': list_health_checks, + 'details': get_health_check, + 'status': get_health_check, + 'failure_reason': get_health_check, + 'count': get_count, + 'tags': get_resource_tags, + } + + results = health_check_invocations[module.params.get('health_check_method')]() + return results + + +def hosted_zone_details(): + hosted_zone_invocations = { + 'details': get_hosted_zone, + 'list': list_hosted_zones, + 'list_by_name': list_hosted_zones_by_name, + 'count': get_count, + 'tags': get_resource_tags, + } + + results = hosted_zone_invocations[module.params.get('hosted_zone_method')]() + return results + + +def main(): + global module + global client + + argument_spec = dict( + query=dict(choices=[ + 'change', + 'checker_ip_range', + 'health_check', + 'hosted_zone', + 'record_sets', + 'reusable_delegation_set', + ], required=True), + change_id=dict(), + hosted_zone_id=dict(), + max_items=dict(type='int'), + next_marker=dict(), + delegation_set_id=dict(), + start_record_name=dict(), + type=dict(type='str', choices=[ + 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'CAA', 'NS', 'NAPTR', 'SOA', 'DS' + ]), + dns_name=dict(), + resource_id=dict(type='list', aliases=['resource_ids'], elements='str'), + health_check_id=dict(), + hosted_zone_method=dict(choices=[ + 'details', + 'list', + 'list_by_name', + 'count', + 'tags' + ], default='list'), + health_check_method=dict(choices=[ + 'list', + 'details', + 'status', + 'failure_reason', + 'count', + 'tags', + ], default='list'), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[ + ['hosted_zone_method', 'health_check_method'], + ], + check_boto3=False, + ) + + try: + client = module.client('route53', retry_decorator=AWSRetry.jittered_backoff()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') + + invocations = { + 'change': change_details, + 'checker_ip_range': checker_ip_range_details, + 'health_check': health_check_details, + 'hosted_zone': hosted_zone_details, + 'record_sets': record_sets_details, + 'reusable_delegation_set': reusable_delegation_set_details, + } + + results = dict(changed=False) + try: + results = invocations[module.params.get('query')]() + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Query failed") + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/route53_zone.py b/ansible_collections/amazon/aws/plugins/modules/route53_zone.py new file mode 100644 index 000000000..ac549ba56 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/route53_zone.py @@ -0,0 +1,556 @@ +#!/usr/bin/python +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +module: route53_zone +short_description: add or delete Route53 zones +version_added: 5.0.0 +description: + - Creates and deletes Route53 private and public zones. 
+ - This module was originally added to C(community.aws) in release 1.0.0. +options: + zone: + description: + - "The DNS zone record (e.g. foo.com.)" + required: true + type: str + state: + description: + - Whether or not the zone should exist. + default: present + choices: [ "present", "absent" ] + type: str + vpc_id: + description: + - The VPC ID the zone should be a part of (if this is going to be a private zone). + type: str + vpc_region: + description: + - The VPC Region the zone should be a part of (if this is going to be a private zone). + type: str + vpcs: + version_added: 5.3.0 + description: + - The VPCs the zone should be a part of (if this is going to be a private zone). + type: list + elements: dict + suboptions: + id: + description: + - The ID of the VPC. + type: str + required: true + region: + description: + - The region of the VPC. + type: str + required: true + comment: + description: + - Comment associated with the zone. + default: '' + type: str + hosted_zone_id: + description: + - The unique zone identifier you want to delete or "all" if there are many zones with the same domain name. + - Required if there are multiple zones identified with the above options. + type: str + delegation_set_id: + description: + - The reusable delegation set ID to be associated with the zone. + - Note that you can't associate a reusable delegation set with a private hosted zone. + type: str +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags + - amazon.aws.boto3 +notes: + - Support for I(tags) and I(purge_tags) was added in release 2.1.0. +author: + - "Christopher Troup (@minichate)" +''' + +EXAMPLES = r''' +- name: create a public zone + amazon.aws.route53_zone: + zone: example.com + comment: this is an example + +- name: delete a public zone + amazon.aws.route53_zone: + zone: example.com + state: absent + +- name: create a private zone + amazon.aws.route53_zone: + zone: devel.example.com + vpc_id: '{{ myvpc_id }}' + vpc_region: us-west-2 + comment: developer domain + +- name: create a private zone with multiple associated VPCs + amazon.aws.route53_zone: + zone: crossdevel.example.com + vpcs: + - id: vpc-123456 + region: us-west-2 + - id: vpc-000001 + region: us-west-2 + comment: developer cross-vpc domain + +- name: create a public zone associated with a specific reusable delegation set + amazon.aws.route53_zone: + zone: example.com + comment: reusable delegation set example + delegation_set_id: A1BCDEF2GHIJKL + +- name: create a public zone with tags + amazon.aws.route53_zone: + zone: example.com + comment: this is an example + tags: + Owner: Ansible Team + +- name: modify a public zone, removing all previous tags and adding a new one + amazon.aws.route53_zone: + zone: example.com + comment: this is an example + tags: + Support: Ansible Community + purge_tags: true +''' + +RETURN = r''' +comment: + description: optional hosted zone comment + returned: when hosted zone exists + type: str + sample: "Private zone" +name: + description: hosted zone name + returned: when hosted zone exists + type: str + sample: "private.local." +private_zone: + description: whether hosted zone is private or public + returned: when hosted zone exists + type: bool + sample: true +vpc_id: + description: id of the first vpc attached to private hosted zone (use vpcs for associating multiple).
+ returned: for private hosted zone + type: str + sample: "vpc-1d36c84f" +vpc_region: + description: region of the first vpc attached to private hosted zone (use vpcs for associating multiple). + returned: for private hosted zone + type: str + sample: "eu-west-1" +vpcs: + version_added: 5.3.0 + description: The list of VPCs attached to the private hosted zone + returned: for private hosted zone + type: list + elements: dict + sample: "[{'id': 'vpc-123456', 'region': 'us-west-2'}]" + contains: + id: + description: ID of the VPC + returned: for private hosted zone + type: str + sample: "vpc-123456" + region: + description: Region of the VPC + returned: for private hosted zone + type: str + sample: "eu-west-2" +zone_id: + description: hosted zone id + returned: when hosted zone exists + type: str + sample: "Z6JQG9820BEFMW" +delegation_set_id: + description: id of the associated reusable delegation set + returned: for public hosted zones, if they have been associated with a reusable delegation set + type: str + sample: "A1BCDEF2GHIJKL" +tags: + description: tags associated with the zone + returned: when tags are defined + type: dict +''' + +import time +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.route53 import manage_tags +from ansible_collections.amazon.aws.plugins.module_utils.route53 import get_tags + +try: + from botocore.exceptions import BotoCoreError, ClientError +except ImportError: + pass # caught by AnsibleAWSModule + + +@AWSRetry.jittered_backoff() +def _list_zones(): + paginator = client.get_paginator('list_hosted_zones') + return paginator.paginate().build_full_result() + + +def find_zones(zone_in, private_zone): + try: + results = _list_zones() + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Could not list current hosted zones") + zones = [] + for r53zone in results['HostedZones']: + if r53zone['Name'] != zone_in: + continue + # only save zone names that match the public/private setting + if (r53zone['Config']['PrivateZone'] and private_zone) or \ + (not r53zone['Config']['PrivateZone'] and not private_zone): + zones.append(r53zone) + + return zones + + +def create(matching_zones): + zone_in = module.params.get('zone').lower() + vpc_id = module.params.get('vpc_id') + vpc_region = module.params.get('vpc_region') + vpcs = module.params.get('vpcs') or ([{'id': vpc_id, 'region': vpc_region}] if vpc_id and vpc_region else None) + comment = module.params.get('comment') + delegation_set_id = module.params.get('delegation_set_id') + tags = module.params.get('tags') + purge_tags = module.params.get('purge_tags') + + if not zone_in.endswith('.'): + zone_in += "."
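+ # Route 53 stores zone names fully qualified (e.g. "example.com."), which is why the trailing dot is appended above before comparing against existing zones; a legacy vpc_id/vpc_region pair has likewise already been folded into the single-element vpcs list form so the logic below only has to handle one shape.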
+ + private_zone = bool(vpcs) + + record = { + 'private_zone': private_zone, + 'vpc_id': vpcs and vpcs[0]['id'], # The first one for backwards compatibility + 'vpc_region': vpcs and vpcs[0]['region'], # The first one for backwards compatibility + 'vpcs': vpcs, + 'comment': comment, + 'name': zone_in, + 'delegation_set_id': delegation_set_id, + 'zone_id': None, + } + + if private_zone: + changed, result = create_or_update_private(matching_zones, record) + else: + changed, result = create_or_update_public(matching_zones, record) + + zone_id = result.get('zone_id') + if zone_id: + if tags is not None: + changed |= manage_tags(module, client, 'hostedzone', zone_id, tags, purge_tags) + result['tags'] = get_tags(module, client, 'hostedzone', zone_id) + else: + result['tags'] = tags + + return changed, result + + +def create_or_update_private(matching_zones, record): + for z in matching_zones: + try: + result = client.get_hosted_zone(Id=z['Id']) # could be in different regions or have different VPC IDs + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Could not get details about hosted zone %s" % z['Id']) + zone_details = result['HostedZone'] + vpc_details = result['VPCs'] + current_vpc_ids = None + current_vpc_regions = None + matching = False + if isinstance(vpc_details, dict) and len(record['vpcs']) == 1: + if vpc_details['VPC']['VPCId'] == record['vpcs'][0]['id']: + current_vpc_ids = [vpc_details['VPC']['VPCId']] + current_vpc_regions = [vpc_details['VPC']['VPCRegion']] + matching = True + else: + # Sort the lists and compare them to make sure they contain the same items + if (sorted([vpc['id'] for vpc in record['vpcs']]) == sorted([v['VPCId'] for v in vpc_details]) + and sorted([vpc['region'] for vpc in record['vpcs']]) == sorted([v['VPCRegion'] for v in vpc_details])): + current_vpc_ids = [vpc['id'] for vpc in record['vpcs']] + current_vpc_regions = [vpc['region'] for vpc in record['vpcs']] + matching = True + + if matching: + record['zone_id'] = zone_details['Id'].replace('/hostedzone/', '') + if 'Comment' in zone_details['Config'] and zone_details['Config']['Comment'] != record['comment']: + if not module.check_mode: + try: + client.update_hosted_zone_comment(Id=zone_details['Id'], Comment=record['comment']) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Could not update comment for hosted zone %s" % zone_details['Id']) + return True, record + else: + record['msg'] = ("There is already a private hosted zone in the same region with the same VPC(s) " + "you chose. Unable to create a new private hosted zone in the same namespace.")
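+ # A private zone with this name already exists but its VPC associations differ, so the conflict is reported above and nothing is changed rather than creating an ambiguous duplicate.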
+ return False, record + + if not module.check_mode: + try: + result = client.create_hosted_zone( + Name=record['name'], + HostedZoneConfig={ + 'Comment': record['comment'] if record['comment'] is not None else "", + 'PrivateZone': True, + }, + VPC={ + 'VPCRegion': record['vpcs'][0]['region'], + 'VPCId': record['vpcs'][0]['id'], + }, + CallerReference="%s-%s" % (record['name'], time.time()), + ) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Could not create hosted zone") + + hosted_zone = result['HostedZone'] + zone_id = hosted_zone['Id'].replace('/hostedzone/', '') + record['zone_id'] = zone_id + + if len(record['vpcs']) > 1: + for vpc in record['vpcs'][1:]: + try: + result = client.associate_vpc_with_hosted_zone( + HostedZoneId=zone_id, + VPC={ + 'VPCRegion': vpc['region'], + 'VPCId': vpc['id'], + }, + ) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Could not associate additional VPCs with hosted zone") + + changed = True + return changed, record + + +def create_or_update_public(matching_zones, record): + zone_details, zone_delegation_set_details = None, {} + for matching_zone in matching_zones: + try: + zone = client.get_hosted_zone(Id=matching_zone['Id']) + zone_details = zone['HostedZone'] + zone_delegation_set_details = zone.get('DelegationSet', {}) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Could not get details about hosted zone %s" % matching_zone['Id']) + if 'Comment' in zone_details['Config'] and zone_details['Config']['Comment'] != record['comment']: + if not module.check_mode: + try: + client.update_hosted_zone_comment( + Id=zone_details['Id'], + Comment=record['comment'] + ) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Could not update comment for hosted zone %s" % zone_details['Id']) + changed = True + else: + changed = False + break + + if zone_details is None: + if not module.check_mode: + try: + params = dict( + Name=record['name'], + HostedZoneConfig={ + 'Comment': record['comment'] if record['comment'] is not None else "", + 'PrivateZone': False, + }, + CallerReference="%s-%s" % (record['name'], time.time()), + ) + + if record.get('delegation_set_id') is not None: + params['DelegationSetId'] = record['delegation_set_id'] + + result = client.create_hosted_zone(**params) + zone_details = result['HostedZone'] + zone_delegation_set_details = result.get('DelegationSet', {}) + + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Could not create hosted zone") + changed = True + + if module.check_mode: + if zone_details: + record['zone_id'] = zone_details['Id'].replace('/hostedzone/', '') + else: + record['zone_id'] = zone_details['Id'].replace('/hostedzone/', '') + record['name'] = zone_details['Name'] + record['delegation_set_id'] = zone_delegation_set_details.get('Id', '').replace('/delegationset/', '') + + return changed, record + + +def delete_private(matching_zones, vpcs): + for z in matching_zones: + try: + result = client.get_hosted_zone(Id=z['Id']) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Could not get details about hosted zone %s" % z['Id']) + zone_details = result['HostedZone'] + vpc_details = result['VPCs'] + if isinstance(vpc_details, dict): + if vpc_details['VPC']['VPCId'] == vpcs[0]['id'] and vpcs[0]['region'] == vpc_details['VPC']['VPCRegion']: + if not module.check_mode: + try: + client.delete_hosted_zone(Id=z['Id']) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, 
msg="Could not delete hosted zone %s" % z['Id']) + return True, "Successfully deleted %s" % zone_details['Name'] + else: + # Sort the lists and compare them to make sure they contain the same items + if (sorted([vpc['id'] for vpc in vpcs]) == sorted([v['VPCId'] for v in vpc_details]) + and sorted([vpc['region'] for vpc in vpcs]) == sorted([v['VPCRegion'] for v in vpc_details])): + if not module.check_mode: + try: + client.delete_hosted_zone(Id=z['Id']) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Could not delete hosted zone %s" % z['Id']) + return True, "Successfully deleted %s" % zone_details['Name'] + + return False, "The VPCs do not match a private hosted zone." + + +def delete_public(matching_zones): + if len(matching_zones) > 1: + changed = False + msg = "There are multiple zones that match. Use hosted_zone_id to specify the correct zone." + else: + if not module.check_mode: + try: + client.delete_hosted_zone(Id=matching_zones[0]['Id']) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Could not get delete hosted zone %s" % matching_zones[0]['Id']) + changed = True + msg = "Successfully deleted %s" % matching_zones[0]['Id'] + return changed, msg + + +def delete_hosted_id(hosted_zone_id, matching_zones): + if hosted_zone_id == "all": + deleted = [] + for z in matching_zones: + deleted.append(z['Id']) + if not module.check_mode: + try: + client.delete_hosted_zone(Id=z['Id']) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Could not delete hosted zone %s" % z['Id']) + changed = True + msg = "Successfully deleted zones: %s" % deleted + elif hosted_zone_id in [zo['Id'].replace('/hostedzone/', '') for zo in matching_zones]: + if not module.check_mode: + try: + client.delete_hosted_zone(Id=hosted_zone_id) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Could not delete hosted zone %s" % hosted_zone_id) + changed = True + msg = "Successfully deleted zone: %s" % hosted_zone_id + else: + changed = False + msg = "There is no zone to delete that matches hosted_zone_id %s." % hosted_zone_id + return changed, msg + + +def delete(matching_zones): + zone_in = module.params.get('zone').lower() + vpc_id = module.params.get('vpc_id') + vpc_region = module.params.get('vpc_region') + vpcs = module.params.get('vpcs') or ([{'id': vpc_id, 'region': vpc_region}] if vpc_id and vpc_region else None) + hosted_zone_id = module.params.get('hosted_zone_id') + + if not zone_in.endswith('.'): + zone_in += "." + + private_zone = bool(vpcs) + + if zone_in in [z['Name'] for z in matching_zones]: + if hosted_zone_id: + changed, result = delete_hosted_id(hosted_zone_id, matching_zones) + else: + if private_zone: + changed, result = delete_private(matching_zones, vpcs) + else: + changed, result = delete_public(matching_zones) + else: + changed = False + result = "No zone to delete." 
+ + return changed, result + + +def main(): + global module + global client + + argument_spec = dict( + zone=dict(required=True), + state=dict(default='present', choices=['present', 'absent']), + vpc_id=dict(default=None), + vpc_region=dict(default=None), + vpcs=dict(type='list', default=None, elements='dict', options=dict( + id=dict(required=True), + region=dict(required=True) + )), + comment=dict(default=''), + hosted_zone_id=dict(), + delegation_set_id=dict(), + tags=dict(type='dict', aliases=['resource_tags']), + purge_tags=dict(type='bool', default=True), + ) + + mutually_exclusive = [ + ['delegation_set_id', 'vpc_id'], + ['delegation_set_id', 'vpc_region'], + ['delegation_set_id', 'vpcs'], + ['vpcs', 'vpc_id'], + ['vpcs', 'vpc_region'], + ] + + module = AnsibleAWSModule( + argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True, + ) + + zone_in = module.params.get('zone').lower() + state = module.params.get('state').lower() + vpc_id = module.params.get('vpc_id') + vpc_region = module.params.get('vpc_region') + vpcs = module.params.get('vpcs') + + if not zone_in.endswith('.'): + zone_in += "." + + private_zone = bool(vpcs or (vpc_id and vpc_region)) + + client = module.client('route53', retry_decorator=AWSRetry.jittered_backoff()) + + zones = find_zones(zone_in, private_zone) + if state == 'present': + changed, result = create(matching_zones=zones) + elif state == 'absent': + changed, result = delete(matching_zones=zones) + + if isinstance(result, dict): + module.exit_json(changed=changed, result=result, **result) + else: + module.exit_json(changed=changed, result=result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/s3_bucket.py b/ansible_collections/amazon/aws/plugins/modules/s3_bucket.py new file mode 100644 index 000000000..a4e2a8f56 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/s3_bucket.py @@ -0,0 +1,1247 @@ +#!/usr/bin/python +# +# This is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This Ansible library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this library. If not, see <https://www.gnu.org/licenses/>. + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: s3_bucket +version_added: 1.0.0 +short_description: Manage S3 buckets in AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID +description: + - Manage S3 buckets. + - Compatible with AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID. + - When using non-AWS services, I(endpoint_url) should be specified. +author: + - Rob White (@wimnat) + - Aubin Bikouo (@abikouo) +options: + force: + description: + - When trying to delete a bucket, delete all keys (including versions and delete markers) + in the bucket first (an S3 bucket must be empty for a successful deletion). + type: bool + default: false + name: + description: + - Name of the S3 bucket. + required: true + type: str + policy: + description: + - The JSON policy as a string. Set to the string C("null") to force the absence of a policy.
+ type: json + ceph: + description: + - Enable API compatibility with Ceph RGW. + - It takes into account the S3 API subset working with Ceph in order to provide the same module + behaviour where possible. + - Requires I(endpoint_url) if I(ceph=true). + aliases: ['rgw'] + type: bool + default: false + requester_pays: + description: + - With Requester Pays buckets, the requester instead of the bucket owner pays the cost + of the request and the data download from the bucket. + type: bool + state: + description: + - Create or remove the S3 bucket. + required: false + default: present + choices: [ 'present', 'absent' ] + type: str + versioning: + description: + - Whether versioning is enabled or disabled (note that once versioning is enabled, it can only be suspended). + type: bool + encryption: + description: + - Describes the default server-side encryption to apply to new objects in the bucket. + In order to remove the server-side encryption, the encryption needs to be set to 'none' explicitly. + - "Note: Since January 2023 Amazon S3 doesn't support disabling encryption on S3 buckets." + choices: [ 'none', 'AES256', 'aws:kms' ] + type: str + encryption_key_id: + description: KMS master key ID to use for the default encryption. This parameter is allowed if I(encryption) is C(aws:kms). If + not specified, it will default to the AWS-provided KMS key. + type: str + bucket_key_enabled: + description: + - Enable S3 Bucket Keys for SSE-KMS on new objects. + - See the AWS documentation for more information + U(https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucket-key.html). + - Bucket Key encryption is only supported if I(encryption=aws:kms). + required: false + type: bool + version_added: 4.1.0 + public_access: + description: + - Configure public access block for S3 bucket. + - This option cannot be used together with I(delete_public_access). + - | + Note: At the end of April 2023 Amazon updated the default settings to block public access by + default. While the defaults for this module remain unchanged, it is necessary to explicitly + pass the I(public_access) parameter to enable public access ACLs. + suboptions: + block_public_acls: + description: Sets BlockPublicAcls value. + type: bool + default: False + block_public_policy: + description: Sets BlockPublicPolicy value. + type: bool + default: False + ignore_public_acls: + description: Sets IgnorePublicAcls value. + type: bool + default: False + restrict_public_buckets: + description: Sets RestrictPublicBuckets value. + type: bool + default: False + type: dict + version_added: 1.3.0 + delete_public_access: + description: + - Delete public access block configuration from bucket. + - This option cannot be used together with a I(public_access) definition. + default: false + type: bool + version_added: 1.3.0 + object_ownership: + description: + - Manage the bucket's ownership controls. + - C(BucketOwnerEnforced) - ACLs are disabled and no longer affect access permissions to your + bucket. Requests to set or update ACLs fail. However, requests to read ACLs are supported. + Bucket owner has full ownership and control. Object writer no longer has full ownership and + control. + - C(BucketOwnerPreferred) - Objects uploaded to the bucket change ownership to the bucket owner + if the objects are uploaded with the bucket-owner-full-control canned ACL. + - C(ObjectWriter) - The uploading account will own the object + if the object is uploaded with the bucket-owner-full-control canned ACL.
+ - This option cannot be used together with a I(delete_object_ownership) definition. + - C(BucketOwnerEnforced) has been added in version 3.2.0. + - "Note: At the end of April 2023 Amazon updated the default setting to C(BucketOwnerEnforced)." + choices: [ 'BucketOwnerEnforced', 'BucketOwnerPreferred', 'ObjectWriter' ] + type: str + version_added: 2.0.0 + object_lock_enabled: + description: + - Whether S3 Object Lock is to be enabled. + - Defaults to C(False) when creating a new bucket. + type: bool + version_added: 5.3.0 + delete_object_ownership: + description: + - Delete bucket's ownership controls. + - This option cannot be used together with a I(object_ownership) definition. + default: false + type: bool + version_added: 2.0.0 + acl: + description: + - The canned ACL to apply to the bucket. + - If your bucket uses the bucket owner enforced setting for S3 Object Ownership, + ACLs are disabled and no longer affect permissions. + choices: [ 'private', 'public-read', 'public-read-write', 'authenticated-read' ] + type: str + version_added: 3.1.0 + validate_bucket_name: + description: + - Whether the bucket name should be validated to conform to AWS S3 naming rules. + - On by default, this may be disabled for S3 backends that do not enforce these rules. + - See U(https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html) + type: bool + version_added: 3.1.0 + default: True + +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags + - amazon.aws.boto3 + +notes: + - If C(requestPayment), C(policy), C(tagging) or C(versioning) + operations/API aren't implemented by the endpoint, the module doesn't fail + as long as each parameter satisfies the following condition: + I(requester_pays) is C(False) and I(policy), I(tags), and I(versioning) are C(None). + - In release 5.0.0 the I(s3_url) parameter was merged into the I(endpoint_url) parameter, + I(s3_url) remains as an alias for I(endpoint_url). + - For Walrus I(endpoint_url) should be set to the FQDN of the endpoint with neither scheme nor path. + - Support for the C(S3_URL) environment variable has been + deprecated and will be removed in a release after 2024-12-01, please use the I(endpoint_url) parameter + or the C(AWS_URL) environment variable. +''' + +EXAMPLES = r''' +# Note: These examples do not set authentication details, see the AWS Guide for details.
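+ +# Illustrative sketch only (the endpoint URL below is a placeholder): combine the +# documented endpoint_url and validate_bucket_name options to manage a bucket on an +# S3-compatible backend that does not enforce AWS naming rules +- amazon.aws.s3_bucket: + name: MY_legacy_bucket + endpoint_url: 'https://s3.example.internal' + validate_bucket_name: false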
+ +# Create a simple S3 bucket +- amazon.aws.s3_bucket: + name: mys3bucket + state: present + +# Create a simple S3 bucket on Ceph Rados Gateway +- amazon.aws.s3_bucket: + name: mys3bucket + endpoint_url: http://your-ceph-rados-gateway-server.xxx + ceph: true + +# Remove an S3 bucket and any keys it contains +- amazon.aws.s3_bucket: + name: mys3bucket + state: absent + force: true + +# Create a bucket, add a policy from a file, enable requester pays, enable versioning and tag +- amazon.aws.s3_bucket: + name: mys3bucket + policy: "{{ lookup('file','policy.json') }}" + requester_pays: true + versioning: true + tags: + example: tag1 + another: tag2 + +# Create a simple DigitalOcean Spaces bucket using their provided regional endpoint +- amazon.aws.s3_bucket: + name: mydobucket + endpoint_url: 'https://nyc3.digitaloceanspaces.com' + +# Create a bucket with AES256 encryption +- amazon.aws.s3_bucket: + name: mys3bucket + state: present + encryption: "AES256" + +# Create a bucket with aws:kms encryption, KMS key +- amazon.aws.s3_bucket: + name: mys3bucket + state: present + encryption: "aws:kms" + encryption_key_id: "arn:aws:kms:us-east-1:1234/5678example" + +# Create a bucket with aws:kms encryption, Bucket key +- amazon.aws.s3_bucket: + name: mys3bucket + bucket_key_enabled: true + encryption: "aws:kms" + +# Create a bucket with aws:kms encryption, default key +- amazon.aws.s3_bucket: + name: mys3bucket + state: present + encryption: "aws:kms" + +# Create a bucket with public policy block configuration +- amazon.aws.s3_bucket: + name: mys3bucket + state: present + public_access: + block_public_acls: true + ignore_public_acls: true + ## keys == 'false' can be omitted, undefined keys default to 'false' + # block_public_policy: false + # restrict_public_buckets: false + +# Delete public policy block from bucket +- amazon.aws.s3_bucket: + name: mys3bucket + state: present + delete_public_access: true + +# Create a bucket with object ownership controls set to ObjectWriter +- amazon.aws.s3_bucket: + name: mys3bucket + state: present + object_ownership: ObjectWriter + +# Delete ownership controls from bucket +- amazon.aws.s3_bucket: + name: mys3bucket + state: present + delete_object_ownership: true + +# Delete a bucket policy from bucket +- amazon.aws.s3_bucket: + name: mys3bucket + state: present + policy: "null" + +# This example grants public-read to everyone on the bucket using an ACL +- amazon.aws.s3_bucket: + name: mys3bucket + state: present + acl: public-read +''' + +RETURN = r''' +encryption: + description: + - Server-side encryption of the objects in the S3 bucket. + type: str + returned: I(state=present) + sample: '' +name: + description: Name of the S3 bucket. + type: str + returned: I(state=present) + sample: "2d3ce10a8210d36d6b4d23b822892074complex" +object_ownership: + description: S3 bucket's ownership controls. + type: str + returned: I(state=present) + sample: "BucketOwnerPreferred" +policy: + description: S3 bucket's policy. + type: dict + returned: I(state=present) + sample: { + "Statement": [ + { + "Action": "s3:GetObject", + "Effect": "Allow", + "Principal": "*", + "Resource": "arn:aws:s3:::2d3ce10a8210d36d6b4d23b822892074complex/*", + "Sid": "AddPerm" + } + ], + "Version": "2012-10-17" + } +requester_pays: + description: + - Indicates that the requester was successfully charged for the request. + type: str + returned: I(state=present) + sample: '' +tags: + description: S3 bucket's tags.
+  type: dict
+  returned: I(state=present)
+  sample: {
+        "Tag1": "tag1",
+        "Tag2": "tag2"
+    }
+versioning:
+  description: S3 bucket's versioning configuration.
+  type: dict
+  returned: I(state=present)
+  sample: {
+        "MfaDelete": "Disabled",
+        "Versioning": "Enabled"
+    }
+acl:
+  description: S3 bucket's canned ACL.
+  type: str
+  returned: I(state=present)
+  sample: 'public-read'
+'''
+
+import json
+import os
+import time
+
+try:
+    import botocore
+except ImportError:
+    pass  # Handled by AnsibleAWSModule
+
+from ansible.module_utils.basic import to_text
+from ansible.module_utils.six import string_types
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict
+from ansible_collections.amazon.aws.plugins.module_utils.s3 import validate_bucket_name
+
+
+def create_or_update_bucket(s3_client, module, location):
+
+    policy = module.params.get("policy")
+    name = module.params.get("name")
+    requester_pays = module.params.get("requester_pays")
+    tags = module.params.get("tags")
+    purge_tags = module.params.get("purge_tags")
+    versioning = module.params.get("versioning")
+    encryption = module.params.get("encryption")
+    encryption_key_id = module.params.get("encryption_key_id")
+    bucket_key_enabled = module.params.get("bucket_key_enabled")
+    public_access = module.params.get("public_access")
+    delete_public_access = module.params.get("delete_public_access")
+    delete_object_ownership = module.params.get("delete_object_ownership")
+    object_ownership = module.params.get("object_ownership")
+    object_lock_enabled = module.params.get("object_lock_enabled")
+    acl = module.params.get("acl")
+    changed = False
+    result = {}
+
+    try:
+        bucket_is_present = bucket_exists(s3_client, name)
+    except botocore.exceptions.EndpointConnectionError as e:
+        module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e))
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+        module.fail_json_aws(e, msg="Failed to check bucket presence")
+
+    if not bucket_is_present:
+        try:
+            bucket_changed = create_bucket(s3_client, name, location, object_lock_enabled)
+            s3_client.get_waiter('bucket_exists').wait(Bucket=name)
+            changed = changed or bucket_changed
+        except botocore.exceptions.WaiterError as e:
+            module.fail_json_aws(e, msg='An error occurred waiting for the bucket to become available')
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            module.fail_json_aws(e, msg="Failed while creating bucket")
+
+    # Versioning
+    try:
+        versioning_status = get_bucket_versioning(s3_client, name)
+    except is_boto3_error_code(['NotImplemented', 'XNotImplemented']) as e:
+        if versioning is not None:
+            module.fail_json_aws(e, msg="Failed to get bucket versioning")
+
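+    # Any other failure reading the versioning state is always fatal, even when
+    # no versioning change was requested.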
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg="Failed to get bucket versioning")
+    else:
+        if versioning is not None:
+            required_versioning = None
+            if versioning and versioning_status.get('Status') != "Enabled":
+                required_versioning = 'Enabled'
+            elif not versioning and versioning_status.get('Status') == "Enabled":
+                required_versioning = 'Suspended'
+
+            if required_versioning:
+                try:
+                    put_bucket_versioning(s3_client, name, required_versioning)
+                    changed = True
+                except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+                    module.fail_json_aws(e, msg="Failed to update bucket versioning")
+
+                versioning_status = wait_versioning_is_applied(module, s3_client, name, required_versioning)
+
+        # This output format is there to ensure compatibility with previous versions of the module
+        result['versioning'] = {
+            'Versioning': versioning_status.get('Status', 'Disabled'),
+            'MfaDelete': versioning_status.get('MFADelete', 'Disabled'),
+        }
+
+    # Requester pays
+    try:
+        requester_pays_status = get_bucket_request_payment(s3_client, name)
+    except is_boto3_error_code(['NotImplemented', 'XNotImplemented']) as e:
+        if requester_pays is not None:
+            module.fail_json_aws(e, msg="Failed to get bucket request payment")
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg="Failed to get bucket request payment")
+    else:
+        if requester_pays is not None:
+            payer = 'Requester' if requester_pays else 'BucketOwner'
+            if requester_pays_status != payer:
+                put_bucket_request_payment(s3_client, name, payer)
+                requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=False)
+                if requester_pays_status is None:
+                    # We have seen that it happens quite a lot of times that the put request was not taken into
+                    # account, so we retry one more time
+                    put_bucket_request_payment(s3_client, name, payer)
+                    requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=True)
+                changed = True
+
+        result['requester_pays'] = requester_pays
+
+    # Public access block configuration
+    current_public_access = {}
+    try:
+        current_public_access = get_bucket_public_access(s3_client, name)
+    except is_boto3_error_code(["NotImplemented", "XNotImplemented"]) as e:
+        if public_access is not None:
+            module.fail_json_aws(e, msg="Bucket public access settings are not supported by the current S3 Endpoint")
+    except is_boto3_error_code("AccessDenied") as e:
+        if public_access is not None:
+            module.fail_json_aws(e, msg="Failed to get bucket public access configuration")
+        module.debug("AccessDenied fetching bucket public access settings")
+    except (
+        botocore.exceptions.BotoCoreError,
+        botocore.exceptions.ClientError,
+    ) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg="Failed to get bucket public access configuration")
+    else:
+        # -- Create / Update public access block
+        if public_access is not None:
+            camel_public_block = snake_dict_to_camel_dict(public_access, capitalize_first=True)
+
+            if current_public_access == camel_public_block:
+                result["public_access_block"] = current_public_access
+            else:
+                put_bucket_public_access(s3_client, name, camel_public_block)
+                changed = True
+                result["public_access_block"] = camel_public_block
+
+        # -- Delete public access block
+        if delete_public_access:
+            if current_public_access == {}:
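+                # No public access block is set, so there is nothing to delete;
+                # report the (empty) current configuration unchanged.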
result["public_access_block"] = current_public_access + else: + delete_bucket_public_access(s3_client, name) + changed = True + result["public_access_block"] = {} + + # Policy + try: + current_policy = get_bucket_policy(s3_client, name) + except is_boto3_error_code(['NotImplemented', 'XNotImplemented']) as e: + if policy is not None: + module.fail_json_aws(e, msg="Failed to get bucket policy") + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to get bucket policy") + else: + if policy is not None: + if isinstance(policy, string_types): + policy = json.loads(policy) + + if not policy and current_policy: + try: + delete_bucket_policy(s3_client, name) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Failed to delete bucket policy") + current_policy = wait_policy_is_applied(module, s3_client, name, policy) + changed = True + elif compare_policies(current_policy, policy): + try: + put_bucket_policy(s3_client, name, policy) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Failed to update bucket policy") + current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=False) + if current_policy is None: + # As for request payement, it happens quite a lot of times that the put request was not taken into + # account, so we retry one more time + put_bucket_policy(s3_client, name, policy) + current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=True) + changed = True + + result['policy'] = current_policy + + # Tags + try: + current_tags_dict = get_current_bucket_tags_dict(s3_client, name) + except is_boto3_error_code(['NotImplemented', 'XNotImplemented']) as e: + if tags is not None: + module.fail_json_aws(e, msg="Failed to get bucket tags") + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to get bucket tags") + else: + if tags is not None: + # Tags are always returned as text + tags = dict((to_text(k), to_text(v)) for k, v in tags.items()) + if not purge_tags: + # Ensure existing tags that aren't updated by desired tags remain + current_copy = current_tags_dict.copy() + current_copy.update(tags) + tags = current_copy + if current_tags_dict != tags: + if tags: + try: + put_bucket_tagging(s3_client, name, tags) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Failed to update bucket tags") + else: + if purge_tags: + try: + delete_bucket_tagging(s3_client, name) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Failed to delete bucket tags") + current_tags_dict = wait_tags_are_applied(module, s3_client, name, tags) + changed = True + + result['tags'] = current_tags_dict + + # Encryption + try: + current_encryption = get_bucket_encryption(s3_client, name) + except is_boto3_error_code(['NotImplemented', 'XNotImplemented']) as e: + if encryption is not None: + module.fail_json_aws(e, msg="Failed to get bucket encryption settings") + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to get bucket encryption settings") + else: + if encryption is not None: + 
+            current_encryption_algorithm = current_encryption.get('SSEAlgorithm') if current_encryption else None
+            current_encryption_key = current_encryption.get('KMSMasterKeyID') if current_encryption else None
+            if encryption == 'none':
+                if current_encryption_algorithm is not None:
+                    try:
+                        delete_bucket_encryption(s3_client, name)
+                    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+                        module.fail_json_aws(e, msg="Failed to delete bucket encryption")
+                    current_encryption = wait_encryption_is_applied(module, s3_client, name, None)
+                    changed = True
+            else:
+                if (encryption != current_encryption_algorithm) or (encryption == 'aws:kms' and current_encryption_key != encryption_key_id):
+                    expected_encryption = {'SSEAlgorithm': encryption}
+                    if encryption == 'aws:kms' and encryption_key_id is not None:
+                        expected_encryption.update({'KMSMasterKeyID': encryption_key_id})
+                    current_encryption = put_bucket_encryption_with_retry(module, s3_client, name, expected_encryption)
+                    changed = True
+
+        if bucket_key_enabled is not None:
+            current_encryption_algorithm = current_encryption.get('SSEAlgorithm') if current_encryption else None
+            if current_encryption_algorithm == 'aws:kms':
+                if get_bucket_key(s3_client, name) != bucket_key_enabled:
+                    if bucket_key_enabled:
+                        expected_encryption = True
+                    else:
+                        expected_encryption = False
+                    current_encryption = put_bucket_key_with_retry(module, s3_client, name, expected_encryption)
+                    changed = True
+        result['encryption'] = current_encryption
+
+    # -- Bucket ownership
+    try:
+        bucket_ownership = get_bucket_ownership_cntrl(s3_client, name)
+        result['object_ownership'] = bucket_ownership
+    except KeyError as e:
+        # Some non-AWS providers appear to return policy documents that aren't
+        # compatible with AWS, cleanly catch KeyError so users can continue to use
+        # other features.
+        if delete_object_ownership or object_ownership is not None:
+            module.fail_json_aws(e, msg="Failed to get bucket object ownership settings")
+    except is_boto3_error_code(['NotImplemented', 'XNotImplemented']) as e:
+        if delete_object_ownership or object_ownership is not None:
+            module.fail_json_aws(e, msg="Failed to get bucket object ownership settings")
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg="Failed to get bucket object ownership settings")
+    else:
+        if delete_object_ownership:
+            # delete S3 bucket ownership
+            if bucket_ownership is not None:
+                delete_bucket_ownership(s3_client, name)
+                changed = True
+                result['object_ownership'] = None
+        elif object_ownership is not None:
+            # update S3 bucket ownership
+            if bucket_ownership != object_ownership:
+                put_bucket_ownership(s3_client, name, object_ownership)
+                changed = True
+                result['object_ownership'] = object_ownership
+
+    # -- Bucket ACL
+    if acl:
+        try:
+            s3_client.put_bucket_acl(Bucket=name, ACL=acl)
+            result['acl'] = acl
+            changed = True
+        except KeyError as e:
+            # Some non-AWS providers appear to return policy documents that aren't
+            # compatible with AWS, cleanly catch KeyError so users can continue to use
+            # other features.
+            module.fail_json_aws(e, msg="Failed to update bucket ACL")
+        except is_boto3_error_code(['NotImplemented', 'XNotImplemented']) as e:
+            module.fail_json_aws(e, msg="Failed to update bucket ACL")
+        except is_boto3_error_code('AccessDenied') as e:  # pylint: disable=duplicate-except
+            module.fail_json_aws(e, msg="Access denied trying to update bucket ACL")
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
+            module.fail_json_aws(e, msg="Failed to update bucket ACL")
+
+    # -- Object Lock
+    try:
+        object_lock_status = get_bucket_object_lock_enabled(s3_client, name)
+        result["object_lock_enabled"] = object_lock_status
+    except is_boto3_error_code(["NotImplemented", "XNotImplemented"]) as e:
+        if object_lock_enabled is not None:
+            module.fail_json(msg="Fetching bucket object lock state is not supported")
+    except is_boto3_error_code("ObjectLockConfigurationNotFoundError"):  # pylint: disable=duplicate-except
+        if object_lock_enabled:
+            module.fail_json(msg="Enabling object lock for existing buckets is not supported")
+        result["object_lock_enabled"] = False
+    except is_boto3_error_code("AccessDenied") as e:  # pylint: disable=duplicate-except
+        if object_lock_enabled is not None:
+            module.fail_json(msg="Permission denied fetching object lock state for bucket")
+    except (
+        botocore.exceptions.BotoCoreError,
+        botocore.exceptions.ClientError,
+    ) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg="Failed to fetch bucket object lock state")
+    else:
+        if object_lock_enabled is not None:
+            if not object_lock_enabled and object_lock_status:
+                module.fail_json(msg="Disabling object lock for existing buckets is not supported")
+            if object_lock_enabled and not object_lock_status:
+                module.fail_json(msg="Enabling object lock for existing buckets is not supported")
+
+    # Module exit
+    module.exit_json(changed=changed, name=name, **result)
+
+
+def bucket_exists(s3_client, bucket_name):
+    try:
+        s3_client.head_bucket(Bucket=bucket_name)
+        bucket_exists = True
+    except is_boto3_error_code('404'):
+        bucket_exists = False
+    return bucket_exists
+
+
+@AWSRetry.exponential_backoff(max_delay=120)
+def create_bucket(s3_client, bucket_name, location, object_lock_enabled=False):
+    try:
+        params = {"Bucket": bucket_name}
+
+        configuration = {}
+        if location not in ('us-east-1', None):
+            configuration['LocationConstraint'] = location
+
+        if configuration:
+            params["CreateBucketConfiguration"] = configuration
+
+        if object_lock_enabled is not None:
+            params["ObjectLockEnabledForBucket"] = object_lock_enabled
+
+        s3_client.create_bucket(**params)
+
+        return True
+    except is_boto3_error_code('BucketAlreadyOwnedByYou'):
+        # We should never get here since we check the bucket presence before calling the create_or_update_bucket
+        # method.
+        # However, the AWS API sometimes fails to report bucket presence, so we catch this exception
+        return False
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def put_bucket_tagging(s3_client, bucket_name, tags):
+    s3_client.put_bucket_tagging(Bucket=bucket_name, Tagging={'TagSet': ansible_dict_to_boto3_tag_list(tags)})
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def put_bucket_policy(s3_client, bucket_name, policy):
+    s3_client.put_bucket_policy(Bucket=bucket_name, Policy=json.dumps(policy))
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def delete_bucket_policy(s3_client, bucket_name):
+    s3_client.delete_bucket_policy(Bucket=bucket_name)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def get_bucket_policy(s3_client, bucket_name):
+    try:
+        current_policy_string = s3_client.get_bucket_policy(Bucket=bucket_name).get('Policy')
+        if not current_policy_string:
+            return None
+        current_policy = json.loads(current_policy_string)
+    except is_boto3_error_code('NoSuchBucketPolicy'):
+        return None
+
+    return current_policy
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def put_bucket_request_payment(s3_client, bucket_name, payer):
+    s3_client.put_bucket_request_payment(Bucket=bucket_name, RequestPaymentConfiguration={'Payer': payer})
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def get_bucket_request_payment(s3_client, bucket_name):
+    return s3_client.get_bucket_request_payment(Bucket=bucket_name).get('Payer')
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def get_bucket_versioning(s3_client, bucket_name):
+    return s3_client.get_bucket_versioning(Bucket=bucket_name)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def put_bucket_versioning(s3_client, bucket_name, required_versioning):
+    s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': required_versioning})
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
+def get_bucket_object_lock_enabled(s3_client, bucket_name):
+    object_lock_configuration = s3_client.get_object_lock_configuration(Bucket=bucket_name)
+    return object_lock_configuration["ObjectLockConfiguration"]["ObjectLockEnabled"] == "Enabled"
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def get_bucket_encryption(s3_client, bucket_name):
+    try:
+        result = s3_client.get_bucket_encryption(Bucket=bucket_name)
+        return result.get('ServerSideEncryptionConfiguration', {}).get('Rules', [])[0].get('ApplyServerSideEncryptionByDefault')
+    except is_boto3_error_code('ServerSideEncryptionConfigurationNotFoundError'):
+        return None
+    except (IndexError, KeyError):
+        return None
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def get_bucket_key(s3_client, bucket_name):
+    try:
+        result = s3_client.get_bucket_encryption(Bucket=bucket_name)
+        return result.get('ServerSideEncryptionConfiguration', {}).get('Rules', [])[0].get('BucketKeyEnabled')
+    except is_boto3_error_code('ServerSideEncryptionConfigurationNotFoundError'):
+        return None
+    except (IndexError, KeyError):
+        return None
+
+
+def put_bucket_encryption_with_retry(module, s3_client, name, expected_encryption):
+    max_retries = 3
+    for retries in range(1, max_retries + 1):
+        try:
+            put_bucket_encryption(s3_client, name, expected_encryption)
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
+            module.fail_json_aws(e, msg="Failed to set bucket encryption")
+        current_encryption = wait_encryption_is_applied(module, s3_client, name, expected_encryption,
+                                                        should_fail=(retries == max_retries), retries=5)
+        if current_encryption == expected_encryption:
+            return current_encryption
+
+    # We shouldn't get here; the only way this can happen is if
+    # current_encryption != expected_encryption and retries == max_retries,
+    # which should have called module.fail_json and failed out first.
+    module.fail_json(msg='Failed to apply bucket encryption',
+                     current=current_encryption, expected=expected_encryption, retries=retries)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def put_bucket_encryption(s3_client, bucket_name, encryption):
+    server_side_encryption_configuration = {'Rules': [{'ApplyServerSideEncryptionByDefault': encryption}]}
+    s3_client.put_bucket_encryption(Bucket=bucket_name, ServerSideEncryptionConfiguration=server_side_encryption_configuration)
+
+
+def put_bucket_key_with_retry(module, s3_client, name, expected_encryption):
+    max_retries = 3
+    for retries in range(1, max_retries + 1):
+        try:
+            put_bucket_key(s3_client, name, expected_encryption)
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
+            module.fail_json_aws(e, msg="Failed to set bucket Key")
+        current_encryption = wait_bucket_key_is_applied(module, s3_client, name, expected_encryption,
+                                                        should_fail=(retries == max_retries), retries=5)
+        if current_encryption == expected_encryption:
+            return current_encryption
+
+    # We shouldn't get here; the only way this can happen is if
+    # current_encryption != expected_encryption and retries == max_retries,
+    # which should have called module.fail_json and failed out first.
+    module.fail_json(msg='Failed to set bucket key',
+                     current=current_encryption, expected=expected_encryption, retries=retries)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def put_bucket_key(s3_client, bucket_name, encryption):
+    # server_side_encryption_configuration = {'Rules': [{'BucketKeyEnabled': encryption}]}
+    encryption_status = s3_client.get_bucket_encryption(Bucket=bucket_name)
+    encryption_status['ServerSideEncryptionConfiguration']['Rules'][0]['BucketKeyEnabled'] = encryption
+    s3_client.put_bucket_encryption(
+        Bucket=bucket_name,
+        ServerSideEncryptionConfiguration=encryption_status['ServerSideEncryptionConfiguration']
+    )
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def delete_bucket_tagging(s3_client, bucket_name):
+    s3_client.delete_bucket_tagging(Bucket=bucket_name)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def delete_bucket_encryption(s3_client, bucket_name):
+    s3_client.delete_bucket_encryption(Bucket=bucket_name)
+
+
+@AWSRetry.exponential_backoff(max_delay=240, catch_extra_error_codes=['OperationAborted'])
+def delete_bucket(s3_client, bucket_name):
+    try:
+        s3_client.delete_bucket(Bucket=bucket_name)
+    except is_boto3_error_code('NoSuchBucket'):
+        # This means the bucket should have been in a deleting state when we checked its existence
+        # We just ignore the error
+        pass
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def put_bucket_public_access(s3_client, bucket_name, public_access):
+    '''
+    Put new public access block to S3 bucket
+    '''
+    s3_client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=public_access)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def delete_bucket_public_access(s3_client, bucket_name):
+    '''
+    Delete public access block from S3 bucket
+    '''
+    s3_client.delete_public_access_block(Bucket=bucket_name)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def delete_bucket_ownership(s3_client, bucket_name):
+    '''
+    Delete bucket ownership controls from S3 bucket
+    '''
+    s3_client.delete_bucket_ownership_controls(Bucket=bucket_name)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def put_bucket_ownership(s3_client, bucket_name, target):
+    '''
+    Put bucket ownership controls for S3 bucket
+    '''
+    s3_client.put_bucket_ownership_controls(
+        Bucket=bucket_name,
+        OwnershipControls={
+            'Rules': [{'ObjectOwnership': target}]
+        })
+
+
+def wait_policy_is_applied(module, s3_client, bucket_name, expected_policy, should_fail=True):
+    for dummy in range(0, 12):
+        try:
+            current_policy = get_bucket_policy(s3_client, bucket_name)
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            module.fail_json_aws(e, msg="Failed to get bucket policy")
+
+        if compare_policies(current_policy, expected_policy):
+            time.sleep(5)
+        else:
+            return current_policy
+    if should_fail:
+        module.fail_json(msg="Bucket policy failed to apply in the expected time",
+                         requested_policy=expected_policy, live_policy=current_policy)
+    else:
+        return None
+
+
+def wait_payer_is_applied(module, s3_client, bucket_name, expected_payer, should_fail=True):
+    for dummy in range(0, 12):
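+        # Request payment settings propagate asynchronously; poll up to 12 times,
+        # 5 seconds apart, until the API reflects the requested payer.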
+        try:
+            requester_pays_status = get_bucket_request_payment(s3_client, bucket_name)
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            module.fail_json_aws(e, msg="Failed to get bucket request payment")
+        if requester_pays_status != expected_payer:
+            time.sleep(5)
+        else:
+            return requester_pays_status
+    if should_fail:
+        module.fail_json(msg="Bucket request payment failed to apply in the expected time",
+                         requested_status=expected_payer, live_status=requester_pays_status)
+    else:
+        return None
+
+
+def wait_encryption_is_applied(module, s3_client, bucket_name, expected_encryption, should_fail=True, retries=12):
+    for dummy in range(0, retries):
+        try:
+            encryption = get_bucket_encryption(s3_client, bucket_name)
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            module.fail_json_aws(e, msg="Failed to get updated encryption for bucket")
+        if encryption != expected_encryption:
+            time.sleep(5)
+        else:
+            return encryption
+
+    if should_fail:
+        module.fail_json(msg="Bucket encryption failed to apply in the expected time",
+                         requested_encryption=expected_encryption, live_encryption=encryption)
+
+    return encryption
+
+
+def wait_bucket_key_is_applied(module, s3_client, bucket_name, expected_encryption, should_fail=True, retries=12):
+    for dummy in range(0, retries):
+        try:
+            encryption = get_bucket_key(s3_client, bucket_name)
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            module.fail_json_aws(e, msg="Failed to get updated encryption for bucket")
+        if encryption != expected_encryption:
+            time.sleep(5)
+        else:
+            return encryption
+
+    if should_fail:
+        module.fail_json(msg="Bucket Key failed to apply in the expected time",
+                         requested_encryption=expected_encryption, live_encryption=encryption)
+    return encryption
+
+
+def wait_versioning_is_applied(module, s3_client, bucket_name, required_versioning):
+    for dummy in range(0, 24):
+        try:
+            versioning_status = get_bucket_versioning(s3_client, bucket_name)
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            module.fail_json_aws(e, msg="Failed to get updated versioning for bucket")
+        if versioning_status.get('Status') != required_versioning:
+            time.sleep(8)
+        else:
+            return versioning_status
+    module.fail_json(msg="Bucket versioning failed to apply in the expected time",
+                     requested_versioning=required_versioning, live_versioning=versioning_status)
+
+
+def wait_tags_are_applied(module, s3_client, bucket_name, expected_tags_dict):
+    for dummy in range(0, 12):
+        try:
+            current_tags_dict = get_current_bucket_tags_dict(s3_client, bucket_name)
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            module.fail_json_aws(e, msg="Failed to get bucket tags")
+        if current_tags_dict != expected_tags_dict:
+            time.sleep(5)
+        else:
+            return current_tags_dict
+    module.fail_json(msg="Bucket tags failed to apply in the expected time",
+                     requested_tags=expected_tags_dict, live_tags=current_tags_dict)
+
+
+def get_current_bucket_tags_dict(s3_client, bucket_name):
+    try:
+        current_tags = s3_client.get_bucket_tagging(Bucket=bucket_name).get('TagSet')
+    except is_boto3_error_code('NoSuchTagSet'):
+        return {}
+    # The Ceph S3 API returns a different error code to AWS
+    except is_boto3_error_code('NoSuchTagSetError'):  # pylint: disable=duplicate-except
+        return {}
+
+    return boto3_tag_list_to_ansible_dict(current_tags)
+
+
+def get_bucket_public_access(s3_client, bucket_name):
+    '''
+    Get current bucket public access block
+    '''
+    try:
+        bucket_public_access_block = s3_client.get_public_access_block(Bucket=bucket_name)
+        return bucket_public_access_block['PublicAccessBlockConfiguration']
+    except is_boto3_error_code('NoSuchPublicAccessBlockConfiguration'):
+        return {}
+
+
+def get_bucket_ownership_cntrl(s3_client, bucket_name):
+    '''
+    Get current bucket ownership controls
+    '''
+    try:
+        bucket_ownership = s3_client.get_bucket_ownership_controls(Bucket=bucket_name)
+        return bucket_ownership['OwnershipControls']['Rules'][0]['ObjectOwnership']
+    except is_boto3_error_code(['OwnershipControlsNotFoundError', 'NoSuchOwnershipControls']):
+        return None
+
+
+def paginated_list(s3_client, **pagination_params):
+    pg = s3_client.get_paginator('list_objects_v2')
+    for page in pg.paginate(**pagination_params):
+        yield [data['Key'] for data in page.get('Contents', [])]
+
+
+def paginated_versions_list(s3_client, **pagination_params):
+    try:
+        pg = s3_client.get_paginator('list_object_versions')
+        for page in pg.paginate(**pagination_params):
+            # We have to merge the Versions and DeleteMarker lists here, as DeleteMarkers can still prevent a bucket deletion
+            yield [(data['Key'], data['VersionId']) for data in (page.get('Versions', []) + page.get('DeleteMarkers', []))]
+    except is_boto3_error_code('NoSuchBucket'):
+        yield []
+
+
+def destroy_bucket(s3_client, module):
+
+    force = module.params.get("force")
+    name = module.params.get("name")
+    try:
+        bucket_is_present = bucket_exists(s3_client, name)
+    except botocore.exceptions.EndpointConnectionError as e:
+        module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e))
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+        module.fail_json_aws(e, msg="Failed to check bucket presence")
+
+    if not bucket_is_present:
+        module.exit_json(changed=False)
+
+    if force:
+        # if there are contents then we need to delete them (including versions) before we can delete the bucket
+        try:
+            for key_version_pairs in paginated_versions_list(s3_client, Bucket=name):
+                formatted_keys = [{'Key': key, 'VersionId': version} for key, version in key_version_pairs]
+                for fk in formatted_keys:
+                    # remove VersionId from cases where they are `None` so that
+                    # unversioned objects are deleted using `DeleteObject`
+                    # rather than `DeleteObjectVersion`, improving backwards
+                    # compatibility with older IAM policies.
+                    if not fk.get('VersionId'):
+                        fk.pop('VersionId')
+
+                if formatted_keys:
+                    resp = s3_client.delete_objects(Bucket=name, Delete={'Objects': formatted_keys})
+                    if resp.get('Errors'):
+                        module.fail_json(
+                            msg='Could not empty bucket before deleting.
Could not delete objects: {0}'.format( + ', '.join([k['Key'] for k in resp['Errors']]) + ), + errors=resp['Errors'], response=resp + ) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Failed while deleting bucket") + + try: + delete_bucket(s3_client, name) + s3_client.get_waiter('bucket_not_exists').wait(Bucket=name, WaiterConfig=dict(Delay=5, MaxAttempts=60)) + except botocore.exceptions.WaiterError as e: + module.fail_json_aws(e, msg='An error occurred waiting for the bucket to be deleted.') + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Failed to delete bucket") + + module.exit_json(changed=True) + + +def is_fakes3(endpoint_url): + """ Return True if endpoint_url has scheme fakes3:// """ + if endpoint_url is not None: + return urlparse(endpoint_url).scheme in ('fakes3', 'fakes3s') + else: + return False + + +def get_s3_client(module, aws_connect_kwargs, location, ceph, endpoint_url): + if ceph: # TODO - test this + ceph = urlparse(endpoint_url) + params = dict(module=module, conn_type='client', resource='s3', use_ssl=ceph.scheme == 'https', + region=location, endpoint=endpoint_url, **aws_connect_kwargs) + elif is_fakes3(endpoint_url): + fakes3 = urlparse(endpoint_url) + port = fakes3.port + if fakes3.scheme == 'fakes3s': + protocol = "https" + if port is None: + port = 443 + else: + protocol = "http" + if port is None: + port = 80 + params = dict(module=module, conn_type='client', resource='s3', region=location, + endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)), + use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs) + else: + params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=endpoint_url, **aws_connect_kwargs) + return boto3_conn(**params) + + +def main(): + + argument_spec = dict( + force=dict(default=False, type='bool'), + policy=dict(type='json'), + name=dict(required=True), + requester_pays=dict(type='bool'), + state=dict(default='present', choices=['present', 'absent']), + tags=dict(type='dict', aliases=['resource_tags']), + purge_tags=dict(type='bool', default=True), + versioning=dict(type='bool'), + ceph=dict(default=False, type='bool', aliases=['rgw']), + encryption=dict(choices=['none', 'AES256', 'aws:kms']), + encryption_key_id=dict(), + bucket_key_enabled=dict(type='bool'), + public_access=dict(type='dict', options=dict( + block_public_acls=dict(type='bool', default=False), + ignore_public_acls=dict(type='bool', default=False), + block_public_policy=dict(type='bool', default=False), + restrict_public_buckets=dict(type='bool', default=False))), + delete_public_access=dict(type='bool', default=False), + object_ownership=dict(type='str', choices=['BucketOwnerEnforced', 'BucketOwnerPreferred', 'ObjectWriter']), + delete_object_ownership=dict(type='bool', default=False), + acl=dict(type='str', choices=['private', 'public-read', 'public-read-write', 'authenticated-read']), + validate_bucket_name=dict(type='bool', default=True), + object_lock_enabled=dict(type="bool"), + ) + + required_by = dict( + encryption_key_id=('encryption',), + ) + + mutually_exclusive = [ + ['public_access', 'delete_public_access'], + ['delete_object_ownership', 'object_ownership'] + ] + + required_if = [ + ['ceph', True, ['endpoint_url']], + ] + + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_by=required_by, + required_if=required_if, + mutually_exclusive=mutually_exclusive + ) + + region, 
_ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+
+    if module.params.get('validate_bucket_name'):
+        validate_bucket_name(module, module.params["name"])
+
+    if region in ('us-east-1', '', None):
+        # default to US Standard region
+        location = 'us-east-1'
+    else:
+        # Boto uses symbolic names for locations but region strings will
+        # actually work fine for everything except us-east-1 (US Standard)
+        location = region
+
+    endpoint_url = module.params.get('endpoint_url')
+    ceph = module.params.get('ceph')
+
+    # Look at endpoint_url and tweak connection settings
+    # allow eucarc environment variables to be used if ansible vars aren't set
+    if not endpoint_url and 'S3_URL' in os.environ:
+        endpoint_url = os.environ['S3_URL']
+        module.deprecate(
+            "Support for the 'S3_URL' environment variable has been "
+            "deprecated. We recommend using the 'endpoint_url' module "
+            "parameter. Alternatively, the 'AWS_URL' environment variable can "
+            "be used instead.",
+            date='2024-12-01', collection_name='amazon.aws',
+        )
+
+    # if connecting to Ceph RGW, Walrus or fakes3
+    if endpoint_url:
+        for key in ['validate_certs', 'security_token', 'profile_name']:
+            aws_connect_kwargs.pop(key, None)
+    s3_client = get_s3_client(module, aws_connect_kwargs, location, ceph, endpoint_url)
+
+    if s3_client is None:  # this should never happen
+        module.fail_json(msg='Unknown error, failed to create s3 connection, no information available.')
+
+    state = module.params.get("state")
+    encryption = module.params.get("encryption")
+    encryption_key_id = module.params.get("encryption_key_id")
+
+    # Parameter validation
+    if encryption_key_id is not None and encryption != 'aws:kms':
+        module.fail_json(msg="Only 'aws:kms' is a valid option for encryption parameter when you specify encryption_key_id.")
+
+    if state == 'present':
+        create_or_update_bucket(s3_client, module, location)
+    elif state == 'absent':
+        destroy_bucket(s3_client, module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/s3_object.py b/ansible_collections/amazon/aws/plugins/modules/s3_object.py
new file mode 100644
index 000000000..50beab9d2
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/s3_object.py
@@ -0,0 +1,1287 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: s3_object
+version_added: 1.0.0
+short_description: Manage objects in S3
+description:
+  - This module allows the user to manage the objects and directories within S3 buckets. Includes
+    support for creating and deleting objects and directories, retrieving objects as files or
+    strings, generating download links and copying objects that are already stored in Amazon S3.
+  - Support for creating or deleting S3 buckets with this module has been deprecated and will be
+    removed in release 6.0.0.
+  - S3 buckets can be created or deleted using the M(amazon.aws.s3_bucket) module.
+  - Compatible with AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID.
+  - When using non-AWS services, I(endpoint_url) should be specified.
+options:
+  bucket:
+    description:
+      - Bucket name.
+    required: true
+    type: str
+  dest:
+    description:
+      - The destination file path when downloading an object/key when I(mode=get).
+      - Ignored when I(mode) is not C(get).
+    type: path
+  encrypt:
+    description:
+      - Asks for server-side encryption of the objects when I(mode=put) or I(mode=copy).
+      - Ignored when I(mode) is neither C(put) nor C(copy).
+    default: true
+    type: bool
+  encryption_mode:
+    description:
+      - The encryption mode to use if I(encrypt=true).
+    default: AES256
+    choices:
+      - AES256
+      - aws:kms
+    type: str
+  expiry:
+    description:
+      - Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a
+        I(mode=put) or I(mode=geturl) operation.
+      - Ignored when I(mode) is neither C(put) nor C(geturl).
+    default: 600
+    aliases: ['expiration']
+    type: int
+  headers:
+    description:
+      - Custom headers to use when I(mode=put) as a dictionary of key value pairs.
+      - Ignored when I(mode) is not C(put).
+    type: dict
+  marker:
+    description:
+      - Specifies the key to start with when using list mode. Object keys are returned in
+        alphabetical order, starting with the key after the marker.
+    type: str
+    default: ''
+  max_keys:
+    description:
+      - Max number of results to return when I(mode=list), set this if you want to retrieve fewer
+        than the default 1000 keys.
+      - Ignored when I(mode) is not C(list).
+    default: 1000
+    type: int
+  metadata:
+    description:
+      - Metadata to use when I(mode=put) or I(mode=copy) as a dictionary of key value pairs.
+    type: dict
+  mode:
+    description:
+      - Switches the module behaviour between
+      - 'C(put): upload'
+      - 'C(get): download'
+      - 'C(geturl): return download URL'
+      - 'C(getstr): download object as string'
+      - 'C(list): list keys'
+      - 'C(create): create bucket directories'
+      - 'C(delete): delete bucket directories'
+      - 'C(delobj): delete object'
+      - 'C(copy): copy object that is already stored in another bucket'
+      - Support for creating and deleting buckets has been deprecated and will
+        be removed in release 6.0.0. To create and manage the bucket itself
+        please use the M(amazon.aws.s3_bucket) module.
+    required: true
+    choices: ['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list', 'copy']
+    type: str
+  object:
+    description:
+      - Keyname of the object inside the bucket.
+      - Can be used to create "virtual directories", see examples.
+    type: str
+  sig_v4:
+    description:
+      - Forces the Boto SDK to use Signature Version 4.
+      - Only applies to get modes, I(mode=get), I(mode=getstr), I(mode=geturl).
+    default: true
+    type: bool
+    version_added: 5.0.0
+  permission:
+    description:
+      - This option lets the user set the canned permissions on the object/bucket that are created.
+        The permissions that can be set are C(private), C(public-read), C(public-read-write),
+        C(authenticated-read) for a bucket or C(private), C(public-read), C(public-read-write),
+        C(aws-exec-read), C(authenticated-read), C(bucket-owner-read), C(bucket-owner-full-control)
+        for an object. Multiple permissions can be specified as a list; although only the first one
+        will be used during the initial upload of the file.
+      - For a full list of permissions see the AWS documentation
+        U(https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl).
+    default: ['private']
+    type: list
+    elements: str
+  prefix:
+    description:
+      - Limits the response to keys that begin with the specified prefix for list mode.
+    default: ""
+    type: str
+  version:
+    description:
+      - Version ID of the object inside the bucket. Can be used to get a specific version of a file
+        if versioning is enabled in the target bucket.
+    type: str
+  overwrite:
+    description:
+      - Force overwrite either locally on the filesystem or remotely with the object/key.
+      - Used when I(mode=put) or I(mode=get).
+      - Ignored when I(mode) is neither C(put) nor C(get).
+      - Must be a Boolean, C(always), C(never), C(different) or C(latest).
+      - C(true) is the same as C(always).
+      - C(false) is equal to C(never).
+      - When this is set to C(different) the MD5 sum of the local file is compared with the 'ETag'
+        of the object/key in S3. The ETag may or may not be an MD5 digest of the object data. See
+        the ETag response header here
+        U(https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html).
+      - When I(mode=get) and I(overwrite=latest) the last modified timestamp of the local file
+        is compared with the 'LastModified' of the object/key in S3.
+    default: 'different'
+    aliases: ['force']
+    type: str
+  retries:
+    description:
+      - On recoverable failure, how many times to retry before actually failing.
+    default: 0
+    type: int
+    aliases: ['retry']
+  dualstack:
+    description:
+      - Enables Amazon S3 Dual-Stack Endpoints, allowing S3 communications using both IPv4 and IPv6.
+    type: bool
+    default: false
+  ceph:
+    description:
+      - Enable API compatibility with Ceph RGW.
+      - It takes into account the S3 API subset working with Ceph in order to provide the same module
+        behaviour where possible.
+      - Requires I(endpoint_url) if I(ceph=true).
+    aliases: ['rgw']
+    default: false
+    type: bool
+  src:
+    description:
+      - The source file path when performing a C(put) operation.
+      - One of I(content), I(content_base64) or I(src) must be specified when I(mode=put),
+        otherwise ignored.
+    type: path
+  content:
+    description:
+      - The content to C(put) into an object.
+      - The parameter value will be treated as a string and converted to UTF-8 before sending it to
+        S3.
+      - To send binary data, use the I(content_base64) parameter instead.
+      - One of I(content), I(content_base64) or I(src) must be specified when I(mode=put),
+        otherwise ignored.
+    version_added: "1.3.0"
+    type: str
+  content_base64:
+    description:
+      - The base64-encoded binary data to C(put) into an object.
+      - Use this if you need to put raw binary data, and don't forget to encode in base64.
+      - One of I(content), I(content_base64) or I(src) must be specified when I(mode=put),
+        otherwise ignored.
+    version_added: "1.3.0"
+    type: str
+  ignore_nonexistent_bucket:
+    description:
+      - Overrides initial bucket lookups in case bucket or IAM policies are restrictive.
+      - This can be useful when a user may have the C(GetObject) permission but no other
+        permissions. In which case using I(mode=get) will fail unless
+        I(ignore_nonexistent_bucket=true) is specified.
+    type: bool
+    default: false
+  encryption_kms_key_id:
+    description:
+      - KMS key id to use when encrypting objects using I(encryption_mode=aws:kms).
+      - Ignored if I(encryption_mode) is not C(aws:kms).
+    type: str
+  copy_src:
+    description:
+      - The source details of the object to copy.
+      - Required if I(mode=copy).
+    type: dict
+    version_added: 2.0.0
+    suboptions:
+      bucket:
+        type: str
+        description:
+          - The name of the source bucket.
+        required: true
+      object:
+        type: str
+        description:
+          - Key name of the source object.
+        required: true
+      version_id:
+        type: str
+        description:
+          - Version ID of the source object.
+  validate_bucket_name:
+    description:
+      - Whether the bucket name should be validated to conform to AWS S3 naming rules.
+      - On by default, this may be disabled for S3 backends that do not enforce these rules.
+ - See the Amazon documentation for more information about bucket naming rules + U(https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html). + type: bool + version_added: 3.1.0 + default: True +author: + - "Lester Wade (@lwade)" + - "Sloane Hertel (@s-hertel)" + - "Alina Buzachis (@alinabuzachis)" +notes: + - Support for I(tags) and I(purge_tags) was added in release 2.0.0. + - In release 5.0.0 the I(s3_url) parameter was merged into the I(endpoint_url) parameter, + I(s3_url) remains as an alias for I(endpoint_url). + - For Walrus I(endpoint_url) should be set to the FQDN of the endpoint with neither scheme nor path. + - Support for the C(S3_URL) environment variable has been + deprecated and will be removed in a release after 2024-12-01, please use the I(endpoint_url) parameter + or the C(AWS_URL) environment variable. +extends_documentation_fragment: + - amazon.aws.aws + - amazon.aws.ec2 + - amazon.aws.tags + - amazon.aws.boto3 +''' + +EXAMPLES = ''' +- name: Simple PUT operation + amazon.aws.s3_object: + bucket: mybucket + object: /my/desired/key.txt + src: /usr/local/myfile.txt + mode: put + +- name: PUT operation from a rendered template + amazon.aws.s3_object: + bucket: mybucket + object: /object.yaml + content: "{{ lookup('template', 'templates/object.yaml.j2') }}" + mode: put + +- name: Simple PUT operation in Ceph RGW S3 + amazon.aws.s3_object: + bucket: mybucket + object: /my/desired/key.txt + src: /usr/local/myfile.txt + mode: put + ceph: true + endpoint_url: "http://localhost:8000" + +- name: Simple GET operation + amazon.aws.s3_object: + bucket: mybucket + object: /my/desired/key.txt + dest: /usr/local/myfile.txt + mode: get + +- name: Get a specific version of an object. + amazon.aws.s3_object: + bucket: mybucket + object: /my/desired/key.txt + version: 48c9ee5131af7a716edc22df9772aa6f + dest: /usr/local/myfile.txt + mode: get + +- name: PUT/upload with metadata + amazon.aws.s3_object: + bucket: mybucket + object: /my/desired/key.txt + src: /usr/local/myfile.txt + mode: put + metadata: 'Content-Encoding=gzip,Cache-Control=no-cache' + +- name: PUT/upload with custom headers + amazon.aws.s3_object: + bucket: mybucket + object: /my/desired/key.txt + src: /usr/local/myfile.txt + mode: put + headers: 'x-amz-grant-full-control=emailAddress=owner@example.com' + +- name: List keys simple + amazon.aws.s3_object: + bucket: mybucket + mode: list + +- name: List keys all options + amazon.aws.s3_object: + bucket: mybucket + mode: list + prefix: /my/desired/ + marker: /my/desired/0023.txt + max_keys: 472 + +- name: Create an empty bucket + amazon.aws.s3_object: + bucket: mybucket + mode: create + permission: public-read + +- name: Create a bucket with key as directory, in the EU region + amazon.aws.s3_object: + bucket: mybucket + object: /my/directory/path + mode: create + region: eu-west-1 + +- name: Delete a bucket and all contents + amazon.aws.s3_object: + bucket: mybucket + mode: delete + +- name: GET an object but don't download if the file checksums match. 
New in 2.0 + amazon.aws.s3_object: + bucket: mybucket + object: /my/desired/key.txt + dest: /usr/local/myfile.txt + mode: get + overwrite: different + +- name: Delete an object from a bucket + amazon.aws.s3_object: + bucket: mybucket + object: /my/desired/key.txt + mode: delobj + +- name: Copy an object already stored in another bucket + amazon.aws.s3_object: + bucket: mybucket + object: /my/desired/key.txt + mode: copy + copy_src: + bucket: srcbucket + object: /source/key.txt +''' + +RETURN = ''' +msg: + description: Message indicating the status of the operation. + returned: always + type: str + sample: PUT operation complete +url: + description: URL of the object. + returned: (for put and geturl operations) + type: str + sample: https://my-bucket.s3.amazonaws.com/my-key.txt?AWSAccessKeyId=&Expires=1506888865&Signature= +expiry: + description: Number of seconds the presigned url is valid for. + returned: (for geturl operation) + type: int + sample: 600 +contents: + description: Contents of the object as string. + returned: (for getstr operation) + type: str + sample: "Hello, world!" +s3_keys: + description: List of object keys. + returned: (for list operation) + type: list + elements: str + sample: + - prefix1/ + - prefix1/key1 + - prefix1/key2 +''' + +import mimetypes +import os +import io +from ssl import SSLError +import base64 +import time + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.basic import to_text +from ansible.module_utils.basic import to_native +from ansible.module_utils.six.moves.urllib.parse import urlparse + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.s3 import HAS_MD5 +from ansible_collections.amazon.aws.plugins.module_utils.s3 import calculate_etag +from ansible_collections.amazon.aws.plugins.module_utils.s3 import calculate_etag_content +from ansible_collections.amazon.aws.plugins.module_utils.s3 import validate_bucket_name + +IGNORE_S3_DROP_IN_EXCEPTIONS = ['XNotImplemented', 'NotImplemented'] + + +class Sigv4Required(Exception): + pass + + +def key_check(module, s3, bucket, obj, version=None, validate=True): + try: + if version: + s3.head_object(Bucket=bucket, Key=obj, VersionId=version) + else: + s3.head_object(Bucket=bucket, Key=obj) + except is_boto3_error_code('404'): + return False + except is_boto3_error_code('403') as e: # pylint: disable=duplicate-except + if validate is True: + module.fail_json_aws(e, msg="Failed while looking up object (during key check) %s." % obj) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed while looking up object (during key check) %s." 
% obj) + + return True + + +def etag_compare(module, s3, bucket, obj, version=None, local_file=None, content=None): + s3_etag = get_etag(s3, bucket, obj, version=version) + if local_file is not None: + local_etag = calculate_etag(module, local_file, s3_etag, s3, bucket, obj, version) + else: + local_etag = calculate_etag_content(module, content, s3_etag, s3, bucket, obj, version) + + return s3_etag == local_etag + + +def get_etag(s3, bucket, obj, version=None): + try: + if version: + key_check = s3.head_object(Bucket=bucket, Key=obj, VersionId=version) + else: + key_check = s3.head_object(Bucket=bucket, Key=obj) + if not key_check: + return None + return key_check['ETag'] + except is_boto3_error_code('404'): + return None + + +def get_s3_last_modified_timestamp(s3, bucket, obj, version=None): + if version: + key_check = s3.head_object(Bucket=bucket, Key=obj, VersionId=version) + else: + key_check = s3.head_object(Bucket=bucket, Key=obj) + if not key_check: + return None + return key_check['LastModified'].timestamp() + + +def is_local_object_latest(module, s3, bucket, obj, version=None, local_file=None): + s3_last_modified = get_s3_last_modified_timestamp(s3, bucket, obj, version) + if os.path.exists(local_file) is False: + return False + else: + local_last_modified = os.path.getmtime(local_file) + + return s3_last_modified <= local_last_modified + + +def bucket_check(module, s3, bucket, validate=True): + exists = True + try: + s3.head_bucket(Bucket=bucket) + except is_boto3_error_code('404'): + return False + except is_boto3_error_code('403') as e: # pylint: disable=duplicate-except + if validate is True: + module.fail_json_aws(e, msg="Failed while looking up bucket (during bucket_check) %s." % bucket) + except botocore.exceptions.EndpointConnectionError as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Invalid endpoint provided") + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed while looking up bucket (during bucket_check) %s." % bucket) + return exists + + +def create_bucket(module, s3, bucket, location=None): + module.deprecate('Support for creating S3 buckets using the s3_object module' + ' has been deprecated. Please use the ``s3_bucket`` module' + ' instead.', version='6.0.0', collection_name='amazon.aws') + if module.check_mode: + module.exit_json(msg="CREATE operation skipped - running in check mode", changed=True) + configuration = {} + if location not in ('us-east-1', None): + configuration['LocationConstraint'] = location + try: + if len(configuration) > 0: + s3.create_bucket(Bucket=bucket, CreateBucketConfiguration=configuration) + else: + s3.create_bucket(Bucket=bucket) + if module.params.get('permission'): + # Wait for the bucket to exist before setting ACLs + s3.get_waiter('bucket_exists').wait(Bucket=bucket) + for acl in module.params.get('permission'): + AWSRetry.jittered_backoff( + max_delay=120, catch_extra_error_codes=['NoSuchBucket'] + )(s3.put_bucket_acl)(ACL=acl, Bucket=bucket) + except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS): + module.warn("PutBucketAcl is not implemented by your storage provider. 
Set the permission parameters to the empty list to avoid this warning") + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed while creating bucket or setting acl (check that you have CreateBucket and PutBucketAcl permission).") + + if bucket: + return True + + +def paginated_list(s3, **pagination_params): + pg = s3.get_paginator('list_objects_v2') + for page in pg.paginate(**pagination_params): + yield [data['Key'] for data in page.get('Contents', [])] + + +def paginated_versioned_list_with_fallback(s3, **pagination_params): + try: + versioned_pg = s3.get_paginator('list_object_versions') + for page in versioned_pg.paginate(**pagination_params): + delete_markers = [{'Key': data['Key'], 'VersionId': data['VersionId']} for data in page.get('DeleteMarkers', [])] + current_objects = [{'Key': data['Key'], 'VersionId': data['VersionId']} for data in page.get('Versions', [])] + yield delete_markers + current_objects + except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS + ['AccessDenied']): + for page in paginated_list(s3, **pagination_params): + yield [{'Key': data['Key']} for data in page] + + +def list_keys(module, s3, bucket, prefix, marker, max_keys): + pagination_params = {'Bucket': bucket} + for param_name, param_value in (('Prefix', prefix), ('StartAfter', marker), ('MaxKeys', max_keys)): + pagination_params[param_name] = param_value + try: + keys = sum(paginated_list(s3, **pagination_params), []) + module.exit_json(msg="LIST operation complete", s3_keys=keys) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed while listing the keys in the bucket {0}".format(bucket)) + + +def delete_bucket(module, s3, bucket): + module.deprecate('Support for deleting S3 buckets using the s3_object module' + ' has been deprecated. Please use the ``s3_bucket`` module' + ' instead.', version='6.0.0', collection_name='amazon.aws') + if module.check_mode: + module.exit_json(msg="DELETE operation skipped - running in check mode", changed=True) + try: + exists = bucket_check(module, s3, bucket) + if exists is False: + return False + # if there are contents then we need to delete them before we can delete the bucket + for keys in paginated_versioned_list_with_fallback(s3, Bucket=bucket): + if keys: + s3.delete_objects(Bucket=bucket, Delete={'Objects': keys}) + s3.delete_bucket(Bucket=bucket) + return True + except is_boto3_error_code('NoSuchBucket'): + return False + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed while deleting bucket %s." % bucket) + + +def delete_key(module, s3, bucket, obj): + if module.check_mode: + module.exit_json(msg="DELETE operation skipped - running in check mode", changed=True) + try: + s3.delete_object(Bucket=bucket, Key=obj) + module.exit_json(msg="Object deleted from bucket %s." % (bucket), changed=True) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed while trying to delete %s." 
% obj)
+
+
+def create_dirkey(module, s3, bucket, obj, encrypt, expiry):
+    if module.check_mode:
+        module.exit_json(msg="PUT operation skipped - running in check mode", changed=True)
+    try:
+        params = {'Bucket': bucket, 'Key': obj, 'Body': b''}
+        if encrypt:
+            params['ServerSideEncryption'] = module.params['encryption_mode']
+        if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms':
+            params['SSEKMSKeyId'] = module.params['encryption_kms_key_id']
+
+        s3.put_object(**params)
+        for acl in module.params.get('permission'):
+            s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj)
+    except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS):
+        module.warn("PutObjectAcl is not implemented by your storage provider. Set the permission parameters to the empty list to avoid this warning")
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg="Failed while creating object %s." % obj)
+
+    # Tags
+    tags, _changed = ensure_tags(s3, module, bucket, obj)
+
+    url = put_download_url(module, s3, bucket, obj, expiry)
+
+    module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket), url=url, tags=tags, changed=True)
+
+
+def path_check(path):
+    return os.path.exists(path)
+
+
+def option_in_extra_args(option):
+    temp_option = option.replace('-', '').lower()
+
+    allowed_extra_args = {'acl': 'ACL', 'cachecontrol': 'CacheControl', 'contentdisposition': 'ContentDisposition',
+                          'contentencoding': 'ContentEncoding', 'contentlanguage': 'ContentLanguage',
+                          'contenttype': 'ContentType', 'expires': 'Expires', 'grantfullcontrol': 'GrantFullControl',
+                          'grantread': 'GrantRead', 'grantreadacp': 'GrantReadACP', 'grantwriteacp': 'GrantWriteACP',
+                          'metadata': 'Metadata', 'requestpayer': 'RequestPayer', 'serversideencryption': 'ServerSideEncryption',
+                          'storageclass': 'StorageClass', 'ssecustomeralgorithm': 'SSECustomerAlgorithm', 'ssecustomerkey': 'SSECustomerKey',
+                          'ssecustomerkeymd5': 'SSECustomerKeyMD5', 'ssekmskeyid': 'SSEKMSKeyId', 'websiteredirectlocation': 'WebsiteRedirectLocation'}
+
+    return allowed_extra_args.get(temp_option)
+
+
+def upload_s3file(module, s3, bucket, obj, expiry, metadata, encrypt, headers, src=None, content=None, acl_disabled=False):
+    if module.check_mode:
+        module.exit_json(msg="PUT operation skipped - running in check mode", changed=True)
+    try:
+        extra = {}
+        if encrypt:
+            extra['ServerSideEncryption'] = module.params['encryption_mode']
+        if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms':
+            extra['SSEKMSKeyId'] = module.params['encryption_kms_key_id']
+        if metadata:
+            extra['Metadata'] = {}
+
+            # determine object metadata and extra arguments
+            for option in metadata:
+                extra_args_option = option_in_extra_args(option)
+                if extra_args_option is not None:
+                    extra[extra_args_option] = metadata[option]
+                else:
+                    extra['Metadata'][option] = metadata[option]
+
+        if module.params.get('permission'):
+            permissions = module.params['permission']
+            if isinstance(permissions, str):
+                extra['ACL'] = permissions
+            elif isinstance(permissions, list):
+                extra['ACL'] = permissions[0]
+
+        if 'ContentType' not in extra:
+            content_type = None
+            if src is not None:
+                content_type = mimetypes.guess_type(src)[0]
+            if content_type is None:
+                # s3 default content type
+                content_type = 'binary/octet-stream'
+            extra['ContentType'] = content_type
+
+        if src is not None:
+            s3.upload_file(Filename=src, Bucket=bucket, Key=obj, ExtraArgs=extra)
+        else:
+            f = io.BytesIO(content)
+            s3.upload_fileobj(Fileobj=f, Bucket=bucket, Key=obj, ExtraArgs=extra)
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Unable to complete PUT operation.")
+    if not acl_disabled:
+        try:
+            for acl in module.params.get('permission'):
+                s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj)
+        except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS):
+            module.warn("PutObjectAcl is not implemented by your storage provider. Set the permission parameters to the empty list to avoid this warning")
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
+            module.fail_json_aws(e, msg="Unable to set object ACL")
+
+    # Tags
+    tags, _changed = ensure_tags(s3, module, bucket, obj)
+
+    url = put_download_url(module, s3, bucket, obj, expiry)
+
+    module.exit_json(msg="PUT operation complete", url=url, tags=tags, changed=True)
+
+
+def download_s3file(module, s3, bucket, obj, dest, retries, version=None):
+    if module.check_mode:
+        module.exit_json(msg="GET operation skipped - running in check mode", changed=True)
+    # retries is the number of loops; range/xrange needs to be one
+    # more to get that count of loops.
+    try:
+        # Note: something of a permissions-related hack.
+        # get_object returns the HEAD information, plus a *stream* which can be read.
+        # Because the stream is dropped on the floor we never pull the data, making this the
+        # functional equivalent of a HEAD call while still relying on the 'GET' permission.
+        if version:
+            s3.get_object(Bucket=bucket, Key=obj, VersionId=version)
+        else:
+            s3.get_object(Bucket=bucket, Key=obj)
+    except is_boto3_error_code(['404', '403']) as e:
+        # AccessDenied errors may be triggered if 1) file does not exist or 2) file exists but
+        # user does not have the s3:GetObject permission. 404 errors are handled by download_file().
+        module.fail_json_aws(e, msg="Could not find the key %s." % obj)
+    except is_boto3_error_message('require AWS Signature Version 4'):  # pylint: disable=duplicate-except
+        raise Sigv4Required()
+    except is_boto3_error_code('InvalidArgument') as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg="Could not find the key %s." % obj)
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg="Could not find the key %s." % obj)
+
+    optional_kwargs = {'ExtraArgs': {'VersionId': version}} if version else {}
+    for x in range(0, retries + 1):
+        try:
+            s3.download_file(bucket, obj, dest, **optional_kwargs)
+            module.exit_json(msg="GET operation complete", changed=True)
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            # actually fail on last pass through the loop.
+            if x >= retries:
+                module.fail_json_aws(e, msg="Failed while downloading %s." % obj)
+            # otherwise, try again, this may be a transient timeout.
+        except SSLError as e:  # will ClientError catch SSLError?
+            # actually fail on last pass through the loop.
+ if x >= retries: + module.fail_json_aws(e, msg="s3 download failed") + # otherwise, try again, this may be a transient timeout. + + +def download_s3str(module, s3, bucket, obj, version=None, validate=True): + if module.check_mode: + module.exit_json(msg="GET operation skipped - running in check mode", changed=True) + try: + if version: + contents = to_native(s3.get_object(Bucket=bucket, Key=obj, VersionId=version)["Body"].read()) + else: + contents = to_native(s3.get_object(Bucket=bucket, Key=obj)["Body"].read()) + module.exit_json(msg="GET operation complete", contents=contents, changed=True) + except is_boto3_error_message('require AWS Signature Version 4'): + raise Sigv4Required() + except is_boto3_error_code('InvalidArgument') as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed while getting contents of object %s as a string." % obj) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed while getting contents of object %s as a string." % obj) + + +def get_download_url(module, s3, bucket, obj, expiry, tags=None, changed=True): + try: + url = s3.generate_presigned_url(ClientMethod='get_object', + Params={'Bucket': bucket, 'Key': obj}, + ExpiresIn=expiry) + module.exit_json(msg="Download url:", url=url, tags=tags, expiry=expiry, changed=changed) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed while getting download url.") + + +def put_download_url(module, s3, bucket, obj, expiry): + try: + url = s3.generate_presigned_url(ClientMethod='put_object', + Params={'Bucket': bucket, 'Key': obj}, + ExpiresIn=expiry) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Unable to generate presigned URL") + return url + + +def copy_object_to_bucket(module, s3, bucket, obj, encrypt, metadata, validate, d_etag): + if module.check_mode: + module.exit_json(msg="COPY operation skipped - running in check mode", changed=True) + try: + params = {'Bucket': bucket, 'Key': obj} + bucketsrc = {'Bucket': module.params['copy_src'].get('bucket'), 'Key': module.params['copy_src'].get('object')} + version = None + if module.params['copy_src'].get('version_id') is not None: + version = module.params['copy_src'].get('version_id') + bucketsrc.update({'VersionId': version}) + if not key_check(module, s3, bucketsrc['Bucket'], bucketsrc['Key'], version=version, validate=validate): + # Key does not exist in source bucket + module.exit_json(msg="Key %s does not exist in bucket %s." 
% (bucketsrc['Key'], bucketsrc['Bucket']), changed=False)
+
+        s_etag = get_etag(s3, bucketsrc['Bucket'], bucketsrc['Key'], version=version)
+        if s_etag == d_etag:
+            # Tags
+            tags, changed = ensure_tags(s3, module, bucket, obj)
+            if not changed:
+                module.exit_json(msg="ETag from source and destination are the same", changed=False)
+            else:
+                module.exit_json(msg="Tags successfully updated.", changed=changed, tags=tags)
+        else:
+            params.update({'CopySource': bucketsrc})
+            if encrypt:
+                params['ServerSideEncryption'] = module.params['encryption_mode']
+            if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms':
+                params['SSEKMSKeyId'] = module.params['encryption_kms_key_id']
+            if metadata:
+                params['Metadata'] = {}
+                # determine object metadata and extra arguments
+                for option in metadata:
+                    extra_args_option = option_in_extra_args(option)
+                    if extra_args_option is not None:
+                        params[extra_args_option] = metadata[option]
+                    else:
+                        params['Metadata'][option] = metadata[option]
+            s3.copy_object(**params)
+            for acl in module.params.get('permission'):
+                s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj)
+            # Tags
+            tags, changed = ensure_tags(s3, module, bucket, obj)
+            module.exit_json(msg="Object copied from bucket %s to bucket %s." % (bucketsrc['Bucket'], bucket), tags=tags, changed=True)
+    except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS):
+        module.warn("PutObjectAcl is not implemented by your storage provider. Set the permission parameters to the empty list to avoid this warning")
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
+        # the copy_src suboption is defined as lowercase 'bucket' in the argument spec
+        module.fail_json_aws(e, msg="Failed while copying object %s from bucket %s." % (obj, module.params['copy_src'].get('bucket')))
+
+
+def is_fakes3(endpoint_url):
+    """ Return True if endpoint_url has scheme fakes3:// """
+    if endpoint_url is not None:
+        return urlparse(endpoint_url).scheme in ('fakes3', 'fakes3s')
+    else:
+        return False
+
+
+def get_s3_connection(module, aws_connect_kwargs, location, ceph, endpoint_url, sig_4=False):
+    if ceph:  # TODO - test this
+        ceph = urlparse(endpoint_url)
+        params = dict(module=module, conn_type='client', resource='s3', use_ssl=ceph.scheme == 'https',
+                      region=location, endpoint=endpoint_url, **aws_connect_kwargs)
+    elif is_fakes3(endpoint_url):
+        fakes3 = urlparse(endpoint_url)
+        port = fakes3.port
+        if fakes3.scheme == 'fakes3s':
+            protocol = "https"
+            if port is None:
+                port = 443
+        else:
+            protocol = "http"
+            if port is None:
+                port = 80
+        params = dict(module=module, conn_type='client', resource='s3', region=location,
+                      endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)),
+                      use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs)
+    else:
+        params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=endpoint_url, **aws_connect_kwargs)
+    if module.params['mode'] == 'put' and module.params['encryption_mode'] == 'aws:kms':
+        params['config'] = botocore.client.Config(signature_version='s3v4')
+    elif module.params['mode'] in ('get', 'getstr', 'geturl') and sig_4:
+        params['config'] = botocore.client.Config(signature_version='s3v4')
+    if module.params['dualstack']:
+        dualconf = botocore.client.Config(s3={'use_dualstack_endpoint': True})
+        if 'config' in params:
+            params['config'] = params['config'].merge(dualconf)
+        else:
+            params['config'] = dualconf
+    return boto3_conn(**params)
+
+
+def get_current_object_tags_dict(s3, bucket, obj, version=None):
+    try:
+        if version:
+            current_tags = s3.get_object_tagging(Bucket=bucket, Key=obj, VersionId=version).get('TagSet')
+        else:
+            current_tags = s3.get_object_tagging(Bucket=bucket, Key=obj).get('TagSet')
+    except is_boto3_error_code('NoSuchTagSet'):
+        return {}
+    except is_boto3_error_code('NoSuchTagSetError'):  # pylint: disable=duplicate-except
+        return {}
+
+    return boto3_tag_list_to_ansible_dict(current_tags)
+
+
+@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def put_object_tagging(s3, bucket, obj, tags):
+    s3.put_object_tagging(Bucket=bucket, Key=obj, Tagging={'TagSet': ansible_dict_to_boto3_tag_list(tags)})
+
+
+@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
+def delete_object_tagging(s3, bucket, obj):
+    s3.delete_object_tagging(Bucket=bucket, Key=obj)
+
+
+def wait_tags_are_applied(module, s3, bucket, obj, expected_tags_dict, version=None):
+    for dummy in range(0, 12):
+        try:
+            current_tags_dict = get_current_object_tags_dict(s3, bucket, obj, version)
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Failed to get object tags.")
+        if current_tags_dict != expected_tags_dict:
+            time.sleep(5)
+        else:
+            return current_tags_dict
+
+    module.fail_json(msg="Object tags failed to apply in the expected time.",
+                     requested_tags=expected_tags_dict, live_tags=current_tags_dict)
+
+
+def ensure_tags(client, module, bucket, obj):
+    tags = module.params.get("tags")
+    purge_tags = module.params.get("purge_tags")
+    changed = False
+    # initialise so the return below is safe when GetObjectTagging isn't supported
+    current_tags_dict = None
+
+    try:
+        current_tags_dict = get_current_object_tags_dict(client, bucket, obj)
+    except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS):
+        module.warn("GetObjectTagging is not implemented by your storage provider; object tags will be ignored.")
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
+        module.fail_json_aws(e, msg="Failed to get object tags.")
+    else:
+        if tags is not None:
+            if not purge_tags:
+                # Ensure existing tags that aren't updated by desired tags remain
+                current_copy = current_tags_dict.copy()
+                current_copy.update(tags)
+                tags = current_copy
+            if current_tags_dict != tags:
+                if tags:
+                    try:
+                        put_object_tagging(client, bucket, obj, tags)
+                    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+                        module.fail_json_aws(e, msg="Failed to update object tags.")
+                else:
+                    if purge_tags:
+                        try:
+                            delete_object_tagging(client, bucket, obj)
+                        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+                            module.fail_json_aws(e, msg="Failed to delete object tags.")
+                current_tags_dict = wait_tags_are_applied(module, client, bucket, obj, tags)
+                changed = True
+    return current_tags_dict, changed
+
+
+def main():
+    # Beware: this module uses an action plugin (plugins/action/s3_object.py)
+    # so that the src parameter can be either in the 'files/' lookup path on the
+    # controller, *or* on the remote host that the task is executed on.
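+    #
+    # The 'overwrite' parameter accepts 'always', 'never', 'different'
+    # (compare ETags) or 'latest' (compare modification times); plain boolean
+    # values are mapped to 'always'/'never' below for backwards compatibility.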
+ + argument_spec = dict( + bucket=dict(required=True), + dest=dict(default=None, type='path'), + encrypt=dict(default=True, type='bool'), + encryption_mode=dict(choices=['AES256', 'aws:kms'], default='AES256'), + expiry=dict(default=600, type='int', aliases=['expiration']), + headers=dict(type='dict'), + marker=dict(default=""), + max_keys=dict(default=1000, type='int', no_log=False), + metadata=dict(type='dict'), + mode=dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list', 'copy'], required=True), + sig_v4=dict(default=True, type='bool'), + object=dict(), + permission=dict(type='list', elements='str', default=['private']), + version=dict(default=None), + overwrite=dict(aliases=['force'], default='different'), + prefix=dict(default=""), + retries=dict(aliases=['retry'], type='int', default=0), + dualstack=dict(default=False, type='bool'), + ceph=dict(default=False, type='bool', aliases=['rgw']), + src=dict(type='path'), + content=dict(), + content_base64=dict(), + ignore_nonexistent_bucket=dict(default=False, type='bool'), + encryption_kms_key_id=dict(), + tags=dict(type='dict', aliases=['resource_tags']), + purge_tags=dict(type='bool', default=True), + copy_src=dict(type='dict', options=dict(bucket=dict(required=True), object=dict(required=True), version_id=dict())), + validate_bucket_name=dict(type='bool', default=True), + ) + + required_if = [ + ['ceph', True, ['endpoint_url']], + ['mode', 'put', ['object']], + ['mode', 'get', ['dest', 'object']], + ['mode', 'getstr', ['object']], + ['mode', 'geturl', ['object']], + ['mode', 'copy', ['copy_src']], + ] + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=required_if, + mutually_exclusive=[['content', 'content_base64', 'src']], + ) + + bucket = module.params.get('bucket') + encrypt = module.params.get('encrypt') + expiry = module.params.get('expiry') + dest = module.params.get('dest', '') + headers = module.params.get('headers') + marker = module.params.get('marker') + max_keys = module.params.get('max_keys') + metadata = module.params.get('metadata') + mode = module.params.get('mode') + obj = module.params.get('object') + version = module.params.get('version') + overwrite = module.params.get('overwrite') + sig_v4 = module.params.get('sig_v4') + prefix = module.params.get('prefix') + retries = module.params.get('retries') + endpoint_url = module.params.get('endpoint_url') + dualstack = module.params.get('dualstack') + ceph = module.params.get('ceph') + src = module.params.get('src') + content = module.params.get('content') + content_base64 = module.params.get('content_base64') + ignore_nonexistent_bucket = module.params.get('ignore_nonexistent_bucket') + + object_canned_acl = ["private", "public-read", "public-read-write", "aws-exec-read", "authenticated-read", "bucket-owner-read", "bucket-owner-full-control"] + bucket_canned_acl = ["private", "public-read", "public-read-write", "authenticated-read"] + + if module.params.get('validate_bucket_name'): + validate_bucket_name(module, bucket) + + if overwrite not in ['always', 'never', 'different', 'latest']: + if module.boolean(overwrite): + overwrite = 'always' + else: + overwrite = 'never' + + if overwrite == 'different' and not HAS_MD5: + module.fail_json(msg='overwrite=different is unavailable: ETag calculation requires MD5 support') + + region, _ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + + if region in ('us-east-1', '', None): + # default to US Standard region + location = 
'us-east-1' + else: + # Boto uses symbolic names for locations but region strings will + # actually work fine for everything except us-east-1 (US Standard) + location = region + + if module.params.get('object'): + obj = module.params['object'] + # If there is a top level object, do nothing - if the object starts with / + # remove the leading character to maintain compatibility with Ansible versions < 2.4 + if obj.startswith('/'): + obj = obj[1:] + + # Bucket deletion does not require obj. Prevents ambiguity with delobj. + if obj and mode == "delete": + module.fail_json(msg='Parameter obj cannot be used with mode=delete') + + # allow eucarc environment variables to be used if ansible vars aren't set + if not endpoint_url and 'S3_URL' in os.environ: + endpoint_url = os.environ['S3_URL'] + module.deprecate( + "Support for the 'S3_URL' environment variable has been " + "deprecated. We recommend using the 'endpoint_url' module " + "parameter. Alternatively, the 'AWS_URL' environment variable can " + "be used instead.", + date='2024-12-01', collection_name='amazon.aws', + ) + + if dualstack and endpoint_url is not None and 'amazonaws.com' not in endpoint_url: + module.fail_json(msg='dualstack only applies to AWS S3') + + # Look at endpoint_url and tweak connection settings + # if connecting to RGW, Walrus or fakes3 + if endpoint_url: + for key in ['validate_certs', 'security_token', 'profile_name']: + aws_connect_kwargs.pop(key, None) + s3 = get_s3_connection(module, aws_connect_kwargs, location, ceph, endpoint_url, sig_v4) + + validate = not ignore_nonexistent_bucket + + # check if bucket exists, if yes, check if ACL is disabled + acl_disabled = False + exists = bucket_check(module, s3, bucket) + if exists: + try: + ownership_controls = s3.get_bucket_ownership_controls(Bucket=bucket)['OwnershipControls'] + if ownership_controls.get('Rules'): + object_ownership = ownership_controls['Rules'][0]['ObjectOwnership'] + if object_ownership == 'BucketOwnerEnforced': + acl_disabled = True + # if bucket ownership controls are not found + except botocore.exceptions.ClientError: + pass + + # separate types of ACLs + if not acl_disabled: + bucket_acl = [acl for acl in module.params.get('permission') if acl in bucket_canned_acl] + object_acl = [acl for acl in module.params.get('permission') if acl in object_canned_acl] + error_acl = [acl for acl in module.params.get('permission') if acl not in bucket_canned_acl and acl not in object_canned_acl] + if error_acl: + module.fail_json(msg='Unknown permission specified: %s' % error_acl) + + # First, we check to see if the bucket exists, we get "bucket" returned. + bucketrtn = bucket_check(module, s3, bucket, validate=validate) + + if validate and mode not in ('create', 'put', 'delete', 'copy') and not bucketrtn: + module.fail_json(msg="Source bucket cannot be found.") + + if mode == 'get': + keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate) + if keyrtn is False: + if version: + module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version)) + else: + module.fail_json(msg="Key %s does not exist." % obj) + + if dest and path_check(dest) and overwrite != 'always': + if overwrite == 'never': + module.exit_json(msg="Local object already exists and overwrite is disabled.", changed=False) + if overwrite == 'different' and etag_compare(module, s3, bucket, obj, version=version, local_file=dest): + module.exit_json(msg="Local and remote object are identical, ignoring. 
Use overwrite=always parameter to force.", changed=False)
+            if overwrite == 'latest' and is_local_object_latest(module, s3, bucket, obj, version=version, local_file=dest):
+                module.exit_json(msg="Local object is latest, ignoring. Use overwrite=always parameter to force.", changed=False)
+
+        try:
+            download_s3file(module, s3, bucket, obj, dest, retries, version=version)
+        except Sigv4Required:
+            s3 = get_s3_connection(module, aws_connect_kwargs, location, ceph, endpoint_url, sig_4=True)
+            download_s3file(module, s3, bucket, obj, dest, retries, version=version)
+
+    if mode == 'put':
+
+        # if putting an object in a bucket yet to be created, acls for the bucket and/or the object may be specified
+        # these were separated into the variables bucket_acl and object_acl above
+
+        if content is None and content_base64 is None and src is None:
+            module.fail_json(msg='Either content, content_base64 or src must be specified for PUT operations')
+        if src is not None and not path_check(src):
+            module.fail_json(msg='Local object "%s" does not exist for PUT operation' % (src))
+
+        keyrtn = None
+        if bucketrtn:
+            keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
+        else:
+            # If the bucket doesn't exist we should create it.
+            # only use valid bucket acls for create_bucket function
+            module.params['permission'] = bucket_acl
+            create_bucket(module, s3, bucket, location)
+
+        # the content will be uploaded as a byte string, so we must encode it first
+        bincontent = None
+        if content is not None:
+            bincontent = content.encode('utf-8')
+        if content_base64 is not None:
+            bincontent = base64.standard_b64decode(content_base64)
+
+        if keyrtn and overwrite != 'always':
+            if overwrite == 'never' or etag_compare(module, s3, bucket, obj, version=version, local_file=src, content=bincontent):
+                # Return the download URL for the existing object and ensure tags are updated
+                tags, tags_update = ensure_tags(s3, module, bucket, obj)
+                get_download_url(module, s3, bucket, obj, expiry, tags, changed=tags_update)
+
+        # only use valid object acls for the upload_s3file function
+        if not acl_disabled:
+            module.params['permission'] = object_acl
+        upload_s3file(module, s3, bucket, obj, expiry, metadata, encrypt, headers, src=src, content=bincontent, acl_disabled=acl_disabled)
+
+    # Delete an object from a bucket, not the entire bucket
+    if mode == 'delobj':
+        if obj is None:
+            module.fail_json(msg="object parameter is required")
+        if bucket:
+            deletertn = delete_key(module, s3, bucket, obj)
+            if deletertn is True:
+                module.exit_json(msg="Object deleted from bucket %s." % bucket, changed=True)
+        else:
+            module.fail_json(msg="Bucket parameter is required.")
+
+    # Delete an entire bucket, including all objects in the bucket
+    if mode == 'delete':
+        if bucket:
+            deletertn = delete_bucket(module, s3, bucket)
+            if deletertn is True:
+                module.exit_json(msg="Bucket %s and all keys have been deleted." % bucket, changed=True)
+        else:
+            module.fail_json(msg="Bucket parameter is required.")
+
+    # Support for listing a set of keys
+    if mode == 'list':
+
+        # If the bucket does not exist then bail out
+        if not bucketrtn:
+            module.fail_json(msg="Target bucket (%s) cannot be found" % bucket)
+
+        list_keys(module, s3, bucket, prefix, marker, max_keys)
+
+    # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now.
+    # WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS.
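+    # S3 has no real directories: mode=create with a non-empty object name just
+    # writes a zero-byte key whose name ends in '/', which the AWS console
+    # renders as a folder (see create_dirkey() above).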
+    if mode == 'create':
+
+        # if both creating a bucket and putting an object in it, acls for the bucket and/or the object may be specified
+        # these were separated above into the variables bucket_acl and object_acl
+
+        if bucket and not obj:
+            if bucketrtn:
+                module.exit_json(msg="Bucket already exists.", changed=False)
+            else:
+                # only use valid bucket acls when creating the bucket
+                module.params['permission'] = bucket_acl
+                module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, s3, bucket, location))
+        if bucket and obj:
+            if obj.endswith('/'):
+                dirobj = obj
+            else:
+                dirobj = obj + "/"
+            if bucketrtn:
+                if key_check(module, s3, bucket, dirobj):
+                    module.exit_json(msg="Bucket %s and key %s already exist." % (bucket, obj), changed=False)
+                else:
+                    # setting valid object acls for the create_dirkey function
+                    module.params['permission'] = object_acl
+                    create_dirkey(module, s3, bucket, dirobj, encrypt, expiry)
+            else:
+                # only use valid bucket acls for the create_bucket function
+                module.params['permission'] = bucket_acl
+                create_bucket(module, s3, bucket, location)
+                # only use valid object acls for the create_dirkey function
+                module.params['permission'] = object_acl
+                create_dirkey(module, s3, bucket, dirobj, encrypt, expiry)
+
+    # Support for grabbing the time-limited URL for an object in S3/Walrus.
+    if mode == 'geturl':
+        if not bucket and not obj:
+            module.fail_json(msg="Bucket and Object parameters must be set")
+
+        keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
+        if keyrtn:
+            tags = get_current_object_tags_dict(s3, bucket, obj, version=version)
+            get_download_url(module, s3, bucket, obj, expiry, tags)
+        else:
+            module.fail_json(msg="Key %s does not exist." % obj)
+
+    if mode == 'getstr':
+        if bucket and obj:
+            keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
+            if keyrtn:
+                try:
+                    download_s3str(module, s3, bucket, obj, version=version)
+                except Sigv4Required:
+                    s3 = get_s3_connection(module, aws_connect_kwargs, location, ceph, endpoint_url, sig_4=True)
+                    download_s3str(module, s3, bucket, obj, version=version)
+            elif version is not None:
+                module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version))
+            else:
+                module.fail_json(msg="Key %s does not exist." % obj)
+
+    if mode == 'copy':
+        # if copying an object in a bucket yet to be created, acls for the bucket and/or the object may be specified
+        # these were separated into the variables bucket_acl and object_acl above
+        d_etag = None
+        if bucketrtn:
+            d_etag = get_etag(s3, bucket, obj)
+        else:
+            # If the bucket doesn't exist we should create it.
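+            # (bucket creation via this module is deprecated; create_bucket()
+            # emits a deprecation warning recommending the s3_bucket module)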
+            # only use valid bucket acls for create_bucket function
+            module.params['permission'] = bucket_acl
+            create_bucket(module, s3, bucket, location)
+        # only use valid object acls for the copy operation
+        module.params['permission'] = object_acl
+        copy_object_to_bucket(module, s3, bucket, obj, encrypt, metadata, validate, d_etag)
+
+    module.exit_json(failed=False)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/s3_object_info.py b/ansible_collections/amazon/aws/plugins/modules/s3_object_info.py
new file mode 100644
index 000000000..88e66dc4f
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/s3_object_info.py
@@ -0,0 +1,818 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: s3_object_info
+version_added: 5.0.0
+short_description: Gather information about objects in S3
+description:
+  - Describes objects in S3.
+  - Compatible with AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID (only supports list_keys currently).
+  - When using non-AWS services, I(endpoint_url) should be specified.
+author:
+  - Mandar Vijay Kulkarni (@mandar242)
+options:
+  bucket_name:
+    description:
+      - The name of the bucket that contains the object.
+    required: true
+    type: str
+  object_name:
+    description:
+      - The name of the object.
+      - If not specified, a list of all objects in the specified bucket will be returned.
+    required: false
+    type: str
+  endpoint_url:
+    description:
+      - S3 URL endpoint for usage with Ceph, Eucalyptus and fakes3 etc. Otherwise assumes AWS.
+    type: str
+  dualstack:
+    description:
+      - Enables Amazon S3 Dual-Stack Endpoints, allowing S3 communications using both IPv4 and IPv6.
+    type: bool
+    default: false
+  ceph:
+    description:
+      - Enable API compatibility with Ceph RGW.
+      - It takes into account the S3 API subset working with Ceph in order to provide the same module
+        behaviour where possible.
+      - Requires I(endpoint_url) if I(ceph=true).
+    aliases: ['rgw']
+    default: false
+    type: bool
+  object_details:
+    description:
+      - Retrieve requested S3 object detailed information.
+    required: false
+    type: dict
+    suboptions:
+      object_acl:
+        description:
+          - Retrieve S3 object ACL.
+        required: false
+        type: bool
+        default: false
+      object_legal_hold:
+        description:
+          - Retrieve S3 object legal_hold.
+        required: false
+        type: bool
+        default: false
+      object_lock_configuration:
+        description:
+          - Retrieve S3 object lock_configuration.
+        required: false
+        type: bool
+        default: false
+      object_retention:
+        description:
+          - Retrieve S3 object retention.
+        required: false
+        type: bool
+        default: false
+      object_tagging:
+        description:
+          - Retrieve S3 object tags.
+        required: false
+        type: bool
+        default: false
+      object_attributes:
+        description:
+          - Retrieve S3 object attributes.
+          - Requires minimum botocore version 1.24.7.
+        required: false
+        type: bool
+        default: false
+      attributes_list:
+        description:
+          - The fields/details that should be returned.
+          - Required when I(object_attributes) is C(true) in I(object_details).
+        type: list
+        elements: str
+        choices: ['ETag', 'Checksum', 'ObjectParts', 'StorageClass', 'ObjectSize']
+notes:
+  - Support for the C(S3_URL) environment variable has been
+    deprecated and will be removed in a release after 2024-12-01; please use the I(endpoint_url) parameter
+    or the C(AWS_URL) environment variable.
+extends_documentation_fragment:
+- amazon.aws.aws
+- amazon.aws.ec2
+- amazon.aws.boto3
+
+'''
+
+EXAMPLES = r'''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Retrieve a list of objects in S3 bucket
+  amazon.aws.s3_object_info:
+    bucket_name: MyTestBucket
+
+- name: Retrieve a list of objects in Ceph RGW S3
+  amazon.aws.s3_object_info:
+    bucket_name: MyTestBucket
+    ceph: true
+    endpoint_url: "http://localhost:8000"
+
+- name: Retrieve object metadata without object itself
+  amazon.aws.s3_object_info:
+    bucket_name: MyTestBucket
+    object_name: MyTestObjectKey
+
+- name: Retrieve detailed S3 information for all objects in the bucket
+  amazon.aws.s3_object_info:
+    bucket_name: MyTestBucket
+    object_details:
+      object_acl: true
+      object_attributes: true
+      attributes_list:
+        - ETag
+        - ObjectSize
+        - StorageClass
+
+- name: Retrieve detailed S3 object information
+  amazon.aws.s3_object_info:
+    bucket_name: MyTestBucket
+    object_name: MyTestObjectKey
+    object_details:
+      object_acl: true
+      object_tagging: true
+      object_legal_hold: true
+      object_attributes: true
+      attributes_list:
+        - ETag
+        - ObjectSize
+
+'''
+
+RETURN = r'''
+s3_keys:
+  description: List of object keys.
+  returned: when only I(bucket_name) is specified and I(object_name), I(object_details) are not specified.
+  type: list
+  elements: str
+  sample:
+  - prefix1/
+  - prefix1/key1
+  - prefix1/key2
+object_info:
+  description: S3 object details.
+  returned: when I(bucket_name) and I(object_name) are specified.
+  type: list
+  elements: dict
+  contains:
+    object_data:
+      description: A dict containing the metadata of the S3 object.
+      returned: when I(bucket_name) and I(object_name) are specified but I(object_details) is not specified.
+      type: dict
+      elements: str
+      contains:
+        accept_ranges:
+          description: Indicates that a range of bytes was specified.
+          returned: always
+          type: str
+        content_length:
+          description: Size of the body (object data) in bytes.
+          returned: always
+          type: int
+        content_type:
+          description: A standard MIME type describing the format of the object data.
+          returned: always
+          type: str
+        e_tag:
+          description: An opaque identifier assigned by a web server to a specific version of a resource found at a URL.
+          returned: always
+          type: str
+        last_modified:
+          description: Creation date of the object.
+          returned: always
+          type: str
+        metadata:
+          description: A map of metadata to store with the object in S3.
+          returned: always
+          type: dict
+        server_side_encryption:
+          description: The server-side encryption algorithm used when storing this object in Amazon S3.
+          returned: always
+          type: str
+        tag_count:
+          description: The number of tags, if any, on the object.
+          returned: always
+          type: int
+    object_acl:
+      description: Access control list (ACL) of an object.
+      returned: when I(object_acl) is set to I(true).
+      type: complex
+      contains:
+        owner:
+          description: Bucket owner's display ID and name.
+          returned: always
+          type: complex
+          contains:
+            id:
+              description: Bucket owner's ID.
+              returned: always
+              type: str
+              sample: "xxxxxxxxxxxxxxxxxxxxx"
+            display_name:
+              description: Bucket owner's display name.
+              returned: always
+              type: str
+              sample: 'abcd'
+        grants:
+          description: A list of grants.
+          returned: always
+          type: complex
+          contains:
+            grantee:
+              description: The entity being granted permissions.
+              returned: always
+              type: complex
+              contains:
+                id:
+                  description: The canonical user ID of the grantee.
+                  returned: always
+                  type: str
+                  sample: "xxxxxxxxxxxxxxxxxxx"
+                type:
+                  description: Type of grantee.
+                  returned: always
+                  type: str
+                  sample: "CanonicalUser"
+            permission:
+              description: Specifies the permission given to the grantee.
+              returned: always
+              type: str
+              sample: "FULL_CONTROL"
+    object_legal_hold:
+      description: Object's current legal hold status.
+      returned: when I(object_legal_hold) is set to I(true) and object legal hold is set on the bucket.
+      type: complex
+      contains:
+        legal_hold:
+          description: The current legal hold status for the specified object.
+          returned: always
+          type: complex
+          contains:
+            status:
+              description: Indicates whether the specified object has a legal hold in place.
+              returned: always
+              type: str
+              sample: "ON"
+    object_lock_configuration:
+      description: Object Lock configuration for a bucket.
+      returned: when I(object_lock_configuration) is set to I(true) and object lock configuration is set on the bucket.
+      type: complex
+      contains:
+        object_lock_enabled:
+          description: Indicates whether this bucket has an Object Lock configuration enabled.
+          returned: always
+          type: str
+        rule:
+          description: Specifies the Object Lock rule for the specified object.
+          returned: always
+          type: complex
+          contains:
+            default_retention:
+              description: The default Object Lock retention mode and period that you want to apply to new objects placed in the specified bucket.
+              returned: always
+              type: complex
+              contains:
+                mode:
+                  description:
+                    - The default Object Lock retention mode you want to apply to new objects placed in the specified bucket.
+                    - Must be used with either Days or Years.
+                  returned: always
+                  type: str
+                days:
+                  description: The number of days that you want to specify for the default retention period.
+                  returned: always
+                  type: int
+                years:
+                  description: The number of years that you want to specify for the default retention period.
+                  returned: always
+                  type: int
+    object_retention:
+      description: Object's retention settings.
+      returned: when I(object_retention) is set to I(true) and object retention is set on the bucket.
+      type: complex
+      contains:
+        retention:
+          description: The container element for an object's retention settings.
+          returned: always
+          type: complex
+          contains:
+            mode:
+              description: Indicates the Retention mode for the specified object.
+              returned: always
+              type: str
+            retain_until_date:
+              description: The date on which this Object Lock Retention will expire.
+              returned: always
+              type: str
+    object_tagging:
+      description: The tag-set of an object.
+      returned: when I(object_tagging) is set to I(true).
+      type: dict
+    object_attributes:
+      description: Object attributes.
+      returned: when I(object_attributes) is set to I(true).
+      type: complex
+      contains:
+        etag:
+          description: An ETag is an opaque identifier assigned by a web server to a specific version of a resource found at a URL.
+          returned: always
+          type: str
+          sample: "8fa34xxxxxxxxxxxxxxxxxxxxx35c6f3b"
+        last_modified:
+          description: The creation date of the object.
+          returned: always
+          type: str
+          sample: "2022-08-10T01:11:03+00:00"
+        object_size:
+          description: The size of the object in bytes.
+          returned: always
+          type: int
+          sample: 819
+        checksum:
+          description: The checksum or digest of the object.
+          returned: always
+          type: complex
+          contains:
+            checksum_crc32:
+              description: The base64-encoded, 32-bit CRC32 checksum of the object.
+              returned: if it was uploaded with the object.
+              type: str
+              sample: "xxxxxxxxxxxx"
+            checksum_crc32c:
+              description: The base64-encoded, 32-bit CRC32C checksum of the object.
+              returned: if it was uploaded with the object.
+              type: str
+              sample: "xxxxxxxxxxxx"
+            checksum_sha1:
+              description: The base64-encoded, 160-bit SHA-1 digest of the object.
+              returned: if it was uploaded with the object.
+              type: str
+              sample: "xxxxxxxxxxxx"
+            checksum_sha256:
+              description: The base64-encoded, 256-bit SHA-256 digest of the object.
+              returned: if it was uploaded with the object.
+              type: str
+              sample: "xxxxxxxxxxxx"
+        object_parts:
+          description: A collection of parts associated with a multipart upload.
+          returned: always
+          type: complex
+          contains:
+            total_parts_count:
+              description: The total number of parts.
+              returned: always
+              type: int
+            part_number_marker:
+              description: The marker for the current part.
+              returned: always
+              type: int
+            next_part_number_marker:
+              description:
+                - When a list is truncated, this element specifies the last part in the list,
+                  as well as the value to use for the PartNumberMarker request parameter in a subsequent request.
+              returned: always
+              type: int
+            max_parts:
+              description: The maximum number of parts allowed in the response.
+              returned: always
+              type: int
+            is_truncated:
+              description: Indicates whether the returned list of parts is truncated.
+              returned: always
+              type: bool
+            storage_class:
+              description: The storage class information of the object.
+              returned: always
+              type: str
+              sample: "STANDARD"
+            parts:
+              description: A container for elements related to an individual part.
+              returned: always
+              type: complex
+              contains:
+                part_number:
+                  description: The part number identifying the part. This value is a positive integer between 1 and 10,000.
+                  returned: always
+                  type: int
+                size:
+                  description: The size of the uploaded part in bytes.
+                  returned: always
+                  type: int
+                checksum_crc32:
+                  description: The base64-encoded, 32-bit CRC32 checksum of the object.
+                  returned: if it was uploaded with the object.
+                  type: str
+                  sample: "xxxxxxxxxxxx"
+                checksum_crc32c:
+                  description: The base64-encoded, 32-bit CRC32C checksum of the object.
+                  returned: if it was uploaded with the object.
+                  type: str
+                  sample: "xxxxxxxxxxxx"
+                checksum_sha1:
+                  description: The base64-encoded, 160-bit SHA-1 digest of the object.
+                  returned: if it was uploaded with the object.
+                  type: str
+                  sample: "xxxxxxxxxxxx"
+                checksum_sha256:
+                  description: The base64-encoded, 256-bit SHA-256 digest of the object.
+                  returned: if it was uploaded with the object.
+ type: str + sample: "xxxxxxxxxxxx" +''' + +import os + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.basic import to_text +from ansible.module_utils.six.moves.urllib.parse import urlparse + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn + + +def describe_s3_object_acl(connection, bucket_name, object_name): + params = {} + params['Bucket'] = bucket_name + params['Key'] = object_name + + object_acl_info = {} + + try: + object_acl_info = connection.get_object_acl(**params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + pass + + if len(object_acl_info) != 0: + # Remove ResponseMetadata from object_acl_info, convert to snake_case + del object_acl_info['ResponseMetadata'] + object_acl_info = camel_dict_to_snake_dict(object_acl_info) + + return object_acl_info + + +def describe_s3_object_attributes(connection, module, bucket_name, object_name): + params = {} + params['Bucket'] = bucket_name + params['Key'] = object_name + params['ObjectAttributes'] = module.params.get('object_details')['attributes_list'] + + object_attributes_info = {} + + try: + object_attributes_info = connection.get_object_attributes(**params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + object_attributes_info['msg'] = 'Object attributes not found' + + if len(object_attributes_info) != 0 and 'msg' not in object_attributes_info.keys(): + # Remove ResponseMetadata from object_attributes_info, convert to snake_case + del object_attributes_info['ResponseMetadata'] + object_attributes_info = camel_dict_to_snake_dict(object_attributes_info) + + return object_attributes_info + + +def describe_s3_object_legal_hold(connection, bucket_name, object_name): + params = {} + params['Bucket'] = bucket_name + params['Key'] = object_name + + object_legal_hold_info = {} + + try: + object_legal_hold_info = connection.get_object_legal_hold(**params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + pass + + if len(object_legal_hold_info) != 0: + # Remove ResponseMetadata from object_legal_hold_info, convert to snake_case + del object_legal_hold_info['ResponseMetadata'] + object_legal_hold_info = camel_dict_to_snake_dict(object_legal_hold_info) + + return object_legal_hold_info + + +def describe_s3_object_lock_configuration(connection, bucket_name): + params = {} + params['Bucket'] = bucket_name + + object_legal_lock_configuration_info = {} + + try: + object_legal_lock_configuration_info = connection.get_object_lock_configuration(**params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + pass + + if len(object_legal_lock_configuration_info) != 0: + # Remove ResponseMetadata from object_legal_lock_configuration_info, convert to snake_case + del object_legal_lock_configuration_info['ResponseMetadata'] + object_legal_lock_configuration_info = 
camel_dict_to_snake_dict(object_legal_lock_configuration_info) + + return object_legal_lock_configuration_info + + +def describe_s3_object_retention(connection, bucket_name, object_name): + params = {} + params['Bucket'] = bucket_name + params['Key'] = object_name + + object_retention_info = {} + + try: + object_retention_info = connection.get_object_retention(**params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + pass + + if len(object_retention_info) != 0: + # Remove ResponseMetadata from object_retention_info, convert to snake_case + del object_retention_info['ResponseMetadata'] + object_retention_info = camel_dict_to_snake_dict(object_retention_info) + + return object_retention_info + + +def describe_s3_object_tagging(connection, bucket_name, object_name): + params = {} + params['Bucket'] = bucket_name + params['Key'] = object_name + + object_tagging_info = {} + + try: + object_tagging_info = connection.get_object_tagging(**params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + pass + + if len(object_tagging_info) != 0: + # Remove ResponseMetadata from object_tagging_info, convert to snake_case + del object_tagging_info['ResponseMetadata'] + object_tagging_info = boto3_tag_list_to_ansible_dict(object_tagging_info['TagSet']) + + return object_tagging_info + + +def get_object_details(connection, module, bucket_name, object_name, requested_facts): + + all_facts = {} + + # Remove non-requested facts + requested_facts = {fact: value for fact, value in requested_facts.items() if value is True} + + all_facts['object_data'] = get_object(connection, bucket_name, object_name)['object_data'] + + # Below APIs do not return object_name, need to add it manually + all_facts['object_name'] = object_name + + for key in requested_facts: + if key == 'object_acl': + all_facts[key] = {} + all_facts[key] = describe_s3_object_acl(connection, bucket_name, object_name) + elif key == 'object_attributes': + all_facts[key] = {} + all_facts[key] = describe_s3_object_attributes(connection, module, bucket_name, object_name) + elif key == 'object_legal_hold': + all_facts[key] = {} + all_facts[key] = describe_s3_object_legal_hold(connection, bucket_name, object_name) + elif key == 'object_lock_configuration': + all_facts[key] = {} + all_facts[key] = describe_s3_object_lock_configuration(connection, bucket_name) + elif key == 'object_retention': + all_facts[key] = {} + all_facts[key] = describe_s3_object_retention(connection, bucket_name, object_name) + elif key == 'object_tagging': + all_facts[key] = {} + all_facts[key] = describe_s3_object_tagging(connection, bucket_name, object_name) + + return all_facts + + +def get_object(connection, bucket_name, object_name): + params = {} + params['Bucket'] = bucket_name + params['Key'] = object_name + + result = {} + object_info = {} + + try: + object_info = connection.head_object(**params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + pass + + if len(object_info) != 0: + # Remove ResponseMetadata from object_info, convert to snake_case + del object_info['ResponseMetadata'] + object_info = camel_dict_to_snake_dict(object_info) + + result['object_data'] = object_info + + return result + + +@AWSRetry.jittered_backoff(retries=10) +def _list_bucket_objects(connection, **params): + paginator = connection.get_paginator('list_objects') + return paginator.paginate(**params).build_full_result() + + +def list_bucket_objects(connection, module, bucket_name): + params = 
{}
+    params['Bucket'] = bucket_name
+
+    result = []
+    list_objects_response = {}
+
+    try:
+        list_objects_response = _list_bucket_objects(connection, **params)
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg='Failed to list bucket objects.')
+
+    if len(list_objects_response) != 0:
+        # convert to snake_case
+        for response_list_item in list_objects_response['Contents']:
+            result.append(response_list_item['Key'])
+
+    return result
+
+
+def bucket_check(connection, module, bucket_name):
+    try:
+        connection.head_bucket(Bucket=bucket_name)
+    except is_boto3_error_code(['404', '403']) as e:
+        module.fail_json_aws(e, msg="The bucket %s does not exist or is missing access permissions." % bucket_name)
+
+
+def object_check(connection, module, bucket_name, object_name):
+    try:
+        connection.head_object(Bucket=bucket_name, Key=object_name)
+    except is_boto3_error_code(['404', '403']) as e:
+        module.fail_json_aws(e, msg="The object %s does not exist or is missing access permissions." % object_name)
+
+
+# Helpers to get an S3 connection when dealing with Ceph, dualstack, etc.
+def is_fakes3(endpoint_url):
+    """ Return True if endpoint_url has scheme fakes3:// """
+    if endpoint_url is not None:
+        return urlparse(endpoint_url).scheme in ('fakes3', 'fakes3s')
+    else:
+        return False
+
+
+def get_s3_connection(module, aws_connect_kwargs, location, ceph, endpoint_url, sig_4=False):
+    if ceph:  # TODO - test this
+        ceph = urlparse(endpoint_url)
+        params = dict(module=module, conn_type='client', resource='s3', use_ssl=ceph.scheme == 'https',
+                      region=location, endpoint=endpoint_url, **aws_connect_kwargs)
+    elif is_fakes3(endpoint_url):
+        fakes3 = urlparse(endpoint_url)
+        port = fakes3.port
+        if fakes3.scheme == 'fakes3s':
+            protocol = "https"
+            if port is None:
+                port = 443
+        else:
+            protocol = "http"
+            if port is None:
+                port = 80
+        params = dict(module=module, conn_type='client', resource='s3', region=location,
+                      endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)),
+                      use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs)
+    else:
+        params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=endpoint_url, **aws_connect_kwargs)
+    # Use .get() here: unlike s3_object, this module does not define 'mode' or
+    # 'encryption_mode' in its argument spec, so item access would raise a KeyError.
+    if module.params.get('mode') == 'put' and module.params.get('encryption_mode') == 'aws:kms':
+        params['config'] = botocore.client.Config(signature_version='s3v4')
+    elif module.params.get('mode') in ('get', 'getstr') and sig_4:
+        params['config'] = botocore.client.Config(signature_version='s3v4')
+    if module.params['dualstack']:
+        dualconf = botocore.client.Config(s3={'use_dualstack_endpoint': True})
+        if 'config' in params:
+            params['config'] = params['config'].merge(dualconf)
+        else:
+            params['config'] = dualconf
+    return boto3_conn(**params)
+
+
+def main():
+
+    argument_spec = dict(
+        object_details=dict(type='dict', options=dict(
+            object_acl=dict(type='bool', default=False),
+            object_legal_hold=dict(type='bool', default=False),
+            object_lock_configuration=dict(type='bool', default=False),
+            object_retention=dict(type='bool', default=False),
+            object_tagging=dict(type='bool', default=False),
+            object_attributes=dict(type='bool', default=False),
+            attributes_list=dict(type='list', elements='str', choices=['ETag', 'Checksum', 'ObjectParts', 'StorageClass', 'ObjectSize'])),
+            required_if=[
+                ("object_attributes", True, ["attributes_list"]),
+            ]
+        ),
+        bucket_name=dict(required=True, type='str'),
+        object_name=dict(type='str'),
+        dualstack=dict(default=False, type='bool'),
+
ceph=dict(default=False, type='bool', aliases=['rgw']), + ) + + required_if = [ + ['ceph', True, ['endpoint_url']], + ] + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=required_if, + ) + + bucket_name = module.params.get('bucket_name') + object_name = module.params.get('object_name') + requested_object_details = module.params.get('object_details') + endpoint_url = module.params.get('endpoint_url') + dualstack = module.params.get('dualstack') + ceph = module.params.get('ceph') + + if not endpoint_url and 'S3_URL' in os.environ: + endpoint_url = os.environ['S3_URL'] + module.deprecate( + "Support for the 'S3_URL' environment variable has been " + "deprecated. We recommend using the 'endpoint_url' module " + "parameter. Alternatively, the 'AWS_URL' environment variable can " + "be used instead.", + date='2024-12-01', collection_name='amazon.aws', + ) + + if dualstack and endpoint_url is not None and 'amazonaws.com' not in endpoint_url: + module.fail_json(msg='dualstack only applies to AWS S3') + + result = [] + + if endpoint_url: + region, _ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + if region in ('us-east-1', '', None): + # default to US Standard region + location = 'us-east-1' + else: + # Boto uses symbolic names for locations but region strings will + # actually work fine for everything except us-east-1 (US Standard) + location = region + for key in ['validate_certs', 'security_token', 'profile_name']: + aws_connect_kwargs.pop(key, None) + connection = get_s3_connection(module, aws_connect_kwargs, location, ceph, endpoint_url) + else: + try: + connection = module.client('s3', retry_decorator=AWSRetry.jittered_backoff()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed to connect to AWS') + + # check if specified bucket exists + bucket_check(connection, module, bucket_name) + # check if specified object exists + if object_name: + object_check(connection, module, bucket_name, object_name) + + if requested_object_details and requested_object_details['object_attributes']: + module.require_botocore_at_least('1.24.7', reason='required for s3.get_object_attributes') + + if requested_object_details: + if object_name: + object_details = get_object_details(connection, module, bucket_name, object_name, requested_object_details) + result.append(object_details) + elif object_name is None: + object_list = list_bucket_objects(connection, module, bucket_name) + for object in object_list: + result.append(get_object_details(connection, module, bucket_name, object, requested_object_details)) + + elif not requested_object_details and object_name: + # if specific details are not requested, return object metadata + object_details = get_object(connection, bucket_name, object_name) + result.append(object_details) + else: + # return list of all objects in a bucket if object name and object details not specified + object_list = list_bucket_objects(connection, module, bucket_name) + module.exit_json(s3_keys=object_list) + + module.exit_json(object_info=result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/requirements.txt b/ansible_collections/amazon/aws/requirements.txt new file mode 100644 index 000000000..0a1981f46 --- /dev/null +++ b/ansible_collections/amazon/aws/requirements.txt @@ -0,0 +1,6 @@ +# When updating the minimal requirements please also update +# - tests/unit/constraints.txt +# - 
tests/integration/constraints.txt
+# - tests/integration/targets/setup_botocore_pip
+botocore>=1.21.0
+boto3>=1.18.0
diff --git a/ansible_collections/amazon/aws/test-requirements.txt b/ansible_collections/amazon/aws/test-requirements.txt
new file mode 100644
index 000000000..7d12621a1
--- /dev/null
+++ b/ansible_collections/amazon/aws/test-requirements.txt
@@ -0,0 +1,17 @@
+botocore
+boto3
+
+coverage==4.5.4
+placebo
+mock
+pytest
+pytest-forked
+pytest-mock
+pytest-xdist
+
+# Needed for ansible.utils.ipaddr in tests
+netaddr
+# Sometimes needed where we don't have features we need in modules
+awscli
+# Used for comparing SSH Public keys to the Amazon fingerprints
+pycrypto
diff --git a/ansible_collections/amazon/aws/tests/.gitignore b/ansible_collections/amazon/aws/tests/.gitignore
new file mode 100644
index 000000000..771252a75
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/.gitignore
@@ -0,0 +1,3 @@
+output/
+integration/cloud-config-*.ini
+integration/cloud-config-*.yml
diff --git a/ansible_collections/amazon/aws/tests/config.yml b/ansible_collections/amazon/aws/tests/config.yml
new file mode 100644
index 000000000..5112f7268
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/config.yml
@@ -0,0 +1,2 @@
+modules:
+  python_requires: '>=3.6'
diff --git a/ansible_collections/amazon/aws/tests/integration/constraints.txt b/ansible_collections/amazon/aws/tests/integration/constraints.txt
new file mode 100644
index 000000000..cd546e7c2
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/constraints.txt
@@ -0,0 +1,7 @@
+# Specifically run tests against the oldest versions that we support
+boto3==1.18.0
+botocore==1.21.0
+
+# AWS CLI has `botocore==` dependencies; provide the one that matches botocore
+# to avoid needing to download over a year's worth of awscli wheels.
+awscli==1.20.0 diff --git a/ansible_collections/amazon/aws/tests/integration/inventory b/ansible_collections/amazon/aws/tests/integration/inventory new file mode 100644 index 000000000..c6f18066e --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/inventory @@ -0,0 +1,2 @@ +[testgroup] +testhost ansible_connection="local" ansible_pipelining="yes" ansible_python_interpreter="/home/matthew/.pyenv/versions/3.10.1/bin/python3.10" diff --git a/ansible_collections/amazon/aws/tests/integration/requirements.txt b/ansible_collections/amazon/aws/tests/integration/requirements.txt new file mode 100644 index 000000000..de670082f --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/requirements.txt @@ -0,0 +1,11 @@ +# Our code is based on the AWS SDKs +boto3 +botocore + +# netaddr is needed for ansible.utils.ipv6 +netaddr +virtualenv +# Sometimes needed where we don't have features we need in modules +awscli +# Used for comparing SSH Public keys to the Amazon fingerprints +pycrypto diff --git a/ansible_collections/amazon/aws/tests/integration/requirements.yml b/ansible_collections/amazon/aws/tests/integration/requirements.yml new file mode 100644 index 000000000..df4d6171d --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/requirements.yml @@ -0,0 +1,4 @@ +--- +collections: +- ansible.windows +- ansible.utils # ipv6 filter diff --git a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/aliases b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/aliases new file mode 100644 index 000000000..5619cbdc8 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/aliases @@ -0,0 +1,7 @@ +# reason: slow +# Tests take around 30 minutes + +slow +cloud/aws + +autoscaling_group_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/inventory b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/inventory new file mode 100644 index 000000000..edc19ef5f --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/inventory @@ -0,0 +1,8 @@ +[tests] +create_update_delete +tag_operations +instance_detach + +[all:vars] +ansible_connection=local +ansible_python_interpreter="{{ ansible_playbook_python }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/main.yml new file mode 100644 index 000000000..d2479e44f --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/main.yml @@ -0,0 +1,35 @@ +# Beware: most of our tests here are run in parallel. 
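+# (The hosts under [tests] in the inventory above, create_update_delete,
+# tag_operations and instance_detach, each name one test case; main.yml runs
+# them side by side.)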
+# To add new tests you'll need to add a new host to the inventory and a matching +# '{{ inventory_hostname }}'.yml file in roles/ec2_asg/tasks/ + + +# Prepare the VPC and figure out which AMI to use +- hosts: all + gather_facts: no + tasks: + - module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + - include_role: + name: setup_ec2_facts + - include_role: + name: ec2_asg + tasks_from: env_setup.yml + rescue: + - include_role: + name: ec2_asg + tasks_from: env_cleanup.yml + run_once: yes + - fail: + msg: Environment preparation failed + run_once: yes +- hosts: all + gather_facts: no + strategy: free + serial: 6 + roles: + - ec2_asg diff --git a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/meta/main.yml new file mode 100644 index 000000000..1d40168d0 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: +- setup_ec2_facts diff --git a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/defaults/main.yml new file mode 100644 index 000000000..da86a186e --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/defaults/main.yml @@ -0,0 +1,2 @@ +load_balancer_name: '{{ tiny_prefix }}-lb' +ec2_asg_setup_run_once: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/create_update_delete.yml b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/create_update_delete.yml new file mode 100644 index 000000000..0e57eaa50 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/create_update_delete.yml @@ -0,0 +1,593 @@ +# tasks file for test_ec2_asg + + # ============================================================ + +- name: Test create/update/delete AutoScalingGroups with ec2_asg + block: + + # ============================================================ + + - name: test without specifying required module options + ec2_asg: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + ignore_errors: true + register: result + - name: assert name is a required module option + assert: + that: + - "result.msg == 'missing required arguments: name'" + + + - name: ensure launch configs exist + ec2_lc: + name: '{{ item }}' + assign_public_ip: true + image_id: '{{ ec2_ami_id }}' + user_data: | + #cloud-config + package_upgrade: true + package_update: true + packages: + - httpd + runcmd: + - "service httpd start" + security_groups: '{{ sg.group_id }}' + instance_type: t3.micro + loop: + - '{{ resource_prefix }}-lc' + - '{{ resource_prefix }}-lc-2' + + # ============================================================ + + - name: launch asg and wait for instances to be deemed healthy (no ELB) + ec2_asg: + name: '{{ resource_prefix }}-asg' + launch_config_name: '{{ resource_prefix }}-lc' + desired_capacity: 1 + min_size: 1 + max_size: 1 + vpc_zone_identifier: '{{ testing_subnet.subnet.id }}' + state: present + wait_for_instances: yes 
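+      # (wait_for_instances makes the module poll until the count of healthy,
+      # in-service instances reaches desired_capacity or wait_timeout expires;
+      # that count is reported as viable_instances, asserted on below)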
+ register: output + - assert: + that: + - output.viable_instances == 1 + + - name: Enable metrics collection - check_mode + ec2_asg: + name: '{{ resource_prefix }}-asg' + metrics_collection: yes + register: output + check_mode: true + - assert: + that: + - output is changed + - output is not failed + - '"autoscaling:UpdateAutoScalingGroup" not in output.resource_actions' + + - name: Enable metrics collection + ec2_asg: + name: '{{ resource_prefix }}-asg' + metrics_collection: yes + register: output + - assert: + that: + - output is changed + + - name: Enable metrics collection (idempotency) + ec2_asg: + name: '{{ resource_prefix }}-asg' + metrics_collection: yes + register: output + - assert: + that: + - output is not changed + + - name: Disable metrics collection - check_mode + ec2_asg: + name: '{{ resource_prefix }}-asg' + metrics_collection: no + register: output + check_mode: true + - assert: + that: + - output is changed + - output is not failed + - '"autoscaling:UpdateAutoScalingGroup" not in output.resource_actions' + + + - name: Disable metrics collection + ec2_asg: + name: '{{ resource_prefix }}-asg' + metrics_collection: no + register: output + - assert: + that: + - output is changed + + - name: Disable metrics collection (idempotency) + ec2_asg: + name: '{{ resource_prefix }}-asg' + metrics_collection: no + register: output + - assert: + that: + - output is not changed + + - name: kill asg + ec2_asg: + name: '{{ resource_prefix }}-asg' + state: absent + wait_timeout: 800 + async: 400 + - name: launch asg and do not wait for instances to be deemed healthy (no ELB) + ec2_asg: + name: '{{ resource_prefix }}-asg' + launch_config_name: '{{ resource_prefix }}-lc' + desired_capacity: 1 + min_size: 1 + max_size: 1 + vpc_zone_identifier: '{{ testing_subnet.subnet.id }}' + wait_for_instances: no + state: present + register: output + - assert: + that: + - output.viable_instances == 0 + + - name: kill asg + ec2_asg: + name: '{{ resource_prefix }}-asg' + state: absent + wait_timeout: 800 + register: output + retries: 3 + until: output is succeeded + delay: 10 + async: 400 + - name: create asg with asg metrics enabled + ec2_asg: + name: '{{ resource_prefix }}-asg' + metrics_collection: true + launch_config_name: '{{ resource_prefix }}-lc' + desired_capacity: 0 + min_size: 0 + max_size: 0 + vpc_zone_identifier: '{{ testing_subnet.subnet.id }}' + state: present + register: output + - assert: + that: + - "'Group' in output.metrics_collection.0.Metric" + + - name: kill asg + ec2_asg: + name: '{{ resource_prefix }}-asg' + state: absent + wait_timeout: 800 + async: 400 + - name: launch load balancer + ec2_elb_lb: + name: '{{ load_balancer_name }}' + state: present + security_group_ids: + - '{{ sg.group_id }}' + subnets: '{{ testing_subnet.subnet.id }}' + connection_draining_timeout: 60 + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + health_check: + ping_protocol: tcp + ping_port: 80 + ping_path: / + response_timeout: 5 + interval: 10 + unhealthy_threshold: 4 + healthy_threshold: 2 + register: load_balancer + - name: launch asg and wait for instances to be deemed healthy (ELB) + ec2_asg: + name: '{{ resource_prefix }}-asg' + launch_config_name: '{{ resource_prefix }}-lc' + health_check_type: ELB + desired_capacity: 1 + min_size: 1 + max_size: 1 + health_check_period: 300 + vpc_zone_identifier: '{{ testing_subnet.subnet.id }}' + load_balancers: '{{ load_balancer_name }}' + wait_for_instances: yes + wait_timeout: 900 + state: present + register: output + - assert: + 
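+      # (with health_check_type: ELB an instance only becomes viable once the
+      # load balancer reports it healthy, hence the longer wait_timeout above)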
that:
+        - output.viable_instances == 1
+
+  # ============================================================
+
+  # grow scaling group to 3
+  - name: add 2 more instances and wait for instances to be deemed healthy (ELB)
+    ec2_asg:
+      name: '{{ resource_prefix }}-asg'
+      launch_config_name: '{{ resource_prefix }}-lc'
+      health_check_type: ELB
+      desired_capacity: 3
+      min_size: 3
+      max_size: 5
+      health_check_period: 600
+      vpc_zone_identifier: '{{ testing_subnet.subnet.id }}'
+      load_balancers: '{{ load_balancer_name }}'
+      wait_for_instances: yes
+      wait_timeout: 1200
+      state: present
+    register: output
+  - assert:
+      that:
+        - output.viable_instances == 3
+
+  # ============================================================
+
+  # Test max_instance_lifetime option
+  - name: enable asg max_instance_lifetime
+    ec2_asg:
+      name: '{{ resource_prefix }}-asg'
+      max_instance_lifetime: 604801
+    register: output
+  - name: ensure max_instance_lifetime is set
+    assert:
+      that:
+        - output.max_instance_lifetime == 604801
+
+  - name: run without max_instance_lifetime
+    ec2_asg:
+      name: '{{ resource_prefix }}-asg'
+      launch_config_name: '{{ resource_prefix }}-lc'
+    register: output
+  - name: ensure max_instance_lifetime not affected by defaults
+    assert:
+      that:
+        - output.max_instance_lifetime == 604801
+
+  - name: disable asg max_instance_lifetime
+    ec2_asg:
+      name: '{{ resource_prefix }}-asg'
+      launch_config_name: '{{ resource_prefix }}-lc'
+      max_instance_lifetime: 0
+    register: output
+  - name: ensure max_instance_lifetime is not set
+    assert:
+      that:
+        - not output.max_instance_lifetime
+
+  # ============================================================
+
+  # perform rolling replace with a different launch configuration
+  - name: perform rolling update to new AMI
+    ec2_asg:
+      name: '{{ resource_prefix }}-asg'
+      launch_config_name: '{{ resource_prefix }}-lc-2'
+      health_check_type: ELB
+      desired_capacity: 3
+      min_size: 1
+      max_size: 5
+      health_check_period: 900
+      load_balancers: '{{ load_balancer_name }}'
+      vpc_zone_identifier: '{{ testing_subnet.subnet.id }}'
+      wait_for_instances: yes
+      replace_all_instances: yes
+      wait_timeout: 1800
+      state: present
+    register: output
+  - assert:
+      that:
+        - item.value.launch_config_name == '{{ resource_prefix }}-lc-2'
+    loop: '{{ output.instance_facts | dict2items }}'
+  - assert:
+      that:
+        - output.viable_instances == 3
+
+  # ============================================================
+
+  # perform rolling replace with the original launch configuration
+  - name: perform rolling update to new AMI while removing the load balancer
+    ec2_asg:
+      name: '{{ resource_prefix }}-asg'
+      launch_config_name: '{{ resource_prefix }}-lc'
+      health_check_type: EC2
+      desired_capacity: 3
+      min_size: 1
+      max_size: 5
+      health_check_period: 900
+      load_balancers: []
+      vpc_zone_identifier: '{{ testing_subnet.subnet.id }}'
+      wait_for_instances: yes
+      replace_all_instances: yes
+      wait_timeout: 1800
+      state: present
+    register: output
+  - assert:
+      that:
+        - item.value.launch_config_name == '{{ resource_prefix }}-lc'
+    loop: '{{ output.instance_facts | dict2items }}'
+  - assert:
+      that:
+        - output.viable_instances == 3
+
+  # ============================================================
+
+  # perform rolling replace with a new launch configuration and lc_check: false
+  - name: 'perform rolling update to new AMI with lc_check: false'
+    ec2_asg:
+      name: '{{ resource_prefix }}-asg'
+      launch_config_name: '{{ resource_prefix }}-lc-2'
+      health_check_type: EC2
+      desired_capacity: 3
+      min_size: 1
+      max_size: 5
+      health_check_period: 900
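+      # (lc_check: false, set below, skips checking whether an instance is
+      # already running the target launch configuration, so every instance is
+      # replaced unconditionally)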
load_balancers: [] + vpc_zone_identifier: '{{ testing_subnet.subnet.id }}' + wait_for_instances: yes + replace_all_instances: yes + replace_batch_size: 3 + lc_check: false + wait_timeout: 1800 + state: present + - name: get ec2_asg info + ec2_asg_info: + name: '{{ resource_prefix }}-asg' + register: output + - assert: + that: + - output.results[0].instances | length == 3 + + # ============================================================ + + - name: kill asg + ec2_asg: + name: '{{ resource_prefix }}-asg' + state: absent + wait_timeout: 800 + async: 400 + - name: 'new asg with lc_check: false' + ec2_asg: + name: '{{ resource_prefix }}-asg' + launch_config_name: '{{ resource_prefix }}-lc' + health_check_type: EC2 + desired_capacity: 3 + min_size: 1 + max_size: 5 + health_check_period: 900 + load_balancers: [] + vpc_zone_identifier: '{{ testing_subnet.subnet.id }}' + wait_for_instances: yes + replace_all_instances: yes + replace_batch_size: 3 + lc_check: false + wait_timeout: 1800 + state: present + - name: get ec2_asg information + ec2_asg_info: + name: '{{ resource_prefix }}-asg' + register: output + - assert: + that: + - output.results[0].instances | length == 3 + + # we need a launch template, otherwise we cannot test the mixed instance policy + - name: create launch template for autoscaling group to test its mixed instances + policy + ec2_launch_template: + template_name: '{{ resource_prefix }}-lt' + image_id: '{{ ec2_ami_id }}' + instance_type: t3.micro + credit_specification: + cpu_credits: standard + network_interfaces: + - associate_public_ip_address: yes + delete_on_termination: yes + device_index: 0 + groups: + - '{{ sg.group_id }}' + + - name: update autoscaling group with mixed-instances policy with mixed instances + types - check_mode + ec2_asg: + name: '{{ resource_prefix }}-asg' + launch_template: + launch_template_name: '{{ resource_prefix }}-lt' + desired_capacity: 1 + min_size: 1 + max_size: 1 + vpc_zone_identifier: '{{ testing_subnet.subnet.id }}' + state: present + mixed_instances_policy: + instance_types: + - t3.micro + - t2.nano + wait_for_instances: yes + register: output + check_mode: true + - assert: + that: + - output is changed + - output is not failed + - '"autoscaling:CreateOrUpdateTags" not in output.resource_actions' + + - name: update autoscaling group with mixed-instances policy with mixed instances + types + ec2_asg: + name: '{{ resource_prefix }}-asg' + launch_template: + launch_template_name: '{{ resource_prefix }}-lt' + desired_capacity: 1 + min_size: 1 + max_size: 1 + vpc_zone_identifier: '{{ testing_subnet.subnet.id }}' + state: present + mixed_instances_policy: + instance_types: + - t3.micro + - t2.nano + wait_for_instances: yes + register: output + - assert: + that: + - output.mixed_instances_policy | length == 2 + - output.mixed_instances_policy[0] == 't3.micro' + - output.mixed_instances_policy[1] == 't2.nano' + + - name: update autoscaling group with mixed-instances policy with instances_distribution + ec2_asg: + name: '{{ resource_prefix }}-asg' + launch_template: + launch_template_name: '{{ resource_prefix }}-lt' + desired_capacity: 1 + min_size: 1 + max_size: 1 + vpc_zone_identifier: '{{ testing_subnet.subnet.id }}' + state: present + mixed_instances_policy: + instance_types: + - t3.micro + - t2.nano + instances_distribution: + on_demand_percentage_above_base_capacity: 0 + spot_allocation_strategy: capacity-optimized + wait_for_instances: yes + register: output + - assert: + that: + - 
output.mixed_instances_policy_full['launch_template']['overrides'][0]['instance_type'] + == 't3.micro' + - output.mixed_instances_policy_full['launch_template']['overrides'][1]['instance_type'] + == 't2.nano' + - output.mixed_instances_policy_full['instances_distribution']['on_demand_percentage_above_base_capacity'] + == 0 + - output.mixed_instances_policy_full['instances_distribution']['spot_allocation_strategy'] + == 'capacity-optimized' + + # ============================================================ + + # Target group names have max length of 32 characters + - set_fact: + tg1_name: "ansible-test-{{tiny_prefix}}-asg-t1" + tg2_name: "ansible-test-{{tiny_prefix}}-asg-t2" + - name: create target group 1 + elb_target_group: + name: '{{ tg1_name }}' + protocol: tcp + port: 80 + health_check_protocol: tcp + health_check_port: 80 + healthy_threshold_count: 2 + unhealthy_threshold_count: 2 + vpc_id: '{{ testing_vpc.vpc.id }}' + state: present + register: out_tg1 + - name: create target group 2 + elb_target_group: + name: '{{ tg2_name }}' + protocol: tcp + port: 80 + health_check_protocol: tcp + health_check_port: 80 + healthy_threshold_count: 2 + unhealthy_threshold_count: 2 + vpc_id: '{{ testing_vpc.vpc.id }}' + state: present + register: out_tg2 + - name: update autoscaling group with tg1 + ec2_asg: + name: '{{ resource_prefix }}-asg' + launch_template: + launch_template_name: '{{ resource_prefix }}-lt' + target_group_arns: + - '{{ out_tg1.target_group_arn }}' + desired_capacity: 1 + min_size: 1 + max_size: 1 + state: present + wait_for_instances: yes + register: output + - assert: + that: + - output.target_group_arns[0] == out_tg1.target_group_arn + + - name: update autoscaling group add tg2 + ec2_asg: + name: '{{ resource_prefix }}-asg' + launch_template: + launch_template_name: '{{ resource_prefix }}-lt' + target_group_arns: + - '{{ out_tg1.target_group_arn }}' + - '{{ out_tg2.target_group_arn }}' + desired_capacity: 1 + min_size: 1 + max_size: 1 + state: present + wait_for_instances: yes + register: output + - assert: + that: + - output.target_group_arns | length == 2 + + - name: update autoscaling group remove tg1 + ec2_asg: + name: '{{ resource_prefix }}-asg' + launch_template: + launch_template_name: '{{ resource_prefix }}-lt' + target_group_arns: + - '{{ out_tg2.target_group_arn }}' + desired_capacity: 1 + min_size: 1 + max_size: 1 + state: present + wait_for_instances: yes + register: output + - assert: + that: + - output.target_group_arns | length == 1 + - output.target_group_arns[0] == out_tg2.target_group_arn + + - name: update autoscaling group remove tg2 and add tg1 + ec2_asg: + name: '{{ resource_prefix }}-asg' + launch_template: + launch_template_name: '{{ resource_prefix }}-lt' + target_group_arns: + - '{{ out_tg1.target_group_arn }}' + desired_capacity: 1 + min_size: 1 + max_size: 1 + state: present + wait_for_instances: yes + register: output + - assert: + that: + - output.target_group_arns | length == 1 + - output.target_group_arns[0] == out_tg1.target_group_arn + + - name: target group no change + ec2_asg: + name: '{{ resource_prefix }}-asg' + launch_template: + launch_template_name: '{{ resource_prefix }}-lt' + target_group_arns: + - '{{ out_tg1.target_group_arn }}' + desired_capacity: 1 + min_size: 1 + max_size: 1 + state: present + wait_for_instances: yes + register: output + - assert: + that: + - output.target_group_arns | length == 1 + - output.target_group_arns[0] == out_tg1.target_group_arn + - output.changed == false diff --git 
a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/env_cleanup.yml b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/env_cleanup.yml new file mode 100644 index 000000000..e2e6c02f6 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/env_cleanup.yml @@ -0,0 +1,116 @@ +- name: kill asg + ec2_asg: + name: '{{ resource_prefix }}-asg' + state: absent + register: removed + until: removed is not failed + ignore_errors: true + retries: 10 +- name: remove target group + elb_target_group: + name: '{{ item }}' + state: absent + register: removed + until: removed is not failed + ignore_errors: true + retries: 10 + loop: + - '{{ tg1_name }}' + - '{{ tg2_name }}' + +- name: remove the load balancer + ec2_elb_lb: + name: '{{ load_balancer_name }}' + state: absent + security_group_ids: + - '{{ sg.group_id }}' + subnets: '{{ testing_subnet.subnet.id }}' + wait: true + connection_draining_timeout: 60 + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + health_check: + ping_protocol: tcp + ping_port: 80 + ping_path: / + response_timeout: 5 + interval: 10 + unhealthy_threshold: 4 + healthy_threshold: 2 + register: removed + until: removed is not failed + ignore_errors: true + retries: 10 +- name: remove launch configs + ec2_lc: + name: '{{ item }}' + state: absent + register: removed + until: removed is not failed + ignore_errors: true + retries: 10 + loop: + - '{{ resource_prefix }}-lc' + - '{{ resource_prefix }}-lc-2' + +- name: delete launch template + ec2_launch_template: + name: '{{ resource_prefix }}-lt' + state: absent + register: del_lt + retries: 10 + until: del_lt is not failed + ignore_errors: true +- name: remove the security group + ec2_group: + name: '{{ resource_prefix }}-sg' + description: a security group for ansible tests + vpc_id: '{{ testing_vpc.vpc.id }}' + state: absent + register: removed + until: removed is not failed + ignore_errors: true + retries: 10 +- name: remove routing rules + ec2_vpc_route_table: + state: absent + vpc_id: '{{ testing_vpc.vpc.id }}' + tags: + created: '{{ resource_prefix }}-route' + routes: + - dest: 0.0.0.0/0 + gateway_id: '{{ igw.gateway_id }}' + subnets: + - '{{ testing_subnet.subnet.id }}' + register: removed + until: removed is not failed + ignore_errors: true + retries: 10 +- name: remove internet gateway + ec2_vpc_igw: + vpc_id: '{{ testing_vpc.vpc.id }}' + state: absent + register: removed + until: removed is not failed + ignore_errors: true + retries: 10 +- name: remove the subnet + ec2_vpc_subnet: + state: absent + vpc_id: '{{ testing_vpc.vpc.id }}' + cidr: 10.55.77.0/24 + register: removed + until: removed is not failed + ignore_errors: true + retries: 10 +- name: remove the VPC + ec2_vpc_net: + name: '{{ resource_prefix }}-vpc' + cidr_block: 10.55.77.0/24 + state: absent + register: removed + until: removed is not failed + ignore_errors: true + retries: 10 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/env_setup.yml b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/env_setup.yml new file mode 100644 index 000000000..2bff18c5f --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/env_setup.yml @@ -0,0 +1,51 @@ +- name: Run ec2_asg integration tests. 
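+  # (run_once: the VPC, subnet, routing and security group created below are
+  # shared by all parallel test hosts, so a single host prepares them)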
+ run_once: '{{ ec2_asg_setup_run_once }}' + block: + + # Set up the testing dependencies: VPC, subnet, security group, and two launch configurations + - name: Create VPC for use in testing + ec2_vpc_net: + name: '{{ resource_prefix }}-vpc' + cidr_block: 10.55.77.0/24 + tenancy: default + register: testing_vpc + - name: Create internet gateway for use in testing + ec2_vpc_igw: + vpc_id: '{{ testing_vpc.vpc.id }}' + state: present + register: igw + - name: Create subnet for use in testing + ec2_vpc_subnet: + state: present + vpc_id: '{{ testing_vpc.vpc.id }}' + cidr: 10.55.77.0/24 + az: '{{ aws_region }}a' + resource_tags: + Name: '{{ resource_prefix }}-subnet' + register: testing_subnet + - name: create routing rules + ec2_vpc_route_table: + vpc_id: '{{ testing_vpc.vpc.id }}' + tags: + created: '{{ resource_prefix }}-route' + routes: + - dest: 0.0.0.0/0 + gateway_id: '{{ igw.gateway_id }}' + subnets: + - '{{ testing_subnet.subnet.id }}' + + - name: create a security group with the vpc created in the ec2_setup + ec2_group: + name: '{{ resource_prefix }}-sg' + description: a security group for ansible tests + vpc_id: '{{ testing_vpc.vpc.id }}' + rules: + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: 0.0.0.0/0 + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: 0.0.0.0/0 + register: sg diff --git a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/instance_detach.yml b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/instance_detach.yml new file mode 100644 index 000000000..a938ce5b0 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/instance_detach.yml @@ -0,0 +1,256 @@ +- name: Running instance detach tests + block: + #---------------------------------------------------------------------- + - name: create a launch configuration + ec2_lc: + name: '{{ resource_prefix }}-lc-detach-test' + image_id: '{{ ec2_ami_id }}' + region: '{{ aws_region }}' + instance_type: t2.micro + assign_public_ip: yes + register: create_lc + - name: ensure that lc is created + assert: + that: + - create_lc is changed + - create_lc.failed is false + - '"autoscaling:CreateLaunchConfiguration" in create_lc.resource_actions' + + #---------------------------------------------------------------------- + + - name: create a AutoScalingGroup to be used for instance_detach test - check_mode + ec2_asg: + name: '{{ resource_prefix }}-asg-detach-test' + launch_config_name: '{{ resource_prefix }}-lc-detach-test' + health_check_period: 60 + health_check_type: ELB + replace_all_instances: yes + min_size: 3 + max_size: 6 + desired_capacity: 3 + region: '{{ aws_region }}' + register: create_asg + check_mode: true + - assert: + that: + - create_asg is changed + - create_asg is not failed + - '"autoscaling:CreateAutoScalingGroup" not in create_asg.resource_actions' + + - name: create a AutoScalingGroup to be used for instance_detach test + ec2_asg: + name: '{{ resource_prefix }}-asg-detach-test' + launch_config_name: '{{ resource_prefix }}-lc-detach-test' + health_check_period: 60 + health_check_type: ELB + replace_all_instances: yes + min_size: 3 + max_size: 6 + desired_capacity: 3 + region: '{{ aws_region }}' + register: create_asg + - name: ensure that AutoScalingGroup is created + assert: + that: + - create_asg is changed + - create_asg.failed is false + - create_asg.instances | length == 3 + - create_asg.desired_capacity == 3 + - create_asg.in_service_instances == 3 + 
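+          # (resource_actions, returned by every module in this collection,
+          # records the AWS API calls a task made; asserting on it below
+          # verifies the create call really happened)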
- '"autoscaling:CreateAutoScalingGroup" in create_asg.resource_actions' + + - name: gather info about asg, get instance ids + ec2_asg_info: + name: '{{ resource_prefix }}-asg-detach-test' + register: asg_info + - set_fact: + init_instance_1: '{{ asg_info.results[0].instances[0].instance_id }}' + init_instance_2: '{{ asg_info.results[0].instances[1].instance_id }}' + init_instance_3: '{{ asg_info.results[0].instances[2].instance_id }}' + - name: Gather information about recently detached instances + amazon.aws.ec2_instance_info: + instance_ids: + - '{{ init_instance_1 }}' + - '{{ init_instance_2 }}' + - '{{ init_instance_3 }}' + register: instances_info + - assert: + that: + - asg_info.results[0].instances | length == 3 + - "'{{ instances_info.instances[0].state.name }}' == 'running'" + - "'{{ instances_info.instances[1].state.name }}' == 'running'" + - "'{{ instances_info.instances[2].state.name }}' == 'running'" + + #---------------------------------------------------------------------- + + - name: detach 2 instance from the asg and replace with other instances - check_mode + ec2_asg: + name: '{{ resource_prefix }}-asg-detach-test' + launch_config_name: '{{ resource_prefix }}-lc-detach-test' + health_check_period: 60 + health_check_type: ELB + min_size: 3 + max_size: 3 + desired_capacity: 3 + region: '{{ aws_region }}' + detach_instances: + - '{{ init_instance_1 }}' + - '{{ init_instance_2 }}' + register: detach_result + check_mode: true + - assert: + that: + - detach_result is changed + - detach_result is not failed + - '"autoscaling:DetachInstances" not in detach_result.resource_actions' + + - name: detach 2 instance from the asg and replace with other instances + ec2_asg: + name: '{{ resource_prefix }}-asg-detach-test' + launch_config_name: '{{ resource_prefix }}-lc-detach-test' + health_check_period: 60 + health_check_type: ELB + min_size: 3 + max_size: 3 + desired_capacity: 3 + region: '{{ aws_region }}' + detach_instances: + - '{{ init_instance_1 }}' + - '{{ init_instance_2 }}' + + # pause to allow completion of instance replacement + - name: Pause for 30 seconds + wait_for: + timeout: 30 + - ec2_asg_info: + name: '{{ resource_prefix }}-asg-detach-test' + register: asg_info_replaced + - set_fact: + instance_replace_1: '{{ asg_info_replaced.results[0].instances[0].instance_id + }}' + instance_replace_2: '{{ asg_info_replaced.results[0].instances[1].instance_id + }}' + instance_replace_3: '{{ asg_info_replaced.results[0].instances[2].instance_id + }}' + - set_fact: + asg_instance_detach_replace: "{{ asg_info_replaced.results[0].instances | map(attribute='instance_id')\ + \ | list }}" + - name: Gather information about recently detached instances + amazon.aws.ec2_instance_info: + instance_ids: + - '{{ init_instance_1 }}' + - '{{ init_instance_2 }}' + register: detached_instances_info + - assert: + that: + - asg_info_replaced.results[0].desired_capacity == 3 + - asg_info_replaced.results[0].instances | length == 3 + - "'{{ init_instance_1 }}' not in {{ asg_instance_detach_replace }}" + - "'{{ init_instance_2 }}' not in {{ asg_instance_detach_replace }}" + - "'{{ detached_instances_info.instances[0].state.name }}' == 'running'" + - "'{{ detached_instances_info.instances[1].state.name }}' == 'running'" + + #---------------------------------------------------------------------- + + # detach 2 instances from the asg and reduce the desired capacity from 3 to 1 + - name: detach 2 instance from the asg and reduce the desired capacity from 3 to + 1 + ec2_asg: + name: '{{ resource_prefix 
}}-asg-detach-test' + launch_config_name: '{{ resource_prefix }}-lc-detach-test' + health_check_period: 60 + health_check_type: ELB + min_size: 1 + max_size: 5 + desired_capacity: 3 + region: '{{ aws_region }}' + decrement_desired_capacity: true + detach_instances: + - '{{ instance_replace_1 }}' + - '{{ instance_replace_2 }}' + + - name: Pause for 30 seconds to allow completion of above task + wait_for: + timeout: 30 + - ec2_asg_info: + name: '{{ resource_prefix }}-asg-detach-test' + register: asg_info_decrement + - set_fact: + instance_detach_decrement: '{{ asg_info_decrement.results[0].instances[0].instance_id + }}' + - set_fact: + asg_instance_detach_decrement: "{{ asg_info_decrement.results[0].instances |\ + \ map(attribute='instance_id') | list }}" + - name: Gather information about recently detached instances + amazon.aws.ec2_instance_info: + instance_ids: + - '{{ instance_replace_1 }}' + - '{{ instance_replace_2 }}' + register: detached_instances_info + - assert: + that: + - asg_info_decrement.results[0].instances | length == 1 + - asg_info_decrement.results[0].desired_capacity == 1 + - "'{{ instance_replace_1 }}' not in {{ asg_instance_detach_decrement }}" + - "'{{ instance_replace_2 }}' not in {{ asg_instance_detach_decrement }}" + - "'{{ detached_instances_info.instances[0].state.name }}' == 'running'" + - "'{{ detached_instances_info.instances[1].state.name }}' == 'running'" + - "'{{ instance_replace_3 }}' == '{{ instance_detach_decrement }}'" + + #---------------------------------------------------------------------- + + always: + + - name: terminate any instances created during this test + amazon.aws.ec2_instance: + instance_ids: + - '{{ item }}' + state: absent + loop: + - '{{ init_instance_1 }}' + - '{{ init_instance_2 }}' + - '{{ init_instance_3 }}' + - '{{ instance_replace_1 }}' + - '{{ instance_replace_2 }}' + - '{{ instance_replace_3 }}' + + - name: kill asg created in this test - check_mode + ec2_asg: + name: '{{ resource_prefix }}-asg-detach-test' + state: absent + register: removed + check_mode: true + - assert: + that: + - removed is changed + - removed is not failed + - '"autoscaling:DeleteAutoScalingGroup" not in removed.resource_actions' + + - name: kill asg created in this test + ec2_asg: + name: '{{ resource_prefix }}-asg-detach-test' + state: absent + register: removed + until: removed is not failed + ignore_errors: yes + retries: 10 + - name: kill asg created in this test - check_mode (idempotent) + ec2_asg: + name: '{{ resource_prefix }}-asg-detach-test' + state: absent + register: removed + check_mode: true + - assert: + that: + - removed is not changed + - removed is not failed + - '"autoscaling:DeleteAutoScalingGroup" not in removed.resource_actions' + + - name: remove launch config created in this test + ec2_lc: + name: '{{ resource_prefix }}-lc-detach-test' + state: absent + register: removed + until: removed is not failed + ignore_errors: yes + retries: 10 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/main.yml new file mode 100644 index 000000000..70e23a642 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/main.yml @@ -0,0 +1,40 @@ +# Beware: most of our tests here are run in parallel. 
+# To add new tests you'll need to add a new host to the inventory and a matching
+# '{{ inventory_hostname }}'.yml file in roles/ec2_asg/tasks/
+
+- name: Wrap up all tests and set up AWS credentials
+  module_defaults:
+    group/aws:
+      aws_access_key: '{{ aws_access_key }}'
+      aws_secret_key: '{{ aws_secret_key }}'
+      security_token: '{{ security_token | default(omit) }}'
+      region: '{{ aws_region }}'
+      aws_config:
+        retries:
+          # Unfortunately AWSRetry doesn't support paginators and boto3's paginators
+          # don't support any configuration of the delay between retries.
+          max_attempts: 20
+  collections:
+    - community.aws
+  block:
+    - debug:
+        msg: "{{ inventory_hostname }} start: {{ lookup('pipe','date') }}"
+    - include_tasks: '{{ inventory_hostname }}.yml'
+    - debug:
+        msg: "{{ inventory_hostname }} finish: {{ lookup('pipe','date') }}"
+  always:
+    - set_fact:
+        _role_complete: true
+    - vars:
+        completed_hosts: '{{ ansible_play_hosts_all | map("extract", hostvars, "_role_complete") | list | select("defined") | list | length }}'
+        hosts_in_play: '{{ ansible_play_hosts_all | length }}'
+      debug:
+        msg: '{{ completed_hosts }} of {{ hosts_in_play }} complete'
+    - include_tasks: env_cleanup.yml
+      vars:
+        completed_hosts: '{{ ansible_play_hosts_all | map("extract", hostvars, "_role_complete") | list | select("defined") | list | length }}'
+        hosts_in_play: '{{ ansible_play_hosts_all | length }}'
+      when:
+        - completed_hosts == hosts_in_play
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/tag_operations.yml b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/tag_operations.yml
new file mode 100644
index 000000000..4f62faa31
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/tag_operations.yml
@@ -0,0 +1,339 @@
+- name: Running AutoScalingGroup Tag operations test
+  block:
+    #----------------------------------------------------------------------
+    - name: create a launch configuration
+      ec2_lc:
+        name: '{{ resource_prefix }}-lc-tag-test'
+        image_id: '{{ ec2_ami_id }}'
+        region: '{{ aws_region }}'
+        instance_type: t2.micro
+        assign_public_ip: yes
+      register: create_lc
+    - name: ensure that lc is created
+      assert:
+        that:
+          - create_lc is changed
+          - create_lc.failed is false
+          - '"autoscaling:CreateLaunchConfiguration" in create_lc.resource_actions'
+
+    #----------------------------------------------------------------------
+    - name: create an AutoScalingGroup to be used for tag_operations test
+      ec2_asg:
+        name: '{{ resource_prefix }}-asg-tag-test'
+        launch_config_name: '{{ resource_prefix }}-lc-tag-test'
+        health_check_period: 60
+        health_check_type: ELB
+        replace_all_instances: yes
+        min_size: 1
+        max_size: 1
+        desired_capacity: 1
+        region: '{{ aws_region }}'
+      register: create_asg
+    - name: ensure that AutoScalingGroup is created
+      assert:
+        that:
+          - create_asg is changed
+          - create_asg.failed is false
+          - '"autoscaling:CreateAutoScalingGroup" in create_asg.resource_actions'
+
+    #----------------------------------------------------------------------
+
+    - name: Get asg info
+      ec2_asg_info:
+        name: '{{ resource_prefix }}-asg-tag-test'
+      register: info_result
+    - assert:
+        that:
+          - info_result.results[0].tags | length == 0
+
+    - name: Tag asg - check_mode
+      ec2_asg:
+        name: '{{ resource_prefix }}-asg-tag-test'
+        tags:
+          - tag_a: value 1
+            propagate_at_launch: no
+          - tag_b: value 2
+            propagate_at_launch: yes
+      register: output
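+      # (each tags entry is a single key/value pair plus propagate_at_launch,
+      # which controls whether instances launched into the group get the tag)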
check_mode: true + - assert: + that: + - output is changed + - output is not failed + - '"autoscaling:CreateOrUpdateTags" not in output.resource_actions' + + - name: Tag asg + ec2_asg: + name: '{{ resource_prefix }}-asg-tag-test' + tags: + - tag_a: value 1 + propagate_at_launch: no + - tag_b: value 2 + propagate_at_launch: yes + register: output + - assert: + that: + - output.tags | length == 2 + - output is changed + + - name: Re-Tag asg (different order) + ec2_asg: + name: '{{ resource_prefix }}-asg-tag-test' + tags: + - tag_b: value 2 + propagate_at_launch: yes + - tag_a: value 1 + propagate_at_launch: no + register: output + - assert: + that: + - output.tags | length == 2 + - output is not changed + + - name: Re-Tag asg new tags + ec2_asg: + name: '{{ resource_prefix }}-asg-tag-test' + tags: + - tag_c: value 3 + propagate_at_launch: no + purge_tags: true + register: output + - assert: + that: + - output.tags | length == 1 + - output is changed + + - name: Re-Tag asg update propagate_at_launch + ec2_asg: + name: '{{ resource_prefix }}-asg-tag-test' + tags: + - tag_c: value 3 + propagate_at_launch: yes + register: output + - assert: + that: + - output.tags | length == 1 + - output is changed + + - name: Remove all tags + ec2_asg: + name: '{{ resource_prefix }}-asg-tag-test' + tags: [] + purge_tags: true + register: add_empty + - name: Get asg info + ec2_asg_info: + name: '{{ resource_prefix }}-asg-tag-test' + register: info_result + - set_fact: + tag_keys: "{{ info_result.results[0].tags | map(attribute='key') | list }}" + - assert: + that: + - add_empty is changed + - info_result.results[0].tags | length == 0 + - '"autoscaling:CreateOrUpdateTags" not in add_empty.resource_actions' + - '"autoscaling:DeleteTags" in add_empty.resource_actions' + + - name: Add 4 new tags - do not purge existing tags + ec2_asg: + name: '{{ resource_prefix }}-asg-tag-test' + tags: + - lowercase spaced: hello cruel world + propagate_at_launch: no + - Title Case: Hello Cruel World + propagate_at_launch: yes + - CamelCase: SimpleCamelCase + propagate_at_launch: yes + - snake_case: simple_snake_case + propagate_at_launch: no + register: add_result + - name: Get asg info + ec2_asg_info: + name: '{{ resource_prefix }}-asg-tag-test' + register: info_result + - set_fact: + tag_keys: "{{ info_result.results[0].tags | map(attribute='key') | list }}" + - assert: + that: + - add_result is changed + - info_result.results[0].tags | length == 4 + - '"lowercase spaced" in tag_keys' + - '"Title Case" in tag_keys' + - '"CamelCase" in tag_keys' + - '"snake_case" in tag_keys' + - '"autoscaling:CreateOrUpdateTags" in add_result.resource_actions' + + - name: Add 4 new tags - do not purge existing tags - idempotency + ec2_asg: + name: '{{ resource_prefix }}-asg-tag-test' + tags: + - lowercase spaced: hello cruel world + propagate_at_launch: no + - Title Case: Hello Cruel World + propagate_at_launch: yes + - CamelCase: SimpleCamelCase + propagate_at_launch: yes + - snake_case: simple_snake_case + propagate_at_launch: no + register: add_result + - name: Get asg info + ec2_asg_info: + name: '{{ resource_prefix }}-asg-tag-test' + register: info_result + - assert: + that: + - add_result is not changed + - info_result.results[0].tags | length == 4 + - '"autoscaling:CreateOrUpdateTags" not in add_result.resource_actions' + + - name: Add 2 new tags - purge existing tags + ec2_asg: + name: '{{ resource_prefix }}-asg-tag-test' + tags: + - tag_a: val_a + propagate_at_launch: no + - tag_b: val_b + propagate_at_launch: yes + purge_tags: true + 
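+      # (purge_tags: true deletes every existing tag not named above, so the
+      # four keys added earlier are replaced by tag_a and tag_b)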
register: add_purge_result + - name: Get asg info + ec2_asg_info: + name: '{{ resource_prefix }}-asg-tag-test' + register: info_result + - set_fact: + tag_keys: "{{ info_result.results[0].tags | map(attribute='key') | list }}" + - assert: + that: + - add_purge_result is changed + - info_result.results[0].tags | length == 2 + - '"tag_a" in tag_keys' + - '"tag_b" in tag_keys' + - '"lowercase spaced" not in tag_keys' + - '"Title Case" not in tag_keys' + - '"CamelCase" not in tag_keys' + - '"snake_case" not in tag_keys' + - '"autoscaling:CreateOrUpdateTags" in add_purge_result.resource_actions' + + - name: Re-tag ASG - modify values + ec2_asg: + name: '{{ resource_prefix }}-asg-tag-test' + tags: + - tag_a: new_val_a + propagate_at_launch: no + - tag_b: new_val_b + propagate_at_launch: yes + register: add_purge_result + - name: Get asg info + ec2_asg_info: + name: '{{ resource_prefix }}-asg-tag-test' + register: info_result + - set_fact: + tag_keys: "{{ info_result.results[0].tags | map(attribute='key') | list }}" + - set_fact: + tag_values: "{{ info_result.results[0].tags | map(attribute='value') | list\ + \ }}" + - assert: + that: + - add_purge_result is changed + - info_result.results[0].tags | length == 2 + - '"tag_a" in tag_keys' + - '"tag_b" in tag_keys' + - '"new_val_a" in tag_values' + - '"new_val_b" in tag_values' + - '"lowercase spaced" not in tag_keys' + - '"Title Case" not in tag_keys' + - '"CamelCase" not in tag_keys' + - '"snake_case" not in tag_keys' + - '"autoscaling:CreateOrUpdateTags" in add_purge_result.resource_actions' + + - name: Add 2 more tags - do not purge existing tags + ec2_asg: + name: '{{ resource_prefix }}-asg-tag-test' + tags: + - lowercase spaced: hello cruel world + propagate_at_launch: no + - Title Case: Hello Cruel World + propagate_at_launch: yes + register: add_result + - name: Get asg info + ec2_asg_info: + name: '{{ resource_prefix }}-asg-tag-test' + register: info_result + - set_fact: + tag_keys: "{{ info_result.results[0].tags | map(attribute='key') | list }}" + - assert: + that: + - add_result is changed + - info_result.results[0].tags | length == 4 + - '"tag_a" in tag_keys' + - '"tag_b" in tag_keys' + - '"lowercase spaced" in tag_keys' + - '"Title Case" in tag_keys' + - '"autoscaling:CreateOrUpdateTags" in add_result.resource_actions' + + - name: Add empty tags with purge set to false to assert that existing tags are + retained + ec2_asg: + name: '{{ resource_prefix }}-asg-tag-test' + tags: [] + purge_tags: false + register: add_empty + - name: Get asg info + ec2_asg_info: + name: '{{ resource_prefix }}-asg-tag-test' + register: info_result + - set_fact: + tag_keys: "{{ info_result.results[0].tags | map(attribute='key') | list }}" + - assert: + that: + - add_empty is not changed + - info_result.results[0].tags | length == 4 + - '"tag_a" in tag_keys' + - '"tag_b" in tag_keys' + - '"lowercase spaced" in tag_keys' + - '"Title Case" in tag_keys' + - '"autoscaling:CreateOrUpdateTags" not in add_empty.resource_actions' + + - name: Add empty tags with purge set to true to assert that existing tags are removed + ec2_asg: + name: '{{ resource_prefix }}-asg-tag-test' + tags: [] + purge_tags: true + register: add_empty + - name: Get asg info + ec2_asg_info: + name: '{{ resource_prefix }}-asg-tag-test' + register: info_result + - set_fact: + tag_keys: "{{ info_result.results[0].tags | map(attribute='key') | list }}" + - assert: + that: + - add_empty is changed + - info_result.results[0].tags | length == 0 + - '"tag_a" not in tag_keys' + - '"tag_b" not in 
tag_keys' + - '"lowercase spaced" not in tag_keys' + - '"Title Case" not in tag_keys' + - '"autoscaling:CreateOrUpdateTags" not in add_empty.resource_actions' + - '"autoscaling:DeleteTags" in add_empty.resource_actions' + + #---------------------------------------------------------------------- + + always: + + - name: kill asg created in this test + ec2_asg: + name: '{{ resource_prefix }}-asg-tag-test' + state: absent + register: removed + until: removed is not failed + ignore_errors: yes + retries: 10 + - name: remove launch config created in this test + ec2_lc: + name: '{{ resource_prefix }}-lc-tag-test' + state: absent + register: removed + until: removed is not failed + ignore_errors: yes + retries: 10 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/runme.sh b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/runme.sh new file mode 100755 index 000000000..aa324772b --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/runme.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash +# +# Beware: most of our tests here are run in parallel. +# To add new tests you'll need to add a new host to the inventory and a matching +# '{{ inventory_hostname }}'.yml file in roles/ec2_instance/tasks/ + + +set -eux + +export ANSIBLE_ROLES_PATH=../ + +ansible-playbook main.yml -i inventory "$@" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/aliases b/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/aliases new file mode 100644 index 000000000..4ef4b2067 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/aliases @@ -0,0 +1 @@ +cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/main.yml new file mode 100644 index 000000000..2fe745f07 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/main.yml @@ -0,0 +1,5 @@ +- hosts: localhost + connection: local + environment: "{{ ansible_test.environment }}" + tasks: + - include_tasks: 'tasks/main.yml' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/meta/main.yml new file mode 100644 index 000000000..32cf5dda7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/tasks/main.yml new file mode 100644 index 000000000..0787ea121 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/tasks/main.yml @@ -0,0 +1,193 @@ +--- +- module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key | default(omit) }}' + aws_secret_key: '{{ aws_secret_key | default(omit) }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region | default(omit) }}' + + block: + - name: 'List available AZs in current Region' + aws_az_info: + register: region_azs + + - name: check task return attributes + vars: + first_az: '{{ region_azs.availability_zones[0] }}' + assert: + that: + - region_azs is successful + - '"availability_zones" in region_azs' + - '"group_name" in first_az' + - '"messages" in first_az' + - '"network_border_group" in 
first_az' + - '"opt_in_status" in first_az' + - '"region_name" in first_az' + - '"state" in first_az' + - '"zone_id" in first_az' + - '"zone_name" in first_az' + # botocore >= 1.17.18 + #- '"zone_type" in first_az' + + - name: 'List available AZs in current Region - check_mode' + aws_az_info: + check_mode: yes + register: check_azs + + - name: check task return attributes + vars: + first_az: '{{ check_azs.availability_zones[0] }}' + assert: + that: + - check_azs is successful + - '"availability_zones" in check_azs' + - '"group_name" in first_az' + - '"messages" in first_az' + - '"network_border_group" in first_az' + - '"opt_in_status" in first_az' + - '"region_name" in first_az' + - '"state" in first_az' + - '"zone_id" in first_az' + - '"zone_name" in first_az' + # botocore >= 1.17.18 + #- '"zone_type" in first_az' + + + # Be specific - aws_region isn't guaranteed to be any specific value + - name: 'List Available AZs in us-east-1' + aws_az_info: + region: 'us-east-1' + register: us_east_1 + + - name: 'Check that an AZ from us-east-1 has valid looking attributes' + vars: + first_az: '{{ us_east_1.availability_zones[0] }}' + assert: + that: + - us_east_1 is successful + - '"availability_zones" in us_east_1' + - '"group_name" in first_az' + - '"messages" in first_az' + - '"network_border_group" in first_az' + - '"opt_in_status" in first_az' + - '"region_name" in first_az' + - '"state" in first_az' + - '"zone_id" in first_az' + - '"zone_name" in first_az' + # botocore >= 1.17.18 + #- '"zone_type" in first_az' + - first_az.group_name.startswith('us-east-1') + - first_az.network_border_group.startswith('us-east-1') + - first_az.region_name == 'us-east-1' + - first_az.zone_id.startswith('use1-az') + - not first_az.zone_id == "use1-az" + - first_az.zone_name.startswith('us-east-1') + - not first_az.zone_name == 'us-east-1' + # botocore >= 1.17.18 + #- first_az.zone_type == 'availability-zone' + + - name: 'Filter Available AZs in us-west-2 using - ("zone-name")' + aws_az_info: + region: 'us-west-2' + filters: + zone-name: 'us-west-2c' + register: us_west_2 + + - name: 'Check that an AZ from us-west-2 has attributes we expect' + vars: + first_az: '{{ us_west_2.availability_zones[0] }}' + assert: + that: + - us_west_2 is successful + - '"availability_zones" in us_west_2' + - us_west_2.availability_zones | length == 1 + - '"group_name" in first_az' + - '"messages" in first_az' + - '"network_border_group" in first_az' + - '"opt_in_status" in first_az' + - '"region_name" in first_az' + - '"state" in first_az' + - '"zone_id" in first_az' + - '"zone_name" in first_az' + # botocore >= 1.17.18 + #- '"zone_type" in first_az' + - first_az.group_name == 'us-west-2' + - first_az.network_border_group == 'us-west-2' + - first_az.region_name == 'us-west-2' + # AZs are mapped to the 'real' AZs on a per-account basis + - first_az.zone_id.startswith('usw2-az') + - not first_az.zone_id == 'usw2-az' + - first_az.zone_name == 'us-west-2c' + # botocore >= 1.17.18 + #- first_az.zone_type == 'availability-zone' + + - name: 'Filter Available AZs in eu-central-1 using _ ("zone_name")' + aws_az_info: + region: 'eu-central-1' + filters: + zone_name: 'eu-central-1b' + register: eu_central_1 + + - name: 'Check that eu-central-1b has the attributes we expect' + vars: + first_az: '{{ eu_central_1.availability_zones[0] }}' + assert: + that: + - eu_central_1 is successful + - '"availability_zones" in eu_central_1' + - eu_central_1.availability_zones | length == 1 + - '"group_name" in first_az' + - '"messages" in first_az' + - 
'"network_border_group" in first_az' + - '"opt_in_status" in first_az' + - '"region_name" in first_az' + - '"state" in first_az' + - '"zone_id" in first_az' + - '"zone_name" in first_az' + # botocore >= 1.17.18 + #- '"zone_type" in first_az' + - first_az.group_name == 'eu-central-1' + - first_az.network_border_group == 'eu-central-1' + - first_az.region_name == 'eu-central-1' + # AZs are mapped to the 'real' AZs on a per-account basis + - first_az.zone_id.startswith('euc1-az') + - not first_az.zone_id == "euc1-az" + - first_az.zone_name == 'eu-central-1b' + # botocore >= 1.17.18 + #- first_az.zone_type == 'availability-zone' + + - name: 'Filter Available AZs in eu-west-2 using _ and - ("zone_name" and "zone-name") : _ wins ' + aws_az_info: + region: 'eu-west-2' + filters: + zone-name: 'eu-west-2a' + zone_name: 'eu-west-2c' + register: eu_west_2 + + - name: 'Check that we get the AZ specified by zone_name rather than zone-name' + vars: + first_az: '{{ eu_west_2.availability_zones[0] }}' + assert: + that: + - eu_west_2 is successful + - '"availability_zones" in eu_west_2' + - eu_west_2.availability_zones | length == 1 + - '"group_name" in first_az' + - '"messages" in first_az' + - '"network_border_group" in first_az' + - '"opt_in_status" in first_az' + - '"region_name" in first_az' + - '"state" in first_az' + - '"zone_id" in first_az' + - '"zone_name" in first_az' + # botocore >= 1.17.18 + #- '"zone_type" in first_az' + - first_az.group_name == 'eu-west-2' + - first_az.network_border_group == 'eu-west-2' + - first_az.region_name == 'eu-west-2' + # AZs are mapped to the 'real' AZs on a per-account basis + - first_az.zone_id.startswith('euw2-az') + - not first_az.zone_id == "euw2-az" + - first_az.zone_name == 'eu-west-2c' + # botocore >= 1.17.18 + #- first_az.zone_type == 'availability-zone' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/aliases b/ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/aliases new file mode 100644 index 000000000..4ef4b2067 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/aliases @@ -0,0 +1 @@ +cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/meta/main.yml new file mode 100644 index 000000000..32cf5dda7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/tasks/main.yaml b/ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/tasks/main.yaml new file mode 100644 index 000000000..c40d0f11b --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/tasks/main.yaml @@ -0,0 +1,18 @@ +- module_defaults: + group/aws: + region: "{{ aws_region }}" + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + block: + - name: retrieve caller facts + aws_caller_info: + register: result + + - name: assert correct keys are returned + assert: + that: + - result.account is not none + - result.arn is not none + - result.user_id is not none + - result.account_alias is not none diff --git a/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/aliases 
b/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/aliases new file mode 100644 index 000000000..4ef4b2067 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/aliases @@ -0,0 +1 @@ +cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/inventory b/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/inventory new file mode 100644 index 000000000..5093e8582 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/inventory @@ -0,0 +1,6 @@ +[tests] +localhost + +[all:vars] +ansible_connection=local +ansible_python_interpreter="{{ ansible_playbook_python }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/main.yml new file mode 100644 index 000000000..b3c3fa155 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/main.yml @@ -0,0 +1,35 @@ +- hosts: localhost + gather_facts: no + collections: + - amazon.aws + module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + tasks: + - ec2_instance_info: + register: ec2_info + + - assert: + that: + - '"resource_actions" in ec2_info' + - '"ec2:DescribeInstances" in ec2_info.resource_actions' + + - aws_az_info: + register: az_info + + - assert: + that: + - '"resource_actions" in az_info' + - '"ec2:DescribeAvailabilityZones" in az_info.resource_actions' + + - aws_caller_info: + register: caller_info + + - assert: + that: + - '"resource_actions" in caller_info' + - '"sts:GetCallerIdentity" in caller_info.resource_actions' + - '"iam:ListAccountAliases" in caller_info.resource_actions' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/meta/main.yml new file mode 100644 index 000000000..32cf5dda7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/runme.sh b/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/runme.sh new file mode 100755 index 000000000..a2c41e1f5 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/runme.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +set -eux + +export ANSIBLE_CALLBACK_WHITELIST="aws_resource_actions" + +OUTFILE="callback_aws_resource_actions.out" +trap 'rm -rvf "${OUTFILE}" "${OUTFILE}.actions"' EXIT + +# Tests that the resource_actions are added to each task +ansible-playbook main.yml -i localhost "$@" | tee "${OUTFILE}" + +# There should be a summary at the end of the run with the actions performed: +# AWS ACTIONS: ['ec2:DescribeAvailabilityZones', 'ec2:DescribeInstances', 'iam:ListAccountAliases', 'sts:GetCallerIdentity'] +grep -E "AWS ACTIONS: \[" "${OUTFILE}" > "${OUTFILE}.actions" +for action in 'ec2:DescribeAvailabilityZones' 'ec2:DescribeInstances' 'iam:ListAccountAliases' 'sts:GetCallerIdentity' +do + grep 
"${action}" "${OUTFILE}.actions" +done diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/aliases b/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/aliases new file mode 100644 index 000000000..d393681d6 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/aliases @@ -0,0 +1,2 @@ +cloud/aws +cloudformation_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/defaults/main.yml new file mode 100644 index 000000000..2f2a70c55 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/defaults/main.yml @@ -0,0 +1,8 @@ +stack_name: "{{ resource_prefix }}" + +availability_zone: '{{ ec2_availability_zone_names[0] }}' + +vpc_name: '{{ resource_prefix }}-vpc' +vpc_seed: '{{ resource_prefix }}' +vpc_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.0.0/16' +subnet_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.32.0/24' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/files/cf_template.json b/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/files/cf_template.json new file mode 100644 index 000000000..ff4c5693b --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/files/cf_template.json @@ -0,0 +1,37 @@ +{ + "AWSTemplateFormatVersion" : "2010-09-09", + + "Description" : "Create an Amazon EC2 instance.", + + "Parameters" : { + "InstanceType" : { + "Description" : "EC2 instance type", + "Type" : "String", + "Default" : "t3.nano", + "AllowedValues" : [ "t3.micro", "t3.nano"] + }, + "ImageId" : { + "Type" : "String" + }, + "SubnetId" : { + "Type" : "String" + } + }, + + "Resources" : { + "EC2Instance" : { + "Type" : "AWS::EC2::Instance", + "Properties" : { + "InstanceType" : { "Ref" : "InstanceType" }, + "ImageId" : { "Ref" : "ImageId" }, + "SubnetId": { "Ref" : "SubnetId" } + } + } + }, + + "Outputs" : { + "InstanceId" : { + "Value" : { "Ref" : "EC2Instance" } + } + } +} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/files/update_policy.json b/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/files/update_policy.json new file mode 100644 index 000000000..6a3513825 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/files/update_policy.json @@ -0,0 +1,10 @@ +{ + "Statement" : [ + { + "Effect" : "Allow", + "Action" : "Update:*", + "Principal": "*", + "Resource" : "*" + } + ] +} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/meta/main.yml new file mode 100644 index 000000000..2bff8543a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: +- role: setup_ec2_facts diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/tasks/main.yml new file mode 100644 index 000000000..b9f174137 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/tasks/main.yml @@ -0,0 +1,491 @@ +--- +- module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key | default(omit) }}' + aws_secret_key: '{{ aws_secret_key | default(omit) 
}}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region | default(omit) }}' + + block: + + # ==== Env setup ========================================================== + + - name: Create a test VPC + ec2_vpc_net: + name: "{{ vpc_name }}" + cidr_block: "{{ vpc_cidr }}" + tags: + Name: Cloudformation testing + register: testing_vpc + + - name: Create a test subnet + ec2_vpc_subnet: + vpc_id: "{{ testing_vpc.vpc.id }}" + cidr: "{{ subnet_cidr }}" + az: "{{ availability_zone }}" + register: testing_subnet + + # ==== Cloudformation tests =============================================== + + # 1. Basic stack creation (check mode, actual run and idempotency) + # 2. Tags + # 3. cloudformation_info tests (basic + all_facts) + # 4. termination_protection + # 5. create_changeset + changeset_name + + # There is still scope to add tests for - + # 1. capabilities + # 2. stack_policy + # 3. on_create_failure (covered in unit tests) + # 4. Passing in a role + # 5. nested stacks? + + + - name: create a cloudformation stack (check mode) + cloudformation: + stack_name: "{{ stack_name }}" + template_body: "{{ lookup('file','cf_template.json') }}" + template_parameters: + InstanceType: "t3.nano" + ImageId: "{{ ec2_ami_id }}" + SubnetId: "{{ testing_subnet.subnet.id }}" + tags: + Stack: "{{ stack_name }}" + test: "{{ resource_prefix }}" + register: cf_stack + check_mode: yes + + - name: check task return attributes + assert: + that: + - cf_stack.changed + - "'msg' in cf_stack and 'New stack would be created' in cf_stack.msg" + + - name: create a cloudformation stack + cloudformation: + stack_name: "{{ stack_name }}" + template_body: "{{ lookup('file','cf_template.json') }}" + template_parameters: + InstanceType: "t3.nano" + ImageId: "{{ ec2_ami_id }}" + SubnetId: "{{ testing_subnet.subnet.id }}" + tags: + Stack: "{{ stack_name }}" + test: "{{ resource_prefix }}" + register: cf_stack + + - name: check task return attributes + assert: + that: + - cf_stack.changed + - "'events' in cf_stack" + - "'output' in cf_stack and 'Stack CREATE complete' in cf_stack.output" + - "'stack_outputs' in cf_stack and 'InstanceId' in cf_stack.stack_outputs" + - "'stack_resources' in cf_stack" + + - name: create a cloudformation stack (check mode) (idempotent) + cloudformation: + stack_name: "{{ stack_name }}" + template_body: "{{ lookup('file','cf_template.json') }}" + template_parameters: + InstanceType: "t3.nano" + ImageId: "{{ ec2_ami_id }}" + SubnetId: "{{ testing_subnet.subnet.id }}" + tags: + Stack: "{{ stack_name }}" + test: "{{ resource_prefix }}" + register: cf_stack + check_mode: yes + + - name: check task return attributes + assert: + that: + - not cf_stack.changed + + - name: create a cloudformation stack (idempotent) + cloudformation: + stack_name: "{{ stack_name }}" + template_body: "{{ lookup('file','cf_template.json') }}" + template_parameters: + InstanceType: "t3.nano" + ImageId: "{{ ec2_ami_id }}" + SubnetId: "{{ testing_subnet.subnet.id }}" + tags: + Stack: "{{ stack_name }}" + test: "{{ resource_prefix }}" + register: cf_stack + + - name: check task return attributes + assert: + that: + - not cf_stack.changed + - "'output' in cf_stack and 'Stack is already up-to-date.' 
in cf_stack.output" + - "'stack_outputs' in cf_stack and 'InstanceId' in cf_stack.stack_outputs" + - "'stack_resources' in cf_stack" + + - name: get all stacks details + cloudformation_info: + register: all_stacks_info + + - name: assert all stacks info + assert: + that: + - all_stacks_info | length > 0 + + - name: get stack details + cloudformation_info: + stack_name: "{{ stack_name }}" + register: stack_info + + - name: assert stack info + assert: + that: + - "'cloudformation' in stack_info" + - "stack_info.cloudformation | length == 1" + - "stack_name in stack_info.cloudformation" + - "'stack_description' in stack_info.cloudformation[stack_name]" + - "'stack_outputs' in stack_info.cloudformation[stack_name]" + - "'stack_parameters' in stack_info.cloudformation[stack_name]" + - "'stack_tags' in stack_info.cloudformation[stack_name]" + - "stack_info.cloudformation[stack_name].stack_tags.Stack == stack_name" + + - name: get stack details (checkmode) + cloudformation_info: + stack_name: "{{ stack_name }}" + register: stack_info + check_mode: yes + + - name: assert stack info + assert: + that: + - "'cloudformation' in stack_info" + - "stack_info.cloudformation | length == 1" + - "stack_name in stack_info.cloudformation" + - "'stack_description' in stack_info.cloudformation[stack_name]" + - "'stack_outputs' in stack_info.cloudformation[stack_name]" + - "'stack_parameters' in stack_info.cloudformation[stack_name]" + - "'stack_tags' in stack_info.cloudformation[stack_name]" + - "stack_info.cloudformation[stack_name].stack_tags.Stack == stack_name" + + - name: get stack details (all_facts) + cloudformation_info: + stack_name: "{{ stack_name }}" + all_facts: yes + register: stack_info + + - name: assert stack info + assert: + that: + - "'stack_events' in stack_info.cloudformation[stack_name]" + - "'stack_policy' in stack_info.cloudformation[stack_name]" + - "'stack_resource_list' in stack_info.cloudformation[stack_name]" + - "'stack_resources' in stack_info.cloudformation[stack_name]" + - "'stack_template' in stack_info.cloudformation[stack_name]" + + - name: get stack details (all_facts) (checkmode) + cloudformation_info: + stack_name: "{{ stack_name }}" + all_facts: yes + register: stack_info + check_mode: yes + + - name: assert stack info + assert: + that: + - "'stack_events' in stack_info.cloudformation[stack_name]" + - "'stack_policy' in stack_info.cloudformation[stack_name]" + - "'stack_resource_list' in stack_info.cloudformation[stack_name]" + - "'stack_resources' in stack_info.cloudformation[stack_name]" + - "'stack_template' in stack_info.cloudformation[stack_name]" + + # ==== Cloudformation tests (create changeset) ============================ + + # try to create a changeset by changing instance type + - name: create a changeset + cloudformation: + stack_name: "{{ stack_name }}" + create_changeset: yes + changeset_name: "test-changeset" + template_body: "{{ lookup('file','cf_template.json') }}" + template_parameters: + InstanceType: "t3.micro" + ImageId: "{{ ec2_ami_id }}" + SubnetId: "{{ testing_subnet.subnet.id }}" + tags: + Stack: "{{ stack_name }}" + test: "{{ resource_prefix }}" + register: create_changeset_result + + - name: assert changeset created + assert: + that: + - "create_changeset_result.changed" + - "'change_set_id' in create_changeset_result" + - "'Stack CREATE_CHANGESET complete' in create_changeset_result.output" + + - name: get stack details with changesets + cloudformation_info: + stack_name: "{{ stack_name }}" + stack_change_sets: True + register: stack_info + + - 
name: assert changesets in info + assert: + that: + - "'stack_change_sets' in stack_info.cloudformation[stack_name]" + + - name: get stack details with changesets (checkmode) + cloudformation_info: + stack_name: "{{ stack_name }}" + stack_change_sets: True + register: stack_info + check_mode: yes + + - name: assert changesets in info + assert: + that: + - "'stack_change_sets' in stack_info.cloudformation[stack_name]" + + # try to create an empty changeset by passing in unchanged template + - name: create a changeset + cloudformation: + stack_name: "{{ stack_name }}" + create_changeset: yes + template_body: "{{ lookup('file','cf_template.json') }}" + template_parameters: + InstanceType: "t3.nano" + ImageId: "{{ ec2_ami_id }}" + SubnetId: "{{ testing_subnet.subnet.id }}" + tags: + Stack: "{{ stack_name }}" + test: "{{ resource_prefix }}" + register: create_changeset_result + + - name: assert changeset created + assert: + that: + - "not create_changeset_result.changed" + - "'The created Change Set did not contain any changes to this stack and was deleted.' in create_changeset_result.output" + + # ==== Cloudformation tests (termination_protection) ====================== + + - name: set termination protection to true + cloudformation: + stack_name: "{{ stack_name }}" + termination_protection: yes + template_body: "{{ lookup('file','cf_template.json') }}" + template_parameters: + InstanceType: "t3.nano" + ImageId: "{{ ec2_ami_id }}" + SubnetId: "{{ testing_subnet.subnet.id }}" + tags: + Stack: "{{ stack_name }}" + test: "{{ resource_prefix }}" + register: cf_stack + +# This fails - #65592 +# - name: check task return attributes +# assert: +# that: +# - cf_stack.changed + + - name: get stack details + cloudformation_info: + stack_name: "{{ stack_name }}" + register: stack_info + + - name: assert stack info + assert: + that: + - "stack_info.cloudformation[stack_name].stack_description.enable_termination_protection" + + - name: get stack details (checkmode) + cloudformation_info: + stack_name: "{{ stack_name }}" + register: stack_info + check_mode: yes + + - name: assert stack info + assert: + that: + - "stack_info.cloudformation[stack_name].stack_description.enable_termination_protection" + + - name: set termination protection to false + cloudformation: + stack_name: "{{ stack_name }}" + termination_protection: no + template_body: "{{ lookup('file','cf_template.json') }}" + template_parameters: + InstanceType: "t3.nano" + ImageId: "{{ ec2_ami_id }}" + SubnetId: "{{ testing_subnet.subnet.id }}" + tags: + Stack: "{{ stack_name }}" + test: "{{ resource_prefix }}" + register: cf_stack + +# This fails - #65592 +# - name: check task return attributes +# assert: +# that: +# - cf_stack.changed + + - name: get stack details + cloudformation_info: + stack_name: "{{ stack_name }}" + register: stack_info + + - name: assert stack info + assert: + that: + - "not stack_info.cloudformation[stack_name].stack_description.enable_termination_protection" + + - name: get stack details (checkmode) + cloudformation_info: + stack_name: "{{ stack_name }}" + register: stack_info + check_mode: yes + + - name: assert stack info + assert: + that: + - "not stack_info.cloudformation[stack_name].stack_description.enable_termination_protection" + + # ==== Cloudformation tests (update_policy) ====================== + + - name: set a stack policy with json body + cloudformation: + stack_name: "{{ stack_name }}" + stack_policy_body: "{{ lookup('file','update_policy.json') }}" + template_body: "{{ 
lookup('file','cf_template.json') }}" + template_parameters: + InstanceType: "t3.nano" + ImageId: "{{ ec2_ami_id }}" + SubnetId: "{{ testing_subnet.subnet.id }}" + tags: + Stack: "{{ stack_name }}" + test: "{{ resource_prefix }}" + register: cf_stack + + - name: get stack details + cloudformation_info: + stack_name: "{{ stack_name }}" + register: stack_info + + - name: set a stack policy on update + cloudformation: + stack_name: "{{ stack_name }}" + stack_policy_on_update_body: "{{ lookup('file','update_policy.json') }}" + template_body: "{{ lookup('file','cf_template.json') }}" + template_parameters: + InstanceType: "t3.nano" + ImageId: "{{ ec2_ami_id }}" + SubnetId: "{{ testing_subnet.subnet.id }}" + tags: + Stack: "{{ stack_name }}" + test: "{{ resource_prefix }}" + register: cf_stack + + - name: get stack details + cloudformation_info: + stack_name: "{{ stack_name }}" + register: stack_info + + # ==== Cloudformation tests (delete stack tests) ========================== + + - name: delete cloudformation stack (check mode) + cloudformation: + stack_name: "{{ stack_name }}" + state: absent + check_mode: yes + register: cf_stack + + - name: check task return attributes + assert: + that: + - cf_stack.changed + - "'msg' in cf_stack and 'Stack would be deleted' in cf_stack.msg" + + - name: delete cloudformation stack + cloudformation: + stack_name: "{{ stack_name }}" + state: absent + register: cf_stack + + - name: check task return attributes + assert: + that: + - cf_stack.changed + - "'output' in cf_stack and 'Stack Deleted' in cf_stack.output" + + - name: delete cloudformation stack (check mode) (idempotent) + cloudformation: + stack_name: "{{ stack_name }}" + state: absent + check_mode: yes + register: cf_stack + + - name: check task return attributes + assert: + that: + - not cf_stack.changed + - "'msg' in cf_stack" + - >- + "Stack doesn't exist" in cf_stack.msg + + - name: delete cloudformation stack (idempotent) + cloudformation: + stack_name: "{{ stack_name }}" + state: absent + register: cf_stack + + - name: check task return attributes + assert: + that: + - not cf_stack.changed + - "'output' in cf_stack and 'Stack not found.' 
in cf_stack.output" + + - name: get stack details + cloudformation_info: + stack_name: "{{ stack_name }}" + register: stack_info + + - name: assert stack info + assert: + that: + - "not stack_info.cloudformation" + + - name: get stack details (checkmode) + cloudformation_info: + stack_name: "{{ stack_name }}" + register: stack_info + check_mode: yes + + - name: assert stack info + assert: + that: + - "not stack_info.cloudformation" + + # ==== Cleanup ============================================================ + + always: + + - name: delete stack + cloudformation: + stack_name: "{{ stack_name }}" + state: absent + ignore_errors: yes + + - name: Delete test subnet + ec2_vpc_subnet: + vpc_id: "{{ testing_vpc.vpc.id }}" + cidr: "{{ subnet_cidr }}" + state: absent + ignore_errors: yes + + - name: Delete test VPC + ec2_vpc_net: + name: "{{ vpc_name }}" + cidr_block: "{{ vpc_cidr }}" + state: absent + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/aliases b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/aliases new file mode 100644 index 000000000..3cbc2a485 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/aliases @@ -0,0 +1,2 @@ +# reason: missing-policy +unsupported diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/defaults/main.yml new file mode 100644 index 000000000..2174b31ae --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/defaults/main.yml @@ -0,0 +1,8 @@ +cloudtrail_name: '{{ resource_prefix }}-cloudtrail' +s3_bucket_name: '{{ resource_prefix }}-cloudtrail-bucket' +kms_alias: '{{ resource_prefix }}-cloudtrail' +sns_topic: '{{ resource_prefix }}-cloudtrail-notifications' +cloudtrail_prefix: 'ansible-test-prefix' +cloudwatch_log_group: '{{ resource_prefix }}-cloudtrail' +cloudwatch_role: '{{ resource_prefix }}-cloudtrail' +cloudwatch_no_kms_role: '{{ resource_prefix }}-cloudtrail2' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/main.yml new file mode 100644 index 000000000..b20eb2ad4 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/main.yml @@ -0,0 +1,6 @@ +--- +- hosts: localhost + gather_facts: no + #serial: 10 + roles: + - cloudtrail diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/meta/main.yml new file mode 100644 index 000000000..32cf5dda7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/runme.sh b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/runme.sh new file mode 100755 index 000000000..14d1958ff --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/runme.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +# +# Beware: most of our tests here are run in parallel. 
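+# A short random prefix (tiny_prefix, generated below with uuidgen) keeps the AWS resources from different runs uniquely named.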
+# To add new tests you'll need to add a new host to the inventory and a matching +# '{{ inventory_hostname }}'.yml file in roles/ec2_instance/tasks/ + + +set -eux + +export ANSIBLE_ROLES_PATH=../ +tiny_prefix="$(uuidgen -r|cut -d- -f1)" + +# shellcheck disable=SC2016,SC2086 +echo ' +{ +"ansible_test": { + "environment": { + "ANSIBLE_DEBUG_BOTOCORE_LOGS": "True" + }, + "module_defaults": null +}, +"resource_prefix": "'${tiny_prefix}'", +"tiny_prefix": "'${tiny_prefix}'", +"aws_region": "us-east-2" +}' > _config-file.json + +ansible-playbook main.yml -i inventory "$@" -e @_config-file.json diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/tasks/main.yml new file mode 100644 index 000000000..e35136d5d --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/tasks/main.yml @@ -0,0 +1,1595 @@ +--- +# General Tests: +# - s3_bucket_name required when state is 'present' +# - Creation / Deletion +# - Enable/Disable logging +# - Enable/Disable log file validation option +# - Manipulation of Global Event logging option +# - Manipulation of Multi-Region logging option +# - Manipulation of S3 bucket option +# - Manipulation of Encryption option +# - Manipulation of SNS options +# - Manipulation of CloudWatch Log group options +# - Manipulation of Tags +# +# Notes: +# - results include the updates, even when check_mode is true +# - Poor handling of disable global + enable multi-region +# botocore.errorfactory.InvalidParameterCombinationException: An error +# occurred (InvalidParameterCombinationException) when calling the +# UpdateTrail operation: Multi-Region trail must include global service +# events. +# - Using blank string for KMS ID doesn't remove encryption +# - Using blank string for SNS Topic doesn't remove it +# - Using blank string for CloudWatch Log Group / Role doesn't remove them +# +# Possible Bugs: + +- module_defaults: + # Add this as a default because we (almost) always need it + amazon.aws.cloudtrail: + s3_bucket_name: '{{ s3_bucket_name }}' + region: '{{ aws_region }}' + collections: + - amazon.aws + block: + + # ============================================================ + # Argument Tests + # ============================================================ + - name: 'S3 Bucket required when state is "present"' + module_defaults: { cloudtrail: {} } + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + register: output + ignore_errors: yes + - assert: + that: + - output is failed + + - name: 'CloudWatch cloudwatch_logs_log_group_arn required when cloudwatch_logs_role_arn passed' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + cloudwatch_logs_role_arn: 'SomeValue' + register: output + ignore_errors: yes + - assert: + that: + - output is failed + - '"parameters are required together" in output.msg' + - '"cloudwatch_logs_log_group_arn" in output.msg' + + - name: 'CloudWatch cloudwatch_logs_role_arn required when cloudwatch_logs_log_group_arn passed' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + cloudwatch_logs_log_group_arn: 'SomeValue' + register: output + ignore_errors: yes + - assert: + that: + - output is failed + - '"parameters are required together" in output.msg' + - '"cloudwatch_logs_role_arn" in output.msg' + + #- name: 'Global Logging must be enabled when enabling Multi-region' + # cloudtrail: + # state: present + # name: '{{ cloudtrail_name }}' + # include_global_events: no + # 
is_multi_region_trail: yes + # register: output + # ignore_errors: yes + #- assert: + # that: + # - output is failed + + # ============================================================ + # Preparation + # ============================================================ + - name: 'Retrieve caller facts' + aws_caller_info: {} + register: aws_caller_info + + - name: 'Create S3 bucket' + vars: + bucket_name: '{{ s3_bucket_name }}' + s3_bucket: + state: present + name: '{{ bucket_name }}' + policy: '{{ lookup("template", "s3-policy.j2") }}' + - name: 'Create second S3 bucket' + vars: + bucket_name: '{{ s3_bucket_name }}-2' + s3_bucket: + state: present + name: '{{ bucket_name }}' + policy: '{{ lookup("template", "s3-policy.j2") }}' + + - name: 'Create SNS Topic' + vars: + sns_topic_name: '{{ sns_topic }}' + sns_topic: + state: present + name: '{{ sns_topic_name }}' + display_name: 'Used for testing SNS/CloudWatch integration' + policy: "{{ lookup('template', 'sns-policy.j2') | to_json }}" + register: output_sns_topic + - name: 'Create second SNS Topic' + vars: + sns_topic_name: '{{ sns_topic }}-2' + sns_topic: + state: present + name: '{{ sns_topic_name }}' + display_name: 'Used for testing SNS/CloudWatch integration' + policy: "{{ lookup('template', 'sns-policy.j2') | to_json }}" + + - name: 'Create KMS Key' + aws_kms: + state: present + alias: '{{ kms_alias }}' + enabled: yes + policy: "{{ lookup('template', 'kms-policy.j2') | to_json }}" + register: kms_key + - name: 'Create second KMS Key' + aws_kms: + state: present + alias: '{{ kms_alias }}-2' + enabled: yes + policy: "{{ lookup('template', 'kms-policy.j2') | to_json }}" + register: kms_key2 + + - name: 'Create CloudWatch IAM Role' + iam_role: + state: present + name: '{{ cloudwatch_role }}' + assume_role_policy_document: "{{ lookup('template', 'cloudwatch-assume-policy.j2') }}" + register: output_cloudwatch_role + - name: 'Create CloudWatch Log Group' + cloudwatchlogs_log_group: + state: present + log_group_name: '{{ cloudwatch_log_group }}' + retention: 1 + register: output_cloudwatch_log_group + - name: 'Create second CloudWatch Log Group' + cloudwatchlogs_log_group: + state: present + log_group_name: '{{ cloudwatch_log_group }}-2' + retention: 1 + register: output_cloudwatch_log_group2 + - name: 'Add inline policy to CloudWatch Role' + iam_policy: + state: present + iam_type: role + iam_name: '{{ cloudwatch_role }}' + policy_name: 'CloudWatch' + policy_json: "{{ lookup('template', 'cloudwatch-policy.j2') | to_json }}" + + - name: 'Create CloudWatch IAM Role with no kms permissions' + iam_role: + state: present + name: '{{ cloudwatch_no_kms_role }}' + assume_role_policy_document: "{{ lookup('template', 'cloudtrail-no-kms-assume-policy.j2') }}" + managed_policies: + - "arn:aws:iam::aws:policy/AWSCloudTrail_FullAccess" + register: output_cloudwatch_no_kms_role + + - name: pause to ensure role exists before attaching policy + pause: + seconds: 15 + + - name: 'Add inline policy to CloudWatch Role' + iam_policy: + state: present + iam_type: role + iam_name: '{{ cloudwatch_no_kms_role }}' + policy_name: 'CloudWatchNokms' + policy_json: "{{ lookup('template', 'cloudtrail-no-kms-policy.j2') }}" + + # ============================================================ + # Tests + # ============================================================ + + - name: 'Create a trail (CHECK MODE)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + register: output + check_mode: yes + - assert: + that: + - output is changed + + - name: 'Create a trail' + 
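+ # s3_bucket_name is inherited from the module_defaults at the top of this block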
cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + register: output + - assert: + that: + - output is changed + - output.exists == True + - output.trail.name == cloudtrail_name + + - name: 'No-op update to trail' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + register: output + - assert: + that: + - output is not changed + - output.exists == True + # Check everything is what we expect before we start making changes + - output.trail.name == cloudtrail_name + - output.trail.home_region == aws_region + - output.trail.include_global_service_events == True + - output.trail.is_multi_region_trail == False + - output.trail.is_logging == True + - output.trail.log_file_validation_enabled == False + - output.trail.s3_bucket_name == s3_bucket_name + - output.trail.s3_key_prefix is none + - output.trail.kms_key_id is none + - output.trail.sns_topic_arn is none + - output.trail.sns_topic_name is none + - output.trail.tags | length == 0 + + - name: 'Get the trail info' + cloudtrail_info: + register: info + + - name: 'Get the trail name from the cloud trail info' + set_fact: + trail_present: true + trail_arn: '{{ item.resource_id }}' + when: item.name == cloudtrail_name + loop: "{{ info.trail_list }}" + + - name: 'Assert that the trail name is present in the info' + assert: + that: + - trail_present is defined + - trail_present == True + + # ============================================================ + + - name: 'Set S3 prefix (CHECK MODE)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + s3_key_prefix: '{{ cloudtrail_prefix }}' + register: output + check_mode: yes + - assert: + that: + - output is changed + + - name: 'Set S3 prefix' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + s3_key_prefix: '{{ cloudtrail_prefix }}' + register: output + - assert: + that: + - output is changed + - output.trail.name == cloudtrail_name + - output.trail.s3_key_prefix == cloudtrail_prefix + + - name: 'Set S3 prefix (no change)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + s3_key_prefix: '{{ cloudtrail_prefix }}' + register: output + - assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.s3_key_prefix == cloudtrail_prefix + + - name: 'No-op update to trail' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + register: output + - assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.s3_key_prefix == cloudtrail_prefix + + - name: 'Get the trail info' + cloudtrail_info: + trail_names: + - '{{ trail_arn }}' + register: info + + - name: 'Assert that the s3_key_prefix is correct' + assert: + that: + - info.trail_list[0].s3_key_prefix == cloudtrail_prefix + + - name: 'Update S3 prefix (CHECK MODE)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + s3_key_prefix: '{{ cloudtrail_prefix }}-2' + register: output + check_mode: yes + - assert: + that: + - output is changed + + - name: 'Update S3 prefix' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + s3_key_prefix: '{{ cloudtrail_prefix }}-2' + register: output + - assert: + that: + - output is changed + - output.trail.name == cloudtrail_name + - 'output.trail.s3_key_prefix == "{{ cloudtrail_prefix }}-2"' + + - name: 'Update S3 prefix (no change)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + s3_key_prefix: '{{ cloudtrail_prefix }}-2' + register: output + - assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - 
'output.trail.s3_key_prefix == "{{ cloudtrail_prefix }}-2"' + + - name: 'Get the trail info after updating S3 prefix' + cloudtrail_info: + trail_names: + - '{{ trail_arn }}' + register: info + + - name: 'Assert that the s3_key_prefix is correct' + assert: + that: + - 'info.trail_list[0].s3_key_prefix == "{{ cloudtrail_prefix }}-2"' + + - name: 'Remove S3 prefix (CHECK MODE)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + s3_key_prefix: '/' + register: output + check_mode: yes + - assert: + that: + - output is changed + + - name: 'Remove S3 prefix' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + s3_key_prefix: '/' + register: output + - assert: + that: + - output is changed + - output.trail.name == cloudtrail_name + - output.trail.s3_key_prefix is none + + - name: 'Remove S3 prefix (no change)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + s3_key_prefix: '/' + register: output + - assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.s3_key_prefix is none + + - name: 'Get the trail info after removing S3 prefix' + cloudtrail_info: + trail_names: + - '{{ trail_arn }}' + register: info + + - name: 'Assert that the s3_key_prefix is None' + assert: + that: + - info.trail_list[0].s3_key_prefix is not defined + + # ============================================================ + + - include_tasks: 'tagging.yml' + + # ============================================================ + + - name: 'Set SNS Topic (CHECK MODE)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + sns_topic_name: '{{ sns_topic }}' + register: output + check_mode: yes + - assert: + that: + - output is changed + + - name: 'Set SNS Topic' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + sns_topic_name: '{{ sns_topic }}' + register: output + - assert: + that: + - output is changed + - output.trail.name == cloudtrail_name + - output.trail.sns_topic_name == sns_topic + + - name: 'Set SNS Topic (no change)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + sns_topic_name: '{{ sns_topic }}' + register: output + - assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.sns_topic_name == sns_topic + + - name: 'No-op update to trail' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + register: output + - assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.sns_topic_name == sns_topic + + - name: 'Get the trail info with SNS topic' + cloudtrail_info: + trail_names: + - '{{ trail_arn }}' + register: info + + - name: 'Assert that the sns_topic is correctly set' + assert: + that: + - info.trail_list[0].sns_topic_name == sns_topic + + - name: 'Update SNS Topic (CHECK MODE)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + sns_topic_name: '{{ sns_topic }}-2' + register: output + check_mode: yes + - assert: + that: + - output is changed + + - name: 'Update SNS Topic' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + sns_topic_name: '{{ sns_topic }}-2' + register: output + - assert: + that: + - output is changed + - output.trail.name == cloudtrail_name + - 'output.trail.sns_topic_name == "{{ sns_topic }}-2"' + + - name: 'Update SNS Topic (no change)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + sns_topic_name: '{{ sns_topic }}-2' + register: output + - assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - 
'output.trail.sns_topic_name == "{{ sns_topic }}-2"' + + - name: 'Get the trail info with SNS topic after update' + cloudtrail_info: + trail_names: + - '{{ trail_arn }}' + register: info + + - name: 'Assert that the sns_topic is correctly set' + assert: + that: + - 'info.trail_list[0].sns_topic_name == "{{ sns_topic }}-2"' + + #- name: 'Remove SNS Topic (CHECK MODE)' + # cloudtrail: + # state: present + # name: '{{ cloudtrail_name }}' + # sns_topic_name: '' + # register: output + # check_mode: yes + #- assert: + # that: + # - output is changed + + #- name: 'Remove SNS Topic' + # cloudtrail: + # state: present + # name: '{{ cloudtrail_name }}' + # sns_topic_name: '' + # register: output + #- assert: + # that: + # - output is changed + # - output.trail.name == cloudtrail_name + # - output.trail.sns_topic_name is none + + #- name: 'Remove SNS Topic (no change)' + # cloudtrail: + # state: present + # name: '{{ cloudtrail_name }}' + # sns_topic_name: '' + # register: output + #- assert: + # that: + # - output is not changed + # - output.trail.name == cloudtrail_name + # - output.trail.sns_topic_name is none + + + # ============================================================ + + - name: 'Set CloudWatch Log Group (CHECK MODE)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group.arn }}' + cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}' + register: output + check_mode: yes + - assert: + that: + - output is changed + + - name: 'Set CloudWatch Log Group' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group.arn }}' + cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}' + register: output + - assert: + that: + - output is changed + - output.trail.name == cloudtrail_name + - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group.arn + - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn + + - name: 'Set CloudWatch Log Group (no change)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group.arn }}' + cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}' + register: output + - assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group.arn + - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn + + - name: 'No-op update to trail' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + register: output + - assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group.arn + - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn + + - name: 'Get the trail info with CloudWatch Log Group' + cloudtrail_info: + trail_names: + - '{{ trail_arn }}' + register: info + + - name: 'Assert that the cloud watch log group is correctly set' + assert: + that: + - info.trail_list[0].cloud_watch_logs_log_group_arn == output_cloudwatch_log_group.arn + - info.trail_list[0].cloud_watch_logs_role_arn == output_cloudwatch_role.arn + + - name: 'Update CloudWatch Log Group (CHECK MODE)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group2.arn }}' + cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}' + register: output + 
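+ # even in check mode the result includes the pending updates (see Notes at the top of this file), so the asserts below can inspect the new ARNs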
check_mode: yes + - assert: + that: + - output is changed + - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group2.arn + - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn + + - name: 'Update CloudWatch Log Group' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group2.arn }}' + cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}' + register: output + - assert: + that: + - output is changed + - output.trail.name == cloudtrail_name + - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group2.arn + - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn + + - name: 'Update CloudWatch Log Group (no change)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group2.arn }}' + cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}' + register: output + - assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group2.arn + - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn + + - name: 'Get the trail info with CloudWatch Log Group after update' + cloudtrail_info: + trail_names: + - '{{ trail_arn }}' + register: info + + - name: 'Assert that the cloud watch log group is correctly set after update' + assert: + that: + - info.trail_list[0].cloud_watch_logs_log_group_arn == output_cloudwatch_log_group2.arn + - info.trail_list[0].cloud_watch_logs_role_arn == output_cloudwatch_role.arn + + #- name: 'Remove CloudWatch Log Group (CHECK MODE)' + # cloudtrail: + # state: present + # name: '{{ cloudtrail_name }}' + # cloudwatch_logs_log_group_arn: '' + # cloudwatch_logs_role_arn: '' + # register: output + # check_mode: yes + #- assert: + # that: + # - output is changed + # - output.trail.name == cloudtrail_name + # - output.trail.cloud_watch_logs_log_group_arn is none + # - output.trail.cloud_watch_logs_role_arn is none + + #- name: 'Remove CloudWatch Log Group' + # cloudtrail: + # state: present + # name: '{{ cloudtrail_name }}' + # cloudwatch_logs_log_group_arn: '' + # cloudwatch_logs_role_arn: '' + # register: output + #- assert: + # that: + # - output is changed + # - output.trail.name == cloudtrail_name + # - output.trail.cloud_watch_logs_log_group_arn is none + # - output.trail.cloud_watch_logs_role_arn is none + + #- name: 'Remove CloudWatch Log Group (no change)' + # cloudtrail: + # state: present + # name: '{{ cloudtrail_name }}' + # cloudwatch_logs_log_group_arn: '' + # cloudwatch_logs_role_arn: '' + # register: output + #- assert: + # that: + # - output is not changed + # - output.trail.name == cloudtrail_name + # - output.trail.cloud_watch_logs_log_group_arn is none + # - output.trail.cloud_watch_logs_role_arn is none + + # ============================================================ + + - name: 'Update S3 bucket (CHECK MODE)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + s3_bucket_name: '{{ s3_bucket_name }}-2' + register: output + check_mode: yes + - assert: + that: + - output is changed + + - name: 'Update S3 bucket' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + s3_bucket_name: '{{ s3_bucket_name }}-2' + register: output + - assert: + that: + - output is changed + - output.trail.name == cloudtrail_name + - 'output.trail.s3_bucket_name == "{{ s3_bucket_name }}-2"' + + - name: 'Update S3 bucket (no change)' + 
cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + s3_bucket_name: '{{ s3_bucket_name }}-2' + register: output + - assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - 'output.trail.s3_bucket_name == "{{ s3_bucket_name }}-2"' + + - name: 'Get the trail info with S3 bucket name' + cloudtrail_info: + trail_names: + - '{{ trail_arn }}' + register: info + + - name: 'Assert that the S3 Bucket name is correctly set' + assert: + that: + - 'info.trail_list[0].s3_bucket_name == "{{ s3_bucket_name }}-2"' + + - name: 'Reset S3 bucket' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + register: output + - assert: + that: + - output.trail.name == cloudtrail_name + - output.trail.s3_bucket_name == s3_bucket_name + + # ============================================================ + + - name: 'Disable logging (CHECK MODE)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + enable_logging: no + register: output + check_mode: yes + - assert: + that: + - output is changed + + - name: 'Disable logging' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + enable_logging: no + register: output + - assert: + that: + - output is changed + - output.trail.name == cloudtrail_name + - output.trail.is_logging == False + + - name: 'Disable logging (no change)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + enable_logging: no + register: output + - assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.is_logging == False + + - name: 'Get the trail info to check the logging state' + cloudtrail_info: + trail_names: + - '{{ trail_arn }}' + register: info + + - name: 'Assert that the logging state is correctly set' + assert: + that: + - info.trail_list[0].is_logging == False + + # Ansible Documentation lists logging as explicitly defaulting to enabled + + - name: 'Enable logging (CHECK MODE)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + enable_logging: yes + register: output + check_mode: yes + - assert: + that: + - output is changed + + - name: 'Enable logging' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + enable_logging: yes + register: output + - assert: + that: + - output is changed + - output.trail.name == cloudtrail_name + - output.trail.is_logging == True + + - name: 'Enable logging (no change)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + enable_logging: yes + register: output + - assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.is_logging == True + + - name: 'Get the trail info to check the logging state' + cloudtrail_info: + trail_names: + - '{{ trail_arn }}' + register: info + + - name: 'Assert that the logging state is correctly set' + assert: + that: + - info.trail_list[0].is_logging == True + + # ============================================================ + + - name: 'Disable global logging (CHECK MODE)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + include_global_events: no + register: output + check_mode: yes + - assert: + that: + - output is changed + + - name: 'Disable global logging' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + include_global_events: no + register: output + - assert: + that: + - output is changed + - output.trail.name == cloudtrail_name + - output.trail.include_global_service_events == False + + - name: 'Disable global logging (no change)' + cloudtrail: + state: present + name: '{{ 
cloudtrail_name }}' + include_global_events: no + register: output + - assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.include_global_service_events == False + + - name: 'Get the trail info to check the global logging state' + cloudtrail_info: + trail_names: + - '{{ trail_arn }}' + register: info + + - name: 'Assert that the global logging state is correctly set' + assert: + that: + - info.trail_list[0].include_global_service_events == False + + # Ansible Documentation lists Global-logging as explicitly defaulting to enabled + + - name: 'Enable global logging (CHECK MODE)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + include_global_events: yes + register: output + check_mode: yes + - assert: + that: + - output is changed + + - name: 'Enable global logging' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + include_global_events: yes + register: output + - assert: + that: + - output is changed + - output.trail.name == cloudtrail_name + - output.trail.include_global_service_events == True + + - name: 'Enable global logging (no change)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + include_global_events: yes + register: output + - assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.include_global_service_events == True + + - name: 'Get the trail info to check the global logging state (default)' + cloudtrail_info: + trail_names: + - '{{ trail_arn }}' + register: info + + - name: 'Assert that the global logging state is correctly set (default)' + assert: + that: + - info.trail_list[0].include_global_service_events == True + + # ============================================================ + + - name: 'Enable multi-region logging (CHECK MODE)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + is_multi_region_trail: yes + register: output + check_mode: yes + - assert: + that: + - output is changed + + - name: 'Enable multi-region logging' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + is_multi_region_trail: yes + register: output + - assert: + that: + - output is changed + - output.trail.name == cloudtrail_name + - output.trail.is_multi_region_trail == True + + - name: 'Enable multi-region logging (no change)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + is_multi_region_trail: yes + register: output + - assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.is_multi_region_trail == True + + - name: 'Get the trail info to check the multi-region logging state (default)' + cloudtrail_info: + trail_names: + - '{{ trail_arn }}' + register: info + + - name: 'Assert that the global logging state is correctly set (default)' + assert: + that: + - info.trail_list[0].is_multi_region_trail == True + + # Ansible Documentation lists Multi-Region-logging as explicitly defaulting to disabled + + - name: 'Disable multi-region logging (CHECK MODE)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + is_multi_region_trail: no + register: output + check_mode: yes + - assert: + that: + - output is changed + + - name: 'Disable multi-region logging' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + is_multi_region_trail: no + register: output + - assert: + that: + - output is changed + - output.trail.name == cloudtrail_name + - output.trail.is_multi_region_trail == False + + - name: 'Disable multi-region logging (no change)' + cloudtrail: + 
state: present + name: '{{ cloudtrail_name }}' + is_multi_region_trail: no + register: output + - assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.is_multi_region_trail == False + + - name: 'Get the trail info to check the multi-region logging state (default)' + cloudtrail_info: + trail_names: + - '{{ trail_arn }}' + register: info + + - name: 'Assert that the global logging state is correctly set (default)' + assert: + that: + - info.trail_list[0].is_multi_region_trail == False + + # ============================================================ + + - name: 'Enable logfile validation (CHECK MODE)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + enable_log_file_validation: yes + register: output + check_mode: yes + - assert: + that: + - output is changed + + - name: 'Enable logfile validation' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + enable_log_file_validation: yes + register: output + - assert: + that: + - output is changed + - output.trail.name == cloudtrail_name + - output.trail.log_file_validation_enabled == True + + - name: 'Enable logfile validation (no change)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + enable_log_file_validation: yes + register: output + - assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.log_file_validation_enabled == True + + - name: 'No-op update to trail' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + register: output + - assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.log_file_validation_enabled == True + + - name: 'Get the trail info to check the log file validation' + cloudtrail_info: + trail_names: + - '{{ trail_arn }}' + register: info + + - name: 'Assert that the log file validation is correctly set' + assert: + that: + - info.trail_list[0].log_file_validation_enabled == True + + - name: 'Disable logfile validation (CHECK MODE)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + enable_log_file_validation: no + register: output + check_mode: yes + - assert: + that: + - output is changed + + - name: 'Disable logfile validation' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + enable_log_file_validation: no + register: output + - assert: + that: + - output is changed + - output.trail.name == cloudtrail_name + - output.trail.log_file_validation_enabled == False + + - name: 'Disable logfile validation (no change)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + enable_log_file_validation: no + register: output + - assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.log_file_validation_enabled == False + + - name: 'Get the trail info to check the log file validation' + cloudtrail_info: + trail_names: + - '{{ trail_arn }}' + register: info + + - name: 'Assert that the log file validation is disabled' + assert: + that: + - info.trail_list[0].log_file_validation_enabled == False + + # ============================================================ + + - name: 'Enable logging encryption (CHECK MODE)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + kms_key_id: '{{ kms_key.key_arn }}' + register: output + check_mode: yes + - assert: + that: + - output is changed + + - name: 'Enable logging encryption' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + kms_key_id: '{{ kms_key.key_arn }}' + register: output + - assert: 
+ that: + - output is changed + - output.trail.kms_key_id == kms_key.key_arn + + - name: 'Enable logging encryption (no change)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + kms_key_id: '{{ kms_key.key_arn }}' + register: output + - assert: + that: + - output is not changed + - output.trail.kms_key_id == kms_key.key_arn + + - name: 'Enable logging encryption (no change, check mode)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + kms_key_id: '{{ kms_key.key_arn }}' + check_mode: yes + register: output + - assert: + that: + - output is not changed + - output.trail.kms_key_id == kms_key.key_arn + + - name: 'No-op update to trail' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + register: output + - assert: + that: + - output is not changed + - output.trail.kms_key_id == kms_key.key_arn + + - name: 'Get the trail info to check the logging encryption' + cloudtrail_info: + trail_names: + - '{{ trail_arn }}' + register: info + + - name: 'Assert that the logging encryption is correctly set' + assert: + that: + - info.trail_list[0].kms_key_id == kms_key.key_arn + + - name: 'Update logging encryption key (CHECK MODE)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + kms_key_id: '{{ kms_key2.key_arn }}' + register: output + check_mode: yes + - assert: + that: + - output is changed + + - name: 'Update logging encryption key' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + kms_key_id: '{{ kms_key2.key_arn }}' + register: output + - assert: + that: + - output is changed + - output.trail.kms_key_id == kms_key2.key_arn + + - name: 'Update logging encryption key (no change)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + kms_key_id: '{{ kms_key2.key_arn }}' + register: output + - assert: + that: + - output is not changed + - output.trail.kms_key_id == kms_key2.key_arn + + - name: 'Get the trail info to check the logging key encryption' + cloudtrail_info: + trail_names: + - '{{ trail_arn }}' + register: info + + - name: 'Assert that the logging key encryption is correctly set' + assert: + that: + - info.trail_list[0].kms_key_id == kms_key2.key_arn + + - name: 'Update logging encryption to alias (CHECK MODE)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + kms_key_id: 'alias/{{ kms_alias }}' + register: output + check_mode: yes + - assert: + that: + - output is changed + + - name: 'Update logging encryption to alias' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + kms_key_id: 'alias/{{ kms_alias }}' + register: output + - assert: + that: + - output is changed + - output.trail.kms_key_id == kms_key.key_arn + + - name: 'Update logging encryption to alias (no change)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + kms_key_id: 'alias/{{ kms_alias }}' + register: output + - assert: + that: + - output is not changed + - output.trail.kms_key_id == kms_key.key_arn + + - name: 'Update logging encryption to alias (CHECK MODE, no change)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + kms_key_id: '{{ kms_key.key_id }}' # Test when using key id + register: output + check_mode: yes + - assert: + that: + - output is not changed + - output.trail.kms_key_id == kms_key.key_id + + - debug: + msg: '{{ output }}' + + - name: 'Get the trail info to check the logging key encryption after update' + cloudtrail_info: + trail_names: + - '{{ trail_arn }}' + register: info + + - name: 'Assert that the logging key encryption is correctly updated' + assert: + that: 
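+ # the info result carries the full KMS key ARN, so assert that it contains the key id rather than comparing exactly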
+ - kms_key.key_id in info.trail_list[0].kms_key_id + + # Assume a role which has been denied access to KMS + + - community.aws.sts_assume_role: + role_arn: '{{ output_cloudwatch_no_kms_role.arn }}' + role_session_name: "cloudtrailNoKms" + region: '{{ aws_region }}' + register: noKms_assumed_role + + - name: 'Enable logging encryption w/ alias (no change, no kms permissions, check mode)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + kms_key_id: 'alias/{{ kms_alias }}' + aws_access_key: "{{ noKms_assumed_role.sts_creds.access_key }}" + aws_secret_key: "{{ noKms_assumed_role.sts_creds.secret_key }}" + security_token: "{{ noKms_assumed_role.sts_creds.session_token }}" + check_mode: yes + register: output + - assert: + that: + - output is changed + # when using check_mode with no kms permissions, and not giving kms_key_id as a key arn, + # the output will always be marked as changed. + + - name: 'Disable logging encryption (CHECK MODE)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + kms_key_id: '' + register: output + check_mode: yes + - assert: + that: + - output is changed + + - name: 'Disable logging encryption' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + kms_key_id: '' + register: output + - assert: + that: + - output.trail.kms_key_id == "" + - output is changed + + - name: 'Disable logging encryption (no change)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + kms_key_id: '' + register: output + - assert: + that: + - output.trail.kms_key_id == "" + - output is not changed + + # ============================================================ + + - name: 'Delete a trail without providing bucket_name (CHECK MODE)' + module_defaults: { cloudtrail: {} } + cloudtrail: + state: absent + name: '{{ cloudtrail_name }}' + register: output + check_mode: yes + - assert: + that: + - output is changed + + - name: 'Delete a trail while providing bucket_name (CHECK MODE)' + cloudtrail: + state: absent + name: '{{ cloudtrail_name }}' + register: output + check_mode: yes + - assert: + that: + - output is changed + + - name: 'Delete a trail' + cloudtrail: + state: absent + name: '{{ cloudtrail_name }}' + register: output + - assert: + that: + - output is changed + - output.exists == False + + - name: 'Delete a non-existent trail' + cloudtrail: + state: absent + name: '{{ cloudtrail_name }}' + register: output + - assert: + that: + - output is not changed + - output.exists == False + + # ============================================================ + + - name: 'Test creation of a complex Trail (all features)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + s3_key_prefix: '{{ cloudtrail_prefix }}' + sns_topic_name: '{{ sns_topic }}' + cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group.arn }}' + cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}' + is_multi_region_trail: yes + include_global_events: yes + enable_log_file_validation: yes + kms_key_id: '{{ kms_key.key_arn }}' + register: output + - assert: + that: + - output is changed + #- output.exists == True + - output.trail.name == cloudtrail_name + - output.trail.home_region == aws_region + - output.trail.include_global_service_events == True + - output.trail.is_multi_region_trail == True + - output.trail.is_logging == True + - output.trail.log_file_validation_enabled == True + - output.trail.s3_bucket_name == s3_bucket_name + - output.trail.s3_key_prefix == cloudtrail_prefix + - output.trail.kms_key_id == kms_key.key_arn + - output.trail.sns_topic_arn == 
output_sns_topic.sns_arn + - output.trail.sns_topic_name == sns_topic + - output.trail.tags | length == 0 + + - name: 'Test creation of a complex Trail (no change)' + cloudtrail: + state: present + name: '{{ cloudtrail_name }}' + s3_key_prefix: '{{ cloudtrail_prefix }}' + sns_topic_name: '{{ sns_topic }}' + cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group.arn }}' + cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}' + is_multi_region_trail: yes + include_global_events: yes + enable_log_file_validation: yes + kms_key_id: '{{ kms_key.key_arn }}' + register: output + - assert: + that: + - output is not changed + - output.exists == True + - output.trail.name == cloudtrail_name + - output.trail.home_region == aws_region + - output.trail.include_global_service_events == True + - output.trail.is_multi_region_trail == True + - output.trail.is_logging == True + - output.trail.log_file_validation_enabled == True + - output.trail.s3_bucket_name == s3_bucket_name + - output.trail.s3_key_prefix == cloudtrail_prefix + - output.trail.kms_key_id == kms_key.key_arn + - output.trail.sns_topic_arn == output_sns_topic.sns_arn + - output.trail.sns_topic_name == sns_topic + - output.trail.tags | length == 0 + + - name: 'Get the trail info of the created trail' + cloudtrail_info: + trail_names: + - '{{ trail_arn }}' + register: info + + - name: 'Assert that the logging key encryption is correctly updated' + assert: + that: + - info.trail_list[0].name == cloudtrail_name + - info.trail_list[0].home_region == aws_region + - info.trail_list[0].include_global_service_events == True + - info.trail_list[0].is_multi_region_trail == True + - info.trail_list[0].is_logging == True + - info.trail_list[0].log_file_validation_enabled == True + - info.trail_list[0].s3_bucket_name == s3_bucket_name + - info.trail_list[0].s3_key_prefix == cloudtrail_prefix + - info.trail_list[0].kms_key_id == kms_key.key_arn + - info.trail_list[0].sns_topic_arn == output_sns_topic.sns_arn + - info.trail_list[0].sns_topic_name == sns_topic + - info.trail_list[0].tags | length == 0 + + always: + # ============================================================ + # Cleanup + # ============================================================ + - name: 'Delete test trail' + cloudtrail: + state: absent + name: '{{ cloudtrail_name }}' + ignore_errors: yes + - name: 'Delete S3 bucket' + s3_bucket: + state: absent + name: '{{ s3_bucket_name }}' + force: yes + ignore_errors: yes + - name: 'Delete second S3 bucket' + s3_bucket: + state: absent + name: '{{ s3_bucket_name }}-2' + force: yes + ignore_errors: yes + - name: 'Delete KMS Key' + aws_kms: + state: absent + alias: '{{ kms_alias }}' + ignore_errors: yes + - name: 'Delete second KMS Key' + aws_kms: + state: absent + alias: '{{ kms_alias }}-2' + ignore_errors: yes + - name: 'Delete SNS Topic' + sns_topic: + state: absent + name: '{{ sns_topic }}' + ignore_errors: yes + - name: 'Delete second SNS Topic' + sns_topic: + state: absent + name: '{{ sns_topic }}-2' + ignore_errors: yes + - name: 'Delete CloudWatch Log Group' + cloudwatchlogs_log_group: + state: absent + log_group_name: '{{ cloudwatch_log_group }}' + ignore_errors: yes + - name: 'Delete second CloudWatch Log Group' + cloudwatchlogs_log_group: + state: absent + log_group_name: '{{ cloudwatch_log_group }}-2' + ignore_errors: yes + - name: 'Remove inline policy to CloudWatch Role' + iam_policy: + state: absent + iam_type: role + iam_name: '{{ cloudwatch_role }}' + policy_name: 'CloudWatch' + ignore_errors: yes + - name: 'Delete 
CloudWatch IAM Role' + iam_role: + state: absent + name: '{{ cloudwatch_role }}' + ignore_errors: yes + - name: 'Remove inline policy to CloudWatch Role' + iam_policy: + state: absent + iam_type: role + iam_name: '{{ cloudwatch_no_kms_role }}' + policy_name: 'CloudWatchNokms' + ignore_errors: yes + - name: 'Delete CloudWatch No KMS IAM Role' + iam_role: + state: absent + name: '{{ cloudwatch_no_kms_role }}' + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/tasks/tagging.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/tasks/tagging.yml new file mode 100644 index 000000000..df537c67e --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/tasks/tagging.yml @@ -0,0 +1,252 @@ +- name: Tests relating to tagging cloudtrails + vars: + first_tags: + 'Key with Spaces': Value with spaces + CamelCaseKey: CamelCaseValue + pascalCaseKey: pascalCaseValue + snake_case_key: snake_case_value + second_tags: + 'New Key with Spaces': Value with spaces + NewCamelCaseKey: CamelCaseValue + newPascalCaseKey: pascalCaseValue + new_snake_case_key: snake_case_value + third_tags: + 'Key with Spaces': Value with spaces + CamelCaseKey: CamelCaseValue + pascalCaseKey: pascalCaseValue + snake_case_key: snake_case_value + 'New Key with Spaces': Updated Value with spaces + final_tags: + 'Key with Spaces': Value with spaces + CamelCaseKey: CamelCaseValue + pascalCaseKey: pascalCaseValue + snake_case_key: snake_case_value + 'New Key with Spaces': Updated Value with spaces + NewCamelCaseKey: CamelCaseValue + newPascalCaseKey: pascalCaseValue + new_snake_case_key: snake_case_value + # Mandatory settings + module_defaults: + amazon.aws.cloudtrail: + name: '{{ cloudtrail_name }}' + s3_bucket_name: '{{ s3_bucket_name }}' + state: present +# community.aws.cloudtrail_info: +# name: '{{ cloudtrail_name }}' + block: + + ### + + - name: test adding tags to cloudtrail (check mode) + cloudtrail: + tags: '{{ first_tags }}' + purge_tags: True + register: update_result + check_mode: yes + - name: assert that update succeeded + assert: + that: + - update_result is changed + + - name: test adding tags to cloudtrail + cloudtrail: + tags: '{{ first_tags }}' + purge_tags: True + register: update_result + - name: assert that update succeeded + assert: + that: + - update_result is changed + - update_result.trail.tags == first_tags + + - name: test adding tags to cloudtrail - idempotency (check mode) + cloudtrail: + tags: '{{ first_tags }}' + purge_tags: True + register: update_result + check_mode: yes + - name: assert that update succeeded + assert: + that: + - update_result is not changed + + - name: test adding tags to cloudtrail - idempotency + cloudtrail: + tags: '{{ first_tags }}' + purge_tags: True + register: update_result + - name: assert that update succeeded + assert: + that: + - update_result is not changed + - update_result.trail.tags == first_tags + + ### + + - name: test updating tags with purge on cloudtrail (check mode) + cloudtrail: + tags: '{{ second_tags }}' + purge_tags: True + register: update_result + check_mode: yes + - name: assert that update succeeded + assert: + that: + - update_result is changed + + - name: test updating tags with purge on cloudtrail + cloudtrail: + tags: '{{ second_tags }}' + purge_tags: True + register: update_result + - name: assert that update succeeded + assert: + that: + - update_result is changed + - update_result.trail.tags == second_tags + + - name: test updating tags with purge on 
cloudtrail - idempotency (check mode) + cloudtrail: + tags: '{{ second_tags }}' + purge_tags: True + register: update_result + check_mode: yes + - name: assert that update succeeded + assert: + that: + - update_result is not changed + + - name: test updating tags with purge on cloudtrail - idempotency + cloudtrail: + tags: '{{ second_tags }}' + purge_tags: True + register: update_result + - name: assert that update succeeded + assert: + that: + - update_result is not changed + - update_result.trail.tags == second_tags + + ### + + - name: test updating tags without purge on cloudtrail (check mode) + cloudtrail: + tags: '{{ third_tags }}' + purge_tags: False + register: update_result + check_mode: yes + - name: assert that update succeeded + assert: + that: + - update_result is changed + + - name: test updating tags without purge on cloudtrail + cloudtrail: + tags: '{{ third_tags }}' + purge_tags: False + register: update_result + - name: assert that update succeeded + assert: + that: + - update_result is changed + - update_result.trail.tags == final_tags + + - name: test updating tags without purge on cloudtrail - idempotency (check mode) + cloudtrail: + tags: '{{ third_tags }}' + purge_tags: False + register: update_result + check_mode: yes + - name: assert that update succeeded + assert: + that: + - update_result is not changed + + - name: test updating tags without purge on cloudtrail - idempotency + cloudtrail: + tags: '{{ third_tags }}' + purge_tags: False + register: update_result + - name: assert that update succeeded + assert: + that: + - update_result is not changed + - update_result.trail.tags == final_tags + +# ### +# +# - name: test that cloudtrail_info returns the tags +# cloudtrail_info: +# register: tag_info +# - name: assert tags present +# assert: +# that: +# - tag_info.trail.tags == final_tags +# +# ### + + - name: test no tags param cloudtrail (check mode) + cloudtrail: {} + register: update_result + check_mode: yes + - name: assert no change + assert: + that: + - update_result is not changed + - update_result.trail.tags == final_tags + + + - name: test no tags param cloudtrail + cloudtrail: {} + register: update_result + - name: assert no change + assert: + that: + - update_result is not changed + - update_result.trail.tags == final_tags + + ### + + - name: test removing tags from cloudtrail (check mode) + cloudtrail: + tags: {} + purge_tags: True + register: update_result + check_mode: yes + - name: assert that update succeeded + assert: + that: + - update_result is changed + + - name: test removing tags from cloudtrail + cloudtrail: + tags: {} + purge_tags: True + register: update_result + - name: assert that update succeeded + assert: + that: + - update_result is changed + - update_result.trail.tags == {} + + - name: test removing tags from cloudtrail - idempotency (check mode) + cloudtrail: + tags: {} + purge_tags: True + register: update_result + check_mode: yes + - name: assert that update succeeded + assert: + that: + - update_result is not changed + + - name: test removing tags from cloudtrail - idempotency + cloudtrail: + tags: {} + purge_tags: True + register: update_result + - name: assert that update succeeded + assert: + that: + - update_result is not changed + - update_result.trail.tags == {} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/cloudtrail-no-kms-assume-policy.j2 b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/cloudtrail-no-kms-assume-policy.j2 new file mode 100644 index 
000000000..f3bfd14ec --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/cloudtrail-no-kms-assume-policy.j2 @@ -0,0 +1,11 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "AssumeRole", + "Effect": "Allow", + "Principal": { "AWS": "arn:aws:iam::{{ aws_caller_info.account }}:root" }, + "Action": "sts:AssumeRole" + } + ] +} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/cloudtrail-no-kms-policy.j2 b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/cloudtrail-no-kms-policy.j2 new file mode 100644 index 000000000..d85b650b7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/cloudtrail-no-kms-policy.j2 @@ -0,0 +1,11 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "kmsDeny", + "Effect": "Deny", + "Action": [ "kms:*" ], + "Resource": [ "*" ] + } + ] +} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/cloudwatch-assume-policy.j2 b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/cloudwatch-assume-policy.j2 new file mode 100644 index 000000000..6d7fb7b88 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/cloudwatch-assume-policy.j2 @@ -0,0 +1,13 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "AssumeFromCloudTrails", + "Effect": "Allow", + "Principal": { + "Service": "cloudtrail.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] +} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/cloudwatch-policy.j2 b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/cloudwatch-policy.j2 new file mode 100644 index 000000000..8f354a702 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/cloudwatch-policy.j2 @@ -0,0 +1,17 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "CloudTrail2CloudWatch", + "Effect": "Allow", + "Action": [ + "logs:CreateLogStream", + "logs:PutLogEvents" + ], + "Resource": [ + "arn:aws:logs:{{ aws_region }}:{{ aws_caller_info.account }}:log-group:{{ cloudwatch_log_group }}:log-stream:*", + "arn:aws:logs:{{ aws_region }}:{{ aws_caller_info.account }}:log-group:{{ cloudwatch_log_group }}-2:log-stream:*" + ] + } + ] +} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/kms-policy.j2 b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/kms-policy.j2 new file mode 100644 index 000000000..35730f1d2 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/kms-policy.j2 @@ -0,0 +1,34 @@ +{ + "Version": "2012-10-17", + "Id": "CloudTrailPolicy", + "Statement": [ + { + "Sid": "EncryptLogs", + "Effect": "Allow", + "Principal": { "Service": "cloudtrail.amazonaws.com" }, + "Action": "kms:GenerateDataKey*", + "Resource": "*", + "Condition": { + "StringLike": { + "kms:EncryptionContext:aws:cloudtrail:arn": [ + "arn:aws:cloudtrail:*:{{ aws_caller_info.account }}:trail/{{ resource_prefix }}*" + ] + } + } + }, + { + "Sid": "DescribeKey", + "Effect": "Allow", + "Principal": { "Service": "cloudtrail.amazonaws.com" }, + "Action": "kms:DescribeKey", + "Resource": "*" + }, + { + "Sid": "AnsibleTestManage", + "Effect": "Allow", + "Principal": { "AWS": "{{ aws_caller_info.arn }}" }, + "Action": "*", + "Resource": "*" + } + ] +} diff --git 
a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/s3-policy.j2 b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/s3-policy.j2 new file mode 100644 index 000000000..78c056e30 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/s3-policy.j2 @@ -0,0 +1,34 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "CloudTrailCheckAcl", + "Effect": "Allow", + "Principal": { "Service": "cloudtrail.amazonaws.com" }, + "Action": "s3:GetBucketAcl", + "Resource": "arn:aws:s3:::{{ bucket_name }}" + }, + { + "Sid": "CloudTrailWriteLogs", + "Effect": "Allow", + "Principal": { "Service": "cloudtrail.amazonaws.com" }, + "Action": "s3:PutObject", + "Resource": [ + "arn:aws:s3:::{{ bucket_name }}/AWSLogs/{{ aws_caller_info.account }}/*", + "arn:aws:s3:::{{ bucket_name }}/{{ cloudtrail_prefix }}*/AWSLogs/{{ aws_caller_info.account }}/*" + ], + "Condition": { + "StringEquals": { + "s3:x-amz-acl": "bucket-owner-full-control" + } + } + }, + { + "Sid": "AnsibleTestManage", + "Effect": "Allow", + "Principal": { "AWS": "{{ aws_caller_info.arn }}" }, + "Action": "*", + "Resource": "arn:aws:s3:::{{ bucket_name }}" + } + ] +} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/sns-policy.j2 b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/sns-policy.j2 new file mode 100644 index 000000000..3c267b800 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/sns-policy.j2 @@ -0,0 +1,34 @@ +{ + "Version": "2008-10-17", + "Id": "AnsibleSNSTesting", + "Statement": [ + { + "Sid": "CloudTrailSNSPolicy", + "Effect": "Allow", + "Principal": { + "Service": "cloudtrail.amazonaws.com" + }, + "Action": "sns:Publish", + "Resource": "arn:aws:sns:{{ aws_region }}:{{ aws_caller_info.account }}:{{ sns_topic_name }}" + }, + { + "Sid": "AnsibleTestManage", + "Effect": "Allow", + "Principal": { + "AWS": "{{ aws_caller_info.arn }}" + }, + "Action": [ + "sns:Subscribe", + "sns:ListSubscriptionsByTopic", + "sns:DeleteTopic", + "sns:GetTopicAttributes", + "sns:Publish", + "sns:RemovePermission", + "sns:AddPermission", + "sns:Receive", + "sns:SetTopicAttributes" + ], + "Resource": "arn:aws:sns:{{ aws_region }}:{{ aws_caller_info.account }}:{{ sns_topic_name }}" + } + ] +} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/aliases b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/aliases new file mode 100644 index 000000000..4ef4b2067 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/aliases @@ -0,0 +1 @@ +cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/defaults/main.yml new file mode 100644 index 000000000..f65410b95 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/defaults/main.yml @@ -0,0 +1,4 @@ +# defaults file for cloudwatch_metric_alarm +ec2_instance_name: '{{ resource_prefix }}-node' +ec2_instance_owner: integration-run-{{ resource_prefix }} +alarm_prefix: ansible-test diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/meta/main.yml new file mode 100644 index 000000000..1d40168d0 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: +- setup_ec2_facts diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/env_cleanup.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/env_cleanup.yml new file mode 100644 index 000000000..104f57984 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/env_cleanup.yml @@ -0,0 +1,94 @@
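+# Teardown note: dependent AWS resources (instances, ENIs, gateways) are released +# asynchronously, so each task below retries with until/retries and ignores errors +# so that the remaining cleanup still runs.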
+- name: remove any instances in the test VPC + ec2_instance: + filters: + vpc_id: '{{ testing_vpc.vpc.id }}' + state: absent + register: removed + until: removed is not failed + ignore_errors: yes + retries: 10 + +- name: remove ENIs + ec2_eni_info: + filters: + vpc-id: '{{ testing_vpc.vpc.id }}' + register: enis + +- name: delete all ENIs + ec2_eni: + eni_id: '{{ item.id }}' + state: absent + register: removed + until: removed is not failed + with_items: '{{ enis.network_interfaces }}' + ignore_errors: yes + retries: 10 + +- name: remove the security group + ec2_group: + name: '{{ resource_prefix }}-sg' + description: a security group for ansible tests + vpc_id: '{{ testing_vpc.vpc.id }}' + state: absent + register: removed + until: removed is not failed + ignore_errors: yes + retries: 10 + +- name: remove routing rules + ec2_vpc_route_table: + state: absent + vpc_id: '{{ testing_vpc.vpc.id }}' + tags: + created: '{{ resource_prefix }}-route' + routes: + - dest: 0.0.0.0/0 + gateway_id: '{{ igw.gateway_id }}' + subnets: + - '{{ testing_subnet_a.subnet.id }}' + - '{{ testing_subnet_b.subnet.id }}' + register: removed + until: removed is not failed + ignore_errors: yes + retries: 10 + +- name: remove internet gateway + ec2_vpc_igw: + vpc_id: '{{ testing_vpc.vpc.id }}' + state: absent + register: removed + until: removed is not failed + ignore_errors: yes + retries: 10 + +- name: remove subnet A + ec2_vpc_subnet: + state: absent + vpc_id: '{{ testing_vpc.vpc.id }}' + cidr: 10.22.32.0/24 + register: removed + until: removed is not failed + ignore_errors: yes + retries: 10 + +- name: remove subnet B + ec2_vpc_subnet: + state: absent + vpc_id: '{{ testing_vpc.vpc.id }}' + cidr: 10.22.33.0/24 + register: removed + until: removed is not failed + ignore_errors: yes + retries: 10 + +- name: remove the VPC + ec2_vpc_net: + name: '{{ resource_prefix }}-vpc' + cidr_block: 10.22.32.0/23 + state: absent + tags: + Name: Ansible Testing VPC + tenancy: default + register: removed + until: removed is not failed + ignore_errors: yes + retries: 10 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/env_setup.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/env_setup.yml new file mode 100644 index 000000000..2153d876a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/env_setup.yml @@ -0,0 +1,62 @@ +- name: Create VPC for use in testing + ec2_vpc_net: + name: '{{ resource_prefix }}-vpc' + cidr_block: 10.22.32.0/23 + tags: + Name: Ansible ec2_instance Testing VPC + tenancy: default + register: testing_vpc + +- name: Create internet gateway for use in testing + ec2_vpc_igw: + vpc_id: '{{ testing_vpc.vpc.id }}' + state: present + register: igw + +- name: Create default subnet in zone A + ec2_vpc_subnet: + state: present + vpc_id: '{{ testing_vpc.vpc.id }}' + cidr: 10.22.32.0/24 + az: '{{ aws_region }}a'
+ resource_tags: + Name: '{{ resource_prefix }}-subnet-a' + register: testing_subnet_a + +- name: Create secondary subnet in zone B + ec2_vpc_subnet: + state: present + vpc_id: '{{ testing_vpc.vpc.id }}' + cidr: 10.22.33.0/24 + az: '{{ aws_region }}b' + resource_tags: + Name: '{{ resource_prefix }}-subnet-b' + register: testing_subnet_b + +- name: create routing rules + ec2_vpc_route_table: + vpc_id: '{{ testing_vpc.vpc.id }}' + tags: + created: '{{ resource_prefix }}-route' + routes: + - dest: 0.0.0.0/0 + gateway_id: '{{ igw.gateway_id }}' + subnets: + - '{{ testing_subnet_a.subnet.id }}' + - '{{ testing_subnet_b.subnet.id }}' + +- name: create a security group with the vpc + ec2_group: + name: '{{ resource_prefix }}-sg' + description: a security group for ansible tests + vpc_id: '{{ testing_vpc.vpc.id }}' + rules: + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: 0.0.0.0/0 + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: 0.0.0.0/0 + register: sg diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/main.yml new file mode 100644 index 000000000..d3f522c97 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/main.yml @@ -0,0 +1,518 @@ +- name: run ec2_metric_alarm tests + module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + - set_fact: + alarm_full_name: '{{ alarm_prefix }}-{{ resource_prefix }}-cpu-low' + + - name: set up environment for testing. + include_tasks: env_setup.yml + + - name: get info on alarms + amazon.aws.cloudwatch_metric_alarm_info: + alarm_names: + - "{{ alarm_full_name }}" + register: alarm_info_query + + - name: Make instance in a default subnet of the VPC + ec2_instance: + name: '{{ resource_prefix }}-test-default-vpc' + image_id: '{{ ec2_ami_id }}' + tags: + TestId: '{{ resource_prefix }}' + security_groups: '{{ sg.group_id }}' + vpc_subnet_id: '{{ testing_subnet_a.subnet.id }}' + instance_type: t2.micro + wait: true + register: ec2_instance_results + + - name: ensure alarm doesn't exist for a clean test + ec2_metric_alarm: + state: absent + name: '{{ alarm_full_name }}' + + - name: create ec2 metric alarm on ec2 instance (check mode) + ec2_metric_alarm: + dimensions: + InstanceId: '{{ ec2_instance_results.instances[0].instance_id }}' + state: present + name: '{{ alarm_full_name }}' + metric: CPUUtilization + namespace: AWS/EC2 + treat_missing_data: missing + statistic: Average + comparison: LessThanOrEqualToThreshold + threshold: 5.0 + period: 300 + evaluation_periods: 3 + unit: Percent + description: This will alarm when an instance's cpu usage average is lower than + 5% for 15 minutes + check_mode: true + register: ec2_instance_metric_alarm_check + + - name: get info on alarms + amazon.aws.cloudwatch_metric_alarm_info: + alarm_names: + - "{{ alarm_full_name }}" + register: alarm_info_check + + - name: "verify that an alarm was not created in check mode" + assert: + that: + - 'ec2_instance_metric_alarm_check.changed' + - 'not ec2_instance_metric_alarm_check.alarm_arn' + - 'alarm_info_check.metric_alarms | length == 0' + + - name: create ec2 metric alarm on ec2 instance + ec2_metric_alarm: + dimensions: + InstanceId: '{{ ec2_instance_results.instances[0].instance_id }}' + state: present + name: '{{ 
alarm_full_name }}' + metric: CPUUtilization + namespace: AWS/EC2 + treat_missing_data: missing + statistic: Average + comparison: LessThanOrEqualToThreshold + threshold: 5.0 + period: 300 + evaluation_periods: 3 + unit: Percent + description: This will alarm when an instance's cpu usage average is lower than + 5% for 15 minutes + register: ec2_instance_metric_alarm + + - name: get info on alarms + amazon.aws.cloudwatch_metric_alarm_info: + alarm_names: + - "{{ alarm_full_name }}" + register: alarm_info + + - name: "verify that an alarm was created" + assert: + that: + - 'ec2_instance_metric_alarm.changed' + - 'ec2_instance_metric_alarm.alarm_arn' + - 'ec2_instance_metric_alarm.statistic == alarm_info.metric_alarms[0].statistic' + - 'ec2_instance_metric_alarm.name == alarm_info.metric_alarms[0].alarm_name' + - 'ec2_instance_metric_alarm.metric == alarm_info.metric_alarms[0].metric_name' + - 'ec2_instance_metric_alarm.namespace == alarm_info.metric_alarms[0].namespace' + - 'ec2_instance_metric_alarm.comparison == alarm_info.metric_alarms[0].comparison_operator' + - 'ec2_instance_metric_alarm.threshold == alarm_info.metric_alarms[0].threshold' + - 'ec2_instance_metric_alarm.period == alarm_info.metric_alarms[0].period' + - 'ec2_instance_metric_alarm.unit == alarm_info.metric_alarms[0].unit' + - 'ec2_instance_metric_alarm.evaluation_periods == alarm_info.metric_alarms[0].evaluation_periods' + - 'ec2_instance_metric_alarm.description == alarm_info.metric_alarms[0].alarm_description' + - 'ec2_instance_metric_alarm.treat_missing_data == alarm_info.metric_alarms[0].treat_missing_data' + + - name: create ec2 metric alarm on ec2 instance (idempotent) (check mode) + ec2_metric_alarm: + dimensions: + InstanceId: '{{ ec2_instance_results.instances[0].instance_id }}' + state: present + name: '{{ alarm_full_name }}' + metric: CPUUtilization + namespace: AWS/EC2 + treat_missing_data: missing + statistic: Average + comparison: LessThanOrEqualToThreshold + threshold: 5.0 + period: 300 + evaluation_periods: 3 + unit: Percent + description: This will alarm when an instance's cpu usage average is lower than + 5% for 15 minutes + check_mode: true + register: ec2_instance_metric_alarm_idempotent_check + + - name: get info on alarms + amazon.aws.cloudwatch_metric_alarm_info: + alarm_names: + - "{{ alarm_full_name }}" + register: alarm_info_idempotent_check + + - name: "Verify alarm does not register as changed after update in check mode" + assert: + that: + - not ec2_instance_metric_alarm_idempotent_check.changed + + - name: "Verify alarm did not change after updating in check mode" + assert: + that: + - "alarm_info.metric_alarms[0]['{{item}}'] == alarm_info_idempotent_check.metric_alarms[0]['{{ item }}']" + with_items: + - alarm_arn + - statistic + - alarm_name + - metric_name + - namespace + - comparison_operator + - threshold + - period + - unit + - evaluation_periods + - alarm_description + - treat_missing_data + + - name: create ec2 metric alarm on ec2 instance (idempotent) + ec2_metric_alarm: + dimensions: + InstanceId: '{{ ec2_instance_results.instances[0].instance_id }}' + state: present + name: '{{ alarm_full_name }}' + metric: CPUUtilization + namespace: AWS/EC2 + treat_missing_data: missing + statistic: Average + comparison: LessThanOrEqualToThreshold + threshold: 5.0 + period: 300 + evaluation_periods: 3 + unit: Percent + description: This will alarm when an instance's cpu usage average is lower than + 5% for 15 minutes + register: ec2_instance_metric_alarm_idempotent + + - name: get info on alarms + 
amazon.aws.cloudwatch_metric_alarm_info: + alarm_names: + - "{{ alarm_full_name }}" + register: alarm_info_idempotent + + - name: "Verify alarm does not register as changed after idempotent update" + assert: + that: + - not ec2_instance_metric_alarm_idempotent.changed + + - name: "Verify alarm did not change after idempotent update" + assert: + that: + - "alarm_info.metric_alarms[0]['{{item}}'] == alarm_info_idempotent.metric_alarms[0]['{{ item }}']" + with_items: + - alarm_arn + - statistic + - alarm_name + - metric_name + - namespace + - comparison_operator + - threshold + - period + - unit + - evaluation_periods + - alarm_description + - treat_missing_data + + - name: update alarm (check mode) + ec2_metric_alarm: + dimensions: + InstanceId: '{{ ec2_instance_results.instances[0].instance_id }}' + state: present + name: '{{ alarm_full_name }}' + metric: CPUUtilization + namespace: AWS/EC2 + statistic: Average + comparison: LessThanOrEqualToThreshold + threshold: 5.0 + period: 60 + evaluation_periods: 3 + unit: Percent + description: This will alarm when an instance's cpu usage average is lower than + 5% for 3 minutes + check_mode: true + register: ec2_instance_metric_alarm_update_check + + - name: verify that alarm registers as updated in check mode + assert: + that: + - ec2_instance_metric_alarm_update_check.changed + + - name: verify that properties were not changed in check mode + assert: + that: + - ec2_instance_metric_alarm_update_check.changed + - 'ec2_instance_metric_alarm_update_check.period == alarm_info.metric_alarms[0].period' # Period of actual alarm should not change + - 'ec2_instance_metric_alarm_update_check.alarm_arn == ec2_instance_metric_alarm.alarm_arn' + - 'ec2_instance_metric_alarm_update_check.statistic == alarm_info.metric_alarms[0].statistic' + - 'ec2_instance_metric_alarm_update_check.name == alarm_info.metric_alarms[0].alarm_name' + - 'ec2_instance_metric_alarm_update_check.metric == alarm_info.metric_alarms[0].metric_name' + - 'ec2_instance_metric_alarm_update_check.namespace == alarm_info.metric_alarms[0].namespace' + - 'ec2_instance_metric_alarm_update_check.statistic == alarm_info.metric_alarms[0].statistic' + - 'ec2_instance_metric_alarm_update_check.comparison == alarm_info.metric_alarms[0].comparison_operator' + - 'ec2_instance_metric_alarm_update_check.threshold == alarm_info.metric_alarms[0].threshold' + - 'ec2_instance_metric_alarm_update_check.unit == alarm_info.metric_alarms[0].unit' + - 'ec2_instance_metric_alarm_update_check.evaluation_periods == alarm_info.metric_alarms[0].evaluation_periods' + - 'ec2_instance_metric_alarm_update_check.treat_missing_data == alarm_info.metric_alarms[0].treat_missing_data'
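+ # Note: a check-mode update reports the values it would apply while the real alarm + # keeps its previous configuration, so the asserts above compare the check-mode + # result against the info snapshot gathered before the update.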
+ + - name: update alarm + ec2_metric_alarm: + dimensions: + InstanceId: '{{ ec2_instance_results.instances[0].instance_id }}' + state: present + name: '{{ alarm_full_name }}' + metric: CPUUtilization + namespace: AWS/EC2 + statistic: Average + comparison: LessThanOrEqualToThreshold + threshold: 5.0 + period: 60 + evaluation_periods: 3 + unit: Percent + description: This will alarm when an instance's cpu usage average is lower than + 5% for 3 minutes + register: ec2_instance_metric_alarm_update + + - name: verify that alarm registers as updated + assert: + that: + - ec2_instance_metric_alarm_update.changed + + - name: verify that properties were changed + assert: + that: + - ec2_instance_metric_alarm_update.changed + - ec2_instance_metric_alarm_update.period == 60 # Period should be 60, not matching old value + - ec2_instance_metric_alarm_update.alarm_arn == ec2_instance_metric_alarm.alarm_arn + - 'ec2_instance_metric_alarm_update.statistic == alarm_info.metric_alarms[0].statistic' + - 'ec2_instance_metric_alarm_update.name == alarm_info.metric_alarms[0].alarm_name' + - 'ec2_instance_metric_alarm_update.metric == alarm_info.metric_alarms[0].metric_name' + - 'ec2_instance_metric_alarm_update.namespace == alarm_info.metric_alarms[0].namespace' + - 'ec2_instance_metric_alarm_update.statistic == alarm_info.metric_alarms[0].statistic' + - 'ec2_instance_metric_alarm_update.comparison == alarm_info.metric_alarms[0].comparison_operator' + - 'ec2_instance_metric_alarm_update.threshold == alarm_info.metric_alarms[0].threshold' + - 'ec2_instance_metric_alarm_update.unit == alarm_info.metric_alarms[0].unit' + - 'ec2_instance_metric_alarm_update.evaluation_periods == alarm_info.metric_alarms[0].evaluation_periods' + - 'ec2_instance_metric_alarm_update.treat_missing_data == alarm_info.metric_alarms[0].treat_missing_data' + + - name: try to remove the alarm (check mode) + ec2_metric_alarm: + state: absent + name: '{{ alarm_full_name }}' + check_mode: true + register: ec2_instance_metric_alarm_deletion_check + + - name: Verify that the alarm reports deleted/changed + assert: + that: + - ec2_instance_metric_alarm_deletion_check.changed + + - name: get info on alarms + amazon.aws.cloudwatch_metric_alarm_info: + alarm_names: + - "{{ alarm_full_name }}" + register: alarm_info_query_check + + - name: Verify that the alarm was not deleted in check mode using cli + assert: + that: + - 'alarm_info_query_check.metric_alarms | length > 0' + + - name: try to remove the alarm + ec2_metric_alarm: + state: absent + name: '{{ alarm_full_name }}' + register: ec2_instance_metric_alarm_deletion + + - name: Verify that the alarm reports deleted/changed + assert: + that: + - ec2_instance_metric_alarm_deletion.changed + + - name: get info on alarms + amazon.aws.cloudwatch_metric_alarm_info: + alarm_names: + - "{{ alarm_full_name }}" + register: alarm_info + + - name: Verify that the alarm was deleted using cli + assert: + that: + - 'alarm_info.metric_alarms | length == 0' + + - name: create ec2 metric alarm with no unit on ec2 instance + ec2_metric_alarm: + dimensions: + InstanceId: '{{ ec2_instance_results.instances[0].instance_id }}' + state: present + name: '{{ alarm_full_name }}' + metric: CPUUtilization + namespace: AWS/EC2 + treat_missing_data: missing + statistic: Average + comparison: LessThanOrEqualToThreshold + threshold: 5.0 + period: 300 + evaluation_periods: 3 + description: This will alarm when an instance's cpu usage average is lower than + 5% for 15 minutes + register: ec2_instance_metric_alarm_no_unit + + - name: get info on alarms + amazon.aws.cloudwatch_metric_alarm_info: + alarm_names: + - "{{ alarm_full_name }}" + register: alarm_info_no_unit + + - name: verify that an alarm was created + assert: + that: + - ec2_instance_metric_alarm_no_unit.changed + - ec2_instance_metric_alarm_no_unit.alarm_arn + - 'ec2_instance_metric_alarm_no_unit.statistic == alarm_info_no_unit.metric_alarms[0].statistic' + - 'ec2_instance_metric_alarm_no_unit.name == alarm_info_no_unit.metric_alarms[0].alarm_name' + - 'ec2_instance_metric_alarm_no_unit.metric == alarm_info_no_unit.metric_alarms[0].metric_name' + - 'ec2_instance_metric_alarm_no_unit.namespace == alarm_info_no_unit.metric_alarms[0].namespace' + - 'ec2_instance_metric_alarm_no_unit.comparison == alarm_info_no_unit.metric_alarms[0].comparison_operator'
+ - 'ec2_instance_metric_alarm_no_unit.threshold == alarm_info_no_unit.metric_alarms[0].threshold' + - 'ec2_instance_metric_alarm_no_unit.period == alarm_info_no_unit.metric_alarms[0].period' + - 'alarm_info_no_unit.metric_alarms[0].unit is not defined' + - 'ec2_instance_metric_alarm_no_unit.evaluation_periods == alarm_info_no_unit.metric_alarms[0].evaluation_periods' + - 'ec2_instance_metric_alarm_no_unit.description == alarm_info_no_unit.metric_alarms[0].alarm_description' + - 'ec2_instance_metric_alarm_no_unit.treat_missing_data == alarm_info_no_unit.metric_alarms[0].treat_missing_data' + + - name: try to remove the alarm + ec2_metric_alarm: + state: absent + name: '{{ alarm_full_name }}' + register: ec2_instance_metric_alarm_deletion + + - name: Verify that the alarm reports deleted/changed + assert: + that: + - ec2_instance_metric_alarm_deletion.changed + + - name: get info on alarms + amazon.aws.cloudwatch_metric_alarm_info: + alarm_names: + - "{{ alarm_full_name }}" + register: alarm_info + + - name: Verify that the alarm was deleted using cli + assert: + that: + - 'alarm_info.metric_alarms | length == 0' + + - name: create ec2 metric alarm with metrics + ec2_metric_alarm: + state: present + name: '{{ alarm_full_name }}' + treat_missing_data: missing + comparison: LessThanOrEqualToThreshold + threshold: 5.0 + evaluation_periods: 3 + description: This will alarm when an instance's cpu usage average is lower than + 5% for 15 minutes + metrics: + - id: cpu + metric_stat: + metric: + dimensions: + - name: "InstanceId" + value: "{{ ec2_instance_results.instances[0].instance_id }}" + metric_name: "CPUUtilization" + namespace: "AWS/EC2" + period: 300 + stat: "Average" + unit: "Percent" + return_data: true + register: ec2_instance_metric_alarm_metrics + + - name: get info on alarms + amazon.aws.cloudwatch_metric_alarm_info: + alarm_names: + - "{{ alarm_full_name }}" + register: alarm_info_metrics + + - name: verify that an alarm was created + assert: + that: + - ec2_instance_metric_alarm_metrics.changed + - ec2_instance_metric_alarm_metrics.alarm_arn + - 'ec2_instance_metric_alarm_metrics.metrics[0].metric_stat.stat == alarm_info_metrics.metric_alarms[0].metrics[0].metric_stat.stat' + - 'ec2_instance_metric_alarm_metrics.metrics[0].metric_stat.metric.namespace == alarm_info_metrics.metric_alarms[0].metrics[0].metric_stat.metric.namespace' + - 'ec2_instance_metric_alarm_metrics.metrics[0].metric_stat.metric.metric_name == alarm_info_metrics.metric_alarms[0].metrics[0].metric_stat.metric.metric_name' + - 'ec2_instance_metric_alarm_metrics.metrics[0].metric_stat.metric.dimensions[0].name == alarm_info_metrics.metric_alarms[0].metrics[0].metric_stat.metric.dimensions[0].name' + - 'ec2_instance_metric_alarm_metrics.metrics[0].metric_stat.metric.dimensions[0].value == alarm_info_metrics.metric_alarms[0].metrics[0].metric_stat.metric.dimensions[0].value' + - 'ec2_instance_metric_alarm_metrics.metrics[0].id == alarm_info_metrics.metric_alarms[0].metrics[0].id' + - 'ec2_instance_metric_alarm_metrics.metrics[0].metric_stat.period == alarm_info_metrics.metric_alarms[0].metrics[0].metric_stat.period' + - 'ec2_instance_metric_alarm_metrics.metrics[0].metric_stat.unit == alarm_info_metrics.metric_alarms[0].metrics[0].metric_stat.unit' + - 'ec2_instance_metric_alarm_metrics.metrics[0].return_data == alarm_info_metrics.metric_alarms[0].metrics[0].return_data' + + + - name: try to remove the alarm + ec2_metric_alarm: + state: absent + name: '{{ alarm_full_name }}' + register:
ec2_instance_metric_alarm_deletion_no_unit + + - name: Verify that the alarm reports deleted/changed + assert: + that: + - ec2_instance_metric_alarm_deletion_no_unit.changed + + - name: get info on alarms + amazon.aws.cloudwatch_metric_alarm_info: + alarm_names: + - "{{ alarm_full_name }}" + register: alarm_info_no_unit + + - name: Verify that the alarm was deleted using cli + assert: + that: + - 'alarm_info_no_unit.metric_alarms | length == 0' + + - name: create ec2 metric alarm by providing mutually exclusive values + ec2_metric_alarm: + dimensions: + InstanceId: '{{ ec2_instance_results.instances[0].instance_id }}' + state: present + name: '{{ alarm_full_name }}' + metric: CPUUtilization + namespace: AWS/EC2 + treat_missing_data: missing + statistic: Average + comparison: LessThanOrEqualToThreshold + threshold: 5.0 + period: 300 + evaluation_periods: 3 + description: This will alarm when an instance's cpu usage average is lower than + 5% for 15 minutes + metrics: + - id: cpu + metric_stat: + metric: + dimensions: + - name: "InstanceId" + value: "{{ ec2_instance_results.instances[0].instance_id }}" + metric_name: "CPUUtilization" + namespace: "AWS/EC2" + period: 300 + stat: "Average" + unit: "Percent" + return_data: true + register: ec2_instance_metric_mutually_exclusive + ignore_errors: true + + - assert: + that: + - ec2_instance_metric_mutually_exclusive.failed + - '"parameters are mutually exclusive" in ec2_instance_metric_mutually_exclusive.msg' + + always: + - name: try to delete the alarm + ec2_metric_alarm: + state: absent + name: '{{ alarm_full_name }}' + ignore_errors: true + + - name: try to stop the ec2 instance + ec2_instance: + instance_ids: '{{ ec2_instance_results.instances[0].instance_id }}' + state: terminated + ignore_errors: true + + - include_tasks: env_cleanup.yml diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/aliases b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/aliases new file mode 100644 index 000000000..4ef4b2067 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/aliases @@ -0,0 +1 @@ +cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/defaults/main.yml new file mode 100644 index 000000000..3b6964ade --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/defaults/main.yml @@ -0,0 +1,9 @@ +--- +name_pattern: "cloudwatch_event_rule-{{ tiny_prefix }}" + +test_event_names: + - "{{ name_pattern }}-1" + - "{{ name_pattern }}-2" + +input_transformer_event_name: "{{ name_pattern }}-3" +input_event_name: "{{ name_pattern }}-4" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/tasks/main.yml new file mode 100644 index 000000000..0047831a7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/tasks/main.yml @@ -0,0 +1,96 @@ +- module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + + block: + - name: Create SNS topic + sns_topic: + name: "TestSNSTopic" + state: present + display_name: "Test SNS Topic" + register: sns_topic_output + + - name: 
Create classic cloudwatch event rules + cloudwatchevent_rule: + name: "{{ item }}" + description: "Rule for {{ item }}" + state: present + schedule_expression: "cron(0 20 * * ? *)" + targets: + - id: "{{ sns_topic_output.sns_topic.name }}" + arn: "{{ sns_topic_output.sns_topic.topic_arn }}" + register: event_rules_classic_output + loop: "{{ test_event_names }}" + + - name: Assert that classic event rules were created + assert: + that: + - event_rules_classic_output.changed + - event_rules_classic_output.msg == "All items completed" + + - name: Create cloudwatch event rule with input transformer + cloudwatchevent_rule: + name: "{{ input_transformer_event_name }}" + description: "Event rule with input transformer configuration" + state: present + event_pattern: '{"source":["aws.ec2"],"detail-type":["EC2 Instance State-change Notification"],"detail":{"state":["pending"]}}' + targets: + - id: "{{ sns_topic_output.sns_topic.name }}" + arn: "{{ sns_topic_output.sns_topic.topic_arn }}" + input_transformer: + input_paths_map: + instance: "$.detail.instance-id" + state: "$.detail.state" + input_template: "<instance> is in state <state>" + register: event_rule_input_transformer_output + + - name: Assert that input transformer event rule was created + assert: + that: + - event_rule_input_transformer_output.changed + + - name: Create cloudwatch event rule with inputs + cloudwatchevent_rule: + name: "{{ input_event_name }}" + description: "Event rule with input configuration" + state: present + event_pattern: '{"source":["aws.ec2"],"detail-type":["EC2 Instance State-change Notification"],"detail":{"state":["pending"]}}' + targets: + - id: "{{ sns_topic_output.sns_topic.name }}" + arn: "{{ sns_topic_output.sns_topic.topic_arn }}" + input: 'Hello World' + - id: "{{ sns_topic_output.sns_topic.name }}2" + arn: "{{ sns_topic_output.sns_topic.topic_arn }}" + input: + start: 'Hello World' + end: 'Goodbye oh cruel World' + register: event_rule_input_output + + - name: Assert that input event rule was created + assert: + that: + - event_rule_input_output.changed + + always: + + - name: Delete classic CloudWatch event rules + cloudwatchevent_rule: + name: "{{ item }}" + state: absent + loop: "{{ test_event_names }}" + + - name: Delete input transformer CloudWatch event rules + cloudwatchevent_rule: + name: "{{ item }}" + state: absent + loop: + - "{{ input_transformer_event_name }}" + - "{{ input_event_name }}" + + - name: Delete SNS topic + sns_topic: + name: "TestSNSTopic" + state: absent diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/aliases b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/aliases new file mode 100644 index 000000000..f289eb392 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/aliases @@ -0,0 +1,5 @@ +cloud/aws + +cloudwatchlogs_log_group +cloudwatchlogs_log_group_info +cloudwatchlogs_log_group_metric_filter diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/defaults/main.yml new file mode 100644 index 000000000..178ae143f --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/defaults/main.yml @@ -0,0 +1,2 @@ +log_group_name: '{{ resource_prefix }}/integrationtest' +filter_name: '{{ resource_prefix }}/AnsibleTest' diff --git
a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/meta/main.yml new file mode 100644 index 000000000..32cf5dda7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/cloudwatchlogs_tests.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/cloudwatchlogs_tests.yml new file mode 100644 index 000000000..00545385a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/cloudwatchlogs_tests.yml @@ -0,0 +1,151 @@ +# Tests for changes to the cloudwatchlogs_log_group and cloudwatchlogs_log_group_metric_filter + +- block: + + - name: create cloudwatch log group for integration test + cloudwatchlogs_log_group: + state: present + log_group_name: '{{ log_group_name }}' + retention: 1 + + - name: check_mode set metric filter on '{{ log_group_name }}' + cloudwatchlogs_log_group_metric_filter: + log_group_name: '{{ log_group_name }}' + filter_name: '{{ filter_name }}' + filter_pattern: '{ ($.value = *) && ($.hostname = "box")}' + state: present + metric_transformation: + metric_name: box_free_space + metric_namespace: fluentd_metrics + metric_value: $.value + check_mode: yes + register: out + + - name: check_mode state must be changed + assert: + that: + - out is changed + - out.metric_filters | count == 1 + + - name: set metric filter on '{{ log_group_name }}' + cloudwatchlogs_log_group_metric_filter: + log_group_name: '{{ log_group_name }}' + filter_name: '{{ filter_name }}' + filter_pattern: '{ ($.value = *) && ($.hostname = "box")}' + state: present + metric_transformation: + metric_name: box_free_space + metric_namespace: fluentd_metrics + metric_value: $.value + register: out + + - name: create metric filter + assert: + that: + - out is changed + - out.metric_filters | count == 1 + + - name: re-set metric filter on '{{ log_group_name }}' + cloudwatchlogs_log_group_metric_filter: + log_group_name: '{{ log_group_name }}' + filter_name: '{{ filter_name }}' + filter_pattern: '{ ($.value = *) && ($.hostname = "box")}' + state: present + metric_transformation: + metric_name: box_free_space + metric_namespace: fluentd_metrics + metric_value: $.value + register: out + + - name: metric filter must not change + assert: + that: + - out is not changed + + - name: update metric transformation on '{{ log_group_name }}' + cloudwatchlogs_log_group_metric_filter: + log_group_name: '{{ log_group_name }}' + filter_name: '{{ filter_name }}' + filter_pattern: '{ ($.value = *) && ($.hostname = "box")}' + state: present + metric_transformation: + metric_name: box_free_space + metric_namespace: made_with_ansible + metric_value: $.value + default_value: 3.1415 + register: out + + - name: update metric filter + assert: + that: + - out is changed + - out.metric_filters[0].metric_namespace == "made_with_ansible" + - out.metric_filters[0].default_value == 3.1415 + + - name: update filter_pattern on '{{ log_group_name }}' + cloudwatchlogs_log_group_metric_filter: + log_group_name: '{{ log_group_name }}' + filter_name: '{{ filter_name }}' + filter_pattern: '{ ($.value = *) && ($.hostname = "ansible")}' + state: present + metric_transformation: + metric_name: box_free_space + metric_namespace: made_with_ansible + metric_value: $.value + register: out + + - name: update metric 
filter + assert: + that: + - out is changed + - out.metric_filters[0].metric_namespace == "made_with_ansible" + + - name: checkmode delete metric filter on '{{ log_group_name }}' + cloudwatchlogs_log_group_metric_filter: + log_group_name: '{{ log_group_name }}' + filter_name: '{{ filter_name }}' + state: absent + check_mode: yes + register: out + + - name: check_mode state must be changed + assert: + that: + - out is changed + + - name: delete metric filter on '{{ log_group_name }}' + cloudwatchlogs_log_group_metric_filter: + log_group_name: '{{ log_group_name }}' + filter_name: '{{ filter_name }}' + state: absent + register: out + + - name: delete metric filter + assert: + that: + - out is changed + + - name: delete metric filter on '{{ log_group_name }}' which does not exist + cloudwatchlogs_log_group_metric_filter: + log_group_name: '{{ log_group_name }}' + filter_name: '{{ filter_name }}' + state: absent + register: out + + - name: delete metric filter + assert: + that: + - out is not changed + + always: + - name: delete metric filter + cloudwatchlogs_log_group_metric_filter: + log_group_name: '{{ log_group_name }}' + filter_name: '{{ filter_name }}' + state: absent + + - name: delete cloudwatch log group for integration test + cloudwatchlogs_log_group: + state: absent + log_group_name: '{{ log_group_name }}' + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/create-delete-tags.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/create-delete-tags.yml new file mode 100644 index 000000000..b6f1da59e --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/create-delete-tags.yml @@ -0,0 +1,444 @@ +# Tests relating to create/delete and set tags on cloudwatchlogs_log_group + +- name: Tests relating to setting tags on cloudwatchlogs_log_group + vars: + first_tags: + Key with Spaces: Value with spaces + CamelCaseKey: CamelCaseValue + pascalCaseKey: pascalCaseValue + snake_case_key: snake_case_value + second_tags: + New Key with Spaces: Value with spaces + NewCamelCaseKey: CamelCaseValue + newPascalCaseKey: pascalCaseValue + new_snake_case_key: snake_case_value + third_tags: + Key with Spaces: Value with spaces + CamelCaseKey: CamelCaseValue + pascalCaseKey: pascalCaseValue + snake_case_key: snake_case_value + New Key with Spaces: Updated Value with spaces + final_tags: + Key with Spaces: Value with spaces + CamelCaseKey: CamelCaseValue + pascalCaseKey: pascalCaseValue + snake_case_key: snake_case_value + New Key with Spaces: Updated Value with spaces + NewCamelCaseKey: CamelCaseValue + newPascalCaseKey: pascalCaseValue + new_snake_case_key: snake_case_value + # Mandatory settings + module_defaults: + amazon.aws.cloudwatchlogs_log_group: + state: present + log_group_name: '{{ log_group_name }}' + amazon.aws.cloudwatchlogs_log_group_info: + log_group_name: '{{ log_group_name }}' + block: + + - name: create cloudwatch log group for integration test (check_mode) + cloudwatchlogs_log_group: + state: present + log_group_name: '{{ log_group_name }}' + retention: 1 + tags: + CamelCase: Value + snake_case: value + check_mode: true + register: result + + - assert: + that: + - result is changed + - '"log_groups" not in result' + - '"logs:CreateLogGroup" not in result.resource_actions' + + - name: create cloudwatch log group for integration test + cloudwatchlogs_log_group: + state: present + log_group_name: '{{ log_group_name }}' + retention: 1 + tags: + CamelCase: Value 
+ snake_case: value + register: result + + - assert: + that: + - result is changed + - '"log_groups" in result' + - result.log_groups | length == 1 + - '"log_group_name" in log_group' + - '"creation_time" in log_group' + - '"retention_in_days" in log_group' + - '"metric_filter_count" in log_group' + - '"arn" in log_group' + - '"stored_bytes" in log_group' + # - '"kms_key_id" in log_group' + # pre-4.0.0 upgrade compatibility + - '"log_group_name" in result' + - '"creation_time" in result' + - '"retention_in_days" in result' + - '"metric_filter_count" in result' + - '"arn" in result' + - '"stored_bytes" in result' + # - '"kms_key_id" in result' + - '"CamelCase" in log_group.tags' + - '"snake_case" in log_group.tags' + vars: + log_group: '{{ result.log_groups[0] }}' + + - name: create cloudwatch log group for integration test (check_mode - idempotent) + cloudwatchlogs_log_group: + state: present + log_group_name: '{{ log_group_name }}' + retention: 1 + check_mode: true + register: result + + - assert: + that: + - result is not changed + - '"log_groups" in result' + - result.log_groups | length == 1 + + - name: create cloudwatch log group for integration test (idempotent) + cloudwatchlogs_log_group: + state: present + log_group_name: '{{ log_group_name }}' + retention: 1 + register: result + + - assert: + that: + - result is not changed + - '"log_groups" in result' + - result.log_groups | length == 1 + vars: + log_group: '{{ result.log_groups[0] }}' + + - name: describe all log groups + cloudwatchlogs_log_group_info: {} + register: result + + - assert: + that: + - '"log_groups" in result' + - result.log_groups | length >= 1 + + - name: describe log group + cloudwatchlogs_log_group_info: + log_group_name: '{{ log_group_name }}' + register: result + + - assert: + that: + - '"log_groups" in result' + - result.log_groups | length == 1 + - '"log_group_name" in log_group' + - '"creation_time" in log_group' + - '"retention_in_days" in log_group' + - '"metric_filter_count" in log_group' + - '"arn" in log_group' + - '"stored_bytes" in log_group' + # - '"kms_key_id" in log_group' + - '"tags" in log_group' + vars: + log_group: '{{ result.log_groups[0] }}' + - name: test adding tags to cloudwatchlogs_log_group (check_mode) + cloudwatchlogs_log_group: + tags: '{{ first_tags }}' + purge_tags: true + check_mode: true + register: update_result + + - name: assert that update succeeded + assert: + that: + - update_result is changed + - '"logs:UntagLogGroup" not in update_result' + - '"logs:TagLogGroup" not in update_result' + + - name: test adding tags to cloudwatchlogs_log_group + cloudwatchlogs_log_group: + tags: '{{ first_tags }}' + purge_tags: true + register: update_result + - name: assert that update succeeded + assert: + that: + - update_result is changed + - update_result.log_groups[0].tags == first_tags + + - name: test adding tags to cloudwatchlogs_log_group - idempotency (check mode) + cloudwatchlogs_log_group: + tags: '{{ first_tags }}' + purge_tags: true + check_mode: true + register: update_result + + - name: assert that update succeeded + assert: + that: + - update_result is not changed + - '"logs:UntagLogGroup" not in update_result' + - '"logs:TagLogGroup" not in update_result' + + - name: test adding tags to cloudwatchlogs_log_group - idempotency + cloudwatchlogs_log_group: + tags: '{{ first_tags }}' + purge_tags: true + register: update_result + - name: assert that update succeeded + assert: + that: + - update_result is not changed + - update_result.log_groups[0].tags == first_tags + + ### + + 
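# With purge_tags: true the module is expected to replace the tag set outright, so + # applying second_tags below should remove every first_tags key as well as add the new ones. +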
- name: test updating tags with purge on cloudwatchlogs_log_group (check mode) + cloudwatchlogs_log_group: + tags: '{{ second_tags }}' + purge_tags: true + check_mode: true + register: update_result + + - name: assert that update succeeded + assert: + that: + - update_result is changed + - '"logs:UntagLogGroup" not in update_result' + - '"logs:TagLogGroup" not in update_result' + + - name: test updating tags with purge on cloudwatchlogs_log_group + cloudwatchlogs_log_group: + tags: '{{ second_tags }}' + purge_tags: true + register: update_result + - name: assert that update succeeded + assert: + that: + - update_result is changed + - update_result.log_groups[0].tags == second_tags + + - name: test updating tags with purge on cloudwatchlogs_log_group - idempotency + (check mode) + cloudwatchlogs_log_group: + tags: '{{ second_tags }}' + purge_tags: true + check_mode: true + register: update_result + + - name: assert that update succeeded + assert: + that: + - update_result is not changed + - '"logs:UntagLogGroup" not in update_result' + - '"logs:TagLogGroup" not in update_result' + + - name: test updating tags with purge on cloudwatchlogs_log_group - idempotency + cloudwatchlogs_log_group: + tags: '{{ second_tags }}' + purge_tags: true + register: update_result + - name: assert that update succeeded + assert: + that: + - update_result is not changed + - update_result.log_groups[0].tags == second_tags + + ### + + - name: test updating tags without purge on cloudwatchlogs_log_group (check mode) + cloudwatchlogs_log_group: + tags: '{{ third_tags }}' + purge_tags: false + check_mode: true + register: update_result + + - name: assert that update succeeded + assert: + that: + - update_result is changed + - '"logs:UntagLogGroup" not in update_result' + - '"logs:TagLogGroup" not in update_result' + + - name: test updating tags without purge on cloudwatchlogs_log_group + cloudwatchlogs_log_group: + tags: '{{ third_tags }}' + purge_tags: false + register: update_result + - name: assert that update succeeded + assert: + that: + - update_result is changed + - update_result.log_groups[0].tags == final_tags + + - name: test updating tags without purge on cloudwatchlogs_log_group - idempotency + (check mode) + cloudwatchlogs_log_group: + tags: '{{ third_tags }}' + purge_tags: false + check_mode: true + register: update_result + + - name: assert that update succeeded + assert: + that: + - update_result is not changed + - '"logs:UntagLogGroup" not in update_result' + - '"logs:TagLogGroup" not in update_result' + + - name: test updating tags without purge on cloudwatchlogs_log_group - idempotency + cloudwatchlogs_log_group: + tags: '{{ third_tags }}' + purge_tags: false + register: update_result + - name: assert that update succeeded + assert: + that: + - update_result is not changed + - update_result.log_groups[0].tags == final_tags + + ### + + - name: test that cloudwatchlogs_log_group_info returns the tags + cloudwatchlogs_log_group_info: + register: tag_info + - name: assert tags present + assert: + that: + - tag_info.log_groups | length == 1 + - tag_info.log_groups[0].tags == final_tags + + ### + + - name: test no tags param cloudwatchlogs_log_group (check mode) + cloudwatchlogs_log_group: {} + check_mode: true + register: update_result + + - name: assert no change + assert: + that: + - update_result is not changed + - update_result.log_groups[0].tags == final_tags + + - name: test no tags param cloudwatchlogs_log_group + cloudwatchlogs_log_group: {} + register: update_result + - name: assert no change + 
assert: + that: + - update_result is not changed + - update_result.log_groups[0].tags == final_tags + + ### + + - name: test removing tags from cloudwatchlogs_log_group (check mode) + cloudwatchlogs_log_group: + tags: {} + purge_tags: true + check_mode: true + register: update_result + + - name: assert that update succeeded + assert: + that: + - update_result is changed + + - name: test removing tags from cloudwatchlogs_log_group + cloudwatchlogs_log_group: + tags: {} + purge_tags: true + register: update_result + - name: assert that update succeeded + assert: + that: + - update_result is changed + - update_result.log_groups[0].tags == {} + + - name: test removing tags from cloudwatchlogs_log_group - idempotency (check mode) + cloudwatchlogs_log_group: + tags: {} + purge_tags: true + check_mode: true + register: update_result + - name: assert that update succeeded + assert: + that: + - update_result is not changed + + - name: test removing tags from cloudwatchlogs_log_group - idempotency + cloudwatchlogs_log_group: + tags: {} + purge_tags: true + register: update_result + - name: assert that update succeeded + assert: + that: + - update_result is not changed + - update_result.log_groups[0].tags == {} + + - name: delete cloudwatch log group for integration test (check_mode) + cloudwatchlogs_log_group: + state: absent + log_group_name: '{{ log_group_name }}' + check_mode: true + register: result + + - assert: + that: + - result is changed + - '"logs:DeleteLogGroup" not in result.resource_actions' + + - name: delete cloudwatch log group for integration test + cloudwatchlogs_log_group: + state: absent + log_group_name: '{{ log_group_name }}' + register: result + + - assert: + that: + - result is changed + + - name: delete cloudwatch log group for integration test (check_mode - idempotent) + cloudwatchlogs_log_group: + state: absent + log_group_name: '{{ log_group_name }}' + check_mode: true + register: result + + - assert: + that: + - result is not changed + - '"logs:DeleteLogGroup" not in result.resource_actions' + + - name: delete cloudwatch log group for integration test (idempotent) + cloudwatchlogs_log_group: + state: absent + log_group_name: '{{ log_group_name }}' + register: result + + - assert: + that: + - result is not changed + + - name: describe missing log group + cloudwatchlogs_log_group_info: + log_group_name: '{{ log_group_name }}' + register: result + + - assert: + that: + - '"log_groups" in result' + - result.log_groups | length == 0 + + always: + + - name: delete cloudwatch log group for integration test + cloudwatchlogs_log_group: + state: absent + log_group_name: '{{ log_group_name }}' + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/main.yml new file mode 100644 index 000000000..e5e0f072b --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/main.yml @@ -0,0 +1,16 @@ +# Tests for cloudwatchlogs_log_group, cloudwatchlogs_log_group_info, and cloudwatchlogs_log_group_metric_filter modules + +- module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + + block: + + - name: Run tests for changes to the cloudwatchlogs_log_group and cloudwatchlogs_log_group_metric_filter + include_tasks: cloudwatchlogs_tests.yml + + - name: Run tests relating to 
create/delete and set tags on cloudwatchlogs_log_group + include_tasks: create-delete-tags.yml diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/aliases new file mode 100644 index 000000000..9b0b03cbf --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/aliases @@ -0,0 +1,5 @@ +# duration: 15 +slow + +cloud/aws +ec2_ami_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/defaults/main.yml new file mode 100644 index 000000000..8dd565191 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/defaults/main.yml @@ -0,0 +1,11 @@ +--- +availability_zone: '{{ ec2_availability_zone_names[0] }}' + +# defaults file for test_ec2_ami +ec2_ami_name: '{{resource_prefix}}' +ec2_ami_description: 'Created by ansible integration tests' + +ec2_ami_image: '{{ ec2_ami_id }}' + +vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/16' +subnet_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.1.0/24' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/meta/main.yml new file mode 100644 index 000000000..3dc000aba --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/meta/main.yml @@ -0,0 +1,5 @@ +dependencies: + - setup_ec2_facts + - role: setup_botocore_pip + vars: + botocore_version: '1.26.0' \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/tasks/main.yml new file mode 100644 index 000000000..3bfbcbf13 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/tasks/main.yml @@ -0,0 +1,786 @@ +--- +# Test suite for ec2_ami +- module_defaults: + group/aws: + aws_region: '{{ aws_region }}' + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + collections: + - amazon.aws + block: + + # AWS CLI is needed until there's a module to get instance uefi data + - name: Install AWS CLI + pip: + name: awscli==1.25.83 + state: present + + # ============================================================ + + # SETUP: vpc, ec2 key pair, subnet, security group, ec2 instance, snapshot + - name: create a VPC to work in + ec2_vpc_net: + cidr_block: '{{ vpc_cidr }}' + state: present + name: '{{ ec2_ami_name }}_setup' + resource_tags: + Name: '{{ ec2_ami_name }}_setup' + register: setup_vpc + + - name: create a key pair to use for creating an ec2 instance + ec2_key: + name: '{{ ec2_ami_name }}_setup' + state: present + register: setup_key + + - name: create a subnet to use for creating an ec2 instance + ec2_vpc_subnet: + az: '{{ availability_zone }}' + tags: '{{ ec2_ami_name }}_setup' + vpc_id: '{{ setup_vpc.vpc.id }}' + cidr: '{{ subnet_cidr }}' + state: present + resource_tags: + Name: '{{ ec2_ami_name }}_setup' + register: setup_subnet + + - name: create a security group to use for creating an ec2 instance + ec2_group: + name: '{{ ec2_ami_name }}_setup' + description: 'created by Ansible integration tests' + state: present + vpc_id: '{{ setup_vpc.vpc.id }}' + register: setup_sg + + - name: provision ec2 instance to create an image + ec2_instance: + state: running + 
key_name: '{{ setup_key.key.name }}' + instance_type: t2.micro + image_id: '{{ ec2_ami_id }}' + tags: + '{{ec2_ami_name}}_instance_setup': 'integration_tests' + security_group: '{{ setup_sg.group_id }}' + vpc_subnet_id: '{{ setup_subnet.subnet.id }}' + volumes: + - device_name: /dev/sdc + virtual_name: ephemeral1 + wait: yes + register: setup_instance + + - name: Store EC2 Instance ID + set_fact: + ec2_instance_id: '{{ setup_instance.instances[0].instance_id }}' + + - name: take a snapshot of the instance to create an image + ec2_snapshot: + instance_id: '{{ ec2_instance_id }}' + device_name: '{{ ec2_ami_root_disk }}' + state: present + register: setup_snapshot + + # note: the current CI supported instance types (t2, t3, m1) do not support uefi boot mode + tpm_support + # disabling the task as aws documentation states that support for t3 will be coming soon + # - name: get instance UEFI data + # command: aws ec2 get-instance-uefi-data --instance-id {{ ec2_instance_id }} --region {{ aws_region }} + # environment: + # AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" + # AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" + # AWS_SESSION_TOKEN: "{{ security_token | default('') }}" + # AWS_DEFAULT_REGION: "{{ aws_region }}" + # register: instance_uefi_data_output + + # - name: Convert it to an object + # set_fact: + # instance_uefi_data: "{{ instance_uefi_data_output.stdout | from_json }}" + + # ============================================================ + + - name: test clean failure if not providing image_id or name with state=present + ec2_ami: + instance_id: '{{ ec2_instance_id }}' + state: present + description: '{{ ec2_ami_description }}' + tags: + Name: '{{ ec2_ami_name }}_ami' + wait: yes + root_device_name: '{{ ec2_ami_root_disk }}' + register: result + ignore_errors: yes + + - name: assert error message is helpful + assert: + that: + - result.failed + - "result.msg == 'one of the following is required: name, image_id'" + + # ============================================================ + + - name: create an image from the instance (check mode) + ec2_ami: + instance_id: '{{ ec2_instance_id }}' + state: present + name: '{{ ec2_ami_name }}_ami' + description: '{{ ec2_ami_description }}' + tags: + Name: '{{ ec2_ami_name }}_ami' + wait: yes + root_device_name: '{{ ec2_ami_root_disk }}' + check_mode: true + register: check_mode_result + + - name: assert that check_mode result is changed + assert: + that: + - check_mode_result is changed + + - name: create an image from the instance + ec2_ami: + instance_id: '{{ ec2_instance_id }}' + state: present + name: '{{ ec2_ami_name }}_ami' + description: '{{ ec2_ami_description }}' + tags: + Name: '{{ ec2_ami_name }}_ami' + wait: yes + root_device_name: '{{ ec2_ami_root_disk }}' + register: result + + - name: set image id fact for deletion later + set_fact: + ec2_ami_image_id: "{{ result.image_id }}" + + - name: assert that image has been created + assert: + that: + - "result.changed" + - "result.image_id.startswith('ami-')" + - "'Name' in result.tags and result.tags.Name == ec2_ami_name + '_ami'" + + - name: get related snapshot info and ensure the tags have been propagated + ec2_snapshot_info: + snapshot_ids: + - "{{ result.block_device_mapping[ec2_ami_root_disk].snapshot_id }}" + register: snapshot_result + + - name: ensure the tags have been propagated to the snapshot + assert: + that: + - "'tags' in snapshot_result.snapshots[0]" + - "'Name' in snapshot_result.snapshots[0].tags and snapshot_result.snapshots[0].tags.Name == ec2_ami_name + '_ami'" + + # 
============================================================ + + - name: create an image from the instance with attached devices with no_device true (check mode) + ec2_ami: + name: '{{ ec2_ami_name }}_no_device_true_ami' + instance_id: '{{ ec2_instance_id }}' + device_mapping: + - device_name: /dev/sda1 + volume_size: 10 + delete_on_termination: true + volume_type: gp2 + - device_name: /dev/sdf + no_device: yes + state: present + wait: yes + root_device_name: '{{ ec2_ami_root_disk }}' + check_mode: true + register: check_mode_result + + - name: assert that check_mode result is changed + assert: + that: + - check_mode_result is changed + + - name: create an image from the instance with attached devices with no_device true + ec2_ami: + name: '{{ ec2_ami_name }}_no_device_true_ami' + instance_id: '{{ ec2_instance_id }}' + device_mapping: + - device_name: /dev/sda1 + volume_size: 10 + delete_on_termination: true + volume_type: gp2 + - device_name: /dev/sdf + no_device: yes + state: present + wait: yes + root_device_name: '{{ ec2_ami_root_disk }}' + register: result_no_device_true + + - name: set image id fact for deletion later + set_fact: + ec2_ami_no_device_true_image_id: "{{ result_no_device_true.image_id }}" + + - name: assert that image with no_device option yes has been created + assert: + that: + - "result_no_device_true.changed" + - "'/dev/sdf' not in result_no_device_true.block_device_mapping" + + - name: create an image from the instance with attached devices with no_device false + ec2_ami: + name: '{{ ec2_ami_name }}_no_device_false_ami' + instance_id: '{{ ec2_instance_id }}' + device_mapping: + - device_name: /dev/sda1 + volume_size: 10 + delete_on_termination: true + volume_type: gp2 + no_device: no + state: present + wait: yes + root_device_name: '{{ ec2_ami_root_disk }}' + register: result_no_device_false + + - name: set image id fact for deletion later + set_fact: + ec2_ami_no_device_false_image_id: "{{ result_no_device_false.image_id }}" + + - name: assert that image with no_device option no has been created + assert: + that: + - "result_no_device_false.changed" + - "'/dev/sda1' in result_no_device_false.block_device_mapping" + + # ============================================================ + + - name: gather facts about the image created + ec2_ami_info: + image_ids: '{{ ec2_ami_image_id }}' + register: ami_facts_result + ignore_errors: true + + - name: assert that the right image was found + assert: + that: + - "ami_facts_result.images[0].image_id == ec2_ami_image_id" + + # some ec2_ami_info tests to test if the filtering is working fine. 
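+  # The filters below are passed through to the EC2 DescribeImages API, so any
+  # filter name that API accepts should work here; the boolean and integer
+  # values are converted to strings on the way through (an assumption about the
+  # boto3 filter handling, which the boolean and integer tests below exercise).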
+  # ============================================================
+
+  - name: gather info about the image
+    ec2_ami_info:
+      image_ids: '{{ ec2_region_images[ec2_region] }}'
+    register: ami_info_result
+    ignore_errors: true
+
+  - name: assert that the right image was found
+    assert:
+      that:
+      - "ami_info_result.images[0].image_id == '{{ ec2_region_images[ec2_region] }}'"
+
+  # ============================================================
+
+  - name: gather info about the image using boolean filter
+    ec2_ami_info:
+      image_ids: '{{ ec2_region_images[ec2_region] }}'
+      filters:
+        is-public: true
+    register: ami_info_result
+    ignore_errors: true
+
+  - name: assert that the right image was found
+    assert:
+      that:
+      - "ami_info_result.images[0].image_id == '{{ ec2_region_images[ec2_region] }}'"
+
+  # ============================================================
+
+  - name: gather info about the image using integer filter
+    ec2_ami_info:
+      image_ids: '{{ ec2_region_images[ec2_region] }}'
+      filters:
+        # Amazon owned
+        owner-id: 137112412989
+    register: ami_info_result
+    ignore_errors: true
+
+  - name: assert that the right image was found
+    assert:
+      that:
+      - "ami_info_result.images[0].image_id == '{{ ec2_region_images[ec2_region] }}'"
+
+  # ============================================================
+
+  - name: gather info about the image using string filter
+    ec2_ami_info:
+      image_ids: '{{ ec2_region_images[ec2_region] }}'
+      filters:
+        name: 'amzn-ami-hvm-2017.09.0.20170930-x86_64-gp2'
+    register: ami_info_result
+    ignore_errors: true
+
+  - name: assert that the right image was found
+    assert:
+      that:
+      - "ami_info_result.images[0].image_id == '{{ ec2_region_images[ec2_region] }}'"
+
+  # ec2_ami_info filtering tests end
+  # ============================================================
+
+  - name: delete the image (check mode)
+    ec2_ami:
+      instance_id: '{{ ec2_instance_id }}'
+      state: absent
+      delete_snapshot: yes
+      name: '{{ ec2_ami_name }}_ami'
+      description: '{{ ec2_ami_description }}'
+      image_id: '{{ result.image_id }}'
+      tags:
+        Name: '{{ ec2_ami_name }}_ami'
+      wait: yes
+    ignore_errors: true
+    check_mode: true
+    register: check_mode_result
+
+  - name: assert that check_mode result is changed
+    assert:
+      that:
+      - check_mode_result is changed
+
+  - name: delete the image
+    ec2_ami:
+      instance_id: '{{ ec2_instance_id }}'
+      state: absent
+      delete_snapshot: yes
+      name: '{{ ec2_ami_name }}_ami'
+      description: '{{ ec2_ami_description }}'
+      image_id: '{{ result.image_id }}'
+      tags:
+        Name: '{{ ec2_ami_name }}_ami'
+      wait: yes
+    ignore_errors: true
+    register: result
+
+  - name: assert that the image has been deleted
+    assert:
+      that:
+      - "result.changed"
+      - "'image_id' not in result"
+      - "result.snapshots_deleted"
+
+  # ============================================================
+
+  - name: test removing an ami if no image ID is provided (expected failed=true)
+    ec2_ami:
+      state: absent
+    register: result
+    ignore_errors: yes
+
+  - name: assert that an image ID is required
+    assert:
+      that:
+      - "result.failed"
+      - "result.msg == 'state is absent but all of the following are missing: image_id'"
+
+  # ============================================================
+
+  - name: create an image from the snapshot
+    ec2_ami:
+      name: '{{ ec2_ami_name }}_ami'
+      description: '{{ ec2_ami_description }}'
+      state: present
+      launch_permissions:
+        user_ids: []
+      tags:
+        Name: '{{ ec2_ami_name }}_ami'
+      root_device_name: '{{ ec2_ami_root_disk }}'
+      device_mapping:
+        - device_name: '{{ ec2_ami_root_disk }}'
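+          # No instance_id is passed here: the mapping below re-registers the
+          # root volume from the snapshot taken during setup, so this AMI is
+          # built from a snapshot rather than from a running instance.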
volume_type: gp2 + size: 8 + delete_on_termination: true + snapshot_id: '{{ setup_snapshot.snapshot_id }}' + register: result + ignore_errors: true + + - name: set image id fact for deletion later + set_fact: + ec2_ami_image_id: "{{ result.image_id }}" + ec2_ami_snapshot: "{{ result.block_device_mapping[ec2_ami_root_disk].snapshot_id }}" + + - name: assert a new ami has been created + assert: + that: + - "result.changed" + - "result.image_id.startswith('ami-')" + + # ============================================================ + + - name: test default launch permissions idempotence (check mode) + ec2_ami: + description: '{{ ec2_ami_description }}' + state: present + name: '{{ ec2_ami_name }}_ami' + tags: + Name: '{{ ec2_ami_name }}_ami' + root_device_name: '{{ ec2_ami_root_disk }}' + image_id: '{{ result.image_id }}' + launch_permissions: + user_ids: [] + device_mapping: + - device_name: '{{ ec2_ami_root_disk }}' + volume_type: gp2 + size: 8 + delete_on_termination: true + snapshot_id: '{{ setup_snapshot.snapshot_id }}' + check_mode: true + register: check_mode_result + + - name: assert that check_mode result is not changed + assert: + that: + - check_mode_result is not changed + + - name: test default launch permissions idempotence + ec2_ami: + description: '{{ ec2_ami_description }}' + state: present + name: '{{ ec2_ami_name }}_ami' + tags: + Name: '{{ ec2_ami_name }}_ami' + root_device_name: '{{ ec2_ami_root_disk }}' + image_id: '{{ result.image_id }}' + launch_permissions: + user_ids: [] + device_mapping: + - device_name: '{{ ec2_ami_root_disk }}' + volume_type: gp2 + size: 8 + delete_on_termination: true + snapshot_id: '{{ setup_snapshot.snapshot_id }}' + register: result + + - name: assert a new ami has not been created + assert: + that: + - "not result.changed" + - "result.image_id.startswith('ami-')" + + # ============================================================ + + - name: add a tag to the AMI + ec2_ami: + state: present + description: '{{ ec2_ami_description }}' + image_id: '{{ result.image_id }}' + name: '{{ ec2_ami_name }}_ami' + tags: + New: Tag + purge_tags: no + register: result + + - name: assert a tag was added + assert: + that: + - "'Name' in result.tags and result.tags.Name == ec2_ami_name + '_ami'" + - "'New' in result.tags and result.tags.New == 'Tag'" + + - name: use purge_tags to remove a tag from the AMI + ec2_ami: + state: present + description: '{{ ec2_ami_description }}' + image_id: '{{ result.image_id }}' + name: '{{ ec2_ami_name }}_ami' + tags: + New: Tag + register: result + + - name: assert a tag was removed + assert: + that: + - "'Name' not in result.tags" + - "'New' in result.tags and result.tags.New == 'Tag'" + + # ============================================================ + + - name: update AMI launch permissions (check mode) + ec2_ami: + state: present + image_id: '{{ result.image_id }}' + description: '{{ ec2_ami_description }}' + tags: + Name: '{{ ec2_ami_name }}_ami' + launch_permissions: + group_names: ['all'] + check_mode: true + register: check_mode_result + + - name: assert that check_mode result is changed + assert: + that: + - check_mode_result is changed + + - name: update AMI launch permissions + ec2_ami: + state: present + image_id: '{{ result.image_id }}' + description: '{{ ec2_ami_description }}' + tags: + Name: '{{ ec2_ami_name }}_ami' + launch_permissions: + group_names: ['all'] + register: result + + - name: assert launch permissions were updated + assert: + that: + - "result.changed" + + # 
============================================================ + + - name: modify the AMI description (check mode) + ec2_ami: + state: present + image_id: '{{ result.image_id }}' + name: '{{ ec2_ami_name }}_ami' + description: '{{ ec2_ami_description }}CHANGED' + tags: + Name: '{{ ec2_ami_name }}_ami' + launch_permissions: + group_names: ['all'] + check_mode: true + register: check_mode_result + + - name: assert that check_mode result is changed + assert: + that: + - check_mode_result is changed + + - name: modify the AMI description + ec2_ami: + state: present + image_id: '{{ result.image_id }}' + name: '{{ ec2_ami_name }}_ami' + description: '{{ ec2_ami_description }}CHANGED' + tags: + Name: '{{ ec2_ami_name }}_ami' + launch_permissions: + group_names: ['all'] + register: result + + - name: assert the description changed + assert: + that: + - "result.changed" + + # ============================================================ + + - name: remove public launch permissions + ec2_ami: + state: present + image_id: '{{ result.image_id }}' + name: '{{ ec2_ami_name }}_ami' + tags: + Name: '{{ ec2_ami_name }}_ami' + launch_permissions: + group_names: [] + register: result + + - name: assert launch permissions were updated + assert: + that: + - "result.changed" + + # ============================================================ + + - name: delete ami without deleting the snapshot (default is not to delete) + ec2_ami: + instance_id: '{{ ec2_instance_id }}' + state: absent + name: '{{ ec2_ami_name }}_ami' + image_id: '{{ ec2_ami_image_id }}' + tags: + Name: '{{ ec2_ami_name }}_ami' + wait: yes + ignore_errors: true + register: result + + - name: assert that the image has been deleted + assert: + that: + - "result.changed" + - "'image_id' not in result" + + - name: ensure the snapshot still exists + ec2_snapshot_info: + snapshot_ids: + - '{{ ec2_ami_snapshot }}' + register: snapshot_result + + - name: assert the snapshot wasn't deleted + assert: + that: + - "snapshot_result.snapshots[0].snapshot_id == ec2_ami_snapshot" + + - name: delete ami for a second time (check mode) + ec2_ami: + instance_id: '{{ ec2_instance_id }}' + state: absent + name: '{{ ec2_ami_name }}_ami' + image_id: '{{ ec2_ami_image_id }}' + tags: + Name: '{{ ec2_ami_name }}_ami' + wait: yes + check_mode: true + register: check_mode_result + + - name: assert that check_mode result is not changed + assert: + that: + - check_mode_result is not changed + + - name: delete ami for a second time + ec2_ami: + instance_id: '{{ ec2_instance_id }}' + state: absent + name: '{{ ec2_ami_name }}_ami' + image_id: '{{ ec2_ami_image_id }}' + tags: + Name: '{{ ec2_ami_name }}_ami' + wait: yes + register: result + + - name: assert that image does not exist + assert: + that: + - not result.changed + - not result.failed + + # ============================================================ + + - name: create an image from the snapshot with boot_mode and tpm_support + ec2_ami: + name: '{{ ec2_ami_name }}_ami-boot-tpm' + description: '{{ ec2_ami_description }}' + state: present + boot_mode: uefi + tpm_support: v2.0 + launch_permissions: + user_ids: [] + tags: + Name: '{{ ec2_ami_name }}_ami-boot-tpm' + root_device_name: '{{ ec2_ami_root_disk }}' + device_mapping: + - device_name: '{{ ec2_ami_root_disk }}' + volume_type: gp2 + size: 8 + delete_on_termination: true + snapshot_id: '{{ setup_snapshot.snapshot_id }}' + register: result + ignore_errors: true + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + + - name: set image id fact for 
deletion later + set_fact: + ec2_ami_image_id_boot_tpm: "{{ result.image_id }}" + ec2_ami_snapshot_boot_tpm: "{{ result.block_device_mapping[ec2_ami_root_disk].snapshot_id }}" + + - name: gather facts about the image created + ec2_ami_info: + image_ids: '{{ ec2_ami_image_id_boot_tpm }}' + register: ami_facts_result_boot_tpm + ignore_errors: true + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + + - name: assert that new ami has been created with desired options + assert: + that: + - "result.changed" + - "result.image_id.startswith('ami-')" + - ami_facts_result_boot_tpm.images[0].image_id | length != 0 + - ami_facts_result_boot_tpm.images[0].boot_mode == 'uefi' + - ami_facts_result_boot_tpm.images[0].tpm_support == 'v2.0' + + # ============================================================ + + always: + + # ============================================================ + + # TEAR DOWN: snapshot, ec2 instance, ec2 key pair, security group, vpc + - name: Announce teardown start + debug: + msg: "***** TESTING COMPLETE. COMMENCE TEARDOWN *****" + + - name: delete ami + ec2_ami: + state: absent + image_id: "{{ ec2_ami_image_id_boot_tpm }}" + wait: yes + ignore_errors: yes + + - name: delete ami + ec2_ami: + state: absent + image_id: "{{ ec2_ami_image_id }}" + name: '{{ ec2_ami_name }}_ami' + wait: yes + ignore_errors: yes + + - name: delete ami + ec2_ami: + state: absent + image_id: "{{ ec2_ami_no_device_true_image_id }}" + wait: yes + ignore_errors: yes + + - name: delete ami + ec2_ami: + state: absent + image_id: "{{ ec2_ami_no_device_false_image_id }}" + wait: yes + ignore_errors: yes + + - name: remove setup snapshot of ec2 instance + ec2_snapshot: + state: absent + snapshot_id: '{{ setup_snapshot.snapshot_id }}' + ignore_errors: yes + + - name: remove setup ec2 instance + ec2_instance: + state: absent + instance_ids: + - '{{ ec2_instance_id }}' + wait: true + ignore_errors: yes + + - name: remove setup keypair + ec2_key: + name: '{{ec2_ami_name}}_setup' + state: absent + ignore_errors: yes + + - name: remove setup security group + ec2_group: + name: '{{ ec2_ami_name }}_setup' + description: 'created by Ansible integration tests' + state: absent + vpc_id: '{{ setup_vpc.vpc.id }}' + ignore_errors: yes + + - name: remove setup subnet + ec2_vpc_subnet: + az: '{{ availability_zone }}' + tags: '{{ec2_ami_name}}_setup' + vpc_id: '{{ setup_vpc.vpc.id }}' + cidr: '{{ subnet_cidr }}' + state: absent + resource_tags: + Name: '{{ ec2_ami_name }}_setup' + ignore_errors: yes + + - name: remove setup VPC + ec2_vpc_net: + cidr_block: '{{ vpc_cidr }}' + state: absent + name: '{{ ec2_ami_name }}_setup' + resource_tags: + Name: '{{ ec2_ami_name }}_setup' + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/vars/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/vars/main.yml new file mode 100644 index 000000000..dac1fda2e --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/vars/main.yml @@ -0,0 +1,20 @@ +--- +# vars file for test_ec2_ami + +# based on Amazon Linux AMI 2017.09.0 (HVM), SSD Volume Type +ec2_region_images: + us-east-1: ami-8c1be5f6 + us-east-2: ami-c5062ba0 + us-west-1: ami-02eada62 + us-west-2: ami-e689729e + ca-central-1: ami-fd55ec99 + eu-west-1: ami-acd005d5 + eu-central-1: ami-c7ee5ca8 + eu-west-2: ami-1a7f6d7e + ap-southeast-1: ami-0797ea64 + ap-southeast-2: ami-8536d6e7 + ap-northeast-2: ami-9bec36f5 + ap-northeast-1: ami-2a69be4c + ap-south-1: ami-4fc58420 + 
sa-east-1: ami-f1344b9d + cn-north-1: ami-fba67596 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/aliases new file mode 100644 index 000000000..78305e989 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/aliases @@ -0,0 +1,5 @@ +# https://github.com/ansible-collections/community.aws/issues/159 +# unstable + +cloud/aws +ec2_eip_info \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/defaults/main.yml new file mode 100644 index 000000000..115bcca12 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/defaults/main.yml @@ -0,0 +1,5 @@ +# VPCs are identified by the CIDR. Don't hard code the CIDR. CI may +# run multiple copies of the test concurrently. +vpc_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.0.0/16 +subnet_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.42.0/24 +subnet_az: '{{ ec2_availability_zone_names[0] }}' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/meta/main.yml new file mode 100644 index 000000000..1d40168d0 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: +- setup_ec2_facts diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/tasks/main.yml new file mode 100644 index 000000000..46f33a399 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/tasks/main.yml @@ -0,0 +1,1442 @@ +- name: Integration testing for ec2_eip + module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + amazon.aws.ec2_eip: + in_vpc: true + + block: + - name: Get the current caller identity facts + aws_caller_info: + register: caller_info + + - name: List available AZs + aws_az_info: + register: region_azs + + - name: Create a VPC + ec2_vpc_net: + name: '{{ resource_prefix }}-vpc' + state: present + cidr_block: '{{ vpc_cidr }}' + tags: + AnsibleEIPTest: Pending + AnsibleEIPTestPrefix: '{{ resource_prefix }}' + register: vpc_result + + - name: Look for signs of concurrent EIP tests. Pause if they are running or their + prefix comes before ours. 
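+    # Crude cross-run lock: every concurrent copy of this test tags its VPC
+    # AnsibleEIPTest=Pending, then polls here until no VPC is tagged Running
+    # and its own resource_prefix sorts first among the Pending prefixes,
+    # before flipping its own tag to Running (the "Make a crude lock" task
+    # below). This keeps parallel CI runs from seeing each other's EIPs.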
+ vars: + running_query: vpcs[?tags.AnsibleEIPTest=='Running'] + pending_query: vpcs[?tags.AnsibleEIPTest=='Pending'].tags.AnsibleEIPTestPrefix + ec2_vpc_net_info: + filters: + tag:AnsibleEIPTest: + - Pending + - Running + register: vpc_info + retries: 10 + delay: 5 + until: + - ( vpc_info.vpcs | map(attribute='tags') | selectattr('AnsibleEIPTest', 'equalto', + 'Running') | length == 0 ) + - ( vpc_info.vpcs | map(attribute='tags') | selectattr('AnsibleEIPTest', 'equalto', + 'Pending') | map(attribute='AnsibleEIPTestPrefix') | sort | first == resource_prefix + ) + + - name: Create subnet + ec2_vpc_subnet: + cidr: '{{ subnet_cidr }}' + az: '{{ subnet_az }}' + vpc_id: '{{ vpc_result.vpc.id }}' + state: present + register: vpc_subnet_create + + - name: Create internet gateway + amazon.aws.ec2_vpc_igw: + state: present + vpc_id: '{{ vpc_result.vpc.id }}' + register: vpc_igw + + - name: Create security group + ec2_group: + state: present + name: '{{ resource_prefix }}-sg' + description: a security group for ansible tests + vpc_id: '{{ vpc_result.vpc.id }}' + rules: + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: 0.0.0.0/0 + register: security_group + + - name: Create instance for attaching + ec2_instance: + name: '{{ resource_prefix }}-instance' + image_id: '{{ ec2_ami_id }}' + security_group: '{{ security_group.group_id }}' + vpc_subnet_id: '{{ vpc_subnet_create.subnet.id }}' + wait: yes + state: running + register: create_ec2_instance_result + + - name: Create ENI A + ec2_eni: + subnet_id: '{{ vpc_subnet_create.subnet.id }}' + register: eni_create_a + + - name: Create ENI B + ec2_eni: + subnet_id: '{{ vpc_subnet_create.subnet.id }}' + register: eni_create_b + + - name: Make a crude lock + ec2_vpc_net: + name: '{{ resource_prefix }}-vpc' + state: present + cidr_block: '{{ vpc_cidr }}' + tags: + AnsibleEIPTest: Running + AnsibleEIPTestPrefix: '{{ resource_prefix }}' + + - name: Get current state of EIPs + ec2_eip_info: + register: eip_info_start + + - name: Require that there are no free IPs when we start, otherwise we can't test + things properly + assert: + that: + - '"addresses" in eip_info_start' + - ( eip_info_start.addresses | length ) == ( eip_info_start.addresses | select('match', + 'association_id') | length ) + + # ------------------------------------------------------------------------------------------ + + - name: Allocate a new EIP with no conditions - check_mode + ec2_eip: + state: present + tags: + AnsibleEIPTestPrefix: '{{ resource_prefix }}' + register: eip + check_mode: yes + + - assert: + that: + - eip is changed + + - name: Allocate a new EIP with no conditions + ec2_eip: + state: present + tags: + AnsibleEIPTestPrefix: '{{ resource_prefix }}' + register: eip + + - ec2_eip_info: + register: eip_info + check_mode: yes + + - assert: + that: + - eip is changed + - eip.public_ip is defined and ( eip.public_ip | ansible.utils.ipaddr ) + - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-") + - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length + ) + + - name: Get EIP info via public ip + ec2_eip_info: + filters: + public-ip: '{{ eip.public_ip }}' + register: eip_info + + - assert: + that: + - '"addresses" in eip_info' + - eip_info.addresses | length == 1 + - eip_info.addresses[0].allocation_id == eip.allocation_id + - eip_info.addresses[0].domain == "vpc" + - eip_info.addresses[0].public_ip == eip.public_ip + - '"AnsibleEIPTestPrefix" in eip_info.addresses[0].tags' + - eip_info.addresses[0].tags['AnsibleEIPTestPrefix'] == 
resource_prefix + + - name: Get EIP info via allocation id + ec2_eip_info: + filters: + allocation-id: '{{ eip.allocation_id }}' + register: eip_info + + - assert: + that: + - '"addresses" in eip_info' + - eip_info.addresses | length == 1 + - eip_info.addresses[0].allocation_id == eip.allocation_id + - eip_info.addresses[0].domain == "vpc" + - eip_info.addresses[0].public_ip == eip.public_ip + - '"AnsibleEIPTestPrefix" in eip_info.addresses[0].tags' + - eip_info.addresses[0].tags['AnsibleEIPTestPrefix'] == resource_prefix + + - name: Allocate a new ip (idempotence) - check_mode + ec2_eip: + state: present + public_ip: '{{ eip.public_ip }}' + register: eip + check_mode: yes + + - assert: + that: + - eip is not changed + + - name: Allocate a new ip (idempotence) + ec2_eip: + state: present + public_ip: '{{ eip.public_ip }}' + register: eip + + - ec2_eip_info: + register: eip_info + + - assert: + that: + - eip is not changed + - eip.public_ip is defined and ( eip.public_ip | ansible.utils.ipaddr ) + - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-") + - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length + ) + + # ------------------------------------------------------------------------------------------ + + - name: Release EIP - check_mode + ec2_eip: + state: absent + public_ip: '{{ eip.public_ip }}' + register: eip_release + check_mode: yes + + - assert: + that: + - eip_release.changed + + - name: Release eip + ec2_eip: + state: absent + public_ip: '{{ eip.public_ip }}' + register: eip_release + + - ec2_eip_info: + register: eip_info + + - assert: + that: + - eip_release.changed + - not eip_release.disassociated + - eip_release.released + - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length ) + + - name: Release EIP (idempotence) - check_mode + ec2_eip: + state: absent + public_ip: '{{ eip.public_ip }}' + register: eip_release + check_mode: yes + + - assert: + that: + - eip_release is not changed + + - name: Release EIP (idempotence) + ec2_eip: + state: absent + public_ip: '{{ eip.public_ip }}' + register: eip_release + + - ec2_eip_info: + register: eip_info + + - assert: + that: + - not eip_release.changed + - not eip_release.disassociated + - not eip_release.released + - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length ) + + # ------------------------------------------------------------------------------------------ + + - name: Allocate a new EIP - attempt reusing unallocated ones (none available) - + check_mode + ec2_eip: + state: present + reuse_existing_ip_allowed: true + register: eip + check_mode: yes + + - assert: + that: + - eip is changed + + - name: Allocate a new EIP - attempt reusing unallocated ones (none available) + ec2_eip: + state: present + reuse_existing_ip_allowed: true + register: eip + + - ec2_eip_info: + register: eip_info + + - assert: + that: + - eip is changed + - eip.public_ip is defined and ( eip.public_ip | ansible.utils.ipaddr ) + - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-") + - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length + ) + + - name: Re-Allocate a new EIP - attempt reusing unallocated ones (one available) + - check_mode + ec2_eip: + state: present + reuse_existing_ip_allowed: true + register: reallocate_eip + check_mode: yes + + - assert: + that: + - reallocate_eip is not changed + + - name: Re-Allocate a new EIP - attempt reusing unallocated ones (one available) + ec2_eip: + state: present + 
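+      # With reuse_existing_ip_allowed, an already allocated but unassociated
+      # address is handed back instead of a new one being allocated, which is
+      # why the re-allocation below is expected to report "not changed".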
reuse_existing_ip_allowed: true
+    register: reallocate_eip
+
+  - ec2_eip_info:
+    register: eip_info
+
+  - assert:
+      that:
+      - reallocate_eip is not changed
+      - reallocate_eip.public_ip is defined and ( reallocate_eip.public_ip | ansible.utils.ipaddr )
+      - reallocate_eip.allocation_id is defined and reallocate_eip.allocation_id.startswith("eipalloc-")
+      - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length )
+
+  # ------------------------------------------------------------------------------------------
+
+  - name: attempt reusing an existing EIP with a tag (No match available) - check_mode
+    ec2_eip:
+      state: present
+      reuse_existing_ip_allowed: true
+      tag_name: Team
+    register: no_tagged_eip
+    check_mode: yes
+
+  - assert:
+      that:
+      - no_tagged_eip is changed
+
+  - name: attempt reusing an existing EIP with a tag (No match available)
+    ec2_eip:
+      state: present
+      reuse_existing_ip_allowed: true
+      tag_name: Team
+    register: no_tagged_eip
+
+  - ec2_eip_info:
+    register: eip_info
+
+  - assert:
+      that:
+      - no_tagged_eip is changed
+      - no_tagged_eip.public_ip is defined and ( no_tagged_eip.public_ip | ansible.utils.ipaddr )
+      - no_tagged_eip.allocation_id is defined and no_tagged_eip.allocation_id.startswith("eipalloc-")
+      - ( eip_info_start.addresses | length ) + 2 == ( eip_info.addresses | length )
+
+  # ------------------------------------------------------------------------------------------
+
+  - name: Tag EIP so we can try matching it
+    ec2_eip:
+      state: present
+      public_ip: '{{ eip.public_ip }}'
+      tags:
+        Team: Frontend
+
+  - name: Attempt reusing an existing EIP with a tag (Match available) - check_mode
+    ec2_eip:
+      state: present
+      reuse_existing_ip_allowed: true
+      tag_name: Team
+    register: reallocate_eip
+    check_mode: yes
+
+  - assert:
+      that:
+      - reallocate_eip is not changed
+
+  - name: Attempt reusing an existing EIP with a tag (Match available)
+    ec2_eip:
+      state: present
+      reuse_existing_ip_allowed: true
+      tag_name: Team
+    register: reallocate_eip
+
+  - ec2_eip_info:
+    register: eip_info
+
+  - assert:
+      that:
+      - reallocate_eip is not changed
+      - reallocate_eip.public_ip is defined and ( reallocate_eip.public_ip | ansible.utils.ipaddr )
+      - reallocate_eip.allocation_id is defined and reallocate_eip.allocation_id.startswith("eipalloc-")
+      - ( eip_info_start.addresses | length ) + 2 == ( eip_info.addresses | length )
+
+  - name: Attempt reusing an existing EIP with a tag and its value (no match available) - check_mode
+    ec2_eip:
+      state: present
+      reuse_existing_ip_allowed: true
+      tag_name: Team
+      tag_value: Backend
+    register: backend_eip
+    check_mode: yes
+
+  - assert:
+      that:
+      - backend_eip is changed
+
+  - name: Attempt reusing an existing EIP with a tag and its value (no match available)
+    ec2_eip:
+      state: present
+      reuse_existing_ip_allowed: true
+      tag_name: Team
+      tag_value: Backend
+    register: backend_eip
+
+  - ec2_eip_info:
+    register: eip_info
+
+  - assert:
+      that:
+      - backend_eip is changed
+      - backend_eip.public_ip is defined and ( backend_eip.public_ip | ansible.utils.ipaddr )
+      - backend_eip.allocation_id is defined and backend_eip.allocation_id.startswith("eipalloc-")
+      - ( eip_info_start.addresses | length ) + 3 == ( eip_info.addresses | length )
+
+  # ------------------------------------------------------------------------------------------
+
+  - name: Tag EIP so we can try matching it
+    ec2_eip:
+      state: present
+      public_ip: '{{ eip.public_ip }}'
+      tags:
+        Team: Backend
+
+  - name: Attempt reusing an existing EIP with a tag and its value (match available) - check_mode
+    ec2_eip:
+      state: present
+      reuse_existing_ip_allowed: true
+      tag_name: Team
+      tag_value: Backend
+    register: reallocate_eip
+    check_mode: yes
+
+  - assert:
+      that:
+      - reallocate_eip is not changed
+
+  - name: Attempt reusing an existing EIP with a tag and its value (match available)
+    ec2_eip:
+      state: present
+      reuse_existing_ip_allowed: true
+      tag_name: Team
+      tag_value: Backend
+    register: reallocate_eip
+
+  - ec2_eip_info:
+    register: eip_info
+
+  - assert:
+      that:
+      - reallocate_eip is not changed
+      - reallocate_eip.public_ip is defined and reallocate_eip.public_ip != ""
+      - reallocate_eip.allocation_id is defined and reallocate_eip.allocation_id != ""
+      - ( eip_info_start.addresses | length ) + 3 == ( eip_info.addresses | length )
+
+  - name: Release backend_eip
+    ec2_eip:
+      state: absent
+      public_ip: '{{ backend_eip.public_ip }}'
+
+  - name: Release no_tagged_eip
+    ec2_eip:
+      state: absent
+      public_ip: '{{ no_tagged_eip.public_ip }}'
+
+  - name: Release eip
+    ec2_eip:
+      state: absent
+      public_ip: '{{ eip.public_ip }}'
+
+  - ec2_eip_info:
+    register: eip_info
+
+  - assert:
+      that:
+      - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length )
+
+  # ------------------------------------------------------------------------------------------
+
+  - name: Allocate a new EIP from a pool - check_mode
+    ec2_eip:
+      state: present
+      public_ipv4_pool: amazon
+    register: eip
+    check_mode: yes
+
+  - assert:
+      that:
+      - eip is changed
+
+  - name: Allocate a new EIP from a pool
+    ec2_eip:
+      state: present
+      public_ipv4_pool: amazon
+    register: eip
+
+  - ec2_eip_info:
+    register: eip_info
+
+  - assert:
+      that:
+      - eip is changed
+      - eip.public_ip is defined and ( eip.public_ip | ansible.utils.ipaddr )
+      - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-")
+      - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length )
+
+  # ------------------------------------------------------------------------------------------
+
+  - name: Attach EIP to ENI A - check_mode
+    ec2_eip:
+      public_ip: '{{ eip.public_ip }}'
+      device_id: '{{ eni_create_a.interface.id }}'
+    register: associate_eip
+    check_mode: yes
+
+  - assert:
+      that:
+      - associate_eip is changed
+
+  - name: Attach EIP to ENI A
+    ec2_eip:
+      public_ip: '{{ eip.public_ip }}'
+      device_id: '{{ eni_create_a.interface.id }}'
+    register: associate_eip
+
+  - ec2_eip_info:
+      filters:
+        public-ip: '{{ eip.public_ip }}'
+    register: eip_info
+
+  - assert:
+      that:
+      - associate_eip is changed
+      - eip_info.addresses | length == 1
+      - associate_eip.public_ip is defined and eip.public_ip == associate_eip.public_ip
+      - associate_eip.allocation_id is defined and eip.allocation_id == associate_eip.allocation_id
+      - eip_info.addresses[0].allocation_id == eip.allocation_id
+      - eip_info.addresses[0].domain == "vpc"
+      - eip_info.addresses[0].public_ip == eip.public_ip
+      - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-")
+      - eip_info.addresses[0].network_interface_id == eni_create_a.interface.id
+      - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address | ansible.utils.ipaddr )
+      - eip_info.addresses[0].network_interface_owner_id == caller_info.account
+
+  - name: Attach EIP to ENI A (idempotence) - check_mode
+    ec2_eip:
+      public_ip: '{{ eip.public_ip }}'
+      device_id: '{{ eni_create_a.interface.id }}'
+    register: associate_eip
+    check_mode: yes
+
+  -
assert: + that: + - associate_eip is not changed + + - name: Attach EIP to ENI A (idempotence) + ec2_eip: + public_ip: '{{ eip.public_ip }}' + device_id: '{{ eni_create_a.interface.id }}' + register: associate_eip + + - ec2_eip_info: + filters: + public-ip: '{{ eip.public_ip }}' + register: eip_info + + - assert: + that: + - associate_eip is not changed + - associate_eip.public_ip is defined and eip.public_ip == associate_eip.public_ip + - associate_eip.allocation_id is defined and eip.allocation_id == associate_eip.allocation_id + - eip_info.addresses | length == 1 + - eip_info.addresses[0].allocation_id == eip.allocation_id + - eip_info.addresses[0].domain == "vpc" + - eip_info.addresses[0].public_ip == eip.public_ip + - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-") + - eip_info.addresses[0].network_interface_id == eni_create_a.interface.id + - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address + | ansible.utils.ipaddr ) + + # ------------------------------------------------------------------------------------------ + + - name: Attach EIP to ENI B (should fail, already associated) + ec2_eip: + public_ip: '{{ eip.public_ip }}' + device_id: '{{ eni_create_b.interface.id }}' + register: associate_eip + ignore_errors: true + + - ec2_eip_info: + filters: + public-ip: '{{ eip.public_ip }}' + register: eip_info + + - assert: + that: + - associate_eip is failed + - eip_info.addresses | length == 1 + - eip_info.addresses[0].allocation_id == eip.allocation_id + - eip_info.addresses[0].domain == "vpc" + - eip_info.addresses[0].public_ip == eip.public_ip + - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-") + - eip_info.addresses[0].network_interface_id == eni_create_a.interface.id + - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address + | ansible.utils.ipaddr ) + + - name: Attach EIP to ENI B - check_mode + ec2_eip: + public_ip: '{{ eip.public_ip }}' + device_id: '{{ eni_create_b.interface.id }}' + allow_reassociation: true + register: associate_eip + check_mode: yes + + - assert: + that: + - associate_eip is changed + + - name: Attach EIP to ENI B + ec2_eip: + public_ip: '{{ eip.public_ip }}' + device_id: '{{ eni_create_b.interface.id }}' + allow_reassociation: true + register: associate_eip + + - ec2_eip_info: + filters: + public-ip: '{{ eip.public_ip }}' + register: eip_info + + - assert: + that: + - associate_eip is changed + - associate_eip.public_ip is defined and eip.public_ip == associate_eip.public_ip + - associate_eip.allocation_id is defined and eip.allocation_id == associate_eip.allocation_id + - eip_info.addresses | length == 1 + - eip_info.addresses[0].allocation_id == eip.allocation_id + - eip_info.addresses[0].domain == "vpc" + - eip_info.addresses[0].public_ip == eip.public_ip + - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-") + - eip_info.addresses[0].network_interface_id == eni_create_b.interface.id + - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address + | ansible.utils.ipaddr ) + + - name: Attach EIP to ENI B (idempotence) - check_mode + ec2_eip: + public_ip: '{{ eip.public_ip }}' + device_id: '{{ eni_create_b.interface.id }}' + allow_reassociation: true + register: associate_eip + check_mode: yes + + - assert: + that: + - associate_eip is not 
changed + + - name: Attach EIP to ENI B (idempotence) + ec2_eip: + public_ip: '{{ eip.public_ip }}' + device_id: '{{ eni_create_b.interface.id }}' + allow_reassociation: true + register: associate_eip + + - ec2_eip_info: + filters: + public-ip: '{{ eip.public_ip }}' + register: eip_info + + - assert: + that: + - associate_eip is not changed + - associate_eip.public_ip is defined and eip.public_ip == associate_eip.public_ip + - associate_eip.allocation_id is defined and eip.allocation_id == associate_eip.allocation_id + - eip_info.addresses | length == 1 + - eip_info.addresses[0].allocation_id == eip.allocation_id + - eip_info.addresses[0].domain == "vpc" + - eip_info.addresses[0].public_ip == eip.public_ip + - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-") + - eip_info.addresses[0].network_interface_id == eni_create_b.interface.id + - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address + | ansible.utils.ipaddr ) + + # ------------------------------------------------------------------------------------------ + + - name: Detach EIP from ENI B, without enabling release on disassociation - check_mode + ec2_eip: + state: absent + public_ip: '{{ eip.public_ip }}' + device_id: '{{ eni_create_b.interface.id }}' + register: disassociate_eip + check_mode: yes + + - assert: + that: + - disassociate_eip is changed + + - name: Detach EIP from ENI B, without enabling release on disassociation + ec2_eip: + state: absent + public_ip: '{{ eip.public_ip }}' + device_id: '{{ eni_create_b.interface.id }}' + register: disassociate_eip + + - ec2_eip_info: + filters: + public-ip: '{{ eip.public_ip }}' + register: eip_info + + - assert: + that: + - disassociate_eip.changed + - disassociate_eip.disassociated + - not disassociate_eip.released + - eip_info.addresses | length == 1 + + - name: Detach EIP from ENI B, without enabling release on disassociation (idempotence) + - check_mode + ec2_eip: + state: absent + public_ip: '{{ eip.public_ip }}' + device_id: '{{ eni_create_b.interface.id }}' + register: disassociate_eip + check_mode: yes + + - assert: + that: + - disassociate_eip is not changed + + - name: Detach EIP from ENI B, without enabling release on disassociation (idempotence) + ec2_eip: + state: absent + public_ip: '{{ eip.public_ip }}' + device_id: '{{ eni_create_b.interface.id }}' + register: disassociate_eip + + - ec2_eip_info: + filters: + public-ip: '{{ eip.public_ip }}' + register: eip_info + + - assert: + that: + - not disassociate_eip.changed + - not disassociate_eip.disassociated + - not disassociate_eip.released + - eip_info.addresses | length == 1 + + # ------------------------------------------------------------------------------------------ + + - name: Attach EIP to ENI A + ec2_eip: + public_ip: '{{ eip.public_ip }}' + device_id: '{{ eni_create_a.interface.id }}' + register: associate_eip + + - name: Detach EIP from ENI A, enabling release on disassociation - check_mode + ec2_eip: + state: absent + public_ip: '{{ eip.public_ip }}' + device_id: '{{ eni_create_a.interface.id }}' + release_on_disassociation: true + register: disassociate_eip + check_mode: yes + + - assert: + that: + - disassociate_eip is changed + + - name: Detach EIP from ENI A, enabling release on disassociation + ec2_eip: + state: absent + public_ip: '{{ eip.public_ip }}' + device_id: '{{ eni_create_a.interface.id }}' + release_on_disassociation: true + register: disassociate_eip + + - ec2_eip_info: + filters: + 
public-ip: '{{ eip.public_ip }}' + register: eip_info + + - assert: + that: + - disassociate_eip.changed + - disassociate_eip.disassociated + - disassociate_eip.released + - eip_info.addresses | length == 0 + + - name: Detach EIP from ENI A, enabling release on disassociation (idempotence) + - check_mode + ec2_eip: + state: absent + public_ip: '{{ eip.public_ip }}' + device_id: '{{ eni_create_a.interface.id }}' + release_on_disassociation: true + register: disassociate_eip + check_mode: yes + + - assert: + that: + - disassociate_eip is not changed + + - name: Detach EIP from ENI A, enabling release on disassociation (idempotence) + ec2_eip: + state: absent + public_ip: '{{ eip.public_ip }}' + device_id: '{{ eni_create_a.interface.id }}' + release_on_disassociation: true + register: disassociate_eip + + - ec2_eip_info: + filters: + public-ip: '{{ eip.public_ip }}' + register: eip_info + + - assert: + that: + - not disassociate_eip.changed + - not disassociate_eip.disassociated + - not disassociate_eip.released + - eip_info.addresses | length == 0 + + # ------------------------------------------------------------------------------------------ + + - name: Attach EIP to an EC2 instance - check_mode + ec2_eip: + device_id: '{{ create_ec2_instance_result.instance_ids[0] }}' + state: present + release_on_disassociation: yes + register: instance_eip + check_mode: yes + + - assert: + that: + - instance_eip is changed + + - name: Attach EIP to an EC2 instance + ec2_eip: + device_id: '{{ create_ec2_instance_result.instance_ids[0] }}' + state: present + release_on_disassociation: yes + register: instance_eip + + - ec2_eip_info: + filters: + public-ip: '{{ instance_eip.public_ip }}' + register: eip_info + + - assert: + that: + - instance_eip is changed + - eip_info.addresses[0].allocation_id is defined + - eip_info.addresses[0].instance_id == '{{ create_ec2_instance_result.instance_ids[0] + }}' + + - name: Attach EIP to an EC2 instance (idempotence) - check_mode + ec2_eip: + device_id: '{{ create_ec2_instance_result.instance_ids[0] }}' + state: present + release_on_disassociation: yes + register: instance_eip + check_mode: yes + + - assert: + that: + - instance_eip is not changed + + - name: Attach EIP to an EC2 instance (idempotence) + ec2_eip: + device_id: '{{ create_ec2_instance_result.instance_ids[0] }}' + state: present + release_on_disassociation: yes + register: instance_eip + + - ec2_eip_info: + filters: + public-ip: '{{ instance_eip.public_ip }}' + register: eip_info + + - assert: + that: + - instance_eip is not changed + - eip_info.addresses[0].allocation_id is defined + - eip_info.addresses[0].instance_id == '{{ create_ec2_instance_result.instance_ids[0] + }}' + + # ------------------------------------------------------------------------------------------ + + - name: Detach EIP from EC2 instance, without enabling release on disassociation + - check_mode + ec2_eip: + state: absent + device_id: '{{ create_ec2_instance_result.instance_ids[0] }}' + register: detach_eip + check_mode: yes + + - assert: + that: + - detach_eip is changed + + - name: Detach EIP from EC2 instance, without enabling release on disassociation + ec2_eip: + state: absent + device_id: '{{ create_ec2_instance_result.instance_ids[0] }}' + register: detach_eip + + - ec2_eip_info: + filters: + public-ip: '{{ instance_eip.public_ip }}' + register: eip_info + + - assert: + that: + - detach_eip.changed + - detach_eip.disassociated + - not detach_eip.released + - eip_info.addresses | length == 1 + + - name: Detach EIP from EC2 
instance, without enabling release on disassociation + (idempotence) - check_mode + ec2_eip: + state: absent + device_id: '{{ create_ec2_instance_result.instance_ids[0] }}' + register: detach_eip + check_mode: yes + + - assert: + that: + - detach_eip is not changed + + - name: Detach EIP from EC2 instance, without enabling release on disassociation + (idempotence) + ec2_eip: + state: absent + device_id: '{{ create_ec2_instance_result.instance_ids[0] }}' + register: detach_eip + + - ec2_eip_info: + filters: + public-ip: '{{ instance_eip.public_ip }}' + register: eip_info + + - assert: + that: + - not detach_eip.changed + - not detach_eip.disassociated + - not detach_eip.released + - eip_info.addresses | length == 1 + + - name: Release EIP + ec2_eip: + state: absent + public_ip: '{{ instance_eip.public_ip }}' + + # ------------------------------------------------------------------------------------------ + + - name: Attach EIP to an EC2 instance with private Ip specified - check_mode + ec2_eip: + device_id: '{{ create_ec2_instance_result.instance_ids[0] }}' + private_ip_address: '{{ create_ec2_instance_result.instances[0].private_ip_address + }}' + state: present + release_on_disassociation: yes + register: instance_eip + check_mode: yes + + - assert: + that: + - instance_eip is changed + + - name: Attach EIP to an EC2 instance with private Ip specified + ec2_eip: + device_id: '{{ create_ec2_instance_result.instance_ids[0] }}' + private_ip_address: '{{ create_ec2_instance_result.instances[0].private_ip_address + }}' + state: present + release_on_disassociation: yes + register: instance_eip + + - ec2_eip_info: + filters: + public-ip: '{{ instance_eip.public_ip }}' + register: eip_info + + - assert: + that: + - instance_eip is changed + - eip_info.addresses[0].allocation_id is defined + - eip_info.addresses[0].instance_id == '{{ create_ec2_instance_result.instance_ids[0] + }}' + + - name: Attach EIP to an EC2 instance with private Ip specified (idempotence) - + check_mode + ec2_eip: + device_id: '{{ create_ec2_instance_result.instance_ids[0] }}' + private_ip_address: '{{ create_ec2_instance_result.instances[0].private_ip_address + }}' + state: present + release_on_disassociation: yes + register: instance_eip + check_mode: yes + + - assert: + that: + - instance_eip is not changed + + - name: Attach EIP to an EC2 instance with private Ip specified (idempotence) + ec2_eip: + device_id: '{{ create_ec2_instance_result.instance_ids[0] }}' + private_ip_address: '{{ create_ec2_instance_result.instances[0].private_ip_address + }}' + state: present + release_on_disassociation: yes + register: instance_eip + + - ec2_eip_info: + filters: + public-ip: '{{ instance_eip.public_ip }}' + register: eip_info + + - assert: + that: + - instance_eip is not changed + - eip_info.addresses[0].allocation_id is defined + - eip_info.addresses[0].instance_id == '{{ create_ec2_instance_result.instance_ids[0] + }}' + + # ------------------------------------------------------------------------------------------ + + - name: Detach EIP from EC2 instance, enabling release on disassociation - check_mode + ec2_eip: + state: absent + device_id: '{{ create_ec2_instance_result.instance_ids[0] }}' + release_on_disassociation: yes + register: disassociate_eip + check_mode: yes + + - assert: + that: + - disassociate_eip is changed + + - name: Detach EIP from EC2 instance, enabling release on disassociation + ec2_eip: + state: absent + device_id: '{{ create_ec2_instance_result.instance_ids[0] }}' + release_on_disassociation: yes + 
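+      # With release_on_disassociation the address is released back to AWS as
+      # part of the detach, so the asserts below expect both "disassociated"
+      # and "released" to be true and eip_info to come back empty.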
+    register: disassociate_eip
+
+  - ec2_eip_info:
+      filters:
+        public-ip: '{{ instance_eip.public_ip }}'
+    register: eip_info
+
+  - assert:
+      that:
+      - disassociate_eip.changed
+      - disassociate_eip.disassociated
+      - disassociate_eip.released
+      - eip_info.addresses | length == 0
+
+  - name: Detach EIP from EC2 instance, enabling release on disassociation (idempotence) - check_mode
+    ec2_eip:
+      state: absent
+      device_id: '{{ create_ec2_instance_result.instance_ids[0] }}'
+      release_on_disassociation: yes
+    register: disassociate_eip
+    check_mode: yes
+
+  - assert:
+      that:
+      - disassociate_eip is not changed
+
+  - name: Detach EIP from EC2 instance, enabling release on disassociation (idempotence)
+    ec2_eip:
+      state: absent
+      device_id: '{{ create_ec2_instance_result.instance_ids[0] }}'
+      release_on_disassociation: yes
+    register: disassociate_eip
+
+  - ec2_eip_info:
+      filters:
+        public-ip: '{{ instance_eip.public_ip }}'
+    register: eip_info
+
+  - assert:
+      that:
+      - not disassociate_eip.changed
+      - not disassociate_eip.disassociated
+      - not disassociate_eip.released
+      - eip_info.addresses | length == 0
+
+  # ------------------------------------------------------------------------------------------
+
+  - name: Allocate a new EIP
+    ec2_eip:
+      state: present
+    register: eip
+
+  - name: Tag EIP - check_mode
+    ec2_eip:
+      state: present
+      public_ip: '{{ eip.public_ip }}'
+      tags:
+        AnsibleEIPTestPrefix: '{{ resource_prefix }}'
+        another_tag: another Value {{ resource_prefix }}
+    register: tag_eip
+    check_mode: yes
+
+  - assert:
+      that:
+      - tag_eip is changed
+
+  - name: Tag EIP
+    ec2_eip:
+      state: present
+      public_ip: '{{ eip.public_ip }}'
+      tags:
+        AnsibleEIPTestPrefix: '{{ resource_prefix }}'
+        another_tag: another Value {{ resource_prefix }}
+    register: tag_eip
+
+  - ec2_eip_info:
+      filters:
+        public-ip: '{{ eip.public_ip }}'
+    register: eip_info
+
+  - assert:
+      that:
+      - tag_eip is changed
+      - '"AnsibleEIPTestPrefix" in eip_info.addresses[0].tags'
+      - '"another_tag" in eip_info.addresses[0].tags'
+      - eip_info.addresses[0].tags['AnsibleEIPTestPrefix'] == resource_prefix
+      - eip_info.addresses[0].tags['another_tag'] == 'another Value ' + resource_prefix
+      - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length )
+
+  - name: Tag EIP (idempotence) - check_mode
+    ec2_eip:
+      state: present
+      public_ip: '{{ eip.public_ip }}'
+      tags:
+        AnsibleEIPTestPrefix: '{{ resource_prefix }}'
+        another_tag: another Value {{ resource_prefix }}
+    register: tag_eip
+    check_mode: yes
+
+  - assert:
+      that:
+      - tag_eip is not changed
+
+  - name: Tag EIP (idempotence)
+    ec2_eip:
+      state: present
+      public_ip: '{{ eip.public_ip }}'
+      tags:
+        AnsibleEIPTestPrefix: '{{ resource_prefix }}'
+        another_tag: another Value {{ resource_prefix }}
+    register: tag_eip
+
+  - ec2_eip_info:
+      filters:
+        public-ip: '{{ eip.public_ip }}'
+    register: eip_info
+
+  - assert:
+      that:
+      - tag_eip is not changed
+      - '"AnsibleEIPTestPrefix" in eip_info.addresses[0].tags'
+      - '"another_tag" in eip_info.addresses[0].tags'
+      - eip_info.addresses[0].tags['AnsibleEIPTestPrefix'] == resource_prefix
+      - eip_info.addresses[0].tags['another_tag'] == 'another Value ' + resource_prefix
+      - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length )
+
+  # ------------------------------------------------------------------------------------------
+
+  - name: Add another Tag - check_mode
+    ec2_eip:
+      state: present
+      public_ip: '{{ eip.public_ip }}'
+      tags:
+        third tag: Third tag - {{ resource_prefix }}
+      purge_tags: false
+    register: tag_eip
+    check_mode: yes
+
+  - assert:
+      that:
+      - tag_eip is changed
+
+  - name: Add another Tag
+    ec2_eip:
+      state: present
+      public_ip: '{{ eip.public_ip }}'
+      tags:
+        third tag: Third tag - {{ resource_prefix }}
+      purge_tags: false
+    register: tag_eip
+
+  - ec2_eip_info:
+      filters:
+        public-ip: '{{ eip.public_ip }}'
+    register: eip_info
+
+  - assert:
+      that:
+      - tag_eip is changed
+      - '"AnsibleEIPTestPrefix" in eip_info.addresses[0].tags'
+      - '"another_tag" in eip_info.addresses[0].tags'
+      - '"third tag" in eip_info.addresses[0].tags'
+      - eip_info.addresses[0].tags['AnsibleEIPTestPrefix'] == resource_prefix
+      - eip_info.addresses[0].tags['another_tag'] == 'another Value ' + resource_prefix
+      - eip_info.addresses[0].tags['third tag'] == 'Third tag - ' + resource_prefix
+      - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length )
+
+  - name: Add another Tag (idempotence) - check_mode
+    ec2_eip:
+      state: present
+      public_ip: '{{ eip.public_ip }}'
+      tags:
+        third tag: Third tag - {{ resource_prefix }}
+      purge_tags: false
+    register: tag_eip
+    check_mode: yes
+
+  - assert:
+      that:
+      - tag_eip is not changed
+
+  - name: Add another Tag (idempotence)
+    ec2_eip:
+      state: present
+      public_ip: '{{ eip.public_ip }}'
+      tags:
+        third tag: Third tag - {{ resource_prefix }}
+      purge_tags: false
+    register: tag_eip
+
+  - ec2_eip_info:
+      filters:
+        public-ip: '{{ eip.public_ip }}'
+    register: eip_info
+
+  - assert:
+      that:
+      - tag_eip is not changed
+      - '"AnsibleEIPTestPrefix" in eip_info.addresses[0].tags'
+      - '"another_tag" in eip_info.addresses[0].tags'
+      - '"third tag" in eip_info.addresses[0].tags'
+      - eip_info.addresses[0].tags['AnsibleEIPTestPrefix'] == resource_prefix
+      - eip_info.addresses[0].tags['another_tag'] == 'another Value ' + resource_prefix
+      - eip_info.addresses[0].tags['third tag'] == 'Third tag - ' + resource_prefix
+
+  # ------------------------------------------------------------------------------------------
+
+  - name: Purge tags - check_mode
+    ec2_eip:
+      state: present
+      public_ip: '{{ eip.public_ip }}'
+      tags:
+        third tag: Third tag - {{ resource_prefix }}
+      purge_tags: true
+    register: tag_eip
+    check_mode: yes
+
+  - assert:
+      that:
+      - tag_eip is changed
+
+  - name: Purge tags
+    ec2_eip:
+      state: present
+      public_ip: '{{ eip.public_ip }}'
+      tags:
+        third tag: Third tag - {{ resource_prefix }}
+      purge_tags: true
+    register: tag_eip
+
+  - ec2_eip_info:
+      filters:
+        public-ip: '{{ eip.public_ip }}'
+    register: eip_info
+
+  - assert:
+      that:
+      - tag_eip is changed
+      - '"AnsibleEIPTestPrefix" not in eip_info.addresses[0].tags'
+      - '"another_tag" not in eip_info.addresses[0].tags'
+      - '"third tag" in eip_info.addresses[0].tags'
+      - eip_info.addresses[0].tags['third tag'] == 'Third tag - ' + resource_prefix
+
+  - name: Purge tags (idempotence) - check_mode
+    ec2_eip:
+      state: present
+      public_ip: '{{ eip.public_ip }}'
+      tags:
+        third tag: Third tag - {{ resource_prefix }}
+      purge_tags: true
+    register: tag_eip
+    check_mode: yes
+
+  - assert:
+      that:
+      - tag_eip is not changed
+
+  - name: Purge tags (idempotence)
+    ec2_eip:
+      state: present
+      public_ip: '{{ eip.public_ip }}'
+      tags:
+        third tag: Third tag - {{ resource_prefix }}
+      purge_tags: true
+    register: tag_eip
+
+  - ec2_eip_info:
+      filters:
+        public-ip: '{{ eip.public_ip }}'
+    register: eip_info
+
+  - assert:
+      that:
+      - tag_eip is not changed
+      - '"AnsibleEIPTestPrefix" not in eip_info.addresses[0].tags'
+      - '"another_tag" not in eip_info.addresses[0].tags'
+      - '"third tag" in eip_info.addresses[0].tags'
+      - eip_info.addresses[0].tags['third tag'] == 'Third tag - ' + resource_prefix
+
+  # ----- Cleanup ------------------------------------------------------------------------------
+
+  always:
+
+  - name: Cleanup instance (by id)
+    ec2_instance:
+      instance_ids: '{{ create_ec2_instance_result.instance_ids }}'
+      state: absent
+      wait: true
+    ignore_errors: true
+
+  - name: Cleanup instance (by name)
+    ec2_instance:
+      name: '{{ resource_prefix }}-instance'
+      state: absent
+      wait: true
+    ignore_errors: true
+
+  - name: Cleanup ENI A
+    ec2_eni:
+      state: absent
+      eni_id: '{{ eni_create_a.interface.id }}'
+    ignore_errors: true
+
+  - name: Cleanup ENI B
+    ec2_eni:
+      state: absent
+      eni_id: '{{ eni_create_b.interface.id }}'
+    ignore_errors: true
+
+  - name: Cleanup instance eip
+    ec2_eip:
+      state: absent
+      public_ip: '{{ instance_eip.public_ip }}'
+    register: eip_cleanup
+    retries: 5
+    delay: 5
+    until: eip_cleanup is successful
+    ignore_errors: true
+
+  - name: Cleanup IGW
+    ec2_vpc_igw:
+      state: absent
+      vpc_id: '{{ vpc_result.vpc.id }}'
+    register: vpc_igw
+    ignore_errors: true
+
+  - name: Cleanup security group
+    ec2_group:
+      state: absent
+      name: '{{ resource_prefix }}-sg'
+    ignore_errors: true
+
+  - name: Cleanup Subnet
+    ec2_vpc_subnet:
+      state: absent
+      cidr: '{{ subnet_cidr }}'
+      vpc_id: '{{ vpc_result.vpc.id }}'
+    ignore_errors: true
+
+  - name: Cleanup eip
+    ec2_eip:
+      state: absent
+      public_ip: '{{ eip.public_ip }}'
+    ignore_errors: true
+
+  - name: Cleanup reallocate_eip
+    ec2_eip:
+      state: absent
+      public_ip: '{{ reallocate_eip.public_ip }}'
+    ignore_errors: true
+
+  - name: Cleanup backend_eip
+    ec2_eip:
+      state: absent
+      public_ip: '{{ backend_eip.public_ip }}'
+    ignore_errors: true
+
+  - name: Cleanup no_tagged_eip
+    ec2_eip:
+      state: absent
+      public_ip: '{{ no_tagged_eip.public_ip }}'
+    ignore_errors: true
+
+  - name: Cleanup VPC
+    ec2_vpc_net:
+      state: absent
+      name: '{{ resource_prefix }}-vpc'
+      cidr_block: '{{ vpc_cidr }}'
+    ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/aliases
new file mode 100644
index 000000000..9adce4567
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+
+ec2_eni_info
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/defaults/main.yml
new file mode 100644
index 000000000..364c435cf
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/defaults/main.yml
@@ -0,0 +1,16 @@
+---
+availability_zone: '{{ ec2_availability_zone_names[0] }}'
+
+vpc_seed_a: '{{ resource_prefix }}'
+vpc_seed_b: '{{ resource_prefix }}-ec2_eni'
+vpc_prefix: '10.{{ 256 | random(seed=vpc_seed_a) }}.{{ 256 | random(seed=vpc_seed_b) }}'
+vpc_cidr: '{{ vpc_prefix }}.128/26'
+ip_1: "{{ vpc_prefix }}.132"
+ip_2: "{{ vpc_prefix }}.133"
+ip_3: "{{ vpc_prefix }}.134"
+ip_4: "{{ vpc_prefix }}.135"
+ip_5: "{{ vpc_prefix }}.136"
+
+ec2_ips:
+- "{{ vpc_prefix }}.137"
+- "{{ vpc_prefix }}.138"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/meta/main.yml
new file mode 100644
index 000000000..2bff8543a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+- role: setup_ec2_facts
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/main.yaml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/main.yaml
new file mode 100644
index 000000000..b55f6563b
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/main.yaml
@@ -0,0 +1,159 @@
+---
+- module_defaults:
+    group/aws:
+      aws_access_key: "{{ aws_access_key }}"
+      aws_secret_key: "{{ aws_secret_key }}"
+      security_token: "{{ security_token | default(omit) }}"
+      region: "{{ aws_region }}"
+
+  collections:
+  - amazon.aws
+  - ansible.utils
+  - community.aws
+
+  block:
+
+  # ============================================================
+  - name: create a VPC
+    ec2_vpc_net:
+      name: "{{ resource_prefix }}-vpc"
+      state: present
+      cidr_block: "{{ vpc_cidr }}"
+      tags:
+        Name: "{{ resource_prefix }}-vpc"
+        Description: "Created by ansible-test"
+    register: vpc_result
+
+  - name: create a subnet
+    ec2_vpc_subnet:
+      cidr: "{{ vpc_cidr }}"
+      az: "{{ availability_zone }}"
+      vpc_id: "{{ vpc_result.vpc.id }}"
+      tags:
+        Name: "{{ resource_prefix }}-vpc"
+        Description: "Created by ansible-test"
+      state: present
+    register: vpc_subnet_result
+
+  - name: create a security group
+    ec2_group:
+      name: "{{ resource_prefix }}-sg"
+      description: "Created by {{ resource_prefix }}"
+      rules: []
+      state: present
+      vpc_id: "{{ vpc_result.vpc.id }}"
+    register: vpc_sg_result
+
+  - name: Set facts to simplify use of extra resources
+    set_fact:
+      vpc_id: "{{ vpc_result.vpc.id }}"
+      vpc_subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+      vpc_sg_id: "{{ vpc_sg_result.group_id }}"
+
+  # ============================================================
+
+  - name: Create 2 instances to test attaching and detaching network interfaces
+    ec2_instance:
+      name: "{{ resource_prefix }}-eni-instance-{{ item }}"
+      image_id: "{{ ec2_ami_id }}"
+      vpc_subnet_id: "{{ vpc_subnet_id }}"
+      instance_type: t2.micro
+      wait: false
+      security_group: "{{ vpc_sg_id }}"
+      network:
+        private_ip_address: '{{ ec2_ips[item] }}'
+    register: ec2_instances
+    loop:
+    - 0
+    - 1
+
+  # We only need these instances to be running
+  - name: set variables for the instance IDs
+    set_fact:
+      instance_id_1: "{{ ec2_instances.results[0].instance_ids[0] }}"
+      instance_id_2: "{{ ec2_instances.results[1].instance_ids[0] }}"
+
+  # ============================================================
+  - name: test basic network interface creation
+    include_tasks: ./test_eni_basic_creation.yaml
+
+  - name: test assigning and unassigning IP addresses
+    include_tasks: ./test_ipaddress_assign.yaml
+
+  - name: test attaching and detaching network interfaces
+    include_tasks: ./test_attachment.yaml
+
+  - name: test attaching and detaching multiple network interfaces
+    include_tasks: ./test_create_attached_multiple.yml
+
+  - name: test modifying source_dest_check
+    include_tasks: ./test_modifying_source_dest_check.yaml
+
+  - name: test modifying tags
+    include_tasks: ./test_modifying_tags.yaml
+
+  # Note: will delete *both* EC2 instances
+  - name: test modifying delete_on_termination
+    include_tasks: ./test_modifying_delete_on_termination.yaml
+
+  - name: test deleting ENIs
+    include_tasks: ./test_deletion.yaml
+
+  always:
+  # ============================================================
+  # Some test problems are caused by "eventual consistency"
+  # describe the ENIs in the account so we can see what's happening
+  - name: Describe ENIs in account
+    ec2_eni_info: {}
+
+  # ============================================================
+  - name: remove the network interfaces
+    ec2_eni:
+      eni_id: "{{ item }}"
+      force_detach: True
+      state: absent
+    ignore_errors: true
+    retries: 5
+    loop:
+    - "{{ eni_id_1 | default(omit) }}"
+    - "{{ eni_id_2 | default(omit) }}"
+    - "{{ eni_id_3 | default(omit) }}"
+
+  - name: terminate the instances
+    ec2_instance:
+      state: absent
+      instance_ids:
+      - "{{ instance_id_1 }}"
+      - "{{ instance_id_2 }}"
+      wait: True
+    ignore_errors: true
+    retries: 5
+    when: instance_id_1 is defined and instance_id_2 is defined
+
+  - name: remove the security group
+    ec2_group:
+      name: "{{ resource_prefix }}-sg"
+      description: "{{ resource_prefix }}"
+      rules: []
+      state: absent
+      vpc_id: "{{ vpc_result.vpc.id }}"
+    ignore_errors: true
+    retries: 5
+
+  - name: remove the subnet
+    ec2_vpc_subnet:
+      cidr: "{{ vpc_cidr }}"
+      az: "{{ availability_zone }}"
+      vpc_id: "{{ vpc_result.vpc.id }}"
+      state: absent
+    ignore_errors: true
+    retries: 5
+    when: vpc_subnet_result is defined
+
+  - name: remove the VPC
+    ec2_vpc_net:
+      name: "{{ resource_prefix }}-vpc"
+      cidr_block: "{{ vpc_cidr }}"
+      state: absent
+    ignore_errors: true
+    retries: 5
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_attachment.yaml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_attachment.yaml
new file mode 100644
index 000000000..3ce0e9353
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_attachment.yaml
@@ -0,0 +1,278 @@
+# ============================================================
+# If we don't stop the instances they can get stuck "detaching"
+- name: Ensure test instances are stopped
+  ec2_instance:
+    state: stopped
+    instance_ids:
+    - "{{ instance_id_1 }}"
+    - "{{ instance_id_2 }}"
+    wait: True
+
+- name: attach the network interface to instance 1 (check mode)
+  ec2_eni:
+    instance_id: "{{ instance_id_1 }}"
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+    state: present
+    attached: True
+  check_mode: true
+  register: result_check_mode
+
+- assert:
+    that:
+    - result_check_mode.changed
+
+- name: attach the network interface to instance 1
+  ec2_eni:
+    instance_id: "{{ instance_id_1 }}"
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+    state: present
+    attached: True
+  register: result
+- ec2_eni_info:
+    eni_id: '{{ eni_id_1 }}'
+  register: eni_info
+
+- assert:
+    that:
+    - result.changed
+    - result.interface.attachment is defined
+    - result.interface.attachment is mapping
+    - result.interface.attachment.instance_id == instance_id_1
+    - _interface_0.attachment is defined
+    - _interface_0.attachment is mapping
+    - '"attach_time" in _interface_0.attachment'
+    - _interface_0.attachment.attach_time is string
+    - '"attachment_id" in _interface_0.attachment'
+    - _interface_0.attachment.attachment_id.startswith("eni-attach-")
+    - '"delete_on_termination" in _interface_0.attachment'
+    - _interface_0.attachment.delete_on_termination == False
+    - '"device_index" in _interface_0.attachment'
+    - _interface_0.attachment.device_index == 1
+    - '"instance_id" in _interface_0.attachment'
+    - _interface_0.attachment.instance_id == instance_id_1
+    - '"instance_owner_id" in _interface_0.attachment'
+    - _interface_0.attachment.instance_owner_id is string
+    - '"status" in _interface_0.attachment'
+    - _interface_0.attachment.status == "attached"
+  vars:
+    _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+- name: verify the eni is attached
+  ec2_eni:
+    instance_id: "{{ instance_id_1 }}"
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+    state: present
+    attached: True
+  register: result
+- ec2_eni_info:
+    eni_id: '{{ eni_id_1 }}'
+  register: eni_info
+
+- assert:
+    that:
+    - not result.changed
+    - result.interface.attachment is defined
+    - result.interface.attachment.instance_id == instance_id_1
+    - _interface_0.attachment is defined
+    - _interface_0.attachment is mapping
+    - '"attach_time" in _interface_0.attachment'
+    - _interface_0.attachment.attach_time is string
+    - '"attachment_id" in _interface_0.attachment'
+    - _interface_0.attachment.attachment_id.startswith("eni-attach-")
+    - '"delete_on_termination" in _interface_0.attachment'
+    - _interface_0.attachment.delete_on_termination == False
+    - '"device_index" in _interface_0.attachment'
+    - _interface_0.attachment.device_index == 1
+    - '"instance_id" in _interface_0.attachment'
+    - _interface_0.attachment.instance_id == instance_id_1
+    - '"instance_owner_id" in _interface_0.attachment'
+    - _interface_0.attachment.instance_owner_id is string
+    - '"status" in _interface_0.attachment'
+    - _interface_0.attachment.status == "attached"
+  vars:
+    _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+# ============================================================
+- name: test attaching the network interface to a different instance (check mode)
+  ec2_eni:
+    instance_id: "{{ instance_id_2 }}"
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+    state: present
+    attached: True
+  check_mode: true
+  register: result_check_mode
+
+- assert:
+    that:
+    - result_check_mode.changed
+
+- name: test attaching the network interface to a different instance
+  ec2_eni:
+    instance_id: "{{ instance_id_2 }}"
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+    state: present
+    attached: True
+  register: result
+- ec2_eni_info:
+    eni_id: '{{ eni_id_1 }}'
+  register: eni_info
+
+- assert:
+    that:
+    - result.changed
+    - result.interface.attachment is defined
+    - result.interface.attachment.instance_id == instance_id_2
+    - _interface_0.attachment is defined
+    - '"instance_id" in _interface_0.attachment'
+    - _interface_0.attachment.instance_id == instance_id_2
+  vars:
+    _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+# ============================================================
+- name: detach the network interface (check mode)
+  ec2_eni:
+    instance_id: "{{ instance_id_2 }}"
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+    state: present
+    attached: False
+  check_mode: true
+  register: result_check_mode
+
+- assert:
+    that:
+    - result_check_mode.changed
+
+- name: detach the network interface
+  ec2_eni:
+    instance_id: "{{ instance_id_2 }}"
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+    state: present
+    attached: False
+  register: result
+- ec2_eni_info:
+    eni_id: '{{ eni_id_1 }}'
+  register: eni_info
+
+- assert:
+    that:
+    - result.changed
+    - result.interface.attachment is undefined
+    - _interface_0.attachment is undefined
+  vars:
+    _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+- name: verify the network interface was detached
+  ec2_eni:
+    instance_id: "{{ instance_id_2 }}"
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+    state: present
+    attached: False
+  register: result
+
+- assert:
+    that:
+    - not result.changed
+    - result.interface.attachment is undefined
+
+# ============================================================
+- name: reattach the network interface to test deleting it
+  ec2_eni:
+    instance_id: "{{ instance_id_2 }}"
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+    state: present
+    attached: True
+  register: result
+
+- assert:
+    that:
+    - result.changed
+    - result.interface.attachment is defined
+    - result.interface.attachment.instance_id == instance_id_2
+
+- name: test that deleting the network interface while attached must be intentional
+  ec2_eni:
+    eni_id: "{{ eni_id_1 }}"
+    state: absent
+  register: result
+  ignore_errors: True
+
+- assert:
+    that:
+    - result.failed
+    - '"currently in use" in result.msg'
+
+# ============================================================
+- name: Ensure test instance is running (will block non-forced detachment)
+  ec2_instance:
+    state: running
+    instance_ids:
+    - "{{ instance_id_2 }}"
+    wait: True
+
+- name: delete an attached network interface with force_detach (check mode)
+  ec2_eni:
+    force_detach: True
+    eni_id: "{{ eni_id_1 }}"
+    state: absent
+  check_mode: true
+  register: result_check_mode
+  ignore_errors: True
+
+- assert:
+    that:
+    - result_check_mode.changed
+
+- name: delete an attached network interface with force_detach
+  ec2_eni:
+    force_detach: True
+    eni_id: "{{ eni_id_1 }}"
+    state: absent
+  register: result
+  ignore_errors: True
+
+- assert:
+    that:
+    - result.changed
+    - result.interface.attachment is undefined
+
+- name: test removing a network interface that does not exist
+  ec2_eni:
+    force_detach: True
+    eni_id: "{{ eni_id_1 }}"
+    state: absent
+  register: result
+
+- assert:
+    that:
+    - not result.changed
+    - result.interface.attachment is undefined
+
+# ============================================================
+- name: recreate the network interface
+  ec2_eni:
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_id }}"
+    state: present
+  register: result
+
+- set_fact:
+    eni_id_1: "{{ result.interface.id }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_create_attached_multiple.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_create_attached_multiple.yml
new file mode 100644
index 000000000..c82139140
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_create_attached_multiple.yml
@@ -0,0 +1,121 @@
+---
+- name: Create instance to test attaching and detaching network interfaces for this test
+  ec2_instance:
+    name: "{{ resource_prefix }}-instance"
+    image_id: "{{ ec2_ami_id }}"
+    vpc_subnet_id: "{{ vpc_subnet_id }}"
+    instance_type: t2.micro
+  register: ec2_instances
+
+- name: set variable for the instance ID
+  set_fact:
+    instance_id_3: "{{ ec2_instances.instances[0].instance_id }}"
+
+#=================================================================
+
+- name: Create and attach another interface to above instance - check_mode
+  amazon.aws.ec2_eni:
+    name: "{{ resource_prefix }}-eni"
+    instance_id: "{{ instance_id_3 }}"
+    device_index: 1
+    subnet_id: "{{ vpc_subnet_id }}"
+    state: present
+    attached: true
+    delete_on_termination: true
+  check_mode: true
+  register: result
+
+# Get the instance info and ENI info to verify attachment of second eni
+- ec2_instance_info:
+    instance_ids:
- "{{ instance_id_3 }}" + register: instance_info_result + + - assert: + that: + - result is changed + - result is not failed + - instance_info_result.instances[0].network_interfaces | length == 1 + - '"Would have created ENI if not in check mode." in result.msg' + - "'ec2:CreateNetworkInterface' not in {{ result.resource_actions }}" + + - name: Create and attach another interface to above instance + amazon.aws.ec2_eni: + name: "{{ resource_prefix }}-eni" + instance_id: "{{ instance_id_3 }}" + device_index: 1 + subnet_id: "{{ vpc_subnet_id }}" + state: present + attached: true + delete_on_termination: true + register: result + + - name: Set variable for the ENI ID + set_fact: + eni_id_attached_multiple: "{{ result.interface.id }}" + + # Get the instance info and ENI info to verify attachment of second eni + - ec2_instance_info: + instance_ids: + - "{{ instance_id_3 }}" + register: instance_info_result + - ec2_eni_info: + eni_id: "{{ eni_id_attached_multiple }}" + register: eni_info + + - name: Assert that the interface attachment was successful + assert: + that: + - result is changed + - result is not failed + - instance_info_result.instances[0].network_interfaces | length == 2 + - eni_info.network_interfaces[0].attachment.instance_id == instance_id_3 + - eni_info.network_interfaces[0].attachment.device_index == 1 + + - name: Create and attach another interface to above instance - check_mode - idempotent + amazon.aws.ec2_eni: + name: "{{ resource_prefix }}-eni" + instance_id: "{{ instance_id_3 }}" + device_index: 1 + subnet_id: "{{ vpc_subnet_id }}" + state: present + attached: true + delete_on_termination: true + check_mode: true + register: result + + # Get the instance info and ENI info to verify attachment of second eni + - ec2_instance_info: + instance_ids: + - "{{ instance_id_3 }}" + register: instance_info_result + + - name: Assert that the interface would have been modified if not in check_mode + assert: + that: + - result is changed + - result is not failed + - instance_info_result.instances[0].network_interfaces | length == 2 + - '"Would have modified ENI: {{ eni_id_attached_multiple }} if not in check mode" in result.msg' + - "'ec2:CreateNetworkInterface' not in {{ result.resource_actions }}" + - "'ec2:ModifyNetworkInterfaceAttribute' not in {{ result.resource_actions }}" + +#================================================================= + + - name: remove the network interface created in this test + ec2_eni: + eni_id: "{{ eni_id_attached_multiple }}" + force_detach: True + state: absent + ignore_errors: true + retries: 5 + + - name: terminate the instance created in this test + ec2_instance: + state: absent + instance_ids: + - "{{ instance_id_3 }}" + wait: True + ignore_errors: true + retries: 5 + when: instance_id_3 is defined diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_deletion.yaml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_deletion.yaml new file mode 100644 index 000000000..a0144aaba --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_deletion.yaml @@ -0,0 +1,118 @@ +--- +# ============================================================ +- name: test deleting the unattached network interface by using the ID (check mode) + ec2_eni: + eni_id: "{{ eni_id_1 }}" + name: "{{ resource_prefix }}" + subnet_id: "{{ vpc_subnet_id }}" + state: absent + check_mode: True + register: result_check_mode + +- assert: + that: + - result_check_mode.changed + +- name: test 
deleting the unattached network interface by using the ID + ec2_eni: + eni_id: "{{ eni_id_1 }}" + name: "{{ resource_prefix }}" + subnet_id: "{{ vpc_subnet_id }}" + state: absent + register: result +- ec2_eni_info: + eni_id: "{{ eni_id_1 }}" + register: eni_info + +- assert: + that: + - result.changed + - result.interface is undefined + - '"network_interfaces" in eni_info' + - eni_id_1 not in ( eni_info.network_interfaces | selectattr('id') | map(attribute='id') | list ) + +- name: test removing the network interface by ID is idempotent (check mode) + ec2_eni: + eni_id: "{{ eni_id_1 }}" + name: "{{ resource_prefix }}" + subnet_id: "{{ vpc_subnet_id }}" + state: absent + check_mode: True + register: result_check_mode + +- assert: + that: + - not result_check_mode.changed + +- name: test removing the network interface by ID is idempotent + ec2_eni: + eni_id: "{{ eni_id_1 }}" + name: "{{ resource_prefix }}" + subnet_id: "{{ vpc_subnet_id }}" + state: absent + register: result + +- assert: + that: + - not result.changed + - result.interface is undefined + +# ============================================================ +- name: add a name tag to the other network interface before deleting it + ec2_eni: + eni_id: "{{ eni_id_2 }}" + name: "{{ resource_prefix }}" + state: present + +- name: test deleting the unattached network interface by using the name + ec2_eni: + name: "{{ resource_prefix }}" + subnet_id: "{{ vpc_subnet_id }}" + state: absent + register: result +- ec2_eni_info: + eni_id: "{{ eni_id_2 }}" + register: eni_info + +- assert: + that: + - result.changed + - result.interface is undefined + - '"network_interfaces" in eni_info' + - eni_id_2 not in ( eni_info.network_interfaces | selectattr('id') | map(attribute='id') | list ) + +- name: test removing the network interface by name is idempotent + ec2_eni: + name: "{{ resource_prefix }}" + subnet_id: "{{ vpc_subnet_id }}" + state: absent + register: result + +- assert: + that: + - not result.changed + - result.interface is undefined + +- name: verify that the network interface ID does not exist (retry-delete by ID) + ec2_eni: + eni_id: "{{ eni_id_2 }}" + state: absent + register: result + +- assert: + that: + - not result.changed + - result.interface is undefined + +# ============================================================ + +- name: Fetch ENI info without filter + ec2_eni_info: + register: eni_info + +- name: Assert that ec2_eni_info doesn't contain the two interfaces we just deleted + assert: + that: + - '"network_interfaces" in eni_info' + - eni_id_1 not in ( eni_info.network_interfaces | selectattr('id') | map(attribute='id') | list ) + - eni_id_2 not in ( eni_info.network_interfaces | selectattr('id') | map(attribute='id') | list ) diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_eni_basic_creation.yaml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_eni_basic_creation.yaml new file mode 100644 index 000000000..3f0530348 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_eni_basic_creation.yaml @@ -0,0 +1,263 @@ +--- +# ============================================================ +- name: create a network interface (check mode) + ec2_eni: + device_index: 1 + private_ip_address: "{{ ip_1 }}" + subnet_id: "{{ vpc_subnet_id }}" + state: present + check_mode: true + register: result_check_mode + +- assert: + that: + - result_check_mode.changed + +- name: create a network interface + ec2_eni: + device_index: 1 + 
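+    # device_index 1 is the secondary interface slot; index 0 is reserved for
+    # an instance's primary network interface.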
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_id }}"
+    state: present
+  register: result
+
+- assert:
+    that:
+    - result.changed
+    - result.interface.private_ip_addresses | length == 1
+
+- set_fact:
+    eni_id_1: "{{ result.interface.id }}"
+
+- name: Fetch ENI info (by ID)
+  ec2_eni_info:
+    eni_id: '{{ eni_id_1 }}'
+  register: eni_info
+
+- name: Assert that ec2_eni_info returns all the values we expect
+  vars:
+    _interface_0: '{{ eni_info.network_interfaces[0] }}'
+  assert:
+    that:
+    - '"network_interfaces" in eni_info'
+    - eni_info.network_interfaces | length == 1
+    - '"association" not in _interface_0'
+    - '"attachment" not in _interface_0'
+    - '"availability_zone" in _interface_0'
+    - _interface_0.availability_zone.startswith(aws_region)
+    - '"description" in _interface_0'
+    - _interface_0.description == ""
+    - '"groups" in _interface_0'
+    - _interface_0.groups is iterable
+    - _interface_0.groups | length == 1
+    - '"id" in _interface_0'
+    - _interface_0.id.startswith("eni-")
+    - _interface_0.id == eni_id_1
+    - '"interface_type" in _interface_0'
+    - _interface_0.interface_type is string
+    - '"ipv6_addresses" in _interface_0'
+    - _interface_0.ipv6_addresses is iterable
+    - _interface_0.ipv6_addresses | length == 0
+    - '"mac_address" in _interface_0'
+    - _interface_0.mac_address is string
+    - _interface_0.mac_address | length == 17
+    - '"network_interface_id" in _interface_0'
+    - _interface_0.network_interface_id.startswith("eni-")
+    - _interface_0.network_interface_id == eni_id_1
+    - '"owner_id" in _interface_0'
+    - _interface_0.owner_id is string
+    - '"private_dns_name" in _interface_0'
+    - _interface_0.private_dns_name is string
+    - _interface_0.private_dns_name.endswith("ec2.internal")
+    - '"private_ip_address" in _interface_0'
+    - _interface_0.private_ip_address | ansible.utils.ipaddr
+    - _interface_0.private_ip_address == ip_1
+    - '"private_ip_addresses" in _interface_0'
+    - _interface_0.private_ip_addresses | length == 1
+    - ip_1 in ( eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list )
+    - '"requester_id" in _interface_0'
+    - _interface_0.requester_id is string
+    - '"requester_managed" in _interface_0'
+    - _interface_0.requester_managed == False
+    - '"source_dest_check" in _interface_0'
+    - _interface_0.source_dest_check == True
+    - '"status" in _interface_0'
+    - _interface_0.status == "available"
+    - '"subnet_id" in _interface_0'
+    - _interface_0.subnet_id == vpc_subnet_id
+    - '"tag_set" in _interface_0'
+    - _interface_0.tag_set is mapping
+    - '"vpc_id" in _interface_0'
+    - _interface_0.vpc_id == vpc_id
+
+- name: test idempotence by using the same private_ip_address (check mode)
+  ec2_eni:
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_id }}"
+    state: present
+  check_mode: true
+  register: result_check_mode
+
+- assert:
+    that:
+    - not result_check_mode.changed
+
+- name: test idempotence by using the same private_ip_address
+  ec2_eni:
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_id }}"
+    state: present
+  register: result
+
+- assert:
+    that:
+    - not result.changed
+    - result.interface.id == eni_id_1
+    - result.interface.private_ip_addresses | length == 1
+
+# ============================================================
+
+- name: create a second network interface to test IP reassignment
+  ec2_eni:
+    device_index: 1
+    private_ip_address: "{{ ip_5 }}"
+    subnet_id: "{{ vpc_subnet_id }}"
+    state: present
+  register: result
+
+- assert:
+    that:
+    - result.changed
+    - result.interface.id != eni_id_1
+
+- name: save the second network interface ID for cleanup
+  set_fact:
+    eni_id_2: "{{ result.interface.id }}"
+
+- name: Fetch ENI info (using filter)
+  ec2_eni_info:
+    filters:
+      network-interface-id: '{{ eni_id_2 }}'
+  register: eni_info
+
+- name: Assert that ec2_eni_info returns all the values we expect
+  vars:
+    _interface_0: '{{ eni_info.network_interfaces[0] }}'
+  assert:
+    that:
+    - '"network_interfaces" in eni_info'
+    - eni_info.network_interfaces | length == 1
+    - '"association" not in _interface_0'
+    - '"attachment" not in _interface_0'
+    - '"availability_zone" in _interface_0'
+    - _interface_0.availability_zone.startswith(aws_region)
+    - '"description" in _interface_0'
+    - _interface_0.description == ""
+    - '"groups" in _interface_0'
+    - _interface_0.groups is iterable
+    - _interface_0.groups | length == 1
+    - '"id" in _interface_0'
+    - _interface_0.id.startswith("eni-")
+    - _interface_0.id == eni_id_2
+    - '"interface_type" in _interface_0'
+    - _interface_0.interface_type is string
+    - '"ipv6_addresses" in _interface_0'
+    - _interface_0.ipv6_addresses is iterable
+    - _interface_0.ipv6_addresses | length == 0
+    - '"mac_address" in _interface_0'
+    - _interface_0.mac_address is string
+    - _interface_0.mac_address | length == 17
+    - '"network_interface_id" in _interface_0'
+    - _interface_0.network_interface_id.startswith("eni-")
+    - _interface_0.network_interface_id == eni_id_2
+    - '"owner_id" in _interface_0'
+    - _interface_0.owner_id is string
+    - '"private_dns_name" in _interface_0'
+    - _interface_0.private_dns_name is string
+    - _interface_0.private_dns_name.endswith("ec2.internal")
+    - '"private_ip_address" in _interface_0'
+    - _interface_0.private_ip_address | ansible.utils.ipaddr
+    - _interface_0.private_ip_address == ip_5
+    - '"private_ip_addresses" in _interface_0'
+    - _interface_0.private_ip_addresses | length == 1
+    - ip_5 in ( eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list )
+    - '"requester_id" in _interface_0'
+    - _interface_0.requester_id is string
+    - '"requester_managed" in _interface_0'
+    - _interface_0.requester_managed == False
+    - '"source_dest_check" in _interface_0'
+    - _interface_0.source_dest_check == True
+    - '"status" in _interface_0'
+    - _interface_0.status == "available"
+    - '"subnet_id" in _interface_0'
+    - _interface_0.subnet_id == vpc_subnet_id
+    - '"tag_set" in _interface_0'
+    - _interface_0.tag_set is mapping
+    - '"vpc_id" in _interface_0'
+    - _interface_0.vpc_id == vpc_id
+
+- name: Fetch ENI info without filter
+  ec2_eni_info:
+  register: eni_info
+
+- name: Assert that ec2_eni_info contains at least the two interfaces we expect
+  assert:
+    that:
+    - '"network_interfaces" in eni_info'
+    - eni_info.network_interfaces | length >= 2
+    - eni_id_1 in ( eni_info.network_interfaces | selectattr('id') | map(attribute='id') | list )
+    - eni_id_2 in ( eni_info.network_interfaces | selectattr('id') | map(attribute='id') | list )
+
+# ============================================================
+# Run some VPC-filter-based tests of ec2_eni_info
+
+- name: Fetch ENI info with VPC filters - Available
+  ec2_eni_info:
+    filters:
+      vpc-id: '{{ vpc_id }}'
+      status: 'available'
+  register: eni_info
+
+- name: Assert that ec2_eni_info contains exactly the two available interfaces we expect
+  assert:
+    that:
+    - '"network_interfaces" in eni_info'
+    - eni_info.network_interfaces | length == 2
+    - eni_id_1 in ( eni_info.network_interfaces | selectattr('id') | map(attribute='id') | list )
+    - eni_id_2 in ( eni_info.network_interfaces | selectattr('id') | map(attribute='id') | list )
+
+- name: Fetch ENI info with VPC filters - VPC
+  ec2_eni_info:
+    filters:
+      vpc-id: '{{ vpc_id }}'
+  register: eni_info
+
+- name: Assert that ec2_eni_info contains the four interfaces we expect
+  assert:
+    that:
+    - '"network_interfaces" in eni_info'
+    - eni_info.network_interfaces | length == 4
+    - eni_id_1 in ( eni_info.network_interfaces | selectattr('id') | map(attribute='id') | list )
+    - eni_id_2 in ( eni_info.network_interfaces | selectattr('id') | map(attribute='id') | list )
+    - ec2_ips[0] in ( eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list )
+    - ec2_ips[1] in ( eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list )
+
+
+# =========================================================
+
+- name: create another network interface without private_ip_address
+  ec2_eni:
+    device_index: 1
+    subnet_id: "{{ vpc_subnet_id }}"
+    state: present
+  register: result_no_private_ip
+
+- assert:
+    that:
+    - result_no_private_ip.changed
+
+- name: save the third network interface ID for cleanup
+  set_fact:
+    eni_id_3: "{{ result_no_private_ip.interface.id }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_ipaddress_assign.yaml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_ipaddress_assign.yaml
new file mode 100644
index 000000000..3f6d85b81
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_ipaddress_assign.yaml
@@ -0,0 +1,325 @@
+---
+# ============================================================
+- name: add two implicit secondary IPs (check mode)
+  ec2_eni:
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_id }}"
+    state: present
+    secondary_private_ip_address_count: 2
+  check_mode: true
+  register: result_check_mode
+
+- assert:
+    that:
+    - result_check_mode.changed
+
+- name: add two implicit secondary IPs
+  ec2_eni:
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_id }}"
+    state: present
+    secondary_private_ip_address_count: 2
+  register: result
+- ec2_eni_info:
+    eni_id: '{{ eni_id_1 }}'
+  register: eni_info
+
+- assert:
+    that:
+    - result.changed
+    - result.interface.id == eni_id_1
+    - result.interface.private_ip_addresses | length == 3
+    - _interface_0.private_ip_addresses | length == 3
+    - ip_1 in ( eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list )
+  vars:
+    _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+- name: test idempotence with two implicit secondary IPs (check mode)
+  ec2_eni:
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_id }}"
+    state: present
+    secondary_private_ip_address_count: 2
+  check_mode: true
+  register: result_check_mode
+
+- assert:
+    that:
+    - not result_check_mode.changed
+
+- name: test idempotence with two implicit secondary IPs
+  ec2_eni:
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_id }}"
+    state: present
+    secondary_private_ip_address_count: 2
+  register: result
+- ec2_eni_info:
+    eni_id: '{{ eni_id_1 }}'
+  register: eni_info
+
+- assert:
+    that:
+    - not result.changed
+    - result.interface.id == eni_id_1
+    - result.interface.private_ip_addresses | length == 3
+    - _interface_0.private_ip_addresses | length == 3
+    - ip_1 in ( eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list )
+  vars:
+    _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+# ============================================================
+- name: ensure secondary addresses are only removed if purge is set to true
+  ec2_eni:
+    purge_secondary_private_ip_addresses: false
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_id }}"
+    state: present
+    secondary_private_ip_addresses: []
+  register: result
+- ec2_eni_info:
+    eni_id: '{{ eni_id_1 }}'
+  register: eni_info
+
+- assert:
+    that:
+    - not result.changed
+    - result.interface.id == eni_id_1
+    - result.interface.private_ip_addresses | length == 3
+    - _interface_0.private_ip_addresses | length == 3
+    - ip_1 in ( eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list )
+  vars:
+    _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+# ============================================================
+
+# Using secondary_private_ip_address_count leads to unpredictable IP assignment
+# For the following test, first find an IP that has not been used yet
+
+- name: save the list of private IPs in use
+  set_fact:
+    current_private_ips: "{{ result.interface | json_query('private_ip_addresses[*].private_ip_address') | list }}"
+
+- name: set new_secondary_ip to an IP that has not been used
+  set_fact:
+    new_secondary_ip: "{{ [ip_2, ip_3, ip_4] | difference(current_private_ips) | first }}"
+
+- name: add an explicit secondary address without purging the ones added implicitly
+  ec2_eni:
+    purge_secondary_private_ip_addresses: false
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_id }}"
+    state: present
+    secondary_private_ip_addresses:
+    - "{{ new_secondary_ip }}"
+  register: result
+- ec2_eni_info:
+    eni_id: '{{ eni_id_1 }}'
+  register: eni_info
+
+- assert:
+    that:
+    - result.changed
+    - result.interface.id == eni_id_1
+    - result.interface.private_ip_addresses | length == 4
+    - _interface_0.private_ip_addresses | length == 4
+    # Only ip_1 and the explicitly requested IP are guaranteed to be present
+    - ip_1 in _private_ips
+    - new_secondary_ip in _private_ips
+  vars:
+    _interface_0: '{{ eni_info.network_interfaces[0] }}'
+    _private_ips: "{{ eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list }}"
+
+# ============================================================
+- name: remove secondary address (check mode)
+  ec2_eni:
+    purge_secondary_private_ip_addresses: true
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_id }}"
+    state: present
+    secondary_private_ip_addresses: []
+  check_mode: true
+  register: result_check_mode
+
+- assert:
+    that:
+    - result_check_mode.changed
+
+- name: remove secondary address
+  ec2_eni:
+    purge_secondary_private_ip_addresses: true
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_id }}"
+    state: present
+    secondary_private_ip_addresses: []
+  register: result
+- ec2_eni_info:
+    eni_id: '{{ eni_id_1 }}'
+  register: eni_info
+
+- assert:
+    that:
+    - result.changed
+    - result.interface.id == eni_id_1
+    - result.interface.private_ip_addresses | length == 1
+    - _interface_0.private_ip_addresses | length == 1
+    - ip_1 in ( eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list )
+  vars:
+    _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+- name: test idempotent behavior purging secondary addresses (check mode)
+  ec2_eni:
+    purge_secondary_private_ip_addresses: true
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_id }}"
+    state: present
+    secondary_private_ip_addresses: []
+  check_mode: true
+  register: result_check_mode
+
+- assert:
+    that:
+    - not result_check_mode.changed
+
+- name: test idempotent behavior purging secondary addresses
+  ec2_eni:
+    purge_secondary_private_ip_addresses: true
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_id }}"
+    state: present
+    secondary_private_ip_addresses: []
+  register: result
+- ec2_eni_info:
+    eni_id: '{{ eni_id_1 }}'
+  register: eni_info
+
+- assert:
+    that:
+    - not result.changed
+    - result.interface.id == eni_id_1
+    - result.interface.private_ip_addresses | length == 1
+    - _interface_0.private_ip_addresses | length == 1
+    - ip_1 in ( eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list )
+  vars:
+    _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+# ============================================================
+
+- name: Assign secondary IP address to second ENI
+  ec2_eni:
+    device_index: 1
+    private_ip_address: "{{ ip_5 }}"
+    subnet_id: "{{ vpc_subnet_id }}"
+    state: present
+    secondary_private_ip_addresses:
+    - "{{ ip_4 }}"
+  register: result
+- ec2_eni_info:
+    eni_id: '{{ eni_id_2 }}'
+  register: eni_info
+
+- assert:
+    that:
+    - result.changed
+    - result.interface.id == eni_id_2
+    - result.interface.private_ip_addresses | length == 2
+    - _interface_0.private_ip_addresses | length == 2
+    - ip_5 in ( eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list )
+    - ip_4 in ( eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list )
+  vars:
+    _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+- name: test that reassignment of an IP already in use fails when not explicitly allowed (default for allow_reassignment == False)
+  ec2_eni:
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_id }}"
+    state: present
+    secondary_private_ip_addresses:
+    - "{{ ip_2 }}"
+    - "{{ ip_3 }}"
+    - "{{ ip_4 }}"
+  register: result
+  ignore_errors: yes
+
+- assert:
+    that:
+    - result.failed
+    - '"move is not allowed" in result.msg'
+
+# ============================================================
+- name: allow reassignment to add the list of secondary addresses
+  ec2_eni:
+    allow_reassignment: true
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_id }}"
+    state: present
+    secondary_private_ip_addresses:
+    - "{{ ip_2 }}"
+    - "{{ ip_3 }}"
+    - "{{ ip_4 }}"
+  register: result
+
+- assert:
+    that:
+    - result.changed
+    - result.interface.id == eni_id_1
+    - result.interface.private_ip_addresses | length == 4
+
+- name: test reassignment is idempotent
+  ec2_eni:
+    allow_reassignment: true
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_id }}"
+    state: present
+    secondary_private_ip_addresses:
+    - "{{ ip_2 }}"
+    - "{{ ip_3 }}"
+    - "{{ ip_4 }}"
+  register: result
+
+- assert:
+    that:
+    - not result.changed
+    - result.interface.id == eni_id_1
+
+# ============================================================
+
+- name: purge all the secondary addresses
+  ec2_eni:
+    purge_secondary_private_ip_addresses: true
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_id }}"
+    state: present
+    secondary_private_ip_addresses: []
+  register: result
+- ec2_eni_info:
+    eni_id: '{{ eni_id_1 }}'
+  register: eni_info
+  until: _interface_0.private_ip_addresses | length == 1
+  retries: 5
+  delay: 2
+  vars:
+    _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+- assert:
+    that:
+    - result.changed
+    - _interface_0.private_ip_addresses | length == 1
+    - ip_1 in ( eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list )
+  vars:
+    _interface_0: '{{ eni_info.network_interfaces[0] }}'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_delete_on_termination.yaml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_delete_on_termination.yaml
new file mode 100644
index 000000000..f8c6e23b1
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_delete_on_termination.yaml
@@ -0,0 +1,214 @@
+# ============================================================
+
+- name: ensure delete_on_termination defaults to False
+  ec2_eni:
+    instance_id: "{{ instance_id_2 }}"
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+    state: present
+    attached: True
+  register: result
+- ec2_eni_info:
+    eni_id: "{{ eni_id_1 }}"
+  register: eni_info
+
+- assert:
+    that:
+    - result is successful
+    - result.interface.attachment.delete_on_termination == false
+    - _interface_0.attachment.delete_on_termination == False
+  vars:
+    _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+# ============================================================
+
+- name: enable delete_on_termination (check mode)
+  ec2_eni:
+    instance_id: "{{ instance_id_2 }}"
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+    state: present
+    attached: True
+    delete_on_termination: True
+  check_mode: true
+  register: result_check_mode
+
+- assert:
+    that:
+    - result_check_mode.changed
+
+- name: enable delete_on_termination
+  ec2_eni:
+    instance_id: "{{ instance_id_2 }}"
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+    state: present
+    attached: True
+    delete_on_termination: True
+  register: result
+- ec2_eni_info:
+    eni_id: "{{ eni_id_1 }}"
+  register: eni_info
+
+- assert:
+    that:
+    - result.changed
+    - result.interface.attachment.delete_on_termination == true
+    - _interface_0.attachment.delete_on_termination == True
+  vars:
+    _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+- name: test idempotent behavior enabling delete_on_termination (check mode)
+  ec2_eni:
+    instance_id: "{{ instance_id_2 }}"
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+    state: present
+    attached: True
+    delete_on_termination: True
+  check_mode: true
+  register: result_check_mode
+
+- assert:
+    that:
+    - not result_check_mode.changed
+
+- name: test idempotent behavior enabling delete_on_termination
+  ec2_eni:
+    instance_id: "{{ instance_id_2 }}"
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+    state: present
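+    # delete_on_termination is an attribute of the attachment rather than of
+    # the ENI itself, so re-applying the same value should report no change.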
+    attached: True
+    delete_on_termination: True
+  register: result
+
+- assert:
+    that:
+    - not result.changed
+    - result.interface.attachment.delete_on_termination == true
+
+# ============================================================
+
+- name: disable delete_on_termination (check mode)
+  ec2_eni:
+    instance_id: "{{ instance_id_2 }}"
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+    state: present
+    attached: True
+    delete_on_termination: False
+  check_mode: true
+  register: result_check_mode
+
+- assert:
+    that:
+    - result_check_mode.changed
+
+- name: disable delete_on_termination
+  ec2_eni:
+    instance_id: "{{ instance_id_2 }}"
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+    state: present
+    attached: True
+    delete_on_termination: False
+  register: result
+- ec2_eni_info:
+    eni_id: "{{ eni_id_1 }}"
+  register: eni_info
+
+- assert:
+    that:
+    - result.changed
+    - result.interface.attachment.delete_on_termination == false
+    - _interface_0.attachment.delete_on_termination == False
+  vars:
+    _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+# ============================================================
+
+- name: terminate the instance to make sure the attached ENI remains
+  ec2_instance:
+    state: absent
+    instance_ids:
+    - "{{ instance_id_2 }}"
+    wait: True
+
+- name: verify the eni still exists
+  ec2_eni:
+    eni_id: "{{ eni_id_1 }}"
+    state: present
+  register: result
+
+- assert:
+    that:
+    - not result.changed
+    - result.interface.id == eni_id_1
+    - result.interface.attachment is undefined
+
+# ============================================================
+
+- name: ensure the network interface is attached
+  ec2_eni:
+    instance_id: "{{ instance_id_1 }}"
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_id }}"
+    state: present
+    attached: True
+  register: result
+
+- name: ensure delete_on_termination is true
+  ec2_eni:
+    instance_id: "{{ instance_id_1 }}"
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_id }}"
+    state: present
+    attached: True
+    delete_on_termination: True
+  register: result
+
+- name: test terminating the instance after setting delete_on_termination to true
+  ec2_instance:
+    state: absent
+    instance_ids:
+    - "{{ instance_id_1 }}"
+    wait: True
+
+- name: verify the eni was also removed
+  ec2_eni:
+    eni_id: "{{ eni_id_1 }}"
+    state: absent
+  register: result
+- ec2_eni_info:
+  register: eni_info
+
+- assert:
+    that:
+    - not result.changed
+    - '"network_interfaces" in eni_info'
+    - eni_info.network_interfaces | length >= 1
+    - eni_id_1 not in ( eni_info.network_interfaces | selectattr('id') | map(attribute='id') | list )
+    - eni_id_2 in ( eni_info.network_interfaces | selectattr('id') | map(attribute='id') | list )
+
+# ============================================================
+
+- name: recreate the network interface
+  ec2_eni:
+    device_index: 1
+    private_ip_address: "{{ ip_1 }}"
+    subnet_id: "{{ vpc_subnet_id }}"
+    state: present
+  register: result
+
+- set_fact:
+    eni_id_1: "{{ result.interface.id }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_source_dest_check.yaml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_source_dest_check.yaml
new file mode 100644
index 000000000..4259d3a81
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_source_dest_check.yaml
@@ -0,0 +1,98 @@
+# ============================================================
+- name: test source_dest_check defaults to true (check mode)
+  ec2_eni:
+    eni_id: "{{ eni_id_1 }}"
+    source_dest_check: true
+    state: present
+  check_mode: true
+  register: result_check_mode
+
+- assert:
+    that:
+    - not result_check_mode.changed
+
+- name: test source_dest_check defaults to true
+  ec2_eni:
+    eni_id: "{{ eni_id_1 }}"
+    source_dest_check: true
+    state: present
+  register: result
+
+- assert:
+    that:
+    - not result.changed
+    - result.interface.source_dest_check == true
+
+# ============================================================
+- name: disable source_dest_check
+  ec2_eni:
+    eni_id: "{{ eni_id_1 }}"
+    source_dest_check: false
+    state: present
+  register: result
+
+- name: Check source_dest_check state
+  vars:
+    _interface_0: '{{ eni_info.network_interfaces[0] }}'
+  ec2_eni_info:
+    eni_id: "{{ eni_id_1 }}"
+  register: eni_info
+  until: _interface_0.source_dest_check == False
+  retries: 5
+  delay: 2
+
+- assert:
+    that:
+    - result.changed
+    - _interface_0.source_dest_check == False
+  vars:
+    _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+- name: test idempotence disabling source_dest_check (check mode)
+  ec2_eni:
+    eni_id: "{{ eni_id_1 }}"
+    source_dest_check: false
+    state: present
+  check_mode: true
+  register: result_check_mode
+
+- assert:
+    that:
+    - not result_check_mode.changed
+
+- name: test idempotence disabling source_dest_check
+  ec2_eni:
+    eni_id: "{{ eni_id_1 }}"
+    source_dest_check: false
+    state: present
+  register: result
+
+- assert:
+    that:
+    - not result.changed
+    - result.interface.source_dest_check == false
+
+# ============================================================
+- name: enable source_dest_check
+  ec2_eni:
+    eni_id: "{{ eni_id_1 }}"
+    source_dest_check: true
+    state: present
+  register: result
+
+- name: Check source_dest_check state
+  vars:
+    _interface_0: '{{ eni_info.network_interfaces[0] }}'
+  ec2_eni_info:
+    eni_id: "{{ eni_id_1 }}"
+  register: eni_info
+  until: _interface_0.source_dest_check == True
+  retries: 5
+  delay: 2
+
+- assert:
+    that:
+    - result.changed
+    - _interface_0.source_dest_check == True
+  vars:
+    _interface_0: '{{ eni_info.network_interfaces[0] }}'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_tags.yaml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_tags.yaml
new file mode 100644
index 000000000..d26d96b5b
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_tags.yaml
@@ -0,0 +1,251 @@
+# ============================================================
+- name: verify there are no tags associated with the network interface
+  ec2_eni:
+    eni_id: "{{ eni_id_1 }}"
+    state: present
+    tags: {}
+  register: result
+
+- assert:
+    that:
+    - not result.changed
+    - not result.interface.tags
+    - result.interface.name is undefined
+
+# ============================================================
+- name: add tags to the network interface (check mode)
+  ec2_eni:
+    eni_id: "{{ eni_id_1 }}"
+    state: present
+    name: "{{ resource_prefix }}"
+    tags:
+      CreatedBy: "{{ resource_prefix }}"
+  check_mode: true
+  register: result_check_mode
+
+- assert:
+    that:
+    - result_check_mode.changed
+
+- name: add tags to the network interface
+  ec2_eni:
+    eni_id: "{{ eni_id_1 }}"
+    state: present
+    name: "{{ resource_prefix }}"
+    tags:
+      CreatedBy: "{{ resource_prefix }}"
+  register: result
+- ec2_eni_info:
+    eni_id: "{{ eni_id_1 }}"
+  register: eni_info
+
+- assert:
+    that:
+    - result.changed
+    - result.interface.id == eni_id_1
+    - result.interface.tags | length == 2
+    - result.interface.tags.CreatedBy == resource_prefix
+    - result.interface.tags.Name == resource_prefix
+    - result.interface.name == resource_prefix
+    - _interface_0.tags | length == 2
+    - _interface_0.tags.CreatedBy == resource_prefix
+    - _interface_0.tags.Name == resource_prefix
+    - _interface_0.tag_set | length == 2
+    - _interface_0.tag_set.CreatedBy == resource_prefix
+    - _interface_0.tag_set.Name == resource_prefix
+    - _interface_0.name == resource_prefix
+  vars:
+    _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+# ============================================================
+- name: test idempotence by using the Name tag and the subnet (check mode)
+  ec2_eni:
+    name: "{{ resource_prefix }}"
+    state: present
+    subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+  check_mode: true
+  register: result_check_mode
+
+- assert:
+    that:
+    - not result_check_mode.changed
+
+- name: test idempotence by using the Name tag and the subnet
+  ec2_eni:
+    name: "{{ resource_prefix }}"
+    state: present
+    subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+  register: result
+
+- assert:
+    that:
+    - not result.changed
+    - result.interface.id == eni_id_1
+
+# ============================================================
+- name: test tags are not purged if tags are null even if name is provided (check mode)
+  ec2_eni:
+    name: "{{ resource_prefix }}"
+    state: present
+    subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+  check_mode: true
+  register: result_check_mode
+
+- assert:
+    that:
+    - not result_check_mode.changed
+
+- name: test tags are not purged if tags are null even if name is provided
+  ec2_eni:
+    name: "{{ resource_prefix }}"
+    state: present
+    subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+  register: result
+- ec2_eni_info:
+    eni_id: "{{ eni_id_1 }}"
+  register: eni_info
+
+- assert:
+    that:
+    - not result.changed
+    - result.interface.id == eni_id_1
+    - result.interface.tags | length == 2
+    - result.interface.tags.CreatedBy == resource_prefix
+    - result.interface.tags.Name == resource_prefix
+    - result.interface.name == resource_prefix
+    - _interface_0.tag_set | length == 2
+    - _interface_0.tag_set.CreatedBy == resource_prefix
+    - _interface_0.tag_set.Name == resource_prefix
+    - _interface_0.name == resource_prefix
+  vars:
+    _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+# ============================================================
+- name: test setting purge tags to false
+  ec2_eni:
+    eni_id: "{{ eni_id_1 }}"
+    state: present
+    purge_tags: false
+    tags: {}
+  register: result
+- ec2_eni_info:
+    eni_id: "{{ eni_id_1 }}"
+  register: eni_info
+
+- assert:
+    that:
+    - not result.changed
+    - result.interface.tags | length == 2
+    - result.interface.tags.CreatedBy == resource_prefix
+    - result.interface.tags.Name == resource_prefix
+    - result.interface.name == resource_prefix
+    - _interface_0.tag_set | length == 2
+    - _interface_0.tag_set.CreatedBy == resource_prefix
+    - _interface_0.tag_set.Name == resource_prefix
+    - _interface_0.name == resource_prefix
+  vars:
+    _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+# ============================================================
+- name: test adding a new tag without removing any others
+  ec2_eni:
+    eni_id: "{{ eni_id_1 }}"
+    state: present
+    purge_tags: false
+    tags:
environment: test + register: result +- ec2_eni_info: + eni_id: "{{ eni_id_1 }}" + register: eni_info + +- assert: + that: + - result.changed + - result.interface.tags | length == 3 + - result.interface.tags.environment == 'test' + - result.interface.tags.CreatedBy == resource_prefix + - result.interface.tags.Name == resource_prefix + - result.interface.name == resource_prefix + - _interface_0.tag_set | length == 3 + - _interface_0.tag_set.environment == 'test' + - _interface_0.tag_set.CreatedBy == resource_prefix + - _interface_0.tag_set.Name == resource_prefix + - _interface_0.name == resource_prefix + vars: + _interface_0: '{{ eni_info.network_interfaces[0] }}' + + # ============================================================ +- name: test purging tags and adding a new one + ec2_eni: + name: "{{ resource_prefix }}" + state: present + subnet_id: "{{ vpc_subnet_result.subnet.id }}" + tags: + Description: "{{ resource_prefix }}" + register: result +- ec2_eni_info: + eni_id: "{{ eni_id_1 }}" + register: eni_info + +- assert: + that: + - result.changed + - result.interface.id == eni_id_1 + - result.interface.tags | length == 2 + - result.interface.tags.Description == resource_prefix + - result.interface.tags.Name == resource_prefix + - result.interface.name == resource_prefix + - _interface_0.tag_set | length == 2 + - _interface_0.tag_set.Description == resource_prefix + - _interface_0.tag_set.Name == resource_prefix + - _interface_0.name == resource_prefix + vars: + _interface_0: '{{ eni_info.network_interfaces[0] }}' + +- name: test purging tags and adding a new one is idempotent + ec2_eni: + name: "{{ resource_prefix }}" + state: present + subnet_id: "{{ vpc_subnet_result.subnet.id }}" + tags: + Description: "{{ resource_prefix }}" + register: result +- ec2_eni_info: + eni_id: "{{ eni_id_1 }}" + register: eni_info + +- assert: + that: + - not result.changed + - result.interface.id == eni_id_1 + - result.interface.tags | length == 2 + - result.interface.tags.Description == resource_prefix + - result.interface.tags.Name == resource_prefix + - result.interface.name == resource_prefix + - _interface_0.tag_set | length == 2 + - _interface_0.tag_set.Description == resource_prefix + - _interface_0.tag_set.Name == resource_prefix + - _interface_0.name == resource_prefix + vars: + _interface_0: '{{ eni_info.network_interfaces[0] }}' + + # ============================================================ +- name: test purging all tags + ec2_eni: + eni_id: "{{ eni_id_1 }}" + state: present + tags: {} + register: result +- ec2_eni_info: + eni_id: "{{ eni_id_1 }}" + register: eni_info + +- assert: + that: + - result.changed + - not result.interface.tags + - result.interface.name is undefined + - _interface_0.tag_set | length == 0 + vars: + _interface_0: '{{ eni_info.network_interfaces[0] }}' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/aliases new file mode 100644 index 000000000..7497e8011 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/aliases @@ -0,0 +1,6 @@ +time=10m + +cloud/aws + +ec2_instance_info +ec2_instance diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/defaults/main.yml new file mode 100644 index 000000000..364c37f82 --- /dev/null +++ 
b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/defaults/main.yml @@ -0,0 +1,4 @@ +--- +# defaults file for ec2_instance_block_devices +ec2_instance_type: 't3.micro' +ec2_instance_tag_TestId: '{{ resource_prefix }}-block-devices' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/meta/main.yml new file mode 100644 index 000000000..320728605 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/meta/main.yml @@ -0,0 +1,6 @@ +# this just makes sure they're in the right place +dependencies: +- role: setup_ec2_facts +- role: setup_ec2_instance_env + vars: + ec2_instance_test_name: block_devices diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/tasks/main.yml new file mode 100644 index 000000000..5e27d5ab0 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/tasks/main.yml @@ -0,0 +1,110 @@ +- module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - name: "New instance with an extra block device" + ec2_instance: + state: running + name: "{{ resource_prefix }}-test-ebs-vols" + image_id: "{{ ec2_ami_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + volumes: + - device_name: /dev/sdb + ebs: + volume_size: 20 + delete_on_termination: true + volume_type: standard + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + instance_type: "{{ ec2_instance_type }}" + wait: true + register: block_device_instances + + - name: "Gather instance info" + ec2_instance_info: + filters: + "tag:Name": "{{ resource_prefix }}-test-ebs-vols" + register: block_device_instances_info + + - assert: + that: + - block_device_instances is not failed + - block_device_instances is changed + - block_device_instances_info.instances[0].block_device_mappings[0] + - block_device_instances_info.instances[0].block_device_mappings[1] + - block_device_instances_info.instances[0].block_device_mappings[1].device_name == '/dev/sdb' + + - name: "New instance with an extra block device (check mode)" + ec2_instance: + state: present + name: "{{ resource_prefix }}-test-ebs-vols-checkmode" + image_id: "{{ ec2_ami_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + volumes: + - device_name: /dev/sdb + ebs: + volume_size: 20 + delete_on_termination: true + volume_type: standard + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + instance_type: "{{ ec2_instance_type }}" + check_mode: yes + + - name: "fact presented ec2 instance" + ec2_instance_info: + filters: + "tag:Name": "{{ resource_prefix }}-test-ebs-vols" + register: presented_instance_fact + + - name: "fact checkmode ec2 instance" + ec2_instance_info: + filters: + "tag:Name": "{{ resource_prefix }}-test-ebs-vols-checkmode" + register: checkmode_instance_fact + + - name: "Confirm instance was created without check mode" + assert: + that: + - "{{ presented_instance_fact.instances | length }} > 0" + + - name: "Confirm instance was not created with check mode" + assert: + that: + - "{{ checkmode_instance_fact.instances | length }} == 0" + + - name: "Terminate instances" + ec2_instance: + state: absent + 
instance_ids: "{{ block_device_instances.instance_ids }}" + + - name: "New instance with an extra block device - gp3 volume_type and throughput" + ec2_instance: + state: running + name: "{{ resource_prefix }}-test-ebs-vols-gp3" + image_id: "{{ ec2_ami_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + volumes: + - device_name: /dev/sdb + ebs: + volume_size: 20 + delete_on_termination: true + volume_type: gp3 + throughput: 500 + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + instance_type: "{{ ec2_instance_type }}" + wait: true + register: block_device_instances_gp3 + + - assert: + that: + - block_device_instances_gp3 is not failed + - block_device_instances_gp3 is changed + - block_device_instances_gp3.spec.BlockDeviceMappings[0].DeviceName == '/dev/sdb' + - block_device_instances_gp3.spec.BlockDeviceMappings[0].Ebs.VolumeType == 'gp3' + - block_device_instances_gp3.spec.BlockDeviceMappings[0].Ebs.VolumeSize == 20 + - block_device_instances_gp3.spec.BlockDeviceMappings[0].Ebs.Throughput == 500 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/aliases new file mode 100644 index 000000000..7497e8011 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/aliases @@ -0,0 +1,6 @@ +time=10m + +cloud/aws + +ec2_instance_info +ec2_instance diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/defaults/main.yml new file mode 100644 index 000000000..829070a1e --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/defaults/main.yml @@ -0,0 +1,4 @@ +--- +# defaults file for ec2_instance_checkmode_tests +ec2_instance_type: 't3.micro' +ec2_instance_tag_TestId: '{{ resource_prefix }}-instance-checkmode' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/meta/main.yml new file mode 100644 index 000000000..634b3aa6e --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/meta/main.yml @@ -0,0 +1,6 @@ +# this just makes sure they're in the right place +dependencies: +- role: setup_ec2_facts +- role: setup_ec2_instance_env + vars: + ec2_instance_test_name: check_mode diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/tasks/main.yml new file mode 100644 index 000000000..2ffa2f9df --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/tasks/main.yml @@ -0,0 +1,208 @@ +- module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - name: "Make basic instance" + ec2_instance: + state: present + name: "{{ resource_prefix }}-checkmode-comparison" + image_id: "{{ ec2_ami_id }}" + security_groups: "{{ sg.group_id }}" + instance_type: "{{ ec2_instance_type }}" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + wait: false + tags: + TestId: "{{ ec2_instance_tag_TestId }}" 
+ TestTag: "Some Value" + register: basic_instance + + - name: "Make basic instance (check mode)" + ec2_instance: + state: present + name: "{{ resource_prefix }}-checkmode-comparison-checkmode" + image_id: "{{ ec2_ami_id }}" + security_groups: "{{ sg.group_id }}" + instance_type: "{{ ec2_instance_type }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + TestTag: "Some Value" + check_mode: yes + + - name: "fact presented ec2 instance" + ec2_instance_info: + filters: + "tag:Name": "{{ resource_prefix }}-checkmode-comparison" + register: presented_instance_fact + + - name: "fact checkmode ec2 instance" + ec2_instance_info: + filters: + "tag:Name": "{{ resource_prefix }}-checkmode-comparison-checkmode" + register: checkmode_instance_fact + + - name: "Confirm whether the check mode is working normally." + assert: + that: + - "{{ presented_instance_fact.instances | length }} > 0" + - "{{ checkmode_instance_fact.instances | length }} == 0" + + - name: "Stop instance (check mode)" + ec2_instance: + state: stopped + name: "{{ resource_prefix }}-checkmode-comparison" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + TestTag: "Some Value" + check_mode: yes + + - name: "fact ec2 instance" + ec2_instance_info: + filters: + "tag:Name": "{{ resource_prefix }}-checkmode-comparison" + register: confirm_checkmode_stopinstance_fact + + - name: "Verify that it was not stopped." + assert: + that: + - confirm_checkmode_stopinstance_fact.instances[0].state.name not in ["stopped", "stopping"] + + - name: "Stop instance." + ec2_instance: + state: stopped + name: "{{ resource_prefix }}-checkmode-comparison" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + TestTag: "Some Value" + wait: true + register: instance_stop + + - name: "fact stopped ec2 instance" + ec2_instance_info: + filters: + "tag:Name": "{{ resource_prefix }}-checkmode-comparison" + register: confirm_stopinstance_fact + + - name: "Verify that it was stopped." + assert: + that: + - confirm_stopinstance_fact.instances[0].state.name in ["stopped", "stopping"] + + - name: "Running instance in check mode." + ec2_instance: + state: running + name: "{{ resource_prefix }}-checkmode-comparison" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + TestTag: "Some Value" + check_mode: yes + + - name: "fact ec2 instance" + ec2_instance_info: + filters: + "tag:Name": "{{ resource_prefix }}-checkmode-comparison" + register: confirm_checkmode_runninginstance_fact + + - name: "Verify that it was not running." + assert: + that: + - '"{{ confirm_checkmode_runninginstance_fact.instances[0].state.name }}" != "running"' + + - name: "Running instance." + ec2_instance: + state: running + name: "{{ resource_prefix }}-checkmode-comparison" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + TestTag: "Some Value" + + - name: "fact ec2 instance." + ec2_instance_info: + filters: + "tag:Name": "{{ resource_prefix }}-checkmode-comparison" + register: confirm_runninginstance_fact + + - name: "Verify that it was running." + assert: + that: + - '"{{ confirm_runninginstance_fact.instances[0].state.name }}" == "running"' + + - name: "Tag instance." 
+ ec2_instance: + state: running + name: "{{ resource_prefix }}-checkmode-comparison" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + TestTag: "Some Other Value" + check_mode: yes + + - name: "fact ec2 instance." + ec2_instance_info: + filters: + "tag:Name": "{{ resource_prefix }}-checkmode-comparison" + register: confirm_not_tagged + + - name: "Verify that it hasn't been re-tagged." + assert: + that: + - '"{{ confirm_not_tagged.instances[0].tags.TestTag }}" == "Some Value"' + + - name: "Terminate instance in check mode." + ec2_instance: + state: absent + name: "{{ resource_prefix }}-checkmode-comparison" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + TestTag: "Some Value" + wait: True + check_mode: yes + + - name: "fact ec2 instance" + ec2_instance_info: + filters: + "tag:Name": "{{ resource_prefix }}-checkmode-comparison" + register: confirm_checkmode_terminatedinstance_fact + + - name: "Verify that it was not terminated." + assert: + that: + - '"{{ confirm_checkmode_terminatedinstance_fact.instances[0].state.name }}" != "terminated"' + + - name: "Terminate instance." + ec2_instance: + state: absent + name: "{{ resource_prefix }}-checkmode-comparison" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + TestTag: "Some Value" + wait: True + + - name: "fact ec2 instance" + ec2_instance_info: + filters: + "tag:Name": "{{ resource_prefix }}-checkmode-comparison" + register: confirm_terminatedinstance_fact + + - name: "Verify that it was terminated." + assert: + that: + - '"{{ confirm_terminatedinstance_fact.instances[0].state.name }}" == "terminated"' + + always: + - name: "Terminate checkmode instances" + ec2_instance: + state: absent + filters: + "tag:TestId": "{{ ec2_instance_tag_TestId }}" + wait: yes + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/aliases new file mode 100644 index 000000000..ca83d373d --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/aliases @@ -0,0 +1,6 @@ +time=6m + +cloud/aws + +ec2_instance_info +ec2_instance diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/defaults/main.yml new file mode 100644 index 000000000..eb1859b3f --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/defaults/main.yml @@ -0,0 +1,4 @@ +--- +# defaults file for ec2_instance_cpu_options +ec2_instance_type: 't3.micro' +ec2_instance_tag_TestId: '{{ resource_prefix }}-cpu-options' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/meta/main.yml new file mode 100644 index 000000000..2d7d140d4 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/meta/main.yml @@ -0,0 +1,6 @@ +# this just makes sure they're in the right place +dependencies: +- role: setup_ec2_facts +- role: setup_ec2_instance_env + vars: + ec2_instance_test_name: cpu_options diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/tasks/main.yml
b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/tasks/main.yml new file mode 100644 index 000000000..a0bdd4106 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/tasks/main.yml @@ -0,0 +1,85 @@ +- module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - name: "create t3.nano instance with cpu_options" + ec2_instance: + state: present + name: "{{ resource_prefix }}-test-t3nano-1-threads-per-core" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + instance_type: t3.nano + cpu_options: + core_count: 1 + threads_per_core: 1 + wait: true + register: instance_creation + + - name: "instance with cpu_options created with the right options" + assert: + that: + - instance_creation is success + - instance_creation is changed + + - name: "modify cpu_options on existing instance (warning displayed)" + ec2_instance: + state: present + name: "{{ resource_prefix }}-test-t3nano-1-threads-per-core" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + instance_type: t3.nano + cpu_options: + core_count: 1 + threads_per_core: 2 + wait: true + register: cpu_options_update + ignore_errors: yes + + - name: "fact presented ec2 instance" + ec2_instance_info: + filters: + "tag:Name": "{{ resource_prefix }}-test-t3nano-1-threads-per-core" + register: presented_instance_fact + + - name: "modify cpu_options has no effect on existing instance" + assert: + that: + - cpu_options_update is success + - cpu_options_update is not changed + - "{{ presented_instance_fact.instances | length }} > 0" + - "'{{ presented_instance_fact.instances.0.state.name }}' in ['running','pending']" + - "{{ presented_instance_fact.instances.0.cpu_options.core_count }} == 1" + - "{{ presented_instance_fact.instances.0.cpu_options.threads_per_core }} == 1" + + - name: "create t3.nano instance with cpu_options (check mode)" + ec2_instance: + state: running + name: "{{ resource_prefix }}-test-t3nano-1-threads-per-core-checkmode" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + instance_type: t3.nano + cpu_options: + core_count: 1 + threads_per_core: 1 + wait: true + check_mode: yes + + - name: "fact checkmode ec2 instance" + ec2_instance_info: + filters: + "tag:Name": "{{ resource_prefix }}-test-t3nano-1-threads-per-core-checkmode" + register: checkmode_instance_fact + + - name: "Confirm instance was not created in check mode."
+ assert: + that: + - "{{ checkmode_instance_fact.instances | length }} == 0" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/aliases new file mode 100644 index 000000000..7497e8011 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/aliases @@ -0,0 +1,6 @@ +time=10m + +cloud/aws + +ec2_instance_info +ec2_instance diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/defaults/main.yml new file mode 100644 index 000000000..b233d4547 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/defaults/main.yml @@ -0,0 +1,4 @@ +--- +# defaults file for ec2_instance_default_vpc +ec2_instance_type: 't3.micro' +ec2_instance_tag_TestId: '{{ resource_prefix }}-default-vpc' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/meta/main.yml new file mode 100644 index 000000000..7622736b4 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/meta/main.yml @@ -0,0 +1,6 @@ +# this just makes sure they're in the right place +dependencies: +- role: setup_ec2_facts +- role: setup_ec2_instance_env + vars: + ec2_instance_test_name: default_vpc diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/tasks/main.yml new file mode 100644 index 000000000..3abcf0f8a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/tasks/main.yml @@ -0,0 +1,63 @@ +- module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - name: "Make instance in a default subnet of the VPC" + ec2_instance: + state: present + name: "{{ resource_prefix }}-test-default-vpc" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + security_group: "default" + instance_type: "{{ ec2_instance_type }}" + wait: false + register: in_default_vpc + + - name: "Make instance in a default subnet of the VPC (check mode)" + ec2_instance: + state: present + name: "{{ resource_prefix }}-test-default-vpc-checkmode" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + security_group: "default" + instance_type: "{{ ec2_instance_type }}" + check_mode: yes + + - name: "fact presented ec2 instance" + ec2_instance_info: + filters: + "tag:Name": "{{ resource_prefix }}-test-default-vpc" + register: presented_instance_fact + + - name: "fact checkmode ec2 instance" + ec2_instance_info: + filters: + "tag:Name": "{{ resource_prefix }}-test-default-vpc-checkmode" + register: checkmode_instance_fact + + - name: "Confirm whether the check mode is working normally."
+ assert: + that: + - "{{ presented_instance_fact.instances | length }} > 0" + - "{{ checkmode_instance_fact.instances | length }} == 0" + + - name: "Terminate instances" + ec2_instance: + state: absent + instance_ids: "{{ in_default_vpc.instance_ids }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + + always: + - name: "Terminate vpc_tests instances" + ec2_instance: + state: absent + filters: + "tag:TestId": "{{ ec2_instance_tag_TestId }}" + wait: yes + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/aliases new file mode 100644 index 000000000..ca83d373d --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/aliases @@ -0,0 +1,6 @@ +time=6m + +cloud/aws + +ec2_instance_info +ec2_instance diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/defaults/main.yml new file mode 100644 index 000000000..feec2e7c1 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/defaults/main.yml @@ -0,0 +1,4 @@ +--- +# defaults file for ec2_instance_ebs_optimized +ec2_instance_type: 't3.micro' +ec2_instance_tag_TestId: '{{ resource_prefix }}-ebs-optimized' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/meta/main.yml new file mode 100644 index 000000000..9ee97b6f4 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/meta/main.yml @@ -0,0 +1,6 @@ +# this just makes sure they're in the right place +dependencies: +- role: setup_ec2_facts +- role: setup_ec2_instance_env + vars: + ec2_instance_test_name: ebs_optimized diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/tasks/main.yml new file mode 100644 index 000000000..d01ee77ee --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/tasks/main.yml @@ -0,0 +1,31 @@ +- module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - name: "Make EBS optimized instance in the testing subnet of the test VPC" + ec2_instance: + state: present + name: "{{ resource_prefix }}-test-ebs-optimized-instance-in-vpc" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + security_groups: "{{ sg.group_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + ebs_optimized: true + instance_type: t3.nano + wait: false + register: ebs_opt_in_vpc + + - name: "Get ec2 instance info" + ec2_instance_info: + filters: + "tag:Name": "{{ resource_prefix }}-test-ebs-optimized-instance-in-vpc" + register: ebs_opt_instance_info + + - name: "Assert instance is ebs_optimized" + assert: + that: + - "{{ ebs_opt_instance_info.instances.0.ebs_optimized }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/aliases 
b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/aliases new file mode 100644 index 000000000..ca83d373d --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/aliases @@ -0,0 +1,6 @@ +time=6m + +cloud/aws + +ec2_instance_info +ec2_instance diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/defaults/main.yml new file mode 100644 index 000000000..7dca186d8 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/defaults/main.yml @@ -0,0 +1,4 @@ +--- +# defaults file for ec2_instance_external_resource_attach +ec2_instance_type: 't3.micro' +ec2_instance_tag_TestId: '{{ resource_prefix }}-external-attach' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/meta/main.yml new file mode 100644 index 000000000..f30ad80c4 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/meta/main.yml @@ -0,0 +1,6 @@ +# this just makes sure they're in the right place +dependencies: +- role: setup_ec2_facts +- role: setup_ec2_instance_env + vars: + ec2_instance_test_name: external_resources diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/tasks/main.yml new file mode 100644 index 000000000..7aa2c1960 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/tasks/main.yml @@ -0,0 +1,161 @@ +- module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + # Make custom ENIs and attach via the `network` parameter + - ec2_eni: + state: present + delete_on_termination: true + subnet_id: "{{ testing_subnet_b.subnet.id }}" + security_groups: + - "{{ sg.group_id }}" + register: eni_a + + - ec2_eni: + state: present + delete_on_termination: true + subnet_id: "{{ testing_subnet_b.subnet.id }}" + security_groups: + - "{{ sg.group_id }}" + register: eni_b + + - ec2_eni: + state: present + delete_on_termination: true + subnet_id: "{{ testing_subnet_b.subnet.id }}" + security_groups: + - "{{ sg.group_id }}" + register: eni_c + + - ec2_key: + name: "{{ resource_prefix }}_test_key" + + - name: "Make instance in the testing subnet created in the test VPC" + ec2_instance: + state: present + name: "{{ resource_prefix }}-test-eni-vpc" + key_name: "{{ resource_prefix }}_test_key" + network: + interfaces: + - id: "{{ eni_a.interface.id }}" + image_id: "{{ ec2_ami_id }}" + availability_zone: '{{ subnet_b_az }}' + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + instance_type: "{{ ec2_instance_type }}" + wait: false + register: in_test_vpc + + - name: "Gather {{ resource_prefix }}-test-eni-vpc info" + ec2_instance_info: + filters: + "tag:Name": '{{ resource_prefix }}-test-eni-vpc' + register: in_test_vpc_instance + + - assert: + that: + - 'in_test_vpc_instance.instances.0.key_name == "{{ resource_prefix 
}}_test_key"' + - '(in_test_vpc_instance.instances.0.network_interfaces | length) == 1' + + - name: "Add a second interface (check_mode=true)" + ec2_instance: + state: present + name: "{{ resource_prefix }}-test-eni-vpc" + network: + interfaces: + - id: "{{ eni_a.interface.id }}" + - id: "{{ eni_b.interface.id }}" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + instance_type: "{{ ec2_instance_type }}" + wait: false + register: add_interface_check_mode + check_mode: true + + - name: Validate task reported changed + assert: + that: + - add_interface_check_mode is changed + + - name: "Gather {{ resource_prefix }}-test-eni-vpc info" + ec2_instance_info: + filters: + "tag:Name": '{{ resource_prefix }}-test-eni-vpc' + register: in_test_vpc_instance + + - name: Validate that only 1 ENI is attached to instance as we run using check_mode=true + assert: + that: + - 'in_test_vpc_instance.instances.0.key_name == "{{ resource_prefix }}_test_key"' + - '(in_test_vpc_instance.instances.0.network_interfaces | length) == 1' + + - name: "Add a second interface" + ec2_instance: + state: present + name: "{{ resource_prefix }}-test-eni-vpc" + network: + interfaces: + - id: "{{ eni_a.interface.id }}" + - id: "{{ eni_b.interface.id }}" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + instance_type: "{{ ec2_instance_type }}" + wait: false + register: add_interface + until: add_interface is not failed + ignore_errors: true + retries: 10 + + - name: Validate that the instance now has 2 interfaces attached + block: + - name: "Gather {{ resource_prefix }}-test-eni-vpc info" + ec2_instance_info: + filters: + "tag:Name": '{{ resource_prefix }}-test-eni-vpc' + register: in_test_vpc_instance + + - name: Validate that 2 ENIs are attached to the instance + assert: + that: + - 'in_test_vpc_instance.instances.0.key_name == "{{ resource_prefix }}_test_key"' + - '(in_test_vpc_instance.instances.0.network_interfaces | length) == 2' + + when: add_interface is successful + + - name: "Make instance in the testing subnet created in the test VPC (check mode)" + ec2_instance: + state: present + name: "{{ resource_prefix }}-test-eni-vpc-checkmode" + key_name: "{{ resource_prefix }}_test_key" + network: + interfaces: + - id: "{{ eni_c.interface.id }}" + image_id: "{{ ec2_ami_id }}" + availability_zone: '{{ subnet_b_az }}' + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + instance_type: "{{ ec2_instance_type }}" + check_mode: yes + + - name: "fact presented ec2 instance" + ec2_instance_info: + filters: + "tag:Name": "{{ resource_prefix }}-test-eni-vpc" + register: presented_instance_fact + + - name: "fact checkmode ec2 instance" + ec2_instance_info: + filters: + "tag:Name": "{{ resource_prefix }}-test-eni-vpc-checkmode" + register: checkmode_instance_fact + + - name: "Confirm whether the check mode is working normally."
+ assert: + that: + - "{{ presented_instance_fact.instances | length }} > 0" + - "{{ checkmode_instance_fact.instances | length }} == 0" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/aliases new file mode 100644 index 000000000..ca83d373d --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/aliases @@ -0,0 +1,6 @@ +time=6m + +cloud/aws + +ec2_instance_info +ec2_instance diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/defaults/main.yml new file mode 100644 index 000000000..28e57b948 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/defaults/main.yml @@ -0,0 +1,4 @@ +--- +# defaults file for ec2_instance_hibernation_options +ec2_instance_type: 't3.micro' +ec2_instance_tag_TestId: '{{ resource_prefix }}-instance-hibernation-options' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/meta/main.yml new file mode 100644 index 000000000..80a82ca0b --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/meta/main.yml @@ -0,0 +1,9 @@ +# this just makes sure they're in the right place +dependencies: +- role: setup_ec2_facts +- role: setup_ec2_instance_env + vars: + ec2_instance_test_name: hibernation_options +- role: setup_botocore_pip + vars: + boto3_version: "1.20.30" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/tasks/main.yml new file mode 100644 index 000000000..e6aace728 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/tasks/main.yml @@ -0,0 +1,145 @@ +- module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - name: Create instance with hibernation option (check mode) + ec2_instance: + name: "{{ resource_prefix }}-hibernation-options" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ resource_prefix }}" + security_groups: "{{ sg.group_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + hibernation_options: true + instance_type: "{{ ec2_instance_type }}" + volumes: + - device_name: /dev/sda1 + ebs: + delete_on_termination: true + encrypted: true + state: running + wait: yes + check_mode: yes + register: create_instance_check_mode_results + + - name: Check the returned value for the earlier task + assert: + that: + - create_instance_check_mode_results is changed + - create_instance_check_mode_results.spec.HibernationOptions.Configured == True + + - name: Create instance with hibernation config + ec2_instance: + name: "{{ resource_prefix }}-hibernation-options" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ resource_prefix }}" + security_groups: "{{ sg.group_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + hibernation_options: true + instance_type: "{{ 
ec2_instance_type }}" + volumes: + - device_name: /dev/sda1 + ebs: + delete_on_termination: true + encrypted: true + state: running + wait: yes + register: create_instance_results + + - set_fact: + instance_id: '{{ create_instance_results.instances[0].instance_id }}' + + - name: Check return values of the create instance task + assert: + that: + - "{{ create_instance_results.instances | length }} > 0" + - "'{{ create_instance_results.instances.0.state.name }}' == 'running'" + - create_instance_results.spec.HibernationOptions.Configured + + - name: Gather information about the instance to get the hibernation status + ec2_instance_info: + filters: + "tag:Name": "{{ resource_prefix }}-hibernation-options" + register: instance_hibernation_status + + - name: Assert hibernation options is true + assert: + that: + - instance_hibernation_status.instances[0].hibernation_options.configured == true + + - name: Create instance with hibernation option (check mode) (idempotent) + ec2_instance: + name: "{{ resource_prefix }}-hibernation-options" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ resource_prefix }}" + security_groups: "{{ sg.group_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + hibernation_options: true + instance_type: "{{ ec2_instance_type }}" + volumes: + - device_name: /dev/sda1 + ebs: + delete_on_termination: true + encrypted: true + state: running + wait: yes + check_mode: yes + register: create_instance_check_mode_results + + - name: Check the returned value for the earlier task + assert: + that: + - create_instance_check_mode_results is not changed + + - name: Create instance with hibernation options configured (idempotent) + ec2_instance: + name: "{{ resource_prefix }}-hibernation-options" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ resource_prefix }}" + security_groups: "{{ sg.group_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + hibernation_options: true + instance_type: "{{ ec2_instance_type }}" + volumes: + - device_name: /dev/sda1 + ebs: + delete_on_termination: true + encrypted: true + state: running + wait: yes + register: create_instance_results + + - name: Check return values of the create instance task + assert: + that: + - "{{ not create_instance_results.changed }}" + - "{{ create_instance_results.instances | length }} > 0" + + - name: Create instance with hibernation options configured with unencrypted volume + ec2_instance: + name: "{{ resource_prefix }}-hibernation-options-error" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ resource_prefix }}" + security_groups: "{{ sg.group_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + hibernation_options: true + instance_type: "{{ ec2_instance_type }}" + volumes: + - device_name: /dev/sda1 + ebs: + delete_on_termination: true + register: create_instance_results + failed_when: "'Hibernation prerequisites not satisfied' not in create_instance_results.msg" + + - name: Terminate the instance + ec2_instance: + filters: + tag:TestId: "{{ resource_prefix }}" + state: absent \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/aliases new file mode 100644 index 000000000..7497e8011 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/aliases @@ -0,0 +1,6 @@ +time=10m + +cloud/aws + +ec2_instance_info +ec2_instance diff --git
a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/defaults/main.yml new file mode 100644 index 000000000..2dc4d467b --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/defaults/main.yml @@ -0,0 +1,7 @@ +--- +# defaults file for ec2_instance_iam_instance_profile +ec2_instance_type: 't3.micro' +ec2_instance_tag_TestId: '{{ resource_prefix }}-instance-profile' + +first_iam_role: "ansible-test-{{ tiny_prefix }}-instance_role" +second_iam_role: "ansible-test-{{ tiny_prefix }}-instance_role-2" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/files/assume-role-policy.json b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/files/assume-role-policy.json new file mode 100644 index 000000000..72413abdd --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/files/assume-role-policy.json @@ -0,0 +1,13 @@ +{ + "Version": "2008-10-17", + "Statement": [ + { + "Sid": "", + "Effect": "Allow", + "Principal": { + "Service": "ec2.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] +} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/meta/main.yml new file mode 100644 index 000000000..1e3a6043a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/meta/main.yml @@ -0,0 +1,6 @@ +# this just makes sure they're in the right place +dependencies: +- role: setup_ec2_facts +- role: setup_ec2_instance_env + vars: + ec2_instance_test_name: instance_role diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/tasks/main.yml new file mode 100644 index 000000000..2f28ae3b8 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/tasks/main.yml @@ -0,0 +1,131 @@ +- module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - name: "Create IAM role for test" + iam_role: + state: present + name: '{{ first_iam_role }}' + assume_role_policy_document: "{{ lookup('file','assume-role-policy.json') }}" + create_instance_profile: yes + managed_policy: + - AmazonEC2ContainerServiceRole + register: iam_role + + - name: "Create second IAM role for test" + iam_role: + state: present + name: '{{ second_iam_role }}' + assume_role_policy_document: "{{ lookup('file','assume-role-policy.json') }}" + create_instance_profile: yes + managed_policy: + - AmazonEC2ContainerServiceRole + register: iam_role_2 + + - name: "wait 10 seconds for roles to become available" + wait_for: + timeout: 10 + delegate_to: localhost + + - name: "Make instance with an instance_role" + ec2_instance: + state: present + name: "{{ resource_prefix }}-test-instance-role" + image_id: "{{ ec2_ami_id }}" + security_groups: "{{ sg.group_id }}" + instance_type: "{{ ec2_instance_type }}" + instance_role: "{{ first_iam_role }}" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + 
tags: + TestId: "{{ ec2_instance_tag_TestId }}" + register: instance_with_role + + - assert: + that: + - 'instance_with_role.instances[0].iam_instance_profile.arn == iam_role.arn.replace(":role/", ":instance-profile/")' + + - name: "Make instance with an instance_role (check mode)" + ec2_instance: + state: present + name: "{{ resource_prefix }}-test-instance-role-checkmode" + image_id: "{{ ec2_ami_id }}" + security_groups: "{{ sg.group_id }}" + instance_type: "{{ ec2_instance_type }}" + instance_role: "{{ iam_role.arn.replace(':role/', ':instance-profile/') }}" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + check_mode: yes + + - name: "fact presented ec2 instance" + ec2_instance_info: + filters: + "tag:Name": "{{ resource_prefix }}-test-instance-role" + register: presented_instance_fact + + - name: "fact checkmode ec2 instance" + ec2_instance_info: + filters: + "tag:Name": "{{ resource_prefix }}-test-instance-role-checkmode" + register: checkmode_instance_fact + + - name: "Confirm whether the check mode is working normally." + assert: + that: + - "{{ presented_instance_fact.instances | length }} > 0" + - "{{ checkmode_instance_fact.instances | length }} == 0" + + - name: "Update instance with new instance_role" + ec2_instance: + state: present + name: "{{ resource_prefix }}-test-instance-role" + image_id: "{{ ec2_ami_id }}" + security_groups: "{{ sg.group_id }}" + instance_type: "{{ ec2_instance_type }}" + instance_role: "{{ iam_role_2.arn.replace(':role/', ':instance-profile/') }}" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + register: instance_with_updated_role + + - name: "wait 10 seconds for role update to complete" + wait_for: + timeout: 10 + delegate_to: localhost + + - name: "fact updated ec2 instance" + ec2_instance_info: + filters: + "tag:Name": "{{ resource_prefix }}-test-instance-role" + register: updates_instance_info + + - assert: + that: + - 'updates_instance_info.instances[0].iam_instance_profile.arn == iam_role_2.arn.replace(":role/", ":instance-profile/")' + - 'updates_instance_info.instances[0].instance_id == instance_with_role.instances[0].instance_id' + + always: + # We need to delete the instances before we can delete the roles + - name: "Terminate iam_instance_role instances" + ec2_instance: + state: absent + filters: + "tag:TestId": "{{ ec2_instance_tag_TestId }}" + wait: yes + ignore_errors: yes + + - name: "Delete IAM role for test" + iam_role: + state: absent + name: "{{ item }}" + delete_instance_profile: true + loop: + - '{{ first_iam_role }}' + - '{{ second_iam_role }}' + register: removed + until: removed is not failed + ignore_errors: yes + retries: 10 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/aliases new file mode 100644 index 000000000..ca83d373d --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/aliases @@ -0,0 +1,6 @@ +time=6m + +cloud/aws + +ec2_instance_info +ec2_instance diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/defaults/main.yml new file mode 100644 index 000000000..d5a60251e --- /dev/null +++
b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/defaults/main.yml @@ -0,0 +1,4 @@ +--- +# defaults file for ec2_instance_minimal +ec2_instance_type: 't3.micro' +ec2_instance_tag_TestId: '{{ resource_prefix }}-instance-minimal' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/meta/main.yml new file mode 100644 index 000000000..7fa5de555 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/meta/main.yml @@ -0,0 +1,6 @@ +# this just makes sure they're in the right place +dependencies: +- role: setup_ec2_facts +- role: setup_ec2_instance_env + vars: + ec2_instance_test_name: minimal diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/tasks/main.yml new file mode 100644 index 000000000..8dcfca437 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/tasks/main.yml @@ -0,0 +1,699 @@ +- module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - name: "Create a new instance (check_mode)" + ec2_instance: + state: present + name: "{{ resource_prefix }}-test-basic" + instance_type: "{{ ec2_instance_type }}" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_instance + check_mode: true + + - assert: + that: + - create_instance is not failed + - create_instance is changed + - '"instance_ids" not in create_instance' + - '"ec2:RunInstances" not in create_instance.resource_actions' + + - name: "Create a new instance" + ec2_instance: + state: present + name: "{{ resource_prefix }}-test-basic" + instance_type: "{{ ec2_instance_type }}" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_instance + + - assert: + that: + - create_instance is not failed + - create_instance is changed + - '"ec2:RunInstances" in create_instance.resource_actions' + - '"instance_ids" in create_instance' + - create_instance.instance_ids | length == 1 + - create_instance.instance_ids[0].startswith("i-") + + - name: "Save instance ID" + set_fact: + create_instance_id_1: "{{ create_instance.instance_ids[0] }}" + + - name: "Create a new instance - idempotency (check_mode)" + ec2_instance: + state: present + name: "{{ resource_prefix }}-test-basic" + instance_type: "{{ ec2_instance_type }}" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_instance + check_mode: true + + - assert: + that: + - create_instance is not failed + - create_instance is not changed + - '"ec2:RunInstances" not in create_instance.resource_actions' + - '"instance_ids" in create_instance' + - create_instance.instance_ids | length == 1 + - create_instance.instance_ids[0] == create_instance_id_1 + + - name: "Create a new instance - idempotency" + ec2_instance: + state: present + name: "{{ resource_prefix }}-test-basic" + instance_type: "{{ ec2_instance_type }}" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_instance + + - 
assert: + that: + - create_instance is not failed + - create_instance is not changed + - '"ec2:RunInstances" not in create_instance.resource_actions' + - '"instance_ids" in create_instance' + - create_instance.instance_ids | length == 1 + - create_instance.instance_ids[0] == create_instance_id_1 + +################################################################ + + - name: "Create a new instance with a different name (check_mode)" + ec2_instance: + state: present + name: "{{ resource_prefix }}-test-basic-2" + instance_type: "{{ ec2_instance_type }}" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_instance_2 + check_mode: true + + - assert: + that: + - create_instance_2 is not failed + - create_instance_2 is changed + - '"instance_ids" not in create_instance_2' + - '"ec2:RunInstances" not in create_instance_2.resource_actions' + + - name: "Create a new instance with a different name" + ec2_instance: + state: present + name: "{{ resource_prefix }}-test-basic-2" + instance_type: "{{ ec2_instance_type }}" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_instance_2 + + - assert: + that: + - create_instance_2 is not failed + - create_instance_2 is changed + - '"ec2:RunInstances" in create_instance_2.resource_actions' + - '"instance_ids" in create_instance_2' + - create_instance_2.instance_ids | length == 1 + - create_instance_2.instance_ids[0].startswith("i-") + - create_instance_2.instance_ids[0] != create_instance_id_1 + + - name: "Save instance ID" + set_fact: + create_instance_id_2: "{{ create_instance_2.instance_ids[0] }}" + + - name: "Create a new instance with a different name - idempotency (check_mode)" + ec2_instance: + state: present + name: "{{ resource_prefix }}-test-basic-2" + instance_type: "{{ ec2_instance_type }}" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_instance_2 + check_mode: true + + - assert: + that: + - create_instance_2 is not failed + - create_instance_2 is not changed + - '"ec2:RunInstances" not in create_instance_2.resource_actions' + - '"instance_ids" in create_instance_2' + - create_instance_2.instance_ids | length == 1 + - create_instance_2.instance_ids[0] == create_instance_id_2 + + - name: "Create a new instance with a different name - idempotency" + ec2_instance: + state: present + name: "{{ resource_prefix }}-test-basic-2" + instance_type: "{{ ec2_instance_type }}" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_instance_2 + + - assert: + that: + - create_instance_2 is not failed + - create_instance_2 is not changed + - '"ec2:RunInstances" not in create_instance_2.resource_actions' + - '"instance_ids" in create_instance_2' + - create_instance_2.instance_ids | length == 1 + - create_instance_2.instance_ids[0] == create_instance_id_2 + +################################################################ + + - name: "Create a new instance with a different name in tags (check_mode)" + ec2_instance: + state: present + instance_type: "{{ ec2_instance_type }}" + image_id: "{{ ec2_ami_id }}" + tags: + Name: "{{ resource_prefix }}-test-basic-tag" + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_instance_tag + check_mode: true + + - assert: + that: + - create_instance_tag is not failed + - create_instance_tag is changed + - '"instance_ids" not in create_instance_tag' + - 
'"ec2:RunInstances" not in create_instance_tag.resource_actions' + + - name: "Create a new instance with a different name in tags" + ec2_instance: + state: present + instance_type: "{{ ec2_instance_type }}" + image_id: "{{ ec2_ami_id }}" + tags: + Name: "{{ resource_prefix }}-test-basic-tag" + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_instance_tag + + - assert: + that: + - create_instance_tag is not failed + - create_instance_tag is changed + - '"ec2:RunInstances" in create_instance_tag.resource_actions' + - '"instance_ids" in create_instance_tag' + - create_instance_tag.instance_ids | length == 1 + - create_instance_tag.instance_ids[0].startswith("i-") + - create_instance_tag.instance_ids[0] != create_instance_id_1 + - create_instance_tag.instance_ids[0] != create_instance_id_2 + + - name: "Save instance ID" + set_fact: + create_instance_id_tag: "{{ create_instance_tag.instance_ids[0] }}" + + - name: "Create a new instance with a different name in tags - idempotency (check_mode)" + ec2_instance: + state: present + instance_type: "{{ ec2_instance_type }}" + image_id: "{{ ec2_ami_id }}" + tags: + Name: "{{ resource_prefix }}-test-basic-tag" + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_instance_tag + check_mode: true + + - assert: + that: + - create_instance_tag is not failed + - create_instance_tag is not changed + - '"ec2:RunInstances" not in create_instance_tag.resource_actions' + - '"instance_ids" in create_instance_tag' + - create_instance_tag.instance_ids | length == 1 + - create_instance_tag.instance_ids[0] == create_instance_id_tag + + - name: "Create a new instance with a different name in tags - idempotency" + ec2_instance: + state: present + instance_type: "{{ ec2_instance_type }}" + image_id: "{{ ec2_ami_id }}" + tags: + Name: "{{ resource_prefix }}-test-basic-tag" + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_instance_tag + + - assert: + that: + - create_instance_tag is not failed + - create_instance_tag is not changed + - '"ec2:RunInstances" not in create_instance_tag.resource_actions' + - '"instance_ids" in create_instance_tag' + - create_instance_tag.instance_ids | length == 1 + - create_instance_tag.instance_ids[0] == create_instance_id_tag + +############################################################### + + - name: "Create a new instance in AZ {{ aws_region }}a" + ec2_instance: + state: present + name: "{{ resource_prefix }}-test-basic-{{ aws_region }}a" + instance_type: "{{ ec2_instance_type }}" + image_id: "{{ ec2_ami_id }}" + region: "{{ aws_region }}" + availability_zone: "{{ aws_region }}a" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_instance + + - name: "Save instance ID" + set_fact: + create_instance_id_3: "{{ create_instance.instance_ids[0] }}" + + - name: Get instance info + ec2_instance_info: + instance_ids: + - "{{ create_instance_id_3 }}" + register: info_result + + - assert: + that: + - create_instance is not failed + - create_instance is changed + - '"ec2:RunInstances" in create_instance.resource_actions' + - '"instance_ids" in create_instance' + - create_instance.instance_ids | length == 1 + - create_instance.instance_ids[0].startswith("i-") + - info_result.instances[0].placement.availability_zone == '{{ aws_region }}a' + + - name: "Create a new instance in AZ {{ aws_region }}b" + ec2_instance: + state: present + name: "{{ resource_prefix }}-test-basic-{{ aws_region }}b" + instance_type: "{{ ec2_instance_type }}" + image_id: "{{ 
ec2_ami_id }}" + region: "{{ aws_region }}" + availability_zone: "{{ aws_region }}b" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_instance + + - name: "Save instance ID" + set_fact: + create_instance_id_4: "{{ create_instance.instance_ids[0] }}" + + - name: Get instance info + ec2_instance_info: + instance_ids: + - "{{ create_instance_id_4 }}" + register: info_result + + - assert: + that: + - create_instance is not failed + - create_instance is changed + - '"ec2:RunInstances" in create_instance.resource_actions' + - '"instance_ids" in create_instance' + - create_instance.instance_ids | length == 1 + - create_instance.instance_ids[0].startswith("i-") + - info_result.instances[0].placement.availability_zone == '{{ aws_region }}b' + +################################################################ + + - name: "Terminate instance based on name parameter (check_mode)" + ec2_instance: + state: absent + name: "{{ resource_prefix }}-test-basic" + wait: true + register: terminate_name + check_mode: true + + - assert: + that: + - terminate_name is not failed + - terminate_name is changed + - '"ec2:TerminateInstances" not in terminate_name.resource_actions' + - '"terminate_failed" in terminate_name' + - '"terminate_success" in terminate_name' + - terminate_name.terminate_failed | length == 0 + - terminate_name.terminate_success | length == 1 + - terminate_name.terminate_success[0] == create_instance_id_1 + + - name: "Terminate instance based on name parameter" + ec2_instance: + state: absent + name: "{{ resource_prefix }}-test-basic" + wait: true + register: terminate_name + + - assert: + that: + - terminate_name is not failed + - terminate_name is changed + - '"ec2:TerminateInstances" in terminate_name.resource_actions' + - '"terminate_failed" in terminate_name' + - '"terminate_success" in terminate_name' + - terminate_name.terminate_failed | length == 0 + - terminate_name.terminate_success | length == 1 + - terminate_name.terminate_success[0] == create_instance_id_1 + + - name: "Terminate instance based on name parameter - idempotency (check_mode)" + ec2_instance: + state: absent + name: "{{ resource_prefix }}-test-basic" + wait: true + register: terminate_name + check_mode: true + + - assert: + that: + - terminate_name is not failed + - terminate_name is not changed + - '"ec2:TerminateInstances" not in terminate_name.resource_actions' + - '"terminate_failed" not in terminate_name' + - '"terminate_success" not in terminate_name' + + - name: "Terminate instance based on name parameter - idempotency" + ec2_instance: + state: absent + name: "{{ resource_prefix }}-test-basic" + wait: true + register: terminate_name + + - assert: + that: + - terminate_name is not failed + - terminate_name is not changed + - '"ec2:TerminateInstances" not in terminate_name.resource_actions' + - '"terminate_failed" not in terminate_name' + - '"terminate_success" not in terminate_name' + +################################################################ + + - name: "Terminate instance based on name tag (check_mode)" + ec2_instance: + state: absent + tags: + Name: "{{ resource_prefix }}-test-basic-tag" + wait: true + register: terminate_tag + check_mode: true + + - assert: + that: + - terminate_tag is not failed + - terminate_tag is changed + - '"ec2:TerminateInstances" not in terminate_tag.resource_actions' + - '"terminate_failed" in terminate_tag' + - '"terminate_success" in terminate_tag' + - terminate_tag.terminate_failed | length == 0 + - terminate_tag.terminate_success | length == 1 
+ - terminate_tag.terminate_success[0] == create_instance_id_tag + + - name: "Terminate instance based on name tag" + ec2_instance: + state: absent + tags: + Name: "{{ resource_prefix }}-test-basic-tag" + wait: true + register: terminate_tag + + - assert: + that: + - terminate_tag is not failed + - terminate_tag is changed + - '"ec2:TerminateInstances" in terminate_tag.resource_actions' + - '"terminate_failed" in terminate_tag' + - '"terminate_success" in terminate_tag' + - terminate_tag.terminate_failed | length == 0 + - terminate_tag.terminate_success | length == 1 + - terminate_tag.terminate_success[0] == create_instance_id_tag + + - name: "Terminate instance based on name tag - idempotency (check_mode)" + ec2_instance: + state: absent + tags: + Name: "{{ resource_prefix }}-test-basic-tag" + wait: true + register: terminate_tag + check_mode: true + + - assert: + that: + - terminate_tag is not failed + - terminate_tag is not changed + - '"ec2:TerminateInstances" not in terminate_tag.resource_actions' + - '"terminate_failed" not in terminate_tag' + - '"terminate_success" not in terminate_tag' + + - name: "Terminate instance based on name tag - idempotency" + ec2_instance: + state: absent + tags: + Name: "{{ resource_prefix }}-test-basic-tag" + wait: true + register: terminate_tag + + - assert: + that: + - terminate_tag is not failed + - terminate_tag is not changed + - '"ec2:TerminateInstances" not in terminate_tag.resource_actions' + - '"terminate_failed" not in terminate_tag' + - '"terminate_success" not in terminate_tag' + +################################################################ + + - name: "Terminate instance based on id (check_mode)" + ec2_instance: + state: absent + instance_ids: + - "{{ create_instance_id_2 }}" + wait: true + register: terminate_id + check_mode: true + + - assert: + that: + - terminate_id is not failed + - terminate_id is changed + - '"ec2:TerminateInstances" not in terminate_id.resource_actions' + - '"terminate_failed" in terminate_id' + - '"terminate_success" in terminate_id' + - terminate_id.terminate_failed | length == 0 + - terminate_id.terminate_success | length == 1 + - terminate_id.terminate_success[0] == create_instance_id_2 + + - name: "Terminate instance based on id" + ec2_instance: + state: absent + instance_ids: + - "{{ create_instance_id_2 }}" + wait: true + register: terminate_id + + - assert: + that: + - terminate_id is not failed + - terminate_id is changed + - '"ec2:TerminateInstances" in terminate_id.resource_actions' + - '"terminate_failed" in terminate_id' + - '"terminate_success" in terminate_id' + - terminate_id.terminate_failed | length == 0 + - terminate_id.terminate_success | length == 1 + - terminate_id.terminate_success[0] == create_instance_id_2 + + - name: "Terminate instance based on id - idempotency (check_mode)" + ec2_instance: + state: absent + instance_ids: + - "{{ create_instance_id_2 }}" + wait: true + register: terminate_id + check_mode: true + + - assert: + that: + - terminate_id is not failed + - terminate_id is not changed + - '"ec2:TerminateInstances" not in terminate_id.resource_actions' + - '"terminate_failed" not in terminate_id' + - '"terminate_success" not in terminate_id' + + - name: "Terminate instance based on id - idempotency" + ec2_instance: + state: absent + instance_ids: + - "{{ create_instance_id_2 }}" + wait: true + register: terminate_id + + - assert: + that: + - terminate_id is not failed + - terminate_id is not changed + - '"ec2:TerminateInstances" not in terminate_id.resource_actions' + - 
'"terminate_failed" not in terminate_id' + - '"terminate_success" not in terminate_id' + +################################################################ + + - name: "Terminate instance based on id (check_mode)" + ec2_instance: + state: absent + instance_ids: + - "{{ create_instance_id_3 }}" + wait: true + register: terminate_id + check_mode: true + + - assert: + that: + - terminate_id is not failed + - terminate_id is changed + - '"ec2:TerminateInstances" not in terminate_id.resource_actions' + - '"terminate_failed" in terminate_id' + - '"terminate_success" in terminate_id' + - terminate_id.terminate_failed | length == 0 + - terminate_id.terminate_success | length == 1 + - terminate_id.terminate_success[0] == create_instance_id_3 + + - name: "Terminate instance based on id" + ec2_instance: + state: absent + instance_ids: + - "{{ create_instance_id_3 }}" + wait: true + register: terminate_id + + - assert: + that: + - terminate_id is not failed + - terminate_id is changed + - '"ec2:TerminateInstances" in terminate_id.resource_actions' + - '"terminate_failed" in terminate_id' + - '"terminate_success" in terminate_id' + - terminate_id.terminate_failed | length == 0 + - terminate_id.terminate_success | length == 1 + - terminate_id.terminate_success[0] == create_instance_id_3 + + - name: "Terminate instance based on id - idempotency (check_mode)" + ec2_instance: + state: absent + instance_ids: + - "{{ create_instance_id_3 }}" + wait: true + register: terminate_id + check_mode: true + + - assert: + that: + - terminate_id is not failed + - terminate_id is not changed + - '"ec2:TerminateInstances" not in terminate_id.resource_actions' + - '"terminate_failed" not in terminate_id' + - '"terminate_success" not in terminate_id' + + - name: "Terminate instance based on id - idempotency" + ec2_instance: + state: absent + instance_ids: + - "{{ create_instance_id_3 }}" + wait: true + register: terminate_id + + - assert: + that: + - terminate_id is not failed + - terminate_id is not changed + - '"ec2:TerminateInstances" not in terminate_id.resource_actions' + - '"terminate_failed" not in terminate_id' + - '"terminate_success" not in terminate_id' + +################################################################ + + - name: "Terminate instance based on id (check_mode)" + ec2_instance: + state: absent + instance_ids: + - "{{ create_instance_id_4 }}" + wait: true + register: terminate_id + check_mode: true + + - assert: + that: + - terminate_id is not failed + - terminate_id is changed + - '"ec2:TerminateInstances" not in terminate_id.resource_actions' + - '"terminate_failed" in terminate_id' + - '"terminate_success" in terminate_id' + - terminate_id.terminate_failed | length == 0 + - terminate_id.terminate_success | length == 1 + - terminate_id.terminate_success[0] == create_instance_id_4 + + - name: "Terminate instance based on id" + ec2_instance: + state: absent + instance_ids: + - "{{ create_instance_id_4 }}" + wait: true + register: terminate_id + + - assert: + that: + - terminate_id is not failed + - terminate_id is changed + - '"ec2:TerminateInstances" in terminate_id.resource_actions' + - '"terminate_failed" in terminate_id' + - '"terminate_success" in terminate_id' + - terminate_id.terminate_failed | length == 0 + - terminate_id.terminate_success | length == 1 + - terminate_id.terminate_success[0] == create_instance_id_4 + + - name: "Terminate instance based on id - idempotency (check_mode)" + ec2_instance: + state: absent + instance_ids: + - "{{ create_instance_id_4 }}" + wait: true + register: 
terminate_id
+ check_mode: true
+
+ - assert:
+ that:
+ - terminate_id is not failed
+ - terminate_id is not changed
+ - '"ec2:TerminateInstances" not in terminate_id.resource_actions'
+ - '"terminate_failed" not in terminate_id'
+ - '"terminate_success" not in terminate_id'
+
+ - name: "Terminate instance based on id - idempotency"
+ ec2_instance:
+ state: absent
+ instance_ids:
+ - "{{ create_instance_id_4 }}"
+ wait: true
+ register: terminate_id
+
+ - assert:
+ that:
+ - terminate_id is not failed
+ - terminate_id is not changed
+ - '"ec2:TerminateInstances" not in terminate_id.resource_actions'
+ - '"terminate_failed" not in terminate_id'
+ - '"terminate_success" not in terminate_id'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/aliases
new file mode 100644
index 000000000..b81074d57
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/aliases
@@ -0,0 +1,6 @@
+time=30m
+
+cloud/aws
+
+ec2_instance_info
+ec2_instance
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/defaults/main.yml
new file mode 100644
index 000000000..065610b00
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+# defaults file for ec2_instance_multiple
+ec2_instance_type: 't3.micro'
+ec2_instance_tag_TestId: '{{ resource_prefix }}-instance-multiple'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/meta/main.yml
new file mode 100644
index 000000000..c3ba887f7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/meta/main.yml
@@ -0,0 +1,6 @@
+# this just makes sure they're in the right place
+dependencies:
+- role: setup_ec2_facts
+- role: setup_ec2_instance_env
+ vars:
+ ec2_instance_test_name: multiple_instances
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/tasks/main.yml
new file mode 100644
index 000000000..911e4c170
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/tasks/main.yml
@@ -0,0 +1,439 @@
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+################################################################
+
+ - name: "Create multiple instances (check_mode)"
+ ec2_instance:
+ instance_type: "{{ ec2_instance_type }}"
+ count: 5
+ region: "{{ aws_region }}"
+ image_id: "{{ ec2_ami_id }}"
+ state: present
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ filters:
+ "tag:TestId": "{{ ec2_instance_tag_TestId }}"
+ register: create_multiple_instances
+ check_mode: true
+
+ - assert:
+ that:
+ - create_multiple_instances is not failed
+ - create_multiple_instances is changed
+ - '"instance_ids" not in create_multiple_instances'
+ - '"ec2:RunInstances" not in
create_multiple_instances.resource_actions' + + - name: "Create multiple instances" + ec2_instance: + instance_type: "{{ ec2_instance_type }}" + count: 5 + region: "{{ aws_region }}" + image_id: "{{ ec2_ami_id }}" + state: present + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + filters: + "tag:TestId": "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_multiple_instances + + - assert: + that: + - create_multiple_instances is not failed + - create_multiple_instances is changed + - '"ec2:RunInstances" in create_multiple_instances.resource_actions' + - '"instance_ids" in create_multiple_instances' + - create_multiple_instances.instance_ids | length == 5 + + - name: "Save instance IDs" + set_fact: + created_instance_ids: "{{ create_multiple_instances.instance_ids }}" + +# Terminate instances created in count test + + - name: "Terminate instance based on id (check_mode)" + ec2_instance: + state: absent + instance_ids: + - "{{ item }}" + register: terminate_id + check_mode: true + with_items: "{{ created_instance_ids }}" + + - assert: + that: + - terminate_id is not failed + - terminate_id is changed + + - name: "Terminate instance based on id" + ec2_instance: + state: absent + instance_ids: + - "{{ item }}" + wait: true + register: terminate_id + with_items: "{{ created_instance_ids }}" + + - assert: + that: + - terminate_id is not failed + - terminate_id is changed + + - name: "Terminate instance based on id - Idempotency (check_mode)" + ec2_instance: + state: absent + instance_ids: + - "{{ item }}" + register: terminate_id + check_mode: true + with_items: "{{ created_instance_ids }}" + + - assert: + that: + - terminate_id is not failed + - terminate_id is not changed + + - name: "Terminate instance based on id - Idempotency" + ec2_instance: + state: absent + instance_ids: + - "{{ item }}" + register: terminate_id + with_items: "{{ created_instance_ids }}" + + - assert: + that: + - terminate_id is not failed + - terminate_id is not changed + +################################################################ + + - name: "Enforce instance count - launch 5 instances (check_mode)" + ec2_instance: + instance_type: "{{ ec2_instance_type }}" + exact_count: 5 + region: "{{ aws_region }}" + name: "{{ resource_prefix }}-test-enf_cnt" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + register: create_multiple_instances + check_mode: true + + - assert: + that: + - create_multiple_instances is not failed + - create_multiple_instances is changed + - '"instance_ids" not in create_multiple_instances' + - '"ec2:RunInstances" not in create_multiple_instances.resource_actions' + + - name: "Enforce instance count - launch 5 instances" + ec2_instance: + instance_type: "{{ ec2_instance_type }}" + exact_count: 5 + region: "{{ aws_region }}" + image_id: "{{ ec2_ami_id }}" + name: "{{ resource_prefix }}-test-enf_cnt" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_multiple_instances + + - assert: + that: + - create_multiple_instances is not failed + - create_multiple_instances is changed + - '"ec2:RunInstances" in create_multiple_instances.resource_actions' + - '"instance_ids" in create_multiple_instances' + - create_multiple_instances.instance_ids | length == 5 + + - name: "Enforce instance count - launch 5 instances (check_mode - Idempotency)" + ec2_instance: + instance_type: "{{ ec2_instance_type }}" + exact_count: 5 + region: "{{ aws_region }}" + image_id: "{{ ec2_ami_id }}" + name: "{{ resource_prefix }}-test-enf_cnt" + tags: + 
TestId: "{{ ec2_instance_tag_TestId }}" + register: create_multiple_instances + check_mode: true + + - assert: + that: + - create_multiple_instances is not failed + - create_multiple_instances is not changed + - '"instance_ids" in create_multiple_instances' + - create_multiple_instances.instance_ids | length == 5 + - '"ec2:RunInstances" not in create_multiple_instances.resource_actions' + + - name: "Enforce instance count - launch 5 instances (Idempotency)" + ec2_instance: + instance_type: "{{ ec2_instance_type }}" + exact_count: 5 + region: "{{ aws_region }}" + image_id: "{{ ec2_ami_id }}" + name: "{{ resource_prefix }}-test-enf_cnt" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_multiple_instances + + - assert: + that: + - create_multiple_instances is not failed + - create_multiple_instances is not changed + - '"instance_ids" in create_multiple_instances' + - create_multiple_instances.instance_ids | length == 5 + - '"ec2:RunInstances" not in create_multiple_instances.resource_actions' + + - name: "Enforce instance count to 3 - Terminate 2 instances (check_mode)" + ec2_instance: + instance_type: "{{ ec2_instance_type }}" + exact_count: 3 + region: "{{ aws_region }}" + image_id: "{{ ec2_ami_id }}" + name: "{{ resource_prefix }}-test-enf_cnt" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + register: terminate_multiple_instances + check_mode: true + + - assert: + that: + - terminate_multiple_instances is not failed + - terminate_multiple_instances is changed + - '"instance_ids" in terminate_multiple_instances' + - terminate_multiple_instances.instance_ids | length == 5 + - '"terminated_ids" in terminate_multiple_instances' + - terminate_multiple_instances.terminated_ids | length == 2 + - '"ec2:RunInstances" not in terminate_multiple_instances.resource_actions' + + - name: "Enforce instance count to 3 - Terminate 2 instances" + ec2_instance: + instance_type: "{{ ec2_instance_type }}" + exact_count: 3 + region: "{{ aws_region }}" + image_id: "{{ ec2_ami_id }}" + name: "{{ resource_prefix }}-test-enf_cnt" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: terminate_multiple_instances + + - assert: + that: + - terminate_multiple_instances is not failed + - terminate_multiple_instances is changed + - '"instance_ids" in terminate_multiple_instances' + - terminate_multiple_instances.instance_ids | length == 5 + - '"terminated_ids" in terminate_multiple_instances' + - terminate_multiple_instances.terminated_ids | length == 2 + + - name: "Enforce instance count to 3 - Terminate 2 instances (check_mode - Idempotency)" + ec2_instance: + instance_type: "{{ ec2_instance_type }}" + exact_count: 3 + region: "{{ aws_region }}" + image_id: "{{ ec2_ami_id }}" + name: "{{ resource_prefix }}-test-enf_cnt" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + register: terminate_multiple_instances + check_mode: true + + - assert: + that: + - terminate_multiple_instances is not failed + - terminate_multiple_instances is not changed + - '"instance_ids" in terminate_multiple_instances' + - terminate_multiple_instances.instance_ids | length == 3 + - '"terminated_ids" not in terminate_multiple_instances' + - '"ec2:TerminateInstances" not in terminate_multiple_instances.resource_actions' + + - name: "Enforce instance count to 3 - Terminate 2 instances (Idempotency)" + ec2_instance: + instance_type: "{{ ec2_instance_type }}" + exact_count: 3 + region: "{{ aws_region }}" + image_id: "{{ ec2_ami_id }}" + name: "{{ resource_prefix }}-test-enf_cnt" + tags: + 
TestId: "{{ ec2_instance_tag_TestId }}"
+ register: terminate_multiple_instances
+
+ - assert:
+ that:
+ - terminate_multiple_instances is not failed
+ - terminate_multiple_instances is not changed
+ - '"instance_ids" in terminate_multiple_instances'
+ - terminate_multiple_instances.instance_ids | length == 3
+ - '"terminated_ids" not in terminate_multiple_instances'
+ - '"ec2:TerminateInstances" not in terminate_multiple_instances.resource_actions'
+
+ - name: "Enforce instance count to 6 - Launch 3 more instances (check_mode)"
+ ec2_instance:
+ instance_type: "{{ ec2_instance_type }}"
+ exact_count: 6
+ region: "{{ aws_region }}"
+ image_id: "{{ ec2_ami_id }}"
+ name: "{{ resource_prefix }}-test-enf_cnt"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ wait: true
+ check_mode: true
+ register: create_multiple_instances
+
+ - assert:
+ that:
+ - create_multiple_instances is not failed
+ - create_multiple_instances is changed
+ - '"instance_ids" in create_multiple_instances'
+ - create_multiple_instances.instance_ids | length == 3
+ - '"changed_ids" not in create_multiple_instances'
+ - '"ec2:RunInstances" not in create_multiple_instances.resource_actions'
+
+ - name: "Enforce instance count to 6 - Launch 3 more instances"
+ ec2_instance:
+ instance_type: "{{ ec2_instance_type }}"
+ exact_count: 6
+ region: "{{ aws_region }}"
+ image_id: "{{ ec2_ami_id }}"
+ name: "{{ resource_prefix }}-test-enf_cnt"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ wait: true
+ register: create_multiple_instances
+
+ - name: Show the instance IDs after enforcing a count of 6
+ debug: msg="{{ create_multiple_instances.instance_ids }}"
+
+ - assert:
+ that:
+ - create_multiple_instances is not failed
+ - create_multiple_instances is changed
+ - '"instance_ids" in create_multiple_instances'
+ - create_multiple_instances.instance_ids | length == 6
+ - '"changed_ids" in create_multiple_instances'
+ - create_multiple_instances.changed_ids | length == 3
+
+ - name: "Enforce instance count to 6 - Launch 3 more instances (check_mode - Idempotency)"
+ ec2_instance:
+ instance_type: "{{ ec2_instance_type }}"
+ exact_count: 6
+ region: "{{ aws_region }}"
+ image_id: "{{ ec2_ami_id }}"
+ name: "{{ resource_prefix }}-test-enf_cnt"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ wait: true
+ check_mode: true
+ register: create_multiple_instances
+
+ - assert:
+ that:
+ - create_multiple_instances is not failed
+ - create_multiple_instances is not changed
+ - '"instance_ids" in create_multiple_instances'
+ - create_multiple_instances.instance_ids | length == 6
+ - '"changed_ids" not in create_multiple_instances'
+ - '"ec2:RunInstances" not in create_multiple_instances.resource_actions'
+
+ - name: "Enforce instance count to 6 - Launch 3 more instances (Idempotency)"
+ ec2_instance:
+ instance_type: "{{ ec2_instance_type }}"
+ exact_count: 6
+ region: "{{ aws_region }}"
+ image_id: "{{ ec2_ami_id }}"
+ name: "{{ resource_prefix }}-test-enf_cnt"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ wait: true
+ register: create_multiple_instances
+
+ - assert:
+ that:
+ - create_multiple_instances is not failed
+ - create_multiple_instances is not changed
+ - '"instance_ids" in create_multiple_instances'
+ - create_multiple_instances.instance_ids | length == 6
+ - '"changed_ids" not in create_multiple_instances'
+ - '"ec2:RunInstances" not in create_multiple_instances.resource_actions'
+
+
+ - name: Gather information about any running instance with Name ending with "-test-enf_cnt"
+ ec2_instance_info:
+ region: "{{ aws_region }}"
+ filters:
+ "tag:Name": "*-test-enf_cnt"
+ instance-state-name: [ "running"]
+ register: test_instances
+
+ - name: Make a list of instance ids to terminate
+ set_fact:
+ instances_to_terminate: "{{ test_instances.instances | map(attribute='instance_id') | list }}"
+
+# Terminate instances created in enforce count test
+
+ - name: "Terminate instance based on id (check_mode)"
+ ec2_instance:
+ state: absent
+ instance_ids:
+ - "{{ item }}"
+ wait: true
+ register: terminate_id
+ check_mode: true
+ with_items: "{{ instances_to_terminate }}"
+
+ - assert:
+ that:
+ - terminate_id is not failed
+ - terminate_id is changed
+
+ - name: "Terminate instance based on id"
+ ec2_instance:
+ state: absent
+ instance_ids:
+ - "{{ item }}"
+ wait: true
+ register: terminate_id
+ with_items: "{{ instances_to_terminate }}"
+
+ - assert:
+ that:
+ - terminate_id is not failed
+ - terminate_id is changed
+
+ - name: "Terminate instance based on id - Idempotency (check_mode)"
+ ec2_instance:
+ state: absent
+ instance_ids:
+ - "{{ item }}"
+ wait: true
+ register: terminate_id
+ check_mode: true
+ with_items: "{{ instances_to_terminate }}"
+
+ - assert:
+ that:
+ - terminate_id is not failed
+ - terminate_id is not changed
+
+ - name: "Terminate instance based on id - Idempotency"
+ ec2_instance:
+ state: absent
+ instance_ids:
+ - "{{ item }}"
+ wait: true
+ register: terminate_id
+ with_items: "{{ instances_to_terminate }}"
+
+ - assert:
+ that:
+ - terminate_id is not failed
+ - terminate_id is not changed
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/aliases
new file mode 100644
index 000000000..ca83d373d
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/aliases
@@ -0,0 +1,6 @@
+time=6m
+
+cloud/aws
+
+ec2_instance_info
+ec2_instance
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/defaults/main.yml
new file mode 100644
index 000000000..154ca799c
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+# defaults file for ec2_instance_no_wait
+ec2_instance_type: 't3.micro'
+ec2_instance_tag_TestId: '{{ resource_prefix }}-instance-no-wait'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/meta/main.yml
new file mode 100644
index 000000000..3014864e5
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/meta/main.yml
@@ -0,0 +1,6 @@
+# this just makes sure they're in the right place
+dependencies:
+- role: setup_ec2_facts
+- role: setup_ec2_instance_env
+ vars:
+ ec2_instance_test_name: no_wait
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/tasks/main.yml
new file mode 100644
index 000000000..f279e46c3
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/tasks/main.yml @@ -0,0 +1,58 @@ +- module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - name: "New instance and don't wait for it to complete" + ec2_instance: + state: present + name: "{{ resource_prefix }}-test-no-wait" + image_id: "{{ ec2_ami_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + wait: false + instance_type: "{{ ec2_instance_type }}" + register: in_test_vpc + + - assert: + that: + - in_test_vpc is not failed + - in_test_vpc is changed + - in_test_vpc.instances is not defined + - in_test_vpc.instance_ids is defined + - in_test_vpc.instance_ids | length > 0 + + - name: "New instance and don't wait for it to complete ( check mode )" + ec2_instance: + state: present + name: "{{ resource_prefix }}-test-no-wait-checkmode" + image_id: "{{ ec2_ami_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + wait: false + instance_type: "{{ ec2_instance_type }}" + check_mode: yes + + - name: "Facts for ec2 test instance" + ec2_instance_info: + filters: + "tag:Name": "{{ resource_prefix }}-test-no-wait" + register: real_instance_fact + until: real_instance_fact.instances | length > 0 + retries: 10 + + - name: "Facts for checkmode ec2 test instance" + ec2_instance_info: + filters: + "tag:Name": "{{ resource_prefix }}-test-no-wait-checkmode" + register: checkmode_instance_fact + + - name: "Confirm whether the check mode is working normally." + assert: + that: + - "{{ real_instance_fact.instances | length }} > 0" + - "{{ checkmode_instance_fact.instances | length }} == 0" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/aliases new file mode 100644 index 000000000..ca83d373d --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/aliases @@ -0,0 +1,6 @@ +time=6m + +cloud/aws + +ec2_instance_info +ec2_instance diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/defaults/main.yml new file mode 100644 index 000000000..07d18b5a8 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/defaults/main.yml @@ -0,0 +1,4 @@ +--- +# defaults file for ec2_instance_metadata_options +ec2_instance_type: 't3.micro' +ec2_instance_tag_TestId: '{{ resource_prefix }}-instance-metadata' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/meta/main.yml new file mode 100644 index 000000000..78ebf425e --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/meta/main.yml @@ -0,0 +1,9 @@ +# this just makes sure they're in the right place +dependencies: +- role: setup_ec2_facts +- role: setup_botocore_pip + vars: + botocore_version: 1.23.30 +- role: setup_ec2_instance_env + vars: + ec2_instance_test_name: metadata diff --git 
a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/tasks/main.yml new file mode 100644 index 000000000..57d588151 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/tasks/main.yml @@ -0,0 +1,98 @@ +- name: test with boto3 version that does not support instance_metadata_tags + module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - name: "fail create t3.nano instance with metadata_options" + ec2_instance: + state: present + name: "{{ resource_prefix }}-test-t3nano-enabled-required" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + instance_type: t3.nano + metadata_options: + http_endpoint: enabled + http_tokens: required + instance_metadata_tags: enabled + wait: false + ignore_errors: yes + register: instance_creation + + - name: verify fail instance with metadata_options because insufficient boto3 requirements + assert: + that: + - instance_creation is failed + - instance_creation is not changed + - "'This is required to set instance_metadata_tags' in instance_creation.msg" + +- name: test with boto3 version that supports instance_metadata_tags + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - name: "create t3.nano instance with metadata_options" + ec2_instance: + state: present + name: "{{ resource_prefix }}-test-t3nano-enabled-required" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + instance_type: t3.nano + metadata_options: + http_endpoint: enabled + http_tokens: required + instance_metadata_tags: enabled + wait: false + register: instance_creation + + - name: "instance with metadata_options created with the right options" + assert: + that: + - instance_creation is success + - instance_creation is changed + - "'{{ instance_creation.spec.MetadataOptions.HttpEndpoint }}' == 'enabled'" + - "'{{ instance_creation.spec.MetadataOptions.HttpTokens }}' == 'required'" + - "'{{ instance_creation.spec.MetadataOptions.InstanceMetadataTags }}' == 'enabled'" + + - name: "modify metadata_options on existing instance" + ec2_instance: + state: present + name: "{{ resource_prefix }}-test-t3nano-enabled-required" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + instance_type: t3.nano + metadata_options: + http_endpoint: enabled + http_tokens: optional + wait: false + register: metadata_options_update + ignore_errors: yes + + - name: "fact presented ec2 instance" + ec2_instance_info: + filters: + "tag:Name": "{{ resource_prefix }}-test-t3nano-enabled-required" + register: presented_instance_fact + + - name: "modify metadata_options has no effect on existing instance" + assert: + that: + - metadata_options_update is success + - metadata_options_update is not changed + - "{{ presented_instance_fact.instances | length }} > 0" + - "'{{ 
presented_instance_fact.instances.0.state.name }}' in ['running','pending']" + - "'{{ presented_instance_fact.instances.0.metadata_options.http_endpoint }}' == 'enabled'" + - "'{{ presented_instance_fact.instances.0.metadata_options.http_tokens }}' == 'required'" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/aliases new file mode 100644 index 000000000..ca83d373d --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/aliases @@ -0,0 +1,6 @@ +time=6m + +cloud/aws + +ec2_instance_info +ec2_instance diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/defaults/main.yml new file mode 100644 index 000000000..3645fcabd --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/defaults/main.yml @@ -0,0 +1,4 @@ +--- +# defaults file for ec2_instance_security_group +ec2_instance_type: 't3.micro' +ec2_instance_tag_TestId: '{{ resource_prefix }}-instance-sg' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/meta/main.yml new file mode 100644 index 000000000..2c8aa2e43 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/meta/main.yml @@ -0,0 +1,6 @@ +# this just makes sure they're in the right place +dependencies: +- role: setup_ec2_facts +- role: setup_ec2_instance_env + vars: + ec2_instance_test_name: security_groups diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/tasks/main.yml new file mode 100644 index 000000000..47b1c963e --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/tasks/main.yml @@ -0,0 +1,87 @@ +- module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - name: "New instance with 2 security groups" + ec2_instance: + name: "{{ resource_prefix }}-test-security-groups" + image_id: "{{ ec2_ami_id }}" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + tags: + TestId: "{{ resource_prefix }}" + instance_type: t2.micro + wait: false + security_groups: + - "{{ sg.group_id }}" + - "{{ sg2.group_id }}" + register: security_groups_test + + - name: "Recreate same instance with 2 security groups ( Idempotency )" + ec2_instance: + name: "{{ resource_prefix }}-test-security-groups" + image_id: "{{ ec2_ami_id }}" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + tags: + TestId: "{{ resource_prefix }}" + instance_type: t2.micro + wait: false + security_groups: + - "{{ sg.group_id }}" + - "{{ sg2.group_id }}" + register: security_groups_test_idempotency + + - name: "Gather ec2 facts to check SGs have been added" + ec2_instance_info: + filters: + "tag:Name": "{{ resource_prefix }}-test-security-groups" + "instance-state-name": "running" + register: dual_sg_instance_facts + until: dual_sg_instance_facts.instances | length > 0 + retries: 10 + + - name: "Remove 
secondary security group from instance"
+ ec2_instance:
+ name: "{{ resource_prefix }}-test-security-groups"
+ image_id: "{{ ec2_ami_id }}"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ resource_prefix }}"
+ instance_type: t2.micro
+ security_groups:
+ - "{{ sg.group_id }}"
+ register: remove_secondary_security_group
+
+ - name: "Gather ec2 facts to check secondary SG has been removed"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-security-groups"
+ "instance-state-name": "running"
+ register: single_sg_instance_facts
+ until: single_sg_instance_facts.instances | length > 0
+ retries: 10
+
+ - name: "Add secondary security group to instance"
+ ec2_instance:
+ name: "{{ resource_prefix }}-test-security-groups"
+ image_id: "{{ ec2_ami_id }}"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ resource_prefix }}"
+ instance_type: t2.micro
+ security_groups:
+ - "{{ sg.group_id }}"
+ - "{{ sg2.group_id }}"
+ register: add_secondary_security_group
+
+ - assert:
+ that:
+ - security_groups_test is not failed
+ - security_groups_test is changed
+ - security_groups_test_idempotency is not changed
+ - remove_secondary_security_group is changed
+ - single_sg_instance_facts.instances.0.security_groups | length == 1
+ - dual_sg_instance_facts.instances.0.security_groups | length == 2
+ - add_secondary_security_group is changed
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/aliases
new file mode 100644
index 000000000..ca83d373d
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/aliases
@@ -0,0 +1,6 @@
+time=6m
+
+cloud/aws
+
+ec2_instance_info
+ec2_instance
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/defaults/main.yml
new file mode 100644
index 000000000..269677f92
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+# defaults file for ec2_instance_state_config_updates
+ec2_instance_type: 't3.micro'
+ec2_instance_tag_TestId: '{{ resource_prefix }}-state-config-updates'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/meta/main.yml
new file mode 100644
index 000000000..c9fdd98d9
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/meta/main.yml
@@ -0,0 +1,6 @@
+# this just makes sure they're in the right place
+dependencies:
+- role: setup_ec2_facts
+- role: setup_ec2_instance_env
+ vars:
+ ec2_instance_test_name: state_config_updates
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/tasks/main.yml
new file mode 100644
index 000000000..0d5d5a5c2
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/tasks/main.yml
@@ -0,0 +1,133 @@
+# Test that configuration changes, like security groups and instance attributes,
+# are
updated correctly when the instance has different states, and also when +# changing the state of an instance. +# https://github.com/ansible-collections/community.aws/issues/16 +- module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - name: "Make instance with sg and termination protection enabled" + ec2_instance: + state: running + name: "{{ resource_prefix }}-test-state-param-changes" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + security_groups: "{{ sg.group_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + termination_protection: False + instance_type: "{{ ec2_instance_type }}" + wait: True + register: create_result + + - assert: + that: + - create_result is not failed + - create_result.changed + - '"instances" in create_result' + - '"instance_ids" in create_result' + - '"spec" in create_result' + - create_result.instances[0].security_groups[0].group_id == "{{ sg.group_id }}" + - create_result.spec.DisableApiTermination == False + + - name: "Change sg and termination protection while instance is in state running" + ec2_instance: + state: running + name: "{{ resource_prefix }}-test-state-param-changes" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + security_groups: "{{ sg2.group_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + termination_protection: True + instance_type: "{{ ec2_instance_type }}" + register: change_params_result + + - assert: + that: + - change_params_result is not failed + - change_params_result.changed + - '"instances" in change_params_result' + - '"instance_ids" in change_params_result' + - '"changes" in change_params_result' + - change_params_result.instances[0].security_groups[0].group_id == "{{ sg2.group_id }}" + - change_params_result.changes[0].DisableApiTermination.Value == True + - change_params_result.changes[1].Groups[0] == "{{ sg2.group_id }}" # TODO fix this to be less fragile + + + - name: "Change instance state from running to stopped, and change sg and termination protection" + ec2_instance: + state: stopped + name: "{{ resource_prefix }}-test-state-param-changes" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + security_groups: "{{ sg.group_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + termination_protection: False + instance_type: "{{ ec2_instance_type }}" + register: change_state_params_result + + - assert: + that: + - change_state_params_result is not failed + - change_state_params_result.changed + - '"instances" in change_state_params_result' + - '"instance_ids" in change_state_params_result' + - '"changes" in change_state_params_result' + - '"stop_success" in change_state_params_result' + - '"stop_failed" in change_state_params_result' + - change_state_params_result.instances[0].security_groups[0].group_id == "{{ sg.group_id }}" + - change_state_params_result.changes[0].DisableApiTermination.Value == False + + - name: "Change sg and termination protection while instance is in state stopped" + ec2_instance: + state: stopped + name: "{{ resource_prefix }}-test-state-param-changes" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + security_groups: "{{ sg2.group_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + termination_protection: True + instance_type: "{{ ec2_instance_type }}" + register: 
change_params_stopped_result + + - assert: + that: + - change_params_stopped_result is not failed + - change_params_stopped_result.changed + - '"instances" in change_params_stopped_result' + - '"instance_ids" in change_params_stopped_result' + - '"changes" in change_params_stopped_result' + - change_params_stopped_result.instances[0].security_groups[0].group_id == "{{ sg2.group_id }}" + - change_params_stopped_result.changes[0].DisableApiTermination.Value == True + + - name: "Change instance state from stopped to running, and change sg and termination protection" + ec2_instance: + state: running + name: "{{ resource_prefix }}-test-state-param-changes" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + security_groups: "{{ sg.group_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + termination_protection: False + instance_type: "{{ ec2_instance_type }}" + wait: True + register: change_params_start_result + + - assert: + that: + - change_params_start_result is not failed + - change_params_start_result.changed + - '"instances" in change_params_start_result' + - '"instance_ids" in change_params_start_result' + - '"changes" in change_params_start_result' + - '"start_success" in change_params_start_result' + - '"start_failed" in change_params_start_result' + - change_params_start_result.instances[0].security_groups[0].group_id == "{{ sg.group_id }}" + - change_params_start_result.changes[0].DisableApiTermination.Value == False diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/aliases new file mode 100644 index 000000000..ca83d373d --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/aliases @@ -0,0 +1,6 @@ +time=6m + +cloud/aws + +ec2_instance_info +ec2_instance diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/defaults/main.yml new file mode 100644 index 000000000..0c09a7aab --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/defaults/main.yml @@ -0,0 +1,4 @@ +--- +# defaults file for ec2_instance_tags_and_vpc_settings +ec2_instance_type: 't3.micro' +ec2_instance_tag_TestId: '{{ resource_prefix }}-instance-tags-vpc' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/meta/main.yml new file mode 100644 index 000000000..3a3510065 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/meta/main.yml @@ -0,0 +1,6 @@ +# this just makes sure they're in the right place +dependencies: +- role: setup_ec2_facts +- role: setup_ec2_instance_env + vars: + ec2_instance_test_name: tags_and_vpc diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/tasks/main.yml new file mode 100644 index 000000000..71551ef29 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/tasks/main.yml @@ -0,0 +1,179 @@ +- 
module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ - name: "Make instance in the testing subnet created in the test VPC"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-basic-vpc-create"
+ image_id: "{{ ec2_ami_id }}"
+ user_data: |
+ #cloud-config
+ package_upgrade: true
+ package_update: true
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ Something: else
+ security_groups: "{{ sg.group_id }}"
+ network:
+ source_dest_check: false
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ instance_type: "{{ ec2_instance_type }}"
+ wait: false
+ register: in_test_vpc
+
+ - name: "Make instance in the testing subnet created in the test VPC (check mode)"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-basic-vpc-create-checkmode"
+ image_id: "{{ ec2_ami_id }}"
+ user_data: |
+ #cloud-config
+ package_upgrade: true
+ package_update: true
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ Something: else
+ security_groups: "{{ sg.group_id }}"
+ network:
+ source_dest_check: false
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ instance_type: "{{ ec2_instance_type }}"
+ check_mode: yes
+
+ - name: "Try to re-make the instance (should show changed=False)"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-basic-vpc-create"
+ image_id: "{{ ec2_ami_id }}"
+ user_data: |
+ #cloud-config
+ package_upgrade: true
+ package_update: true
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ Something: else
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ instance_type: "{{ ec2_instance_type }}"
+ register: remake_in_test_vpc
+ - name: "Remaking the same instance resulted in no changes"
+ assert:
+ that: not remake_in_test_vpc.changed
+ - name: "check that instance IDs match anyway"
+ assert:
+ that: 'remake_in_test_vpc.instance_ids[0] == in_test_vpc.instance_ids[0]'
+ - name: "check that source_dest_check was set to false"
+ assert:
+ that: 'not remake_in_test_vpc.instances[0].source_dest_check'
+
+ - name: "Gather facts for the created ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-basic-vpc-create"
+ register: presented_instance_fact
+
+ - name: "Gather facts for the check mode ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-basic-vpc-create-checkmode"
+ register: checkmode_instance_fact
+
+ - name: "Confirm whether the check mode is working normally."
+ assert: + that: + - "{{ presented_instance_fact.instances | length }} > 0" + - "{{ checkmode_instance_fact.instances | length }} == 0" + + - name: "Alter it by adding tags" + ec2_instance: + state: present + name: "{{ resource_prefix }}-test-basic-vpc-create" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + Another: thing + purge_tags: false + security_groups: "{{ sg.group_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + instance_type: "{{ ec2_instance_type }}" + register: add_another_tag + + - ec2_instance_info: + instance_ids: "{{ add_another_tag.instance_ids }}" + register: check_tags + - name: "Remaking the same instance resulted in no changes" + assert: + that: + - check_tags.instances[0].tags.Another == 'thing' + - check_tags.instances[0].tags.Something == 'else' + + - name: "Purge a tag" + ec2_instance: + state: present + name: "{{ resource_prefix }}-test-basic-vpc-create" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + Another: thing + security_groups: "{{ sg.group_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + instance_type: "{{ ec2_instance_type }}" + + - ec2_instance_info: + instance_ids: "{{ add_another_tag.instance_ids }}" + register: check_tags + + - name: "Remaking the same instance resulted in no changes" + assert: + that: + - "'Something' not in check_tags.instances[0].tags" + + - name: "check that subnet-default public IP rule was followed" + assert: + that: + - check_tags.instances[0].public_dns_name == "" + - check_tags.instances[0].private_ip_address.startswith(subnet_b_startswith) + - check_tags.instances[0].subnet_id == testing_subnet_b.subnet.id + - name: "check that tags were applied" + assert: + that: + - check_tags.instances[0].tags.Name.startswith(resource_prefix) + - "'{{ check_tags.instances[0].state.name }}' in ['pending', 'running']" + + - name: "Try setting purge_tags to True without specifiying tags (should NOT purge tags)" + ec2_instance: + state: present + name: "{{ resource_prefix }}-test-basic-vpc-create" + image_id: "{{ ec2_ami_id }}" + purge_tags: true + security_groups: "{{ sg.group_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + instance_type: "{{ ec2_instance_type }}" + register: _purge_tags_without_tags + + - name: Assert tags were not purged + assert: + that: + - _purge_tags_without_tags.instances[0].tags | length > 1 + + - name: "Purge all tags (aside from Name)" + ec2_instance: + state: present + name: "{{ resource_prefix }}-test-basic-vpc-create" + image_id: "{{ ec2_ami_id }}" + purge_tags: true + tags: {} + security_groups: "{{ sg.group_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + instance_type: "{{ ec2_instance_type }}" + register: _purge_tags + + - name: Assert tags were purged + assert: + that: + - _purge_tags.instances[0].tags | length == 1 + - _purge_tags.instances[0].tags.Name.startswith(resource_prefix) diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/aliases new file mode 100644 index 000000000..ca83d373d --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/aliases @@ -0,0 +1,6 @@ +time=6m + +cloud/aws + +ec2_instance_info +ec2_instance diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/defaults/main.yml 
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/aliases new file mode 100644 index 000000000..ca83d373d --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/aliases @@ -0,0 +1,6 @@ +time=6m + +cloud/aws + +ec2_instance_info +ec2_instance diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/defaults/main.yml new file mode 100644 index 000000000..a5cac7423 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/defaults/main.yml @@ -0,0 +1,4 @@ +--- +# defaults file for ec2_instance_termination_protection +ec2_instance_type: 't3.micro' +ec2_instance_tag_TestId: '{{ resource_prefix }}-instance-termination' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/meta/main.yml new file mode 100644 index 000000000..b75f3dd58 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/meta/main.yml @@ -0,0 +1,6 @@ +# this just makes sure they're in the right place +dependencies: +- role: setup_ec2_facts +- role: setup_ec2_instance_env + vars: + ec2_instance_test_name: termination_protection diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/tasks/main.yml new file mode 100644 index 000000000..4c888592b --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/tasks/main.yml @@ -0,0 +1,250 @@ +- module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - name: Create instance with termination protection (check mode) + ec2_instance: + name: "{{ resource_prefix }}-termination-protection" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ resource_prefix }}" + security_groups: "{{ sg.group_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + termination_protection: true + instance_type: "{{ ec2_instance_type }}" + state: running + wait: yes + check_mode: yes + register: create_instance_check_mode_results
+ + - name: Check the returned value for the earlier task + assert: + that: + - create_instance_check_mode_results is changed + - create_instance_check_mode_results.spec.DisableApiTermination == True
+ + - name: Create instance with termination protection + ec2_instance: + name: "{{ resource_prefix }}-termination-protection" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ resource_prefix }}" + security_groups: "{{ sg.group_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + termination_protection: true + instance_type: "{{ ec2_instance_type }}" + state: running + wait: yes + register: create_instance_results
+ + - set_fact: + instance_id: '{{ create_instance_results.instances[0].instance_id }}'
+ + - name: Check return values of the create instance task + assert: + that: + - create_instance_results.instances | length > 0 + - create_instance_results.instances.0.state.name == 'running' + - create_instance_results.spec.DisableApiTermination
+ + - name: Get info on termination protection + command: 'aws ec2 describe-instance-attribute --attribute disableApiTermination --instance-id {{ instance_id }}' + environment: + AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" + AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" + AWS_SESSION_TOKEN: "{{ security_token | default('') }}" + AWS_DEFAULT_REGION: "{{ aws_region }}" + register: instance_termination_check
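The command task above checks the module's work through an independent path: module_defaults do not apply to the command module, so credentials travel as environment variables and the JSON on stdout is parsed back with from_json. A trimmed sketch of that out-of-band verification pattern (placeholder instance ID):

    - name: Query the attribute straight from the EC2 API
      command: 'aws ec2 describe-instance-attribute --attribute disableApiTermination --instance-id i-0123456789abcdef0'
      environment:
        AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
        AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
        AWS_DEFAULT_REGION: "{{ aws_region }}"
      register: raw_attribute

    - set_fact:
        attribute: "{{ raw_attribute.stdout | from_json }}"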
+ + - name: Convert the CLI output to an object + set_fact: + instance_termination_status: "{{ instance_termination_check.stdout | from_json }}"
+ + - name: Assert termination protection is enabled on the new instance + assert: + that: + - instance_termination_status.DisableApiTermination.Value == true
+ + - name: Create instance with termination protection (check mode) (idempotent) + ec2_instance: + name: "{{ resource_prefix }}-termination-protection" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ resource_prefix }}" + security_groups: "{{ sg.group_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + termination_protection: true + instance_type: "{{ ec2_instance_type }}" + state: running + wait: yes + check_mode: yes + register: create_instance_check_mode_results
+ + - name: Check the returned value for the earlier task + assert: + that: + - create_instance_check_mode_results is not changed
+ + - name: Create instance with termination protection (idempotent) + ec2_instance: + name: "{{ resource_prefix }}-termination-protection" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ resource_prefix }}" + security_groups: "{{ sg.group_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + termination_protection: true + instance_type: "{{ ec2_instance_type }}" + state: running + wait: yes + register: create_instance_results
+ + - name: Check return values of the create instance task + assert: + that: + - not create_instance_results.changed + - create_instance_results.instances | length > 0
+ + - name: Try to terminate the instance (expected to fail) + ec2_instance: + filters: + tag:Name: "{{ resource_prefix }}-termination-protection" + state: absent + failed_when: "'Unable to terminate instances' not in terminate_instance_results.msg" + register: terminate_instance_results
+ + - name: Set termination protection to false (check_mode) + ec2_instance: + name: "{{ resource_prefix }}-termination-protection" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ resource_prefix }}" + termination_protection: false + instance_type: "{{ ec2_instance_type }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + check_mode: True + register: set_termination_protection_check_mode_results
+ + - name: Check return value + assert: + that: + - set_termination_protection_check_mode_results.changed
+ + - name: Get info on termination protection + command: 'aws ec2 describe-instance-attribute --attribute disableApiTermination --instance-id {{ instance_id }}' + environment: + AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" + AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" + AWS_SESSION_TOKEN: "{{ security_token | default('') }}" + AWS_DEFAULT_REGION: "{{ aws_region }}" + register: instance_termination_check
+ + - name: Convert the CLI output to an object + set_fact: + instance_termination_status: "{{ instance_termination_check.stdout | from_json }}"
+ + - assert: + that: + - instance_termination_status.DisableApiTermination.Value == true
+ + - name: Set termination protection to false + ec2_instance: + name: "{{ resource_prefix }}-termination-protection" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ resource_prefix }}" + termination_protection: false + instance_type: "{{ ec2_instance_type }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + register: set_termination_protection_results
+ + - name: Check return value + assert: + that: + - set_termination_protection_results.changed
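The "expected to fail" terminate attempt earlier in this block inverts failure handling with failed_when: the task only fails if the termination unexpectedly succeeds, i.e. when the API error text is absent from the module's message. A minimal sketch of that expected-failure pattern, with a hypothetical tag value:

    - name: Try to terminate a protected instance (must be refused)
      ec2_instance:
        filters:
          tag:Name: demo-instance   # hypothetical
        state: absent
      register: attempt
      failed_when: "'Unable to terminate instances' not in attempt.msg"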
+ + - name: Get info on termination protection + command: 'aws ec2 describe-instance-attribute --attribute disableApiTermination --instance-id {{ instance_id }}' + environment: + AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" + AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" + AWS_SESSION_TOKEN: "{{ security_token | default('') }}" + AWS_DEFAULT_REGION: "{{ aws_region }}" + register: instance_termination_check
+ + - name: Convert the CLI output to an object + set_fact: + instance_termination_status: "{{ instance_termination_check.stdout | from_json }}"
+ + - assert: + that: + - instance_termination_status.DisableApiTermination.Value == false
+ + - name: Set termination protection to false (idempotent) + ec2_instance: + name: "{{ resource_prefix }}-termination-protection" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ resource_prefix }}" + termination_protection: false + instance_type: "{{ ec2_instance_type }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + register: set_termination_protection_results
+ + - name: Check return value + assert: + that: + - not set_termination_protection_results.changed
+ + - name: Set termination protection to true + ec2_instance: + name: "{{ resource_prefix }}-termination-protection" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ resource_prefix }}" + termination_protection: true + instance_type: "{{ ec2_instance_type }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + register: set_termination_protection_results
+ + - name: Check return value + assert: + that: + - set_termination_protection_results.changed + - set_termination_protection_results.changes[0].DisableApiTermination.Value
+ + - name: Set termination protection to true (idempotent) + ec2_instance: + name: "{{ resource_prefix }}-termination-protection" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ resource_prefix }}" + termination_protection: true + instance_type: "{{ ec2_instance_type }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + register: set_termination_protection_results
+ + - name: Check return value + assert: + that: + - not set_termination_protection_results.changed
+ + - name: Set termination protection to false (so we can terminate instance) + ec2_instance: + name: "{{ resource_prefix }}-termination-protection" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ resource_prefix }}" + termination_protection: false + instance_type: "{{ ec2_instance_type }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + register: set_termination_protection_results
+ + - name: Terminate the instance + ec2_instance: + filters: + tag:TestId: "{{ resource_prefix }}" + state: absent
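Tagging every resource with a shared TestId, as these targets do, lets the final task above tear down all leftovers with one filter-based call instead of tracking individual instance IDs. A minimal sketch of the same sweep, assuming a shared tag value:

    - name: Terminate everything this test run created
      ec2_instance:
        filters:
          tag:TestId: demo-test-run   # hypothetical tag value
        state: absent
        wait: true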
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/aliases new file mode 100644 index 000000000..ca83d373d --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/aliases @@ -0,0 +1,6 @@ +time=6m + +cloud/aws + +ec2_instance_info +ec2_instance diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/defaults/main.yml new file mode 100644 index 000000000..a51f9bf2c --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/defaults/main.yml @@ -0,0 +1,4 @@ +--- +# defaults file for ec2_instance_uptime +ec2_instance_type: 't3.micro' +ec2_instance_tag_TestId: '{{ resource_prefix }}-instance-uptime' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/meta/main.yml new file mode 100644 index 000000000..6651aa834 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/meta/main.yml @@ -0,0 +1,6 @@ +# this just makes sure they're in the right place +dependencies: +- role: setup_ec2_facts +- role: setup_ec2_instance_env + vars: + ec2_instance_test_name: uptime diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/tasks/main.yml new file mode 100644 index 000000000..6f7cf38dd --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/tasks/main.yml @@ -0,0 +1,63 @@ +--- +- module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - name: "create t3.nano instance" + ec2_instance: + name: "{{ resource_prefix }}-test-uptime" + region: "{{ ec2_region }}" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + instance_type: t3.nano + wait: yes
+ + - name: "check ec2 instance" + ec2_instance_info: + filters: + "tag:Name": "{{ resource_prefix }}-test-uptime" + instance-state-name: [ "running" ] + register: instance_facts
+ + - name: "Confirm existence of instance id." + assert: + that: + - instance_facts.instances | length == 1
+ + - name: "check using uptime 100 hours - should find nothing" + ec2_instance_info: + region: "{{ ec2_region }}" + uptime: 6000 + filters: + instance-state-name: [ "running" ] + "tag:Name": "{{ resource_prefix }}-test-uptime" + register: instance_facts
+ + - name: "Confirm there is no running instance" + assert: + that: + - instance_facts.instances | length == 0
+ + - name: Sleep for 61 seconds and continue with play + wait_for: + timeout: 61 + delegate_to: localhost
+ + - name: "check using uptime 1 minute" + ec2_instance_info: + region: "{{ ec2_region }}" + uptime: 1 + filters: + instance-state-name: [ "running" ] + "tag:Name": "{{ resource_prefix }}-test-uptime" + register: instance_facts
+ + - name: "Confirm there is one running instance" + assert: + that: + - instance_facts.instances | length == 1 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/aliases new file mode 100644 index 000000000..e1a28da55 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/aliases @@ -0,0 +1,5 @@ +# reason: missing-dependency +# We need either the openssl binary, pycrypto, or a compiler on the Py36 and Py38 +# Zuul nodes +# https://github.com/ansible-collections/amazon.aws/issues/428 +cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/defaults/main.yml new file mode 100644 index 000000000..df0082d99 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/defaults/main.yml @@ -0,0 +1,3 @@ +--- +# defaults file for test_ec2_key +ec2_key_name: '{{resource_prefix}}'
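The uptime checks above rely on ec2_instance_info's uptime parameter being a minimum age expressed in minutes: 6000 minutes is the "100 hours" case that matches nothing, and the 61-second sleep is just enough to satisfy uptime: 1. A minimal sketch of age-based filtering:

    - name: Find instances that have been running for at least an hour
      ec2_instance_info:
        uptime: 60   # minutes, not seconds
        filters:
          instance-state-name: [ "running" ]
      register: mature_instances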
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/meta/main.yml new file mode 100644 index 000000000..d9abc1110 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/meta/main.yml @@ -0,0 +1,5 @@ +dependencies: + - setup_sshkey + - role: setup_botocore_pip + vars: + botocore_version: '1.21.23' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/tasks/main.yml new file mode 100644 index 000000000..8aa461039 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/tasks/main.yml @@ -0,0 +1,461 @@ +--- +# TODO - name: test 'validate_certs' parameter +# ============================================================= + +- module_defaults: + group/aws: + region: '{{ aws_region }}' + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + block:
+ + # ============================================================ + - name: test with no parameters + ec2_key: + register: result + ignore_errors: true
+ + - name: assert failure when called with no parameters + assert: + that: + - 'result.failed' + - 'result.msg == "missing required arguments: name"'
+ + # ============================================================ + - name: test removing a non-existent key pair (check mode) + ec2_key: + name: '{{ ec2_key_name }}' + state: absent + register: result + check_mode: true
+ + - name: assert removing a non-existent key pair + assert: + that: + - 'not result.changed'
+ + - name: test removing a non-existent key pair + ec2_key: + name: '{{ ec2_key_name }}' + state: absent + register: result
+ + - name: assert removing a non-existent key pair + assert: + that: + - 'not result.changed'
+ + # ============================================================ + - name: test creating a new key pair (check_mode) + ec2_key: + name: '{{ ec2_key_name }}' + state: present + tags: + snake_case: 'a_snake_case_value' + CamelCase: 'CamelCaseValue' + "spaced key": 'Spaced value' + register: result + check_mode: true
+ + - name: assert creating a new key pair + assert: + that: + - result is changed
+ + - name: test creating a new key pair + ec2_key: + name: '{{ ec2_key_name }}' + state: present + tags: + snake_case: 'a_snake_case_value' + CamelCase: 'CamelCaseValue' + "spaced key": 'Spaced value' + register: result
+ + - name: assert creating a new key pair + assert: + that: + - result is changed + - '"key" in result' + - '"name" in result.key' + - '"fingerprint" in result.key' + - '"private_key" in result.key' + - '"id" in result.key' + - '"tags" in result.key' + - result.key.name == ec2_key_name + - result.key.id.startswith('key-') + - '"snake_case" in result.key.tags' + - result.key.tags['snake_case'] == 'a_snake_case_value' + - '"CamelCase" in result.key.tags' + - result.key.tags['CamelCase'] == 'CamelCaseValue' + - '"spaced key" in result.key.tags' + - result.key.tags['spaced key'] == 'Spaced value'
+ + - set_fact: + key_id_1: '{{ result.key.id }}'
+ + - name: 'test re-"creating" the same key (check_mode)' + ec2_key: + name: '{{ ec2_key_name }}' + state: present + tags: + snake_case: 'a_snake_case_value' + CamelCase: 'CamelCaseValue' + "spaced key": 'Spaced value' + register: result + check_mode: true
+ + - name: assert re-creating the same key + assert: + that: + - result is not changed
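A detail the assertions above and below depend on: result.key.private_key is only returned when the pair is first generated; idempotent re-runs omit it. If the key material has to be kept, it must be captured on that first run. A minimal sketch, with a hypothetical key name and destination path:

    - name: Create a key pair
      ec2_key:
        name: demo-key   # hypothetical
      register: keypair
      no_log: true

    - name: Persist the one-time private key
      copy:
        content: "{{ keypair.key.private_key }}"
        dest: /tmp/demo-key.pem   # hypothetical path
        mode: '0600'
      when: keypair.key.private_key is defined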
+ + - name: 'test re-"creating" the same key' + ec2_key: + name: '{{ ec2_key_name }}' + state: present + tags: + snake_case: 'a_snake_case_value' + CamelCase: 'CamelCaseValue' + "spaced key": 'Spaced value' + register: result + + # ============================================================ + - name: test updating tags without purge (check mode) + ec2_key: + name: '{{ ec2_key_name }}' + state: present + tags: + newKey: 'Another value' + purge_tags: false + register: result + check_mode: true + + - name: assert updated tags + assert: + that: + - result is changed + + - name: test updating tags without purge + ec2_key: + name: '{{ ec2_key_name }}' + state: present + tags: + newKey: 'Another value' + purge_tags: false + register: result + + - name: assert updated tags + assert: + that: + - result is changed + - '"key" in result' + - '"name" in result.key' + - '"fingerprint" in result.key' + - '"private_key" not in result.key' + - '"id" in result.key' + - result.key.id == key_id_1 + - '"tags" in result.key' + - result.key.name == ec2_key_name + - '"snake_case" in result.key.tags' + - result.key.tags['snake_case'] == 'a_snake_case_value' + - '"CamelCase" in result.key.tags' + - result.key.tags['CamelCase'] == 'CamelCaseValue' + - '"spaced key" in result.key.tags' + - result.key.tags['spaced key'] == 'Spaced value' + - '"newKey" in result.key.tags' + - result.key.tags['newKey'] == 'Another value' + + - name: test updating tags without purge - idempotency (check mode) + ec2_key: + name: '{{ ec2_key_name }}' + state: present + tags: + newKey: 'Another value' + purge_tags: false + register: result + check_mode: true + + - name: assert updated tags + assert: + that: + - result is not changed + + - name: test updating tags without purge - idempotency + ec2_key: + name: '{{ ec2_key_name }}' + state: present + tags: + newKey: 'Another value' + purge_tags: false + register: result + + - name: assert updated tags + assert: + that: + - result is not changed + - '"key" in result' + - '"name" in result.key' + - '"fingerprint" in result.key' + - '"private_key" not in result.key' + - '"id" in result.key' + - '"tags" in result.key' + - result.key.name == ec2_key_name + - result.key.id == key_id_1 + - '"snake_case" in result.key.tags' + - result.key.tags['snake_case'] == 'a_snake_case_value' + - '"CamelCase" in result.key.tags' + - result.key.tags['CamelCase'] == 'CamelCaseValue' + - '"spaced key" in result.key.tags' + - result.key.tags['spaced key'] == 'Spaced value' + - '"newKey" in result.key.tags' + - result.key.tags['newKey'] == 'Another value' + + # ============================================================ + - name: test updating tags with purge (check mode) + ec2_key: + name: '{{ ec2_key_name }}' + state: present + tags: + newKey: 'Another value' + purge_tags: true + register: result + check_mode: true + + - name: assert updated tags + assert: + that: + - result is changed + + - name: test updating tags with purge + ec2_key: + name: '{{ ec2_key_name }}' + state: present + tags: + newKey: 'Another value' + purge_tags: true + register: result + + - name: assert updated tags + assert: + that: + - result is changed + - '"key" in result' + - '"name" in result.key' + - '"fingerprint" in result.key' + - '"private_key" not in result.key' + - '"id" in result.key' + - result.key.id == key_id_1 + - '"tags" in result.key' + - result.key.name == ec2_key_name + - '"snake_case" not in result.key.tags' + - '"CamelCase" not in result.key.tags' + - '"spaced key" not in result.key.tags' + - '"newKey" in 
result.key.tags' + - result.key.tags['newKey'] == 'Another value' + + - name: test updating tags with purge - idempotency (check mode) + ec2_key: + name: '{{ ec2_key_name }}' + state: present + tags: + newKey: 'Another value' + purge_tags: true + register: result + check_mode: true + + - name: assert updated tags + assert: + that: + - result is not changed + + - name: test updating tags with purge - idempotency + ec2_key: + name: '{{ ec2_key_name }}' + state: present + tags: + newKey: 'Another value' + purge_tags: true + register: result + + - name: assert updated tags + assert: + that: + - result is not changed + - '"key" in result' + - '"name" in result.key' + - '"fingerprint" in result.key' + - '"private_key" not in result.key' + - '"id" in result.key' + - '"tags" in result.key' + - result.key.name == ec2_key_name + - result.key.id == key_id_1 + - '"snake_case" not in result.key.tags' + - '"CamelCase" not in result.key.tags' + - '"spaced key" not in result.key.tags' + - '"newKey" in result.key.tags' + - result.key.tags['newKey'] == 'Another value' + + # ============================================================ + - name: test removing an existent key (check mode) + ec2_key: + name: '{{ ec2_key_name }}' + state: absent + register: result + check_mode: true + + - name: assert removing an existent key + assert: + that: + - result is changed + + - name: test removing an existent key + ec2_key: + name: '{{ ec2_key_name }}' + state: absent + register: result + + - name: assert removing an existent key + assert: + that: + - result is changed + - '"key" in result' + - result.key == None + + # ============================================================ + - name: test state=present with key_material + ec2_key: + name: '{{ ec2_key_name }}' + key_material: '{{ key_material }}' + state: present + register: result + + - name: assert state=present with key_material + assert: + that: + - 'result.changed == True' + - '"key" in result' + - '"name" in result.key' + - '"fingerprint" in result.key' + - '"private_key" not in result.key' + - '"id" in result.key' + - '"tags" in result.key' + - 'result.key.name == "{{ec2_key_name}}"' + - 'result.key.fingerprint == "{{fingerprint}}"' + + # ============================================================ + - name: test state=present with key_material (idempotency) + ec2_key: + name: '{{ ec2_key_name }}' + key_material: '{{ key_material }}' + state: present + register: result + + - name: assert state=present with key_material + assert: + that: + - result is not changed + - '"key" in result' + - '"name" in result.key' + - '"fingerprint" in result.key' + - '"private_key" not in result.key' + - '"id" in result.key' + - '"tags" in result.key' + - 'result.key.name == "{{ec2_key_name}}"' + - 'result.key.fingerprint == "{{fingerprint}}"' + - 'result.msg == "key pair already exists"' + + # ============================================================ + + - name: test force=no with another_key_material (expect changed=false) + ec2_key: + name: '{{ ec2_key_name }}' + key_material: '{{ another_key_material }}' + force: no + register: result + + - name: assert force=no with another_key_material (expect changed=false) + assert: + that: + - 'not result.changed' + - 'result.key.fingerprint == "{{ fingerprint }}"' + + # ============================================================ + + - name: test updating a key pair using another_key_material (expect changed=True) + ec2_key: + name: '{{ ec2_key_name }}' + key_material: '{{ another_key_material }}' + register: result + + - name: 
assert updating a key pair using another_key_material (expect changed=True) + assert: + that: + - 'result.changed' + - result.key.fingerprint != fingerprint
+ + # ============================================================ + - name: test state=absent (expect changed=true) + ec2_key: + name: '{{ ec2_key_name }}' + state: absent + register: result
+ + - name: assert state=absent with key_material (expect changed=true) + assert: + that: + - 'result.changed' + - '"key" in result' + - 'result.key == None'
+ + # ============================================================ + - name: test create ED25519 key pair type with botocore < 1.21.23 + ec2_key: + name: '{{ ec2_key_name }}' + key_type: ed25519 + ignore_errors: true + register: result
+ + - name: assert that task failed + assert: + that: + - 'result.failed' + - '"Failed to import the required Python library (botocore>=1.21.23)" in result.msg' + - '"This is required to set the key_type for a keypair" in result.msg'
+ + - name: test create ED25519 key pair type + ec2_key: + name: '{{ ec2_key_name }}' + key_type: ed25519 + register: result + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
+ + - name: assert that task succeeded + assert: + that: + - 'result.changed' + - 'result.key.type == "ed25519"'
+ + - name: Update key pair type from ED25519 to RSA + ec2_key: + name: '{{ ec2_key_name }}' + key_type: rsa + register: result + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
+ + - name: assert that task succeeded + assert: + that: + - 'result.changed' + - 'result.key.type == "rsa"'
+ + always:
+ + # ============================================================ + - name: Always delete the key we might create + ec2_key: + name: '{{ ec2_key_name }}' + state: absent diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/aliases new file mode 100644 index 000000000..65a419c87 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/aliases @@ -0,0 +1,5 @@ +# very dependent on how quickly the instance comes up, varies between 5m and 10m +time=10m + +non_local +cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/meta/main.yml new file mode 100644 index 000000000..445013b49 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/meta/main.yml @@ -0,0 +1,7 @@ +dependencies: + - setup_ec2_facts + - setup_sshkey + # required for run_instances with MetadataOptions.InstanceMetadataTags + - role: setup_botocore_pip + vars: + botocore_version: '1.23.30' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/setup.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/setup.yml new file mode 100644 index 000000000..11c623a33 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/setup.yml @@ -0,0 +1,182 @@ +--- +- module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}"
+ + hosts: localhost
+ + collections: + - amazon.aws + - community.aws
+ + vars: + vpc_name: '{{ resource_prefix }}-vpc' + vpc_seed: '{{ resource_prefix }}' + vpc_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.0.0/16' + subnet_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.32.0/24'
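The vars above derive the "random" CIDRs with Jinja's random filter seeded by resource_prefix: a given seed always yields the same number, so every play in one test run computes the same network while separate CI runs stay apart. A minimal sketch of the trick, with a hypothetical seed:

    vars:
      vpc_seed: demo-prefix   # hypothetical seed
      vpc_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.0.0/16'   # stable for a given seed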
+ + tasks: + - set_fact: + # As lookup plugins don't have access to module_defaults + connection_args: + region: "{{ aws_region }}" + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + aws_security_token: "{{ security_token | default(omit) }}"
+ + - include_role: + name: '../setup_sshkey' + - include_role: + name: '../setup_ec2_facts'
+ + - include_role: + name: '../setup_botocore_pip' + vars: + botocore_version: '1.23.30'
+ + - set_fact: + availability_zone: '{{ ec2_availability_zone_names[0] }}'
+ + # ============================================================ + - name: create a VPC + ec2_vpc_net: + name: "{{ resource_prefix }}-vpc" + state: present + cidr_block: "{{ vpc_cidr }}" + tags: + Name: "{{ resource_prefix }}-vpc" + Description: "Created by ansible-test" + register: vpc_result
+ + - set_fact: + vpc_id: "{{ vpc_result.vpc.id }}"
+ + - name: create an internet gateway + ec2_vpc_igw: + vpc_id: "{{ vpc_id }}" + state: present + tags: + "Name": "{{ resource_prefix }}" + register: igw_result
+ + - name: create a subnet + ec2_vpc_subnet: + cidr: "{{ vpc_cidr }}" + az: "{{ availability_zone }}" + vpc_id: "{{ vpc_id }}" + tags: + Name: "{{ resource_prefix }}-vpc" + Description: "Created by ansible-test" + state: present + register: vpc_subnet_result
+ + - name: create a public route table + ec2_vpc_route_table: + vpc_id: "{{ vpc_id }}" + tags: + "Name": "{{ resource_prefix }}" + subnets: + - "{{ vpc_subnet_result.subnet.id }}" + routes: + - dest: 0.0.0.0/0 + gateway_id: "{{ igw_result.gateway_id }}" + register: public_route_table
+ + - name: create a security group + ec2_group: + name: "{{ resource_prefix }}-sg" + description: "Created by {{ resource_prefix }}" + rules: + - proto: tcp + ports: 22 + cidr_ip: 0.0.0.0/0 + - proto: icmp + from_port: -1 + to_port: -1 + state: present + vpc_id: "{{ vpc_result.vpc.id }}" + register: vpc_sg_result
+ + - name: Create a key + ec2_key: + name: '{{ resource_prefix }}' + key_material: '{{ key_material }}' + state: present + register: ec2_key_result
+ + - name: Set facts to simplify use of extra resources + set_fact: + vpc_subnet_id: "{{ vpc_subnet_result.subnet.id }}" + vpc_sg_id: "{{ vpc_sg_result.group_id }}" + vpc_igw_id: "{{ igw_result.gateway_id }}" + vpc_route_table_id: "{{ public_route_table.route_table.id }}" + ec2_key_name: "{{ ec2_key_result.key.name }}"
+ + - name: Create an instance to test with + ec2_instance: + state: running + name: "{{ resource_prefix }}-ec2-metadata-facts" + image_id: "{{ ec2_ami_id }}" + vpc_subnet_id: "{{ vpc_subnet_id }}" + security_group: "{{ vpc_sg_id }}" + instance_type: t2.micro + key_name: "{{ ec2_key_name }}" + network: + assign_public_ip: true + delete_on_termination: true + metadata_options: + instance_metadata_tags: enabled + tags: + snake_case_key: a_snake_case_value + camelCaseKey: aCamelCaseValue + register: ec2_instance + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
+ + - set_fact: + ec2_ami_id_py2: "{{ lookup('aws_ssm', '/aws/service/ami-amazon-linux-latest/amzn-ami-hvm-x86_64-gp2', **connection_args) }}" + ec2_ami_ssh_user_py2: "ec2-user"
+ + - name: Create an instance to test with using Python 2 + ec2_instance: + state: running + name: "{{ resource_prefix }}-ec2-metadata-facts-py2" + image_id: "{{ ec2_ami_id_py2 }}" + vpc_subnet_id: "{{ vpc_subnet_id }}" + security_group: "{{ vpc_sg_id }}" +
instance_type: t2.micro + key_name: "{{ ec2_key_name }}" + network: + assign_public_ip: true + delete_on_termination: true + metadata_options: + instance_metadata_tags: enabled + tags: + snake_case_key: a_snake_case_value + camelCaseKey: aCamelCaseValue + wait: True + register: ec2_instance_py2 + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + + - set_fact: + ec2_instance_id: "{{ ec2_instance.instances[0].instance_id }}" + ec2_instance_id_py2: "{{ ec2_instance_py2.instances[0].instance_id }}" + + - name: Create inventory file + template: + src: ../templates/inventory.j2 + dest: ../inventory + + - wait_for: + port: 22 + host: '{{ ec2_instance.instances[0].public_ip_address }}' + timeout: 1200 + + - wait_for: + port: 22 + host: '{{ ec2_instance_py2.instances[0].public_ip_address }}' + timeout: 1200 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/teardown.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/teardown.yml new file mode 100644 index 000000000..11ddf88ef --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/teardown.yml @@ -0,0 +1,84 @@ +--- +- module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + + hosts: localhost + + collections: + - amazon.aws + - community.aws + + tasks: + # ============================================================ + + - name: terminate the instance + ec2_instance: + state: absent + instance_ids: + - "{{ ec2_instance_id }}" + - "{{ ec2_instance_id_py2 }}" + wait: True + ignore_errors: true + retries: 5 + register: remove + until: remove is successful + + - name: remove ssh key + ec2_key: + name: "{{ ec2_key_name }}" + state: absent + ignore_errors: true + + - name: remove the public route table + ec2_vpc_route_table: + vpc_id: "{{ vpc_id }}" + route_table_id: "{{ vpc_route_table_id }}" + lookup: id + state: absent + ignore_errors: true + retries: 5 + register: remove + until: remove is successful + + - name: remove the internet gateway + ec2_vpc_igw: + vpc_id: "{{ vpc_id }}" + state: absent + ignore_errors: true + retries: 5 + register: remove + until: remove is successful + + - name: remove the security group + ec2_group: + group_id: "{{ vpc_sg_id }}" + state: absent + ignore_errors: true + retries: 5 + register: remove + until: remove is successful + + - name: remove the subnet + ec2_vpc_subnet: + cidr: "{{ vpc_cidr }}" + az: "{{ availability_zone }}" + vpc_id: "{{ vpc_id }}" + state: absent + ignore_errors: true + retries: 5 + register: remove + until: remove is successful + + - name: remove the VPC + ec2_vpc_net: + name: "{{ resource_prefix }}-vpc" + cidr_block: "{{ vpc_cidr }}" + state: absent + ignore_errors: true + retries: 5 + register: remove + until: remove is successful diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/test_metadata.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/test_metadata.yml new file mode 100644 index 000000000..eba96f916 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/test_metadata.yml @@ -0,0 +1,18 @@ +--- +- hosts: testhost + tasks: + + - name: Wait for EC2 to be available + wait_for_connection: + + - amazon.aws.ec2_metadata_facts: + + - name: Assert initial 
metadata for the instance + assert: + that: + - ansible_ec2_ami_id == image_id + - ansible_ec2_placement_availability_zone == availability_zone + - ansible_ec2_security_groups == "{{ resource_prefix }}-sg" + - ansible_ec2_user_data == "None" + - ansible_ec2_instance_tags_keys is defined + - ansible_ec2_instance_tags_keys | length == 3 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/runme.sh b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/runme.sh new file mode 100755 index 000000000..6f2bc4660 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/runme.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +set -eux +export ANSIBLE_HOST_KEY_CHECKING=False +export ANSIBLE_SSH_ARGS='-o UserKnownHostsFile=/dev/null' + +CMD_ARGS=("$@") + +# Destroy Environment +cleanup() { + ansible-playbook playbooks/teardown.yml -i inventory -c local "${CMD_ARGS[@]}" +} +trap "cleanup" EXIT + +# create test resources and inventory +ansible-playbook playbooks/setup.yml -c local "$@" + +# test ec2_metadata_facts +ansible-playbook playbooks/test_metadata.yml -i inventory \ + -e local_tmp=/tmp/ansible-local \ + -e remote_tmp=/tmp/ansible-remote \ + "$@" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/templates/inventory.j2 b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/templates/inventory.j2 new file mode 100644 index 000000000..86ec99287 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/templates/inventory.j2 @@ -0,0 +1,34 @@ +[testhost_py3] +"{{ ec2_instance.instances[0].public_ip_address }}" + +[testhost_py2] +"{{ ec2_instance_py2.instances[0].public_ip_address }}" + +[testhost:children] +testhost_py3 +testhost_py2 + +[testhost:vars] +ansible_ssh_private_key_file="{{ sshkey }}" +ansible_python_interpreter=/usr/bin/env python + +[testhost_py3:vars] +ansible_user="{{ ec2_ami_ssh_user }}" +image_id="{{ ec2_ami_id }}" + +[testhost_py2:vars] +ansible_user="{{ ec2_ami_ssh_user_py2 }}" +image_id="{{ ec2_ami_id_py2 }}" + +[all:vars] +# Template vars that will be used in tests and teardown +vpc_id="{{ vpc_id }}" +vpc_subnet_id="{{ vpc_subnet_id }}" +vpc_sg_id="{{ vpc_sg_id }}" +vpc_cidr="{{ vpc_cidr }}" +vpc_igw="{{ vpc_igw_id }}" +vpc_route_table_id="{{ vpc_route_table_id }}" +ec2_key_name="{{ ec2_key_name }}" +availability_zone="{{ availability_zone }}" +ec2_instance_id="{{ ec2_instance_id }}" +ec2_instance_id_py2="{{ ec2_instance_id_py2 }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/aliases new file mode 100644 index 000000000..115fded97 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/aliases @@ -0,0 +1,6 @@ +# duration: 15 +slow + +cloud/aws + +ec2_security_group_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/defaults/main.yml new file mode 100644 index 000000000..f17a67a51 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/defaults/main.yml @@ -0,0 +1,7 @@ +--- +# defaults file for test_ec2_group +ec2_group_name: '{{resource_prefix}}' +ec2_group_description: 'Created by ansible integration tests' + +vpc_cidr: '10.{{
256 | random(seed=resource_prefix) }}.0.0/16' +subnet_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.1.0/24' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/meta/main.yml new file mode 100644 index 000000000..32cf5dda7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/data_validation.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/data_validation.yml new file mode 100644 index 000000000..c461287d9 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/data_validation.yml @@ -0,0 +1,33 @@ +--- +- block: + - name: Create a group with only the default rule + ec2_group: + name: '{{ec2_group_name}}-input-tests' + vpc_id: '{{ vpc_result.vpc.id }}' + description: '{{ec2_group_description}}' + + - name: Run through some common weird port specs + ec2_group: + name: '{{ec2_group_name}}-input-tests' + description: '{{ec2_group_description}}' + rules: + - "{{ item }}" + with_items: + - proto: tcp + from_port: "8182" + to_port: 8182 + cidr_ipv6: "fc00:ff9b::/96" + rule_desc: Mixed string and non-string ports + - proto: tcp + ports: + - "9000" + - 9001 + - 9002-9005 + cidr_ip: "10.2.3.0/24" + always: + - name: tidy up input testing group + ec2_group: + name: '{{ec2_group_name}}-input-tests' + vpc_id: '{{ vpc_result.vpc.id }}' + state: absent + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/diff_mode.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/diff_mode.yml new file mode 100644 index 000000000..e687bad23 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/diff_mode.yml @@ -0,0 +1,167 @@ +--- + # ============================================================ + + - name: create a group with a rule (CHECK MODE + DIFF) + ec2_group: + name: '{{ ec2_group_name }}' + description: '{{ ec2_group_description }}' + state: present + rules: + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: 0.0.0.0/0 + rules_egress: + - proto: all + cidr_ip: 0.0.0.0/0 + register: check_mode_result + check_mode: true + diff: true + + - assert: + that: + - check_mode_result.changed + + - name: create a group with a rule (DIFF) + ec2_group: + name: '{{ ec2_group_name }}' + description: '{{ ec2_group_description }}' + state: present + rules: + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: 0.0.0.0/0 + rules_egress: + - proto: all + cidr_ip: 0.0.0.0/0 + register: result + diff: true + + - assert: + that: + - result.changed + - result.diff.0.after.ip_permissions == check_mode_result.diff.0.after.ip_permissions + - result.diff.0.after.ip_permissions_egress == check_mode_result.diff.0.after.ip_permissions_egress + + - name: add rules to make sorting occur (CHECK MODE + DIFF) + ec2_group: + name: '{{ ec2_group_name }}' + description: '{{ ec2_group_description }}' + state: present + rules: + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: 0.0.0.0/0 + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: 172.16.0.0/12 + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: 10.0.0.0/8 + rules_egress: + - proto: all + cidr_ip: 0.0.0.0/0 + register: check_mode_result + 
check_mode: true + diff: true + + - assert: + that: + - check_mode_result.changed + + - name: add rules in a different order to test sorting consistency (DIFF) + ec2_group: + name: '{{ ec2_group_name }}' + description: '{{ ec2_group_description }}' + state: present + rules: + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: 172.16.0.0/12 + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: 0.0.0.0/0 + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: 10.0.0.0/8 + rules_egress: + - proto: all + cidr_ip: 0.0.0.0/0 + register: result + diff: true + + - assert: + that: + - result.changed + - result.diff.0.after.ip_permissions == check_mode_result.diff.0.after.ip_permissions + - result.diff.0.after.ip_permissions_egress == check_mode_result.diff.0.after.ip_permissions_egress + + - name: purge rules (CHECK MODE + DIFF) + ec2_group: + name: '{{ ec2_group_name }}' + description: '{{ ec2_group_description }}' + state: present + rules: + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: 0.0.0.0/0 + rules_egress: [] + register: check_mode_result + check_mode: true + diff: true + + - assert: + that: + - check_mode_result.changed + + - name: purge rules (DIFF) + ec2_group: + name: '{{ ec2_group_name }}' + description: '{{ ec2_group_description }}' + state: present + rules: + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: 0.0.0.0/0 + rules_egress: [] + register: result + diff: true + + - assert: + that: + - result.changed + - result.diff.0.after.ip_permissions == check_mode_result.diff.0.after.ip_permissions + - result.diff.0.after.ip_permissions_egress == check_mode_result.diff.0.after.ip_permissions_egress + + - name: delete the security group (CHECK MODE + DIFF) + ec2_group: + name: '{{ ec2_group_name }}' + state: absent + register: check_mode_result + diff: true + check_mode: true + + - assert: + that: + - check_mode_result.changed + + - name: delete the security group (DIFF) + ec2_group: + name: '{{ ec2_group_name }}' + state: absent + register: result + diff: true + + - assert: + that: + - result.changed + - not result.diff.0.after and not check_mode_result.diff.0.after diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/egress_tests.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/egress_tests.yml new file mode 100644 index 000000000..5635f4434 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/egress_tests.yml @@ -0,0 +1,177 @@ +--- +- block: + - name: Create a group with only the default rule + ec2_group: + name: '{{ec2_group_name}}-egress-tests' + vpc_id: '{{ vpc_result.vpc.id }}' + description: '{{ec2_group_description}}' + state: present + register: result + + - name: assert default rule is in place (expected changed=true) + assert: + that: + - result is changed + - result.ip_permissions|length == 0 + - result.ip_permissions_egress|length == 1 + - result.ip_permissions_egress[0].ip_ranges[0].cidr_ip == '0.0.0.0/0' + + - name: Create a group with only the default rule + ec2_group: + name: '{{ec2_group_name}}-egress-tests' + vpc_id: '{{ vpc_result.vpc.id }}' + description: '{{ec2_group_description}}' + purge_rules_egress: false + state: present + register: result + + - name: assert default rule is not purged (expected changed=false) + assert: + that: + - result is not changed + - result.ip_permissions|length == 0 + - result.ip_permissions_egress|length == 1 + - result.ip_permissions_egress[0].ip_ranges[0].cidr_ip == '0.0.0.0/0' + + - name: 
Pass empty egress rules without purging, should leave default rule in place + ec2_group: + name: '{{ec2_group_name}}-egress-tests' + description: '{{ec2_group_description}}' + vpc_id: '{{ vpc_result.vpc.id }}' + purge_rules_egress: false + rules_egress: [] + state: present + register: result
+ + - name: assert default rule is not purged (expected changed=false) + assert: + that: + - result is not changed + - result.ip_permissions|length == 0 + result.ip_permissions_egress|length == 1 + - result.ip_permissions_egress[0].ip_ranges[0].cidr_ip == '0.0.0.0/0'
+ + - name: Purge rules, including the default + ec2_group: + name: '{{ec2_group_name}}-egress-tests' + description: '{{ec2_group_description}}' + vpc_id: '{{ vpc_result.vpc.id }}' + purge_rules_egress: true + rules_egress: [] + state: present + register: result
+ + - name: assert all rules, including the default, were purged (expected changed=true) + assert: + that: + - result is changed + - result.ip_permissions|length == 0 + - result.ip_permissions_egress|length == 0
+ + - name: Add a custom egress rule + ec2_group: + name: '{{ec2_group_name}}-egress-tests' + description: '{{ec2_group_description}}' + vpc_id: '{{ vpc_result.vpc.id }}' + rules_egress: + - proto: tcp + ports: + - 1212 + cidr_ip: 10.2.1.2/32 + state: present + register: result
+ + - name: assert first rule is here + assert: + that: + - result.ip_permissions_egress|length == 1
+ + - name: Add a second custom egress rule + ec2_group: + name: '{{ec2_group_name}}-egress-tests' + description: '{{ec2_group_description}}' + purge_rules_egress: false + vpc_id: '{{ vpc_result.vpc.id }}' + rules_egress: + - proto: tcp + ports: + - 2323 + cidr_ip: 10.3.2.3/32 + state: present + register: result
+ + - name: assert the first rule is not purged + assert: + that: + - result.ip_permissions_egress|length == 2
+ + - name: Purge the second rule (CHECK MODE) (DIFF MODE) + ec2_group: + name: '{{ec2_group_name}}-egress-tests' + description: '{{ec2_group_description}}' + vpc_id: '{{ vpc_result.vpc.id }}' + rules_egress: + - proto: tcp + ports: + - 1212 + cidr_ip: 10.2.1.2/32 + state: present + register: result + check_mode: True + diff: True
+ + - name: assert first rule will be left + assert: + that: + - result.changed + - result.diff.0.after.ip_permissions_egress|length == 1 + - result.diff.0.after.ip_permissions_egress[0].ip_ranges[0].cidr_ip == '10.2.1.2/32'
+ + - name: Purge the second rule + ec2_group: + name: '{{ec2_group_name}}-egress-tests' + description: '{{ec2_group_description}}' + vpc_id: '{{ vpc_result.vpc.id }}' + rules_egress: + - proto: tcp + ports: + - 1212 + cidr_ip: 10.2.1.2/32 + state: present + register: result
+ + - name: assert first rule is here + assert: + that: + - result.ip_permissions_egress|length == 1 + - result.ip_permissions_egress[0].ip_ranges[0].cidr_ip == '10.2.1.2/32'
+ + - name: add a rule for all TCP ports + ec2_group: + name: '{{ec2_group_name}}-egress-tests' + description: '{{ec2_group_description}}' + rules_egress: + - proto: tcp + ports: 0-65535 + cidr_ip: 0.0.0.0/0 + state: present + vpc_id: '{{ vpc_result.vpc.id }}' + register: result
+ + - name: Re-add the default rule + ec2_group: + name: '{{ec2_group_name}}-egress-tests' + description: '{{ec2_group_description}}' + rules_egress: + - proto: -1 + cidr_ip: 0.0.0.0/0 + state: present + vpc_id: '{{ vpc_result.vpc.id }}' + register: result + always: + - name: tidy up egress rule test security group + ec2_group: + name: '{{ec2_group_name}}-egress-tests' + state: absent + vpc_id: '{{ vpc_result.vpc.id }}' + ignore_errors:
yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/group_info.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/group_info.yml new file mode 100644 index 000000000..86c8a5460 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/group_info.yml @@ -0,0 +1,96 @@ +--- + +# file for testing the ec2_group_info module + +- block: + # ======================== Setup ===================================== + - name: Create a group for testing group info retrieval below + ec2_group: + name: '{{ ec2_group_name }}-info-1' + vpc_id: '{{ vpc_result.vpc.id }}' + description: '{{ ec2_group_description }}' + rules: + - proto: tcp + ports: + - 90 + cidr_ip: 10.2.2.2/32 + tags: + test: '{{ resource_prefix }}_ec2_group_info_module' + register: group_info_test_setup + + - name: Create another group for testing group info retrieval below + ec2_group: + name: '{{ ec2_group_name }}-info-2' + vpc_id: '{{ vpc_result.vpc.id }}' + description: '{{ ec2_group_description }}' + rules: + - proto: tcp + ports: + - 91 + cidr_ip: 10.2.2.2/32 + + # ========================= ec2_group_info tests ==================== + + - name: Retrieve security group info based on SG name + ec2_group_info: + filters: + group-name: '{{ ec2_group_name }}-info-2' + register: result_1 + + - name: Assert results found + assert: + that: + - result_1.security_groups is defined + - (result_1.security_groups|first).group_name == '{{ ec2_group_name }}-info-2' + + - name: Retrieve security group info based on SG VPC + ec2_group_info: + filters: + vpc-id: '{{ vpc_result.vpc.id }}' + register: result_2 + + - name: Assert results found + assert: + that: + - result_2.security_groups is defined + - (result_2.security_groups|first).vpc_id == vpc_result.vpc.id + - (result_2.security_groups|length) > 2 + + - name: Retrieve security group info based on SG tags + ec2_group_info: + filters: + "tag:test": "{{ resource_prefix }}_ec2_group_info_module" + register: result_3 + + - name: Assert results found + assert: + that: + - result_3.security_groups is defined + - (result_3.security_groups|first).group_id == group_info_test_setup.group_id + + - name: Retrieve security group info based on SG ID + ec2_group_info: + filters: + group-id: '{{ group_info_test_setup.group_id }}' + register: result_4 + + - name: Assert correct result found + assert: + that: + - result_4.security_groups is defined + - (result_4.security_groups|first).group_id == group_info_test_setup.group_id + - (result_4.security_groups|length) == 1 + + always: + # ========================= Cleanup ================================= + - name: tidy up test security group 1 + ec2_group: + name: '{{ ec2_group_name }}-info-1' + state: absent + ignore_errors: yes + + - name: tidy up test security group 2 + ec2_group: + name: '{{ ec2_group_name }}-info-2' + state: absent + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/icmp_verbs.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/icmp_verbs.yml new file mode 100644 index 000000000..a4f1d3947 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/icmp_verbs.yml @@ -0,0 +1,221 @@ +--- +- block: + # ============================================================ + - name: Create simple rule using icmp verbs + ec2_group: + name: '{{ec2_group_name}}-icmp-1' + vpc_id: '{{ vpc_result.vpc.id 
}}' + description: '{{ec2_group_description}}' + rules: + - proto: "icmp" + icmp_type: 3 + icmp_code: 8 + cidr_ip: + - 10.0.0.0/8 + - 172.16.40.10/32 + state: present + register: result + + - name: Retrieve security group info + ec2_group_info: + filters: + group-name: '{{ ec2_group_name }}-icmp-1' + register: result_1 + + - assert: + that: + - result is changed + - result_1.security_groups is defined + - (result_1.security_groups|first).group_name == '{{ ec2_group_name }}-icmp-1' + - (result_1.security_groups|first).ip_permissions[0].ip_protocol == "icmp" + + - name: Create ipv6 rule using icmp verbs + ec2_group: + name: '{{ec2_group_name}}-icmp-2' + vpc_id: '{{ vpc_result.vpc.id }}' + description: '{{ec2_group_description}}' + rules: + - proto: "icmpv6" + icmp_type: 1 + icmp_code: 4 + cidr_ipv6: "64:ff9b::/96" + state: present + register: result + + - name: Retrieve security group info + ec2_group_info: + filters: + group-name: '{{ ec2_group_name }}-icmp-2' + register: result_1 + + - assert: + that: + - result is changed + - result_1.security_groups is defined + - (result_1.security_groups|first).group_name == '{{ ec2_group_name }}-icmp-2' + - (result_1.security_groups|first).ip_permissions[0].ip_protocol == "icmpv6" + + + - name: Create rule using security group referencing + ec2_group: + name: '{{ec2_group_name}}-icmp-3' + vpc_id: '{{ vpc_result.vpc.id }}' + description: '{{ec2_group_description}}' + rules: + - proto: "icmp" + icmp_type: 5 + icmp_code: 1 + group_name: '{{ec2_group_name}}-auto-create-2' + group_desc: "sg-group-referencing" + state: present + register: result + + - name: Retrieve security group info + ec2_group_info: + filters: + group-name: '{{ ec2_group_name }}-icmp-3' + register: result_1 + + - assert: + that: + - result is changed + - (result_1.security_groups | first).ip_permissions[0].user_id_group_pairs is defined + + - name: Create list rule using 0 as icmp_type + ec2_group: + name: '{{ec2_group_name}}-icmp-4' + vpc_id: '{{ vpc_result.vpc.id }}' + description: '{{ec2_group_description}}' + rules: + - proto: icmp + icmp_type: 0 + icmp_code: 1 + cidr_ip: + - 10.0.0.0/8 + - 172.16.40.10/32 + - proto: "tcp" + from_port: 80 + to_port: 80 + cidr_ip: 172.16.40.10/32 + state: present + register: result + + - name: Retrieve security group info + ec2_group_info: + filters: + group-name: '{{ ec2_group_name }}-icmp-4' + register: result_1 + + - assert: + that: + - result is changed + - (result_1.security_groups | first).ip_permissions | length == 2 + # ============================================================ + + # ============================================================ + - name: Create a group with non-ICMP protocol + ec2_group: + name: '{{ec2_group_name}}-icmp-4' + vpc_id: '{{ vpc_result.vpc.id }}' + description: '{{ec2_group_description}}' + rules: + - proto: "tcp" + icmp_type: 0 + icmp_code: 1 + cidr_ip: + - 10.0.0.0/8 + - 172.16.40.10/32 + state: present + register: result + ignore_errors: true + + - name: assert that group creation fails when proto != icmp with icmp parameters + assert: + that: + - result is failed + + - name: Create a group with conflicting parameters + ec2_group: + name: '{{ec2_group_name}}-icmp-4' + vpc_id: '{{ vpc_result.vpc.id }}' + description: '{{ec2_group_description}}' + rules: + - proto: icmp + from_port: 5 + to_port: 1 + icmp_type: 0 + icmp_code: 1 + cidr_ip: + - 10.0.0.0/8 + - 172.16.40.10/32 + state: present + register: result + ignore_errors: true + + - name: assert that group creation fails when using conflicting parameters + 
assert: + that: + - result is failed + + - name: Create a group with missing icmp parameters + ec2_group: + name: '{{ec2_group_name}}-icmp-4' + vpc_id: '{{ vpc_result.vpc.id }}' + description: '{{ec2_group_description}}' + rules: + - proto: "tcp" + icmp_type: 0 + cidr_ip: + - 10.0.0.0/8 + - 172.16.40.10/32 + state: present + register: result + ignore_errors: true + + - name: assert that group creation fails when missing icmp parameters + assert: + that: + - result is failed + + always: + - name: tidy up egress rule test security group rules + ec2_group: + name: '{{ec2_group_name}}-auto-create-2' + description: 'sg-group-referencing' + vpc_id: '{{ vpc_result.vpc.id }}' + rules: [] + rules_egress: [] + ignore_errors: yes + + - name: tidy up egress rule test security group rules + ec2_group: + name: '{{ec2_group_name}}-icmp-{{ item }}' + description: '{{ec2_group_description}}' + vpc_id: '{{ vpc_result.vpc.id }}' + rules: [] + rules_egress: [] + ignore_errors: yes + with_items: + - 1 + - 2 + - 3 + - 4 + + - name: tidy up egress rule test security group rules + ec2_group: + name: '{{ec2_group_name}}-auto-create-2' + state: absent + vpc_id: '{{ vpc_result.vpc.id }}' + ignore_errors: yes + + - name: tidy up egress rule test security group + ec2_group: + name: '{{ec2_group_name}}-icmp-{{ item }}' + state: absent + vpc_id: '{{ vpc_result.vpc.id }}' + ignore_errors: yes + with_items: + - 1 + - 2 + - 3 + - 4 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/ipv6_default_tests.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/ipv6_default_tests.yml new file mode 100644 index 000000000..2dea42a64 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/ipv6_default_tests.yml @@ -0,0 +1,90 @@ +--- +# ============================================================ +- name: test state=present for ipv6 (expected changed=true) (CHECK MODE) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + state: present + rules: + - proto: "tcp" + from_port: 8182 + to_port: 8182 + cidr_ipv6: "64:ff9b::/96" + check_mode: true + register: result + +- name: assert state=present (expected changed=true) + assert: + that: + - 'result.changed' + +# ============================================================ +- name: test state=present for ipv6 (expected changed=true) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + state: present + rules: + - proto: "tcp" + from_port: 8182 + to_port: 8182 + cidr_ipv6: "64:ff9b::/96" + register: result + +- name: assert state=present (expected changed=true) + assert: + that: + - 'result.changed' + - 'result.group_id.startswith("sg-")' + +# ============================================================ +- name: test rules_egress state=present for ipv6 (expected changed=true) (CHECK MODE) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + state: present + rules: + - proto: "tcp" + from_port: 8182 + to_port: 8182 + cidr_ipv6: "64:ff9b::/96" + rules_egress: + - proto: "tcp" + from_port: 8181 + to_port: 8181 + cidr_ipv6: "64:ff9b::/96" + check_mode: true + register: result + +- name: assert state=present (expected changed=true) + assert: + that: + - 'result.changed' + +# ============================================================ +- name: test rules_egress state=present for ipv6 (expected changed=true) + ec2_group: + name: '{{ec2_group_name}}' + description: 
'{{ec2_group_description}}' + state: present + rules: + - proto: "tcp" + from_port: 8182 + to_port: 8182 + cidr_ipv6: "64:ff9b::/96" + rules_egress: + - proto: "tcp" + from_port: 8181 + to_port: 8181 + cidr_ipv6: "64:ff9b::/96" + register: result + +- name: assert state=present (expected changed=true) + assert: + that: + - 'result.changed' + - 'result.group_id.startswith("sg-")' +- name: delete it + ec2_group: + name: '{{ec2_group_name}}' + state: absent diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/main.yml new file mode 100644 index 000000000..fa0ab9496 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/main.yml @@ -0,0 +1,1368 @@ +--- +- set_fact: + # lookup plugins don't have access to module_defaults + connection_args: + region: "{{ aws_region }}" + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + aws_security_token: "{{ security_token | default(omit) }}" + no_log: True + +# ============================================================ +- name: Run all tests + module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit)}}" + region: "{{ aws_region }}" + block: + - name: determine if there is a default VPC + set_fact: + defaultvpc: "{{ lookup('amazon.aws.aws_account_attribute', attribute='default-vpc', **connection_args) }}" + register: default_vpc + + - name: create a VPC + ec2_vpc_net: + name: "{{ resource_prefix }}-vpc" + state: present + cidr_block: "{{ vpc_cidr }}" + tags: + Name: "{{ resource_prefix }}-vpc" + Description: "Created by ansible-test" + register: vpc_result + #TODO(ryansb): Update CI for VPC peering permissions + #- include: ./multi_account.yml + - include: ./diff_mode.yml + - include: ./numeric_protos.yml + - include: ./rule_group_create.yml + - include: ./egress_tests.yml + - include: ./icmp_verbs.yml + - include: ./data_validation.yml + - include: ./multi_nested_target.yml + - include: ./group_info.yml + + # ============================================================ + - name: test state=absent (CHECK MODE) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + state: absent + check_mode: true + register: result + + - name: assert no changes would be made + assert: + that: + - not result.changed + + # =========================================================== + - name: test state=absent + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + state: absent + register: result + + # ============================================================ + - name: test state=present (expected changed=true) (CHECK MODE) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + state: present + check_mode: true + register: result + + - name: assert state=present (expected changed=true) + assert: + that: + - 'result.changed' + + # ============================================================ + - name: test state=present (expected changed=true) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + state: present + register: result + + - name: assert state=present (expected changed=true) + assert: + that: + - 'result.changed' + - 'result.group_id.startswith("sg-")' + + # ============================================================ + - 
name: test state=present different description (expected changed=false) (CHECK MODE) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}CHANGED' + state: present + check_mode: true + register: result + + - name: assert state=present (expected changed=false) + assert: + that: + - 'not result.changed' + + # ============================================================ + - name: test state=present different description (expected changed=false) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}CHANGED' + state: present + ignore_errors: true + register: result + + - name: assert state=present (expected changed=false) + assert: + that: + - 'not result.changed' + - 'result.group_id.startswith("sg-")' + + # ============================================================ + - name: test state=present (expected changed=false) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + state: present + register: result + + - name: assert state=present (expected changed=false) + assert: + that: + - 'not result.changed' + - 'result.group_id.startswith("sg-")' + + # ============================================================ + - name: tests IPv6 with the default VPC + include: ./ipv6_default_tests.yml + when: default_vpc + + - name: test IPv6 with a specified VPC + block: + + # ============================================================ + - name: test state=present (expected changed=true) (CHECK MODE) + ec2_group: + name: '{{ ec2_group_name }}-2' + description: '{{ ec2_group_description }}-2' + state: present + vpc_id: '{{ vpc_result.vpc.id }}' + check_mode: true + register: result + + - name: assert state=present (expected changed=true) + assert: + that: + - 'result.changed' + + # ============================================================ + - name: test state=present (expected changed=true) + ec2_group: + name: '{{ ec2_group_name }}-2' + description: '{{ ec2_group_description }}-2' + state: present + vpc_id: '{{ vpc_result.vpc.id }}' + register: result + + - name: assert state=present (expected changed=true) + assert: + that: + - 'result.changed' + - 'result.group_id.startswith("sg-")' + + # ============================================================ + - name: test state=present for ipv6 (expected changed=true) (CHECK MODE) + ec2_group: + name: '{{ ec2_group_name }}-2' + description: '{{ ec2_group_description }}-2' + state: present + vpc_id: '{{ vpc_result.vpc.id }}' + rules: + - proto: "tcp" + from_port: 8182 + to_port: 8182 + cidr_ipv6: "64:ff9b::/96" + check_mode: true + register: result + + - name: assert state=present (expected changed=true) + assert: + that: + - 'result.changed' + + # ============================================================ + - name: test state=present for ipv6 (expected changed=true) + ec2_group: + name: '{{ ec2_group_name }}-2' + description: '{{ ec2_group_description }}-2' + state: present + vpc_id: '{{ vpc_result.vpc.id }}' + rules: + - proto: "tcp" + from_port: 8182 + to_port: 8182 + cidr_ipv6: "64:ff9b::/96" + register: result + + - name: assert state=present (expected changed=true) + assert: + that: + - 'result.changed' + - 'result.group_id.startswith("sg-")' + + # ============================================================ + - name: test state=present for ipv6 (expected changed=false) (CHECK MODE) + ec2_group: + name: '{{ ec2_group_name }}-2' + description: '{{ ec2_group_description }}-2' + state: present + vpc_id: '{{ vpc_result.vpc.id }}' + rules: + - proto: "tcp" + 
from_port: 8182 + to_port: 8182 + cidr_ipv6: "64:ff9b::/96" + check_mode: true + register: result + + - name: assert nothing changed + assert: + that: + - 'not result.changed' + + # ============================================================ + - name: test state=present for ipv6 (expected changed=false) + ec2_group: + name: '{{ ec2_group_name }}-2' + description: '{{ ec2_group_description }}-2' + state: present + vpc_id: '{{ vpc_result.vpc.id }}' + rules: + - proto: "tcp" + from_port: 8182 + to_port: 8182 + cidr_ipv6: "64:ff9b::/96" + register: result + + - name: assert nothing changed + assert: + that: + - 'not result.changed' + + # ============================================================ + - name: test rules_egress state=present for ipv6 (expected changed=true) (CHECK MODE) + ec2_group: + name: '{{ ec2_group_name }}-2' + description: '{{ ec2_group_description }}-2' + state: present + vpc_id: '{{ vpc_result.vpc.id }}' + rules: + - proto: "tcp" + from_port: 8182 + to_port: 8182 + cidr_ipv6: "64:ff9b::/96" + rules_egress: + - proto: "tcp" + from_port: 8181 + to_port: 8181 + cidr_ipv6: "64:ff9b::/96" + check_mode: true + diff: true + register: result + + - name: assert state=present (expected changed=true) + assert: + that: + - 'result.changed' + - 'result.diff.0.before.ip_permissions == result.diff.0.after.ip_permissions' + - 'result.diff.0.before.ip_permissions_egress != result.diff.0.after.ip_permissions_egress' + + # ============================================================ + - name: test rules_egress state=present for ipv6 (expected changed=true) + ec2_group: + name: '{{ ec2_group_name }}-2' + description: '{{ ec2_group_description }}-2' + state: present + vpc_id: '{{ vpc_result.vpc.id }}' + rules: + - proto: "tcp" + from_port: 8182 + to_port: 8182 + cidr_ipv6: "64:ff9b::/96" + rules_egress: + - proto: "tcp" + from_port: 8181 + to_port: 8181 + cidr_ipv6: "64:ff9b::/96" + register: result + + - name: assert state=present (expected changed=true) + assert: + that: + - 'result.changed' + - 'result.group_id.startswith("sg-")' + + # ============================================================ + - name: test state=absent (expected changed=true) (CHECK MODE) + ec2_group: + name: '{{ ec2_group_name }}-2' + description: '{{ ec2_group_description }}-2' + state: absent + vpc_id: '{{ vpc_result.vpc.id }}' + check_mode: true + diff: true + register: result + + - name: assert group was removed + assert: + that: + - 'result.changed' + - 'not result.diff.0.after' + + # ============================================================ + - name: test state=absent (expected changed=true) + ec2_group: + name: '{{ ec2_group_name }}-2' + description: '{{ ec2_group_description }}-2' + state: absent + vpc_id: '{{ vpc_result.vpc.id }}' + register: result + + - name: assert group was removed + assert: + that: + - 'result.changed' + + # ============================================================ + - name: test state=present for ipv4 (expected changed=true) (CHECK MODE) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + rules: + - proto: "tcp" + from_port: 8182 + to_port: 8182 + cidr_ip: "10.1.1.1/32" + check_mode: true + register: result + + - name: assert state=present (expected changed=true) + assert: + that: + - 'result.changed' + + # ============================================================ + - name: test state=present for ipv4 (expected changed=true) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + rules: + - proto: "tcp" + 
from_port: 8182 + to_port: 8182 + cidr_ip: "10.1.1.1/32" + register: result + + - name: assert state=present (expected changed=true) + assert: + that: + - 'result.changed' + - 'result.group_id.startswith("sg-")' + - 'result.ip_permissions|length == 1' + - 'result.ip_permissions_egress|length == 1' + + # ============================================================ + - name: add same rule to the existing group (expected changed=false) (CHECK MODE) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + state: present + rules: + - proto: "tcp" + from_port: 8182 + to_port: 8182 + cidr_ip: "10.1.1.1/32" + check_mode: true + diff: true + register: check_result + + - assert: + that: + - not check_result.changed + - check_result.diff.0.before.ip_permissions.0 == check_result.diff.0.after.ip_permissions.0 + + # ============================================================ + - name: add same rule to the existing group (expected changed=false) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + state: present + rules: + - proto: "tcp" + from_port: 8182 + to_port: 8182 + cidr_ip: "10.1.1.1/32" + register: result + + - name: assert state=present (expected changed=false) + assert: + that: + - 'not result.changed' + - 'result.group_id.startswith("sg-")' + + - name: assert state=present (expected changed=false) + assert: + that: + - 'not check_result.changed' + + # ============================================================ + - name: add a rule that auto creates another security group (CHECK MODE) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + state: present + purge_rules: no + rules: + - proto: "tcp" + group_name: "{{ resource_prefix }} - Another security group" + group_desc: Another security group + ports: 7171 + check_mode: true + register: result + + - name: check that there are now two rules + assert: + that: + - result.changed + + # ============================================================ + - name: add a rule that auto creates another security group + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + state: present + purge_rules: no + rules: + - proto: "tcp" + group_name: "{{ resource_prefix }} - Another security group" + group_desc: Another security group + ports: 7171 + register: result + + - name: check that there are now two rules + assert: + that: + - result.changed + - result.warning is not defined + - result.ip_permissions|length == 2 + - result.ip_permissions[0].user_id_group_pairs or + result.ip_permissions[1].user_id_group_pairs + - 'result.ip_permissions_egress[0].ip_protocol == "-1"' + + # ============================================================ + - name: test ip rules convert port numbers from string to int (expected changed=true) (CHECK MODE) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + state: present + rules: + - proto: "tcp" + from_port: "8183" + to_port: "8183" + cidr_ip: "10.1.1.1/32" + rules_egress: + - proto: "tcp" + from_port: "8184" + to_port: "8184" + cidr_ip: "10.1.1.1/32" + check_mode: true + register: result + + - name: assert state=present (expected changed=true) + assert: + that: + - 'result.changed' + + # ============================================================ + - name: test ip rules convert port numbers from string to int (expected changed=true) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + state: present + rules: + - proto: "tcp" + 
from_port: "8183" + to_port: "8183" + cidr_ip: "10.1.1.1/32" + rules_egress: + - proto: "tcp" + from_port: "8184" + to_port: "8184" + cidr_ip: "10.1.1.1/32" + register: result + + - name: assert state=present (expected changed=true) + assert: + that: + - 'result.changed' + - 'result.group_id.startswith("sg-")' + - 'result.ip_permissions|length == 1' + - 'result.ip_permissions_egress[0].ip_protocol == "tcp"' + + + # ============================================================ + - name: test group rules convert port numbers from string to int (expected changed=true) (CHECK MODE) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + state: present + rules: + - proto: "tcp" + from_port: "8185" + to_port: "8185" + group_id: "{{result.group_id}}" + rules_egress: + - proto: "tcp" + from_port: "8186" + to_port: "8186" + group_id: "{{result.group_id}}" + check_mode: true + register: result + + - name: assert state=present (expected changed=true) + assert: + that: + - 'result.changed' + + # ============================================================ + - name: test group rules convert port numbers from string to int (expected changed=true) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + state: present + rules: + - proto: "tcp" + from_port: "8185" + to_port: "8185" + group_id: "{{result.group_id}}" + rules_egress: + - proto: "tcp" + from_port: "8186" + to_port: "8186" + group_id: "{{result.group_id}}" + register: result + + - name: assert state=present (expected changed=true) + assert: + that: + - 'result.changed' + - 'result.group_id.startswith("sg-")' + - result.warning is not defined + + # ============================================================ + - name: test adding a range of ports and ports given as strings (expected changed=true) (CHECK MODE) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + state: present + # set purge_rules to false so we don't get a false positive from previously added rules + purge_rules: false + rules: + - proto: "tcp" + ports: + - 8183-8190 + - '8192' + cidr_ip: 10.1.1.1/32 + check_mode: true + register: result + + - name: assert state=present (expected changed=true) + assert: + that: + - 'result.changed' + + # ============================================================ + - name: test adding a range of ports and ports given as strings (expected changed=true) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + state: present + # set purge_rules to false so we don't get a false positive from previously added rules + purge_rules: false + rules: + - proto: "tcp" + ports: + - 8183-8190 + - '8192' + cidr_ip: 10.1.1.1/32 + register: result + + - name: assert state=present (expected changed=true) + assert: + that: + - 'result.changed' + - 'result.group_id.startswith("sg-")' + + # ============================================================ + - name: test adding a rule with a IPv4 CIDR with host bits set (expected changed=true) (CHECK MODE) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + state: present + # set purge_rules to false so we don't get a false positive from previously added rules + purge_rules: false + rules: + - proto: "tcp" + ports: + - 8195 + cidr_ip: 10.0.0.1/8 + check_mode: true + register: result + + - name: assert state=present (expected changed=true) + assert: + that: + - 'result.changed' + + # ============================================================ + - name: test adding 
a rule with a IPv4 CIDR with host bits set (expected changed=true) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + state: present + # set purge_rules to false so we don't get a false positive from previously added rules + purge_rules: false + rules: + - proto: "tcp" + ports: + - 8195 + cidr_ip: 10.0.0.1/8 + register: result + + - name: assert state=present (expected changed=true) + assert: + that: + - 'result.changed' + - 'result.group_id.startswith("sg-")' + + # ============================================================ + - name: test adding the same rule with a IPv4 CIDR with host bits set (expected changed=false) (CHECK MODE) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + state: present + # set purge_rules to false so we don't get a false positive from previously added rules + purge_rules: false + rules: + - proto: "tcp" + ports: + - 8195 + cidr_ip: 10.0.0.1/8 + check_mode: true + register: check_result + + # ============================================================ + - name: test adding the same rule with a IPv4 CIDR with host bits set (expected changed=false and a warning) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + state: present + # set purge_rules to false so we don't get a false positive from previously added rules + purge_rules: false + rules: + - proto: "tcp" + ports: + - 8195 + cidr_ip: 10.0.0.1/8 + register: result + + - name: assert state=present (expected changed=false and a warning) + assert: + that: + - 'not check_result.changed' + + - name: assert state=present (expected changed=false and a warning) + assert: + that: + # No way to assert for warnings? + - 'not result.changed' + - 'result.group_id.startswith("sg-")' + + # ============================================================ + - name: test using the default VPC + block: + + - name: test adding a rule with a IPv6 CIDR with host bits set (expected changed=true) (CHECK MODE) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + state: present + # set purge_rules to false so we don't get a false positive from previously added rules + purge_rules: false + rules: + - proto: "tcp" + ports: + - 8196 + cidr_ipv6: '2001:db00::1/24' + check_mode: true + register: result + + - name: assert state=present (expected changed=true) + assert: + that: + - 'result.changed' + + # ============================================================ + - name: test adding a rule with a IPv6 CIDR with host bits set (expected changed=true) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + state: present + # set purge_rules to false so we don't get a false positive from previously added rules + purge_rules: false + rules: + - proto: "tcp" + ports: + - 8196 + cidr_ipv6: '2001:db00::1/24' + register: result + + - name: assert state=present (expected changed=true) + assert: + that: + - 'result.changed' + - 'result.group_id.startswith("sg-")' + + # ============================================================ + + - name: test adding a rule again with a IPv6 CIDR with host bits set (expected changed=false and a warning) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + state: present + # set purge_rules to false so we don't get a false positive from previously added rules + purge_rules: false + rules: + - proto: "tcp" + ports: + - 8196 + cidr_ipv6: '2001:db00::1/24' + register: result + + - name: assert state=present (expected 
changed=false and a warning) + assert: + that: + # No way to assert for warnings? + - 'not result.changed' + - 'result.group_id.startswith("sg-")' + + when: default_vpc + + # ============================================================ + - name: test state=absent (expected changed=true) (CHECK MODE) + ec2_group: + name: '{{ec2_group_name}}' + state: absent + check_mode: true + register: result + + - name: assert state=absent (expected changed=true) + assert: + that: + - 'result.changed' + + # ============================================================ + - name: test state=absent (expected changed=true) + ec2_group: + name: '{{ec2_group_name}}' + state: absent + register: result + + - name: assert state=absent (expected changed=true) + assert: + that: + - 'result.changed' + - 'not result.group_id' + + # ============================================================ + - name: create security group in the VPC (CHECK MODE) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + vpc_id: '{{ vpc_result.vpc.id }}' + state: present + rules: + - proto: "tcp" + from_port: 8182 + to_port: 8182 + cidr_ip: "10.1.1.1/32" + check_mode: true + register: result + + - name: assert state=present (expected changed=true) + assert: + that: + - 'result.changed' + + # ============================================================ + - name: create security group in the VPC + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + vpc_id: '{{ vpc_result.vpc.id }}' + state: present + rules: + - proto: "tcp" + from_port: 8182 + to_port: 8182 + cidr_ip: "10.1.1.1/32" + register: result + + - name: assert state=present (expected changed=true) + assert: + that: + - 'result.changed' + - 'result.vpc_id == vpc_result.vpc.id' + - 'result.group_id.startswith("sg-")' + + # ============================================================ + - name: test adding tags (expected changed=true) (CHECK MODE) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + vpc_id: '{{ vpc_result.vpc.id }}' + state: present + rules: + - proto: "tcp" + from_port: 8182 + to_port: 8182 + cidr_ip: "10.1.1.1/32" + tags: + tag1: test1 + tag2: test2 + check_mode: true + diff: true + register: result + + - name: assert that tags were added (expected changed=true) + assert: + that: + - 'result.changed' + - 'not result.diff.0.before.tags' + - 'result.diff.0.after.tags.tag1 == "test1"' + - 'result.diff.0.after.tags.tag2 == "test2"' + + # ============================================================ + - name: test adding tags (expected changed=true) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + vpc_id: '{{ vpc_result.vpc.id }}' + state: present + rules: + - proto: "tcp" + from_port: 8182 + to_port: 8182 + cidr_ip: "10.1.1.1/32" + tags: + tag1: test1 + tag2: test2 + register: result + + - name: assert that tags were added (expected changed=true) + assert: + that: + - 'result.changed' + - 'result.tags == {"tag1": "test1", "tag2": "test2"}' + + # ============================================================ + - name: test that tags are present (expected changed=False) (CHECK MODE) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + vpc_id: '{{ vpc_result.vpc.id }}' + state: present + purge_rules_egress: false + rules: + - proto: "tcp" + from_port: 8182 + to_port: 8182 + cidr_ip: "10.1.1.1/32" + tags: + tag1: test1 + tag2: test2 + check_mode: true + register: result + + - name: assert that tags were not 
changed (expected changed=False) + assert: + that: + - 'not result.changed' + + # ============================================================ + - name: test that tags are present (expected changed=False) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + vpc_id: '{{ vpc_result.vpc.id }}' + state: present + purge_rules_egress: false + rules: + - proto: "tcp" + from_port: 8182 + to_port: 8182 + cidr_ip: "10.1.1.1/32" + tags: + tag1: test1 + tag2: test2 + register: result + + - name: assert that tags were not changed (expected changed=False) + assert: + that: + - 'not result.changed' + - 'result.tags == {"tag1": "test1", "tag2": "test2"}' + + # ============================================================ + - name: test purging tags (expected changed=True) (CHECK MODE) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + vpc_id: '{{ vpc_result.vpc.id }}' + state: present + rules: + - proto: "tcp" + from_port: 8182 + to_port: 8182 + cidr_ip: "10.1.1.1/32" + tags: + tag1: test1 + check_mode: true + register: result + + - name: assert that tag2 was removed (expected changed=true) + assert: + that: + - 'result.changed' + + # ============================================================ + - name: test purging tags (expected changed=True) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + vpc_id: '{{ vpc_result.vpc.id }}' + state: present + rules: + - proto: "tcp" + from_port: 8182 + to_port: 8182 + cidr_ip: "10.1.1.1/32" + tags: + tag1: test1 + register: result + + - name: assert that tag2 was removed (expected changed=true) + assert: + that: + - 'result.changed' + - 'result.tags == {"tag1": "test1"}' + + # ============================================================ + + - name: assert that tags are left as-is if not specified (expected changed=False) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + vpc_id: '{{ vpc_result.vpc.id }}' + state: present + rules: + - proto: "tcp" + from_port: 8182 + to_port: 8182 + cidr_ip: "10.1.1.1/32" + register: result + + - name: assert that the tags stayed the same (expected changed=false) + assert: + that: + - 'not result.changed' + - 'result.tags == {"tag1": "test1"}' + + # ============================================================ + + - name: test purging all tags (expected changed=True) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + vpc_id: '{{ vpc_result.vpc.id }}' + state: present + rules: + - proto: "tcp" + from_port: 8182 + to_port: 8182 + cidr_ip: "10.1.1.1/32" + tags: {} + register: result + + - name: assert that tag1 was removed (expected changed=true) + assert: + that: + - 'result.changed' + - 'not result.tags' + + # ============================================================ + - name: test adding a rule and egress rule descriptions (expected changed=true) (CHECK MODE) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + vpc_id: '{{ vpc_result.vpc.id }}' + # purge the other rules so assertions work for the subsequent tests for rule descriptions + purge_rules_egress: true + purge_rules: true + state: present + rules: + - proto: "tcp" + ports: + - 8281 + cidr_ipv6: 1001:d00::/24 + rule_desc: ipv6 rule desc 1 + rules_egress: + - proto: "tcp" + ports: + - 8282 + cidr_ip: 10.2.2.2/32 + rule_desc: egress rule desc 1 + check_mode: true + register: result + + - name: assert that rule descriptions are created (expected changed=true) + assert: + 
that: + - 'result.changed' + + # ========================================================================================= + - name: add rules without descriptions ready for adding descriptions to existing rules + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + vpc_id: '{{ vpc_result.vpc.id }}' + # purge the other rules so assertions work for the subsequent tests for rule descriptions + purge_rules_egress: true + purge_rules: true + state: present + rules: + - proto: "tcp" + ports: + - 8281 + cidr_ipv6: 1001:d00::/24 + rules_egress: + - proto: "tcp" + ports: + - 8282 + cidr_ip: 10.2.2.2/32 + register: result + + # ============================================================ + - name: test adding a rule and egress rule descriptions (expected changed=true) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + vpc_id: '{{ vpc_result.vpc.id }}' + # purge the other rules so assertions work for the subsequent tests for rule descriptions + purge_rules_egress: true + purge_rules: true + state: present + rules: + - proto: "tcp" + ports: + - 8281 + cidr_ipv6: 1001:d00::/24 + rule_desc: ipv6 rule desc 1 + rules_egress: + - proto: "tcp" + ports: + - 8282 + cidr_ip: 10.2.2.2/32 + rule_desc: egress rule desc 1 + register: result + + - name: assert that rule descriptions are created (expected changed=true) + assert: + that: + - 'result.changed' + - 'result.ip_permissions[0].ipv6_ranges[0].description == "ipv6 rule desc 1"' + - 'result.ip_permissions_egress[0].ip_ranges[0].description == "egress rule desc 1"' + + # ============================================================ + - name: test modifying rule and egress rule descriptions (expected changed=true) (CHECK MODE) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + vpc_id: '{{ vpc_result.vpc.id }}' + purge_rules_egress: false + purge_rules: false + state: present + rules: + - proto: "tcp" + ports: + - 8281 + cidr_ipv6: 1001:d00::/24 + rule_desc: ipv6 rule desc 2 + rules_egress: + - proto: "tcp" + ports: + - 8282 + cidr_ip: 10.2.2.2/32 + rule_desc: egress rule desc 2 + check_mode: true + register: result + + - name: assert that rule descriptions were modified (expected changed=true) + assert: + that: + - 'result.ip_permissions | length > 0' + - 'result.changed' + + # ============================================================ + - name: test modifying rule and egress rule descriptions (expected changed=true) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + vpc_id: '{{ vpc_result.vpc.id }}' + purge_rules_egress: false + purge_rules: false + state: present + rules: + - proto: "tcp" + ports: + - 8281 + cidr_ipv6: 1001:d00::/24 + rule_desc: ipv6 rule desc 2 + rules_egress: + - proto: "tcp" + ports: + - 8282 + cidr_ip: 10.2.2.2/32 + rule_desc: egress rule desc 2 + register: result + + - name: assert that rule descriptions were modified (expected changed=true) + assert: + that: + - 'result.changed' + - 'result.ip_permissions[0].ipv6_ranges[0].description == "ipv6 rule desc 2"' + - 'result.ip_permissions_egress[0].ip_ranges[0].description == "egress rule desc 2"' + + # ============================================================ + + - name: test creating rule in default vpc with egress rule (expected changed=true) + ec2_group: + name: '{{ec2_group_name}}-default-vpc' + description: '{{ec2_group_description}} default VPC' + purge_rules_egress: true + state: present + rules: + - proto: "tcp" + ports: + - 8281 + cidr_ip: 
10.1.1.1/24 + rule_desc: ipv4 rule desc + rules_egress: + - proto: "tcp" + ports: + - 8282 + cidr_ip: 10.2.2.2/32 + rule_desc: egress rule desc 2 + register: result + + - name: assert that rule descriptions were modified (expected changed=true) + assert: + that: + - 'result.changed' + - 'result.ip_permissions_egress|length == 1' + + # ============================================================ + - name: test that keeping the same rule descriptions (expected changed=false) (CHECK MODE) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + vpc_id: '{{ vpc_result.vpc.id }}' + purge_rules_egress: false + purge_rules: false + state: present + rules: + - proto: "tcp" + ports: + - 8281 + cidr_ipv6: 1001:d00::/24 + rule_desc: ipv6 rule desc 2 + rules_egress: + - proto: "tcp" + ports: + - 8282 + cidr_ip: 10.2.2.2/32 + rule_desc: egress rule desc 2 + check_mode: true + register: result + + - name: assert that rule descriptions stayed the same (expected changed=false) + assert: + that: + - 'not result.changed' + + # ============================================================ + - name: test that keeping the same rule descriptions (expected changed=false) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + vpc_id: '{{ vpc_result.vpc.id }}' + purge_rules_egress: false + purge_rules: false + state: present + rules: + - proto: "tcp" + ports: + - 8281 + cidr_ipv6: 1001:d00::/24 + rule_desc: ipv6 rule desc 2 + rules_egress: + - proto: "tcp" + ports: + - 8282 + cidr_ip: 10.2.2.2/32 + rule_desc: egress rule desc 2 + register: result + + - name: assert that rule descriptions stayed the same (expected changed=false) + assert: + that: + - 'not result.changed' + - 'result.ip_permissions[0].ipv6_ranges[0].description == "ipv6 rule desc 2"' + - 'result.ip_permissions_egress[0].ip_ranges[0].description == "egress rule desc 2"' + + # ============================================================ + - name: test removing rule descriptions (expected changed=true) (CHECK MODE) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + vpc_id: '{{ vpc_result.vpc.id }}' + purge_rules_egress: false + purge_rules: false + state: present + rules: + - proto: "tcp" + ports: + - 8281 + cidr_ipv6: 1001:d00::/24 + rule_desc: + rules_egress: + - proto: "tcp" + ports: + - 8282 + cidr_ip: 10.2.2.2/32 + rule_desc: + check_mode: true + register: result + + - name: assert that rule descriptions were removed (expected changed=true) + assert: + that: + - 'result.changed' + + # ============================================================ + - name: test removing rule descriptions (expected changed=true) + ec2_group: + name: '{{ec2_group_name}}' + description: '{{ec2_group_description}}' + vpc_id: '{{ vpc_result.vpc.id }}' + purge_rules_egress: false + purge_rules: false + state: present + rules: + - proto: "tcp" + ports: + - 8281 + cidr_ipv6: 1001:d00::/24 + rule_desc: + rules_egress: + - proto: "tcp" + ports: + - 8282 + cidr_ip: 10.2.2.2/32 + rule_desc: + register: result + ignore_errors: true + + - name: assert that rule descriptions were removed + assert: + that: + - 'result.ip_permissions[0].ipv6_ranges[0].description is undefined' + - 'result.ip_permissions_egress[0].ip_ranges[0].description is undefined' + + # ============================================================ + + - name: test state=absent (expected changed=true) + ec2_group: + name: '{{ec2_group_name}}' + state: absent + register: result + + - name: assert state=absent (expected 
changed=true) + assert: + that: + - 'result.changed' + - 'not result.group_id' + always: + + # ============================================================ + # Describe state of remaining resources + + - name: Retrieve security group info based on SG VPC + ec2_group_info: + filters: + vpc-id: '{{ vpc_result.vpc.id }}' + register: remaining_groups + + - name: Retrieve subnet info based on SG VPC + ec2_vpc_subnet_info: + filters: + vpc-id: '{{ vpc_result.vpc.id }}' + register: remaining_subnets + + - name: Retrieve VPC info based on SG VPC + ec2_vpc_net_info: + vpc_ids: + - '{{ vpc_result.vpc.id }}' + register: remaining_vpc + + # ============================================================ + # Delete all remaining SGs + + - name: Delete rules from remaining SGs + ec2_group: + name: '{{ item.group_name }}' + group_id: '{{ item.group_id }}' + description: '{{ item.description }}' + rules: [] + rules_egress: [] + loop: '{{ remaining_groups.security_groups }}' + ignore_errors: yes + + - name: Delete remaining SGs + ec2_group: + state: absent + group_id: '{{ item.group_id }}' + loop: '{{ remaining_groups.security_groups }}' + ignore_errors: yes + + # ============================================================ + + - name: tidy up VPC + ec2_vpc_net: + name: "{{ resource_prefix }}-vpc" + state: absent + cidr_block: "{{ vpc_cidr }}" + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/multi_account.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/multi_account.yml new file mode 100644 index 000000000..675dfd933 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/multi_account.yml @@ -0,0 +1,124 @@ +- block: + - aws_caller_info: + register: caller_facts + - name: create a VPC + ec2_vpc_net: + name: "{{ resource_prefix }}-vpc-2" + state: present + cidr_block: "{{ vpc_cidr }}" + tags: + Description: "Created by ansible-test" + register: vpc_result_2 + - name: Peer the secondary-VPC to the main VPC + ec2_vpc_peer: + vpc_id: '{{ vpc_result_2.vpc.id }}' + peer_vpc_id: '{{ vpc_result.vpc.id }}' + peer_owner_id: '{{ caller_facts.account }}' + peer_region: '{{ aws_region }}' + register: peer_origin + - name: Accept the secondary-VPC peering connection in the main VPC + ec2_vpc_peer: + peer_vpc_id: '{{ vpc_result_2.vpc.id }}' + vpc_id: '{{ vpc_result.vpc.id }}' + state: accept + peering_id: '{{ peer_origin.peering_id }}' + peer_owner_id: '{{ caller_facts.account }}' + peer_region: '{{ aws_region }}' + - name: Create group in second VPC + ec2_group: + name: '{{ ec2_group_name }}-external' + description: '{{ ec2_group_description }}' + vpc_id: '{{ vpc_result_2.vpc.id }}' + state: present + rules: + - proto: "tcp" + cidr_ip: 0.0.0.0/0 + ports: + - 80 + rule_desc: 'http whoo' + register: external + - name: Create group in internal VPC + ec2_group: + name: '{{ ec2_group_name }}-internal' + description: '{{ ec2_group_description }}' + vpc_id: '{{ vpc_result.vpc.id }}' + state: present + rules: + - proto: "tcp" + group_id: '{{ caller_facts.account }}/{{ external.group_id }}/{{ ec2_group_name }}-external' + ports: + - 80 + - name: Re-make same rule, expecting changed=false in internal VPC + ec2_group: + name: '{{ ec2_group_name }}-internal' + description: '{{ ec2_group_description }}' + vpc_id: '{{ vpc_result.vpc.id }}' + state: present + rules: + - proto: "tcp" + group_id: '{{ caller_facts.account }}/{{ external.group_id }}/{{ ec2_group_name }}-external' + ports: + 
- 80 + register: out + - assert: + that: + - out is not changed + - name: Try again with a bad group_id group in internal VPC + ec2_group: + name: '{{ ec2_group_name }}-internal' + description: '{{ ec2_group_description }}' + vpc_id: '{{ vpc_result.vpc.id }}' + state: present + rules: + - proto: "tcp" + group_id: '{{ external.group_id }}/{{ caller_facts.account }}/{{ ec2_group_name }}-external' + ports: + - 80 + register: out + ignore_errors: true + - assert: + that: + - out is failed + always: + - pause: seconds=5 + - name: Delete secondary-VPC side of peer + ec2_vpc_peer: + vpc_id: '{{ vpc_result_2.vpc.id }}' + peer_vpc_id: '{{ vpc_result.vpc.id }}' + peering_id: '{{ peer_origin.peering_id }}' + state: absent + peer_owner_id: '{{ caller_facts.account }}' + peer_region: '{{ aws_region }}' + ignore_errors: yes + - name: Delete main-VPC side of peer + ec2_vpc_peer: + peer_vpc_id: '{{ vpc_result_2.vpc.id }}' + vpc_id: '{{ vpc_result.vpc.id }}' + state: absent + peering_id: '{{ peer_origin.peering_id }}' + peer_owner_id: '{{ caller_facts.account }}' + peer_region: '{{ aws_region }}' + ignore_errors: yes + - name: Clean up group in second VPC + ec2_group: + name: '{{ ec2_group_name }}-external' + description: '{{ ec2_group_description }}' + state: absent + vpc_id: '{{ vpc_result_2.vpc.id }}' + ignore_errors: yes + - name: Clean up group in second VPC + ec2_group: + name: '{{ ec2_group_name }}-internal' + description: '{{ ec2_group_description }}' + state: absent + vpc_id: '{{ vpc_result.vpc.id }}' + ignore_errors: yes + - name: tidy up VPC + ec2_vpc_net: + name: "{{ resource_prefix }}-vpc-2" + state: absent + cidr_block: "{{ vpc_cidr }}" + ignore_errors: yes + register: removed + retries: 10 + until: removed is not failed diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/multi_nested_target.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/multi_nested_target.yml new file mode 100644 index 000000000..87f48468f --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/multi_nested_target.yml @@ -0,0 +1,213 @@ +--- + # ============================================================ + + - name: test state=present for multiple ipv6 and ipv4 targets (expected changed=true) (CHECK MODE) + ec2_group: + name: '{{ ec2_group_name }}' + description: '{{ ec2_group_description }}' + state: present + rules: + - proto: "tcp" + from_port: 8182 + to_port: 8182 + cidr_ipv6: + - "64:ff9b::/96" + - ["2620::/32"] + - proto: "tcp" + ports: 5665 + cidr_ip: + - 172.16.1.0/24 + - 172.16.17.0/24 + - ["10.0.0.0/24", "10.20.0.0/24"] + check_mode: true + register: result + + - name: assert state=present (expected changed=true) + assert: + that: + - 'result.changed' + + - name: test state=present for multiple ipv6 and ipv4 targets (expected changed=true) + ec2_group: + name: '{{ ec2_group_name }}' + description: '{{ ec2_group_description }}' + state: present + rules: + - proto: "tcp" + from_port: 8182 + to_port: 8182 + cidr_ipv6: + - "64:ff9b::/96" + - ["2620::/32"] + - proto: "tcp" + ports: 5665 + cidr_ip: + - 172.16.1.0/24 + - 172.16.17.0/24 + - ["10.0.0.0/24", "10.20.0.0/24"] + register: result + + - name: assert state=present (expected changed=true) + assert: + that: + - 'result.changed' + - 'result.ip_permissions | length == 2' + - 'result.ip_permissions[0].ip_ranges | length == 4 or result.ip_permissions[1].ip_ranges | length == 4' + - 'result.ip_permissions[0].ipv6_ranges | length == 2 or 
result.ip_permissions[1].ipv6_ranges | length == 2' + + - name: test state=present for multiple ipv6 and ipv4 targets (expected changed=false) (CHECK MODE) + ec2_group: + name: '{{ ec2_group_name }}' + description: '{{ ec2_group_description }}' + state: present + rules: + - proto: "tcp" + from_port: 8182 + to_port: 8182 + cidr_ipv6: + - "64:ff9b::/96" + - ["2620::/32"] + - proto: "tcp" + ports: 5665 + cidr_ip: + - 172.16.1.0/24 + - 172.16.17.0/24 + - ["10.0.0.0/24", "10.20.0.0/24"] + check_mode: true + register: result + + - name: assert state=present (expected changed=false) + assert: + that: + - 'not result.changed' + + - name: test state=present for multiple ipv6 and ipv4 targets (expected changed=false) + ec2_group: + name: '{{ ec2_group_name }}' + description: '{{ ec2_group_description }}' + state: present + rules: + - proto: "tcp" + from_port: 8182 + to_port: 8182 + cidr_ipv6: + - "64:ff9b::/96" + - ["2620::/32"] + - proto: "tcp" + ports: 5665 + cidr_ip: + - 172.16.1.0/24 + - 172.16.17.0/24 + - ["10.0.0.0/24", "10.20.0.0/24"] + register: result + + - name: assert state=present (expected changed=false) + assert: + that: + - 'not result.changed' + + - name: test state=present purging a nested ipv4 target (expected changed=true) (CHECK MODE) + ec2_group: + name: '{{ ec2_group_name }}' + description: '{{ ec2_group_description }}' + state: present + rules: + - proto: "tcp" + from_port: 8182 + to_port: 8182 + cidr_ipv6: + - "64:ff9b::/96" + - ["2620::/32"] + - proto: "tcp" + ports: 5665 + cidr_ip: + - 172.16.1.0/24 + - 172.16.17.0/24 + - ["10.0.0.0/24"] + check_mode: true + register: result + + - assert: + that: + - result.changed + + - name: test state=present purging a nested ipv4 target (expected changed=true) + ec2_group: + name: '{{ ec2_group_name }}' + description: '{{ ec2_group_description }}' + state: present + rules: + - proto: "tcp" + from_port: 8182 + to_port: 8182 + cidr_ipv6: + - "64:ff9b::/96" + - ["2620::/32"] + - proto: "tcp" + ports: 5665 + cidr_ip: + - 172.16.1.0/24 + - 172.16.17.0/24 + - ["10.0.0.0/24"] + register: result + + - assert: + that: + - result.changed + - 'result.ip_permissions[0].ip_ranges | length == 3 or result.ip_permissions[1].ip_ranges | length == 3' + - 'result.ip_permissions[0].ipv6_ranges | length == 2 or result.ip_permissions[1].ipv6_ranges | length == 2' + + - name: test state=present with both associated ipv6 targets nested (expected changed=false) + ec2_group: + name: '{{ ec2_group_name }}' + description: '{{ ec2_group_description }}' + state: present + rules: + - proto: "tcp" + from_port: 8182 + to_port: 8182 + cidr_ipv6: + - ["2620::/32", "64:ff9b::/96"] + - proto: "tcp" + ports: 5665 + cidr_ip: + - 172.16.1.0/24 + - 172.16.17.0/24 + - ["10.0.0.0/24"] + register: result + + - assert: + that: + - not result.changed + + - name: test state=present add another nested ipv6 target (expected changed=true) + ec2_group: + name: '{{ ec2_group_name }}' + description: '{{ ec2_group_description }}' + state: present + rules: + - proto: "tcp" + from_port: 8182 + to_port: 8182 + cidr_ipv6: + - ["2620::/32", "64:ff9b::/96"] + - ["2001:DB8:A0B:12F0::1/64"] + - proto: "tcp" + ports: 5665 + cidr_ip: + - 172.16.1.0/24 + - 172.16.17.0/24 + - ["10.0.0.0/24"] + register: result + + - assert: + that: + - result.changed + - result.warning is not defined + - 'result.ip_permissions[0].ip_ranges | length == 3 or result.ip_permissions[1].ip_ranges | length == 3' + - 'result.ip_permissions[0].ipv6_ranges | length == 3 or result.ip_permissions[1].ipv6_ranges | length == 3' + + - 
name: delete it + ec2_group: + name: '{{ ec2_group_name }}' + state: absent diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/numeric_protos.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/numeric_protos.yml new file mode 100644 index 000000000..6cca9fc43 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/numeric_protos.yml @@ -0,0 +1,60 @@ +--- +- block: + - name: set up temporary group name for tests + set_fact: + group_tmp_name: '{{ec2_group_name}}-numbered-protos' + + - name: Create a group with numbered protocol (GRE) + ec2_group: + name: '{{ group_tmp_name }}' + vpc_id: '{{ vpc_result.vpc.id }}' + description: '{{ ec2_group_description }}' + rules: + - proto: 47 + to_port: -1 + from_port: -1 + cidr_ip: 0.0.0.0/0 + state: present + register: result + + - name: Create a group with a quoted proto + ec2_group: + name: '{{ group_tmp_name }}' + vpc_id: '{{ vpc_result.vpc.id }}' + description: '{{ ec2_group_description }}' + rules: + - proto: '47' + to_port: -1 + from_port: -1 + cidr_ip: 0.0.0.0/0 + state: present + register: result + - assert: + that: + - result is not changed + - name: Add a tag with a numeric value + ec2_group: + name: '{{ group_tmp_name }}' + vpc_id: '{{ vpc_result.vpc.id }}' + description: '{{ ec2_group_description }}' + tags: + foo: 1 + - name: Read a tag with a numeric value + ec2_group: + name: '{{ group_tmp_name }}' + vpc_id: '{{ vpc_result.vpc.id }}' + description: '{{ ec2_group_description }}' + tags: + foo: 1 + register: result + - assert: + that: + - result is not changed + + always: + - name: tidy up egress rule test security group + ec2_group: + name: '{{group_tmp_name}}' + state: absent + vpc_id: '{{ vpc_result.vpc.id }}' + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/rule_group_create.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/rule_group_create.yml new file mode 100644 index 000000000..4d763c988 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/rule_group_create.yml @@ -0,0 +1,127 @@ +--- +- block: + - name: Create a group with self-referring rule + ec2_group: + name: '{{ec2_group_name}}-auto-create-1' + vpc_id: '{{ vpc_result.vpc.id }}' + description: '{{ec2_group_description}}' + rules: + - proto: "tcp" + from_port: 8000 + to_port: 8100 + group_name: '{{ec2_group_name}}-auto-create-1' + state: present + register: result + + - name: Create a second group rule + ec2_group: + name: '{{ec2_group_name}}-auto-create-2' + vpc_id: '{{ vpc_result.vpc.id }}' + description: '{{ec2_group_description}}' + state: present + + - name: Create a series of rules with a recently created group as target + ec2_group: + name: '{{ec2_group_name}}-auto-create-1' + vpc_id: '{{ vpc_result.vpc.id }}' + description: '{{ec2_group_description}}' + purge_rules: false + rules: + - proto: "tcp" + from_port: "{{ item }}" + to_port: "{{ item }}" + group_name: '{{ec2_group_name}}-auto-create-2' + state: present + register: result + with_items: + - 20 + - 40 + - 60 + - 80 + + - assert: + that: + - result.warning is not defined + + - name: Create a group with only the default rule + ec2_group: + name: '{{ec2_group_name}}-auto-create-1' + vpc_id: '{{ vpc_result.vpc.id }}' + description: '{{ec2_group_description}}' + rules: + - proto: "tcp" + from_port: 8182 + to_port: 8182 + group_name: 
'{{ec2_group_name}}-auto-create-3' + state: present + register: result + ignore_errors: true + + - name: assert you can't create a new group from a rule target with no description + assert: + that: + - result is failed + + - name: Create a group with a target of a separate group + ec2_group: + name: '{{ec2_group_name}}-auto-create-1' + vpc_id: '{{ vpc_result.vpc.id }}' + description: '{{ec2_group_description}}' + rules: + - proto: tcp + ports: + - 22 + - 80 + group_name: '{{ec2_group_name}}-auto-create-3' + group_desc: '{{ec2_group_description}}' + state: present + register: result + + - assert: + that: + - result.warning is not defined + + - name: Create a 4th group + ec2_group: + name: '{{ec2_group_name}}-auto-create-4' + vpc_id: '{{ vpc_result.vpc.id }}' + description: '{{ec2_group_description}}' + state: present + rules: + - proto: tcp + ports: + - 22 + cidr_ip: 0.0.0.0/0 + + - name: use recently created group in a rule + ec2_group: + name: '{{ec2_group_name}}-auto-create-5' + vpc_id: '{{ vpc_result.vpc.id }}' + description: '{{ec2_group_description}}' + rules: + - proto: tcp + ports: + - 443 + group_name: '{{ec2_group_name}}-auto-create-4' + state: present + + - assert: + that: + - result.warning is not defined + + always: + - name: tidy up egress rule test security group rules + ec2_group: + name: '{{ec2_group_name}}-auto-create-{{ item }}' + description: '{{ec2_group_description}}' + rules: [] + rules_egress: [] + ignore_errors: yes + with_items: [5, 4, 3, 2, 1] + - name: tidy up egress rule test security group + ec2_group: + name: '{{ec2_group_name}}-auto-create-{{ item }}' + state: absent + vpc_id: '{{ vpc_result.vpc.id }}' + ignore_errors: yes + with_items: [1, 2, 3, 4, 5] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/aliases new file mode 100644 index 000000000..951ec3caf --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/aliases @@ -0,0 +1,10 @@ +# reason: unstable +# Testing of paginated results fails when fewer results are returned than +# expected - probably a race condition +# https://github.com/ansible-collections/amazon.aws/issues/441 +disabled + +slow + +cloud/aws +ec2_snapshot_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/defaults/main.yml new file mode 100644 index 000000000..dc1f0f703 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for ec2_snapshot diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/meta/main.yml new file mode 100644 index 000000000..2bff8543a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: +- role: setup_ec2_facts diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/tasks/main.yml new file mode 100644 index 000000000..1a4bb0fb5 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/tasks/main.yml @@ -0,0 +1,400 @@ +--- +# Tests for EC2 Snapshot +# +# Tests ec2_snapshot: +# - Snapshot creation +# - Create with 
last_snapshot_min_age +# - Snapshot deletion +# +# Tests ec2_snapshot_info: +# - Listing snapshots for filter: tag +# +- name: Integration testing for ec2_snapshot + module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + + collections: + - amazon.aws + - community.aws + + block: + - name: Gather availability zones + aws_az_facts: + register: azs + + # Create a new volume in detached mode without tags + - name: Create a detached volume without tags + ec2_vol: + volume_size: 1 + zone: '{{ azs.availability_zones[0].zone_name }}' + register: volume_detached + + # Capture snapshot of this detached volume and assert the results + - name: Create a snapshot of detached volume without tags and store results + ec2_snapshot: + volume_id: '{{ volume_detached.volume_id }}' + register: untagged_snapshot + + - assert: + that: + - untagged_snapshot is changed + - untagged_snapshot.snapshots| length == 1 + - untagged_snapshot.snapshots[0].volume_id == volume_detached.volume_id + + - name: Setup an instance for testing, make sure volumes are attached before next task + ec2_instance: + name: '{{ resource_prefix }}' + instance_type: t2.nano + image_id: '{{ ec2_ami_id }}' + volumes: + - device_name: /dev/xvda + ebs: + volume_size: 8 + delete_on_termination: true + state: running + wait: true + register: instance + + - set_fact: + volume_id: '{{ instance.instances[0].block_device_mappings[0].ebs.volume_id }}' + instance_id: '{{ instance.instances[0].instance_id }}' + device_name: '{{ instance.instances[0].block_device_mappings[0].device_name }}' + + - name: Take snapshot (check mode) + ec2_snapshot: + instance_id: '{{ instance_id }}' + device_name: '{{ device_name }}' + snapshot_tags: + Test: '{{ resource_prefix }}' + check_mode: true + register: result + + - assert: + that: + - result is changed + + - name: Take snapshot of volume + ec2_snapshot: + volume_id: '{{ volume_id }}' + register: result + + # The Name tag is created automatically as the instance_name; ie the resource_prefix + - name: Get info about snapshots + ec2_snapshot_info: + filters: + "tag:Name": '{{ resource_prefix }}' + register: info_result + + - assert: + that: + - result is changed + - info_result is not changed + - info_result.snapshots| length == 1 + - info_result.snapshots[0].snapshot_id == result.snapshot_id + - info_result.snapshots[0].volume_id == result.volume_id + - info_result.snapshots[0].volume_size == result.volume_size + - info_result.snapshots[0].tags == result.tags + + - name: Get info about snapshots (check_mode) + ec2_snapshot_info: + filters: + "tag:Name": '{{ resource_prefix }}' + register: info_check + check_mode: true + + - assert: + that: + - info_check is not changed + - info_check.snapshots| length == 1 + - info_check.snapshots[0].snapshot_id == result.snapshot_id + - info_check.snapshots[0].volume_id == result.volume_id + - info_check.snapshots[0].volume_size == result.volume_size + - info_check.snapshots[0].tags == result.tags + + - name: Take snapshot if most recent >1hr (False) (check mode) + ec2_snapshot: + volume_id: '{{ volume_id }}' + snapshot_tags: + Name: '{{ resource_prefix }}' + last_snapshot_min_age: 60 + check_mode: true + register: result + - assert: + that: + - result is not changed + + - name: Take snapshot if most recent >1hr (False) + ec2_snapshot: + volume_id: '{{ volume_id }}' + last_snapshot_min_age: 60 + register: result + + - name: Get info about 
snapshots + ec2_snapshot_info: + filters: + "tag:Name": '{{ resource_prefix }}' + register: info_result + + - assert: + that: + - result is not changed + - info_result.snapshots| length == 1 + + - name: Pause so we can do a last_snapshot_min_age test + pause: + minutes: 1 + + - name: Take snapshot if most recent >1min (True) (check mode) + ec2_snapshot: + volume_id: '{{ volume_id }}' + snapshot_tags: + Name: '{{ resource_prefix }}' + last_snapshot_min_age: 1 + check_mode: true + register: result + - assert: + that: + - result is changed + + - name: Take snapshot if most recent >1min (True) + ec2_snapshot: + volume_id: '{{ volume_id }}' + last_snapshot_min_age: 1 + register: result + + - name: Get info about snapshots + ec2_snapshot_info: + filters: + "tag:Name": '{{ resource_prefix }}' + register: info_result + + - assert: + that: + - result is changed + - info_result.snapshots| length == 2 + - result.snapshot_id in ( info_result.snapshots | map(attribute='snapshot_id') | list ) + + - name: Take snapshot with a tag (check mode) + ec2_snapshot: + volume_id: '{{ volume_id }}' + snapshot_tags: + MyTag: '{{ resource_prefix }}' + check_mode: true + register: result + - assert: + that: + - result is changed + + - name: Take snapshot and tag it + ec2_snapshot: + volume_id: '{{ volume_id }}' + snapshot_tags: + MyTag: '{{ resource_prefix }}' + register: tagged_result + + - name: Get info about snapshots by tag + ec2_snapshot_info: + filters: + "tag:MyTag": '{{ resource_prefix }}' + register: tag_info_result + + - set_fact: + tagged_snapshot_id: '{{ tag_info_result.snapshots[0].snapshot_id }}' + + - assert: + that: + - tagged_result is changed + - tagged_result.tags| length == 2 + - tag_info_result.snapshots| length == 1 + - tagged_result.tags.MyTag == resource_prefix + - tagged_result.snapshot_id == tagged_snapshot_id + + - name: Get info about all snapshots for this test + ec2_snapshot_info: + filters: + "tag:Name": '{{ resource_prefix }}' + register: info_result + + - assert: + that: + - info_result.snapshots | length == 3 + + - name: Generate extra snapshots + ec2_snapshot: + volume_id: '{{ volume_id }}' + snapshot_tags: + ResourcePrefix: '{{ resource_prefix }}' + loop: '{{ range(1, 6, 1) | list }}' + loop_control: + # Anything under 15 will trigger SnapshotCreationPerVolumeRateExceeded, + # this should now be automatically handled, but pause a little anyway to + # avoid being aggressive + pause: 15 + label: "Generate extra snapshots - {{ item }}" + + - name: Pause to allow creation to finish + pause: + minutes: 3 + + # check that snapshot_ids and max_results are mutually exclusive + - name: Check that max_results and snapshot_ids are mutually exclusive + ec2_snapshot_info: + snapshot_ids: + - '{{ tagged_snapshot_id }}' + max_results: 5 + ignore_errors: true + register: info_result + + - name: assert that operation failed + assert: + that: + - info_result is failed + + # check that snapshot_ids and next_token_id are mutually exclusive + - name: Check that snapshot_ids and next_token_id are mutually exclusive + ec2_snapshot_info: + snapshot_ids: + - '{{ tagged_snapshot_id }}' + next_token_id: 'random_value_token' + ignore_errors: true + register: info_result + + - name: assert that operation failed + assert: + that: + - info_result is failed + + # Retrieve snapshots in paginated mode + - name: Get snapshots in paginated mode using max_results option + ec2_snapshot_info: + filters: + "tag:Name": '{{ resource_prefix }}' + max_results: 5 + register: info_result + + - 
assert: + that: + - info_result.snapshots | length == 5 + - info_result.next_token_id is defined + + # Pagination : 2nd request + - name: Get snapshots for a second paginated request + ec2_snapshot_info: + filters: + "tag:Name": '{{ resource_prefix }}' + next_token_id: "{{ info_result.next_token_id }}" + register: info_result + + - assert: + that: + - info_result.snapshots | length == 3 + + # delete the tagged snapshot - check mode + - name: Delete the tagged snapshot (check mode) + ec2_snapshot: + state: absent + snapshot_id: '{{ tagged_snapshot_id }}' + register: delete_result_check_mode + check_mode: true + + - assert: + that: + - delete_result_check_mode is changed + + # delete the tagged snapshot + - name: Delete the tagged snapshot + ec2_snapshot: + state: absent + snapshot_id: '{{ tagged_snapshot_id }}' + + # delete the tagged snapshot again (results in InvalidSnapshot.NotFound) + - name: Delete already removed snapshot (check mode) + ec2_snapshot: + state: absent + snapshot_id: '{{ tagged_snapshot_id }}' + register: delete_result_second_check_mode + check_mode: true + + - assert: + that: + - delete_result_second_check_mode is not changed + + - name: Delete already removed snapshot (idempotent) + ec2_snapshot: + state: absent + snapshot_id: '{{ tagged_snapshot_id }}' + register: delete_result_second_idempotent + + - assert: + that: + - delete_result_second_idempotent is not changed + + - name: Get info about all snapshots for this test + ec2_snapshot_info: + filters: + "tag:Name": '{{ resource_prefix }}' + register: info_result + + - assert: + that: + - info_result.snapshots| length == 7 + - tagged_snapshot_id not in ( info_result.snapshots | map(attribute='snapshot_id') | list ) + + - name: Delete snapshots + ec2_snapshot: + state: absent + snapshot_id: '{{ item.snapshot_id }}' + with_items: '{{ info_result.snapshots }}' + + - name: Get info about all snapshots for this test + ec2_snapshot_info: + filters: + "tag:Name": '{{ resource_prefix }}' + register: info_result + + - assert: + that: + - info_result.snapshots| length == 0 + + always: + + - name: Snapshots to delete + ec2_snapshot_info: + filters: + "tag:Name": '{{ resource_prefix }}' + register: tagged_snapshots + + - name: Delete tagged snapshots + ec2_snapshot: + state: absent + snapshot_id: '{{ item.snapshot_id }}' + with_items: '{{ tagged_snapshots.snapshots }}' + ignore_errors: true + + - name: Delete instance + ec2_instance: + instance_ids: '{{ instance_id }}' + state: absent + ignore_errors: true + + - name: Delete volume + ec2_vol: + id: '{{ volume_id }}' + state: absent + ignore_errors: true + + - name: Delete detached and untagged volume + ec2_vol: + id: '{{ volume_detached.volume_id}}' + state: absent + ignore_errors: true + + - name: Delete untagged snapshot + ec2_snapshot: + state: absent + snapshot_id: '{{ untagged_snapshot.snapshot_id }}' + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/aliases new file mode 100644 index 000000000..f556641fb --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/aliases @@ -0,0 +1,2 @@ +cloud/aws +ec2_spot_instance_info \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/defaults/main.yml new file mode 100644 index 000000000..cb3895af0 --- 
/dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/defaults/main.yml @@ -0,0 +1,14 @@ +--- +vpc_seed_a: '{{ resource_prefix }}' +vpc_seed_b: '{{ resource_prefix }}-ec2_eni' +vpc_prefix: '10.{{ 256 | random(seed=vpc_seed_a) }}.{{ 256 | random(seed=vpc_seed_b) }}' +vpc_cidr: '{{ vpc_prefix }}.128/26' +ip_1: "{{ vpc_prefix }}.132" +ip_2: "{{ vpc_prefix }}.133" +ip_3: "{{ vpc_prefix }}.134" +ip_4: "{{ vpc_prefix }}.135" +ip_5: "{{ vpc_prefix }}.136" + +ec2_ips: +- "{{ vpc_prefix }}.137" +- "{{ vpc_prefix }}.138" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/meta/main.yml new file mode 100644 index 000000000..1471b11f6 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_ec2_facts diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/tasks/main.yaml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/tasks/main.yaml new file mode 100644 index 000000000..1e98ad890 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/tasks/main.yaml @@ -0,0 +1,315 @@ +--- +- module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + + collections: + - amazon.aws + - community.aws + + block: + - name: Get available AZs + aws_az_info: + filters: + region-name: "{{ aws_region }}" + register: az_info + + - name: Pick an AZ + set_fact: + availability_zone: "{{ az_info['availability_zones'][0]['zone_name'] }}" + + # ============================================================ + - name: create a VPC + ec2_vpc_net: + name: "{{ resource_prefix }}-vpc" + state: present + cidr_block: "{{ vpc_cidr }}" + tags: + Name: "{{ resource_prefix }}-vpc" + Description: "Created by ansible-test" + register: vpc_result + + - name: create a subnet + ec2_vpc_subnet: + cidr: "{{ vpc_cidr }}" + az: "{{ availability_zone }}" + vpc_id: "{{ vpc_result.vpc.id }}" + tags: + Name: "{{ resource_prefix }}-vpc" + Description: "Created by ansible-test" + state: present + register: vpc_subnet_result + + - name: create a security group + ec2_group: + name: "{{ resource_prefix }}-sg" + description: "Created by {{ resource_prefix }}" + rules: [] + state: present + vpc_id: "{{ vpc_result.vpc.id }}" + register: vpc_sg_result + + - name: create a new ec2 key pair + ec2_key: + name: "{{ resource_prefix }}-keypair" + + - name: Set facts to simplify use of extra resources + set_fact: + vpc_id: "{{ vpc_result.vpc.id }}" + vpc_subnet_id: "{{ vpc_subnet_result.subnet.id }}" + vpc_sg_id: "{{ vpc_sg_result.group_id }}" + + # ============================================================ + + - name: Run tests for terminating associated instances + import_tasks: terminate_associated_instances.yml + + # Assert that spot instance request is created + - name: Create simple spot instance request + ec2_spot_instance: + launch_specification: + image_id: "{{ ec2_ami_id }}" + key_name: "{{ resource_prefix }}-keypair" + instance_type: "t2.medium" + subnet_id: "{{ vpc_subnet_result.subnet.id }}" + tags: + ansible-test: "{{ resource_prefix }}" + register: create_result + + - name: Assert that result has changed and request has been created + assert: + that: + - create_result is 
changed + - create_result.spot_request is defined + - create_result.spot_request.spot_instance_request_id is defined + - create_result.spot_request.launch_specification.subnet_id == vpc_subnet_result.subnet.id + + - name: Get info about the spot instance request created + ec2_spot_instance_info: + spot_instance_request_ids: + - "{{ create_result.spot_request.spot_instance_request_id }}" + register: spot_instance_info_result + + - name: Assert that the spot request created is open or active + assert: + that: + - spot_instance_info_result.spot_request[0].state in ['open', 'active'] + + - name: Create spot request with more complex options + ec2_spot_instance: + launch_specification: + image_id: "{{ ec2_ami_id }}" + key_name: "{{ resource_prefix }}-keypair" + instance_type: "t2.medium" + block_device_mappings: + - device_name: /dev/sdb + ebs: + delete_on_termination: True + volume_type: gp3 + volume_size: 5 + network_interfaces: + - associate_public_ip_address: False + subnet_id: "{{ vpc_subnet_result.subnet.id }}" + delete_on_termination: True + device_index: 0 + placement: + availability_zone: '{{ availability_zone }}' + monitoring: + enabled: False + spot_price: 0.002 + tags: + camelCase: "helloWorld" + PascalCase: "HelloWorld" + snake_case: "hello_world" + "Title Case": "Hello World" + "lowercase spaced": "hello world" + ansible-test: "{{ resource_prefix }}" + register: complex_create_result + + - assert: + that: + - complex_create_result is changed + - complex_create_result.spot_request is defined + - complex_create_result.spot_request.spot_instance_request_id is defined + - complex_create_result.spot_request.type == 'one-time' + - '"0.002" in complex_create_result.spot_request.spot_price' ## AWS pads trailing zeros on the spot price + - launch_spec.placement.availability_zone == availability_zone + - launch_spec.block_device_mappings|length == 1 + - launch_spec.block_device_mappings.0.ebs.delete_on_termination == true + - launch_spec.block_device_mappings.0.ebs.volume_type == 'gp3' + - launch_spec.block_device_mappings.0.ebs.volume_size == 5 + - launch_spec.network_interfaces|length == 1 + - launch_spec.network_interfaces.0.device_index == 0 + - launch_spec.network_interfaces.0.associate_public_ip_address == false + - launch_spec.network_interfaces.0.delete_on_termination == true + - spot_request_tags|length == 6 + - spot_request_tags['camelCase'] == 'helloWorld' + - spot_request_tags['PascalCase'] == 'HelloWorld' + - spot_request_tags['snake_case'] == 'hello_world' + - spot_request_tags['Title Case'] == 'Hello World' + - spot_request_tags['lowercase spaced'] == 'hello world' + vars: + launch_spec: '{{ complex_create_result.spot_request.launch_specification }}' + spot_request_tags: '{{ complex_create_result.spot_request.tags }}' + + - name: Get info about the complex spot instance request created + ec2_spot_instance_info: + spot_instance_request_ids: + - "{{ complex_create_result.spot_request.spot_instance_request_id }}" + register: complex_info_result + + - name: Assert that the complex spot request created is open/active and correct keys are set + assert: + that: + - complex_info_result.spot_request[0].state in ['open', 'active'] + - complex_create_result.spot_request.spot_price == complex_info_result.spot_request[0].spot_price + - create_launch_spec.block_device_mappings[0].ebs.volume_size == info_launch_spec.block_device_mappings[0].ebs.volume_size + - create_launch_spec.block_device_mappings[0].ebs.volume_type == info_launch_spec.block_device_mappings[0].ebs.volume_type + - 
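The `vars:` block on these asserts is worth noting: it aliases a deeply nested registered structure to a short name so each condition stays readable. The same pattern, reduced to a minimal sketch:

```yaml
- name: Assert against a nested result via a short alias
  ansible.builtin.assert:
    that:
      - spec.placement.availability_zone == availability_zone
      - spec.block_device_mappings | length == 1
  vars:
    spec: "{{ complex_create_result.spot_request.launch_specification }}"
```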
create_launch_spec.network_interfaces[0].delete_on_termination == info_launch_spec.network_interfaces[0].delete_on_termination + vars: + create_launch_spec: "{{ complex_create_result.spot_request.launch_specification }}" + info_launch_spec: "{{ complex_info_result.spot_request[0].launch_specification }}" + + - name: Get info about the created spot instance requests and filter result based on provided filters + ec2_spot_instance_info: + spot_instance_request_ids: + - '{{ create_result.spot_request.spot_instance_request_id }}' + - '{{ complex_create_result.spot_request.spot_instance_request_id }}' + filters: + tag:ansible-test: "{{ resource_prefix }}" + launch.block-device-mapping.device-name: /dev/sdb + register: spot_instance_info_filter_result + + - name: Assert that the correct spot request was returned in the filtered result + assert: + that: + - spot_instance_info_filter_result.spot_request[0].spot_instance_request_id == complex_create_result.spot_request.spot_instance_request_id + + # Assert check mode + - name: Create spot instance request (check_mode) + ec2_spot_instance: + launch_specification: + image_id: "{{ ec2_ami_id }}" + key_name: "{{ resource_prefix }}-keypair" + instance_type: "t2.medium" + subnet_id: "{{ vpc_subnet_result.subnet.id }}" + tags: + ansible-test: "{{ resource_prefix }}" + check_mode: True + register: check_create_result + + - assert: + that: + - check_create_result is changed + + - name: Remove spot instance request (check_mode) + ec2_spot_instance: + spot_instance_request_ids: '{{ create_result.spot_request.spot_instance_request_id }}' + state: absent + check_mode: True + register: check_cancel_result + + - assert: + that: + - check_cancel_result is changed + + - name: Remove spot instance requests + ec2_spot_instance: + spot_instance_request_ids: + - '{{ create_result.spot_request.spot_instance_request_id }}' + - '{{ complex_create_result.spot_request.spot_instance_request_id }}' + state: absent + register: cancel_result + + - assert: + that: + - cancel_result is changed + - '"Cancelled Spot request" in cancel_result.msg' + + - name: Sometimes we run the next test before the EC2 API is fully updated from the previous task + pause: + seconds: 3 + + - name: Check no change if request is already cancelled (idempotency) + ec2_spot_instance: + spot_instance_request_ids: '{{ create_result.spot_request.spot_instance_request_id }}' + state: absent + register: cancel_request_again + + - assert: + that: + - cancel_request_again is not changed + - '"Spot request not found or already cancelled" in cancel_request_again.msg' + + - name: Gracefully try to remove non-existent request (NotFound) + ec2_spot_instance: + spot_instance_request_ids: + - sir-12345678 + state: absent + register: fake_cancel_result + + - assert: + that: + - fake_cancel_result is not changed + - '"Spot request not found or already cancelled" in fake_cancel_result.msg' + + always: + + # ============================================================ + - name: Delete spot instances + ec2_instance: + state: absent + filters: + vpc-id: "{{ vpc_result.vpc.id }}" + + - name: get all spot requests created during test + ec2_spot_instance_info: + filters: + tag:ansible-test: "{{ resource_prefix }}" + register: spot_request_list + + - name: remove spot instance requests + ec2_spot_instance: + spot_instance_request_ids: + - '{{ item.spot_instance_request_id }}' + state: 'absent' + ignore_errors: true + retries: 5 + with_items: "{{ spot_request_list.spot_request }}" + + - name: remove the security group + 
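The terminate_associated_instances.yml tasks further below exercise the one extra knob on cancellation: by default a fulfilled instance keeps running when its request is cancelled, while `terminate_instances: true` tears it down as well. A sketch with an illustrative request ID:

```yaml
- name: Cancel a spot request and terminate its instance
  amazon.aws.ec2_spot_instance:
    spot_instance_request_ids:
      - sir-0123abcd          # illustrative request ID
    state: absent
    terminate_instances: true
```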
ec2_group: + name: "{{ resource_prefix }}-sg" + description: "{{ resource_prefix }}" + rules: [] + state: absent + vpc_id: "{{ vpc_result.vpc.id }}" + ignore_errors: true + retries: 5 + + - name: remove the subnet + ec2_vpc_subnet: + cidr: "{{ vpc_cidr }}" + az: "{{ availability_zone }}" + vpc_id: "{{ vpc_result.vpc.id }}" + state: absent + ignore_errors: true + retries: 5 + when: vpc_subnet_result is defined + + - name: remove the VPC + ec2_vpc_net: + name: "{{ resource_prefix }}-vpc" + cidr_block: "{{ vpc_cidr }}" + state: absent + ignore_errors: true + retries: 5 + + - name: remove key pair by name + ec2_key: + name: "{{ resource_prefix }}-keypair" + state: absent + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/tasks/terminate_associated_instances.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/tasks/terminate_associated_instances.yml new file mode 100644 index 000000000..92864baaf --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/tasks/terminate_associated_instances.yml @@ -0,0 +1,109 @@ +--- +- block: + + # Spot instance request creation + - name: Simple Spot Request Creation + amazon.aws.ec2_spot_instance: + launch_specification: + image_id: "{{ ec2_ami_id }}" + key_name: "{{ resource_prefix }}-keypair" + instance_type: "t2.micro" + subnet_id: "{{ vpc_subnet_result.subnet.id }}" + tags: + ansible-test: "{{ resource_prefix }}" + register: create_result + + # Get instance ID of associated spot instance request + - name: Get info about the spot instance request created + amazon.aws.ec2_spot_instance_info: + spot_instance_request_ids: + - "{{ create_result.spot_request.spot_instance_request_id }}" + register: spot_instance_info_result + retries: 5 + until: spot_instance_info_result.spot_request[0].instance_id is defined + + - name: Pause to allow instance launch + pause: + seconds: 60 + + - name: Get instance ID of the instance associated with above spot instance request + set_fact: + instance_id_1: "{{ spot_instance_info_result.spot_request[0].instance_id }}" + + - name: Check state of instance - BEFORE request cancellation + amazon.aws.ec2_instance_info: + instance_ids: ["{{ instance_id_1 }}"] + register: instance_info_result + + # Cancel spot instance request + - name: Spot Request Termination + amazon.aws.ec2_spot_instance: + spot_instance_request_ids: + - '{{ create_result.spot_request.spot_instance_request_id }}' + state: absent + + # Verify that instance is not terminated and still running + - name: Check state of instance - AFTER request cancellation + amazon.aws.ec2_instance_info: + instance_ids: ["{{ instance_id_1 }}"] + register: instance_info_result + + - assert: + that: instance_info_result.instances[0].state.name == 'running' + +#========================================================================== + + # Spot instance request creation + - name: Simple Spot Request Creation + amazon.aws.ec2_spot_instance: + launch_specification: + image_id: "{{ ec2_ami_id }}" + key_name: "{{ resource_prefix }}-keypair" + instance_type: "t2.micro" + subnet_id: "{{ vpc_subnet_result.subnet.id }}" + tags: + ansible-test: "{{ resource_prefix }}" + register: create_result + + # Get instance ID of associated spot instance request + - name: Get info about the spot instance request created + amazon.aws.ec2_spot_instance_info: + spot_instance_request_ids: + - "{{ create_result.spot_request.spot_instance_request_id }}" + register: spot_instance_info_result + 
retries: 5 + until: spot_instance_info_result.spot_request[0].instance_id is defined + + - name: Pause to allow instance launch + pause: + seconds: 60 + + - name: Get instance ID of the instance associated with above spot instance request + set_fact: + instance_id_2: "{{ spot_instance_info_result.spot_request[0].instance_id }}" + + - name: Check state of instance - BEFORE request cancellation + amazon.aws.ec2_instance_info: + instance_ids: ["{{ instance_id_2 }}"] + register: instance_info_result + + # Cancel spot instance request + - name: Spot Request Termination + amazon.aws.ec2_spot_instance: + spot_instance_request_ids: + - '{{ create_result.spot_request.spot_instance_request_id }}' + state: absent + terminate_instances: true + + - name: wait for instance to terminate + pause: + seconds: 60 + + # Verify that instance is terminated or shutting-down + - name: Check state of instance - AFTER request cancellation + amazon.aws.ec2_instance_info: + instance_ids: ["{{ instance_id_2 }}"] + register: instance_info_result + + - assert: + that: instance_info_result.instances[0].state.name in ['terminated', 'shutting-down'] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/aliases new file mode 100644 index 000000000..326c8845b --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/aliases @@ -0,0 +1,2 @@ +cloud/aws +ec2_tag_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/defaults/main.yml new file mode 100644 index 000000000..6aa39c736 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for test_ec2_tag diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/meta/main.yml new file mode 100644 index 000000000..32cf5dda7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/tasks/main.yml new file mode 100644 index 000000000..1f2ea62cd --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/tasks/main.yml @@ -0,0 +1,136 @@ +--- +# tasks file for test_ec2_tag +- module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - name: Create an EC2 volume so we have something to tag + ec2_vol: + name: "{{ resource_prefix }} ec2_tag volume" + volume_size: 1 + state: present + zone: "{{ aws_region }}a" + register: volume + + - name: List the tags on the volume (ec2_tag_info) + ec2_tag_info: + resource: "{{ volume.volume_id }}" + register: result_info + + - assert: + that: + - result_info.tags | length == 1 + - result_info.tags.Name == '{{ resource_prefix }} ec2_tag volume' + + - name: Set some new tags on the volume + ec2_tag: + resource: "{{ volume.volume_id }}" + state: present + tags: + foo: foo + bar: baz + baz: also baz + register: result + - name: List the new tags on the volume + ec2_tag_info: + resource: "{{ volume.volume_id }}" + register: result_info + + 
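The ec2_tag tasks in this file cycle through the module's three behaviours: additive tagging, value-matched removal, and exclusive replacement via `purge_tags`. Compressed into a sketch (the resource ID is illustrative):

```yaml
- name: Add tags, leaving existing ones alone
  amazon.aws.ec2_tag:
    resource: vol-0123456789abcdef0   # illustrative resource ID
    state: present
    tags:
      foo: foo

- name: Remove the tag only if its value still matches
  amazon.aws.ec2_tag:
    resource: vol-0123456789abcdef0
    state: absent
    tags:
      foo: foo

- name: Replace all tags with exactly this set
  amazon.aws.ec2_tag:
    resource: vol-0123456789abcdef0
    state: present
    purge_tags: true
    tags:
      baz: quux
```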
- assert: + that: + - result is changed + - result.tags | length == 4 + - result.added_tags | length == 3 + - result.tags.Name == '{{ resource_prefix }} ec2_tag volume' + - result.tags.foo == 'foo' + - result.tags.bar == 'baz' + - result.tags.baz == 'also baz' + - result_info.tags | length == 4 + - result_info.tags.Name == '{{ resource_prefix }} ec2_tag volume' + - result_info.tags.foo == 'foo' + - result_info.tags.bar == 'baz' + - result_info.tags.baz == 'also baz' + + - name: Remove a tag by name + ec2_tag: + resource: "{{ volume.volume_id }}" + state: absent + tags: + baz: + register: result + + - assert: + that: + - result is changed + - result.removed_tags | length == 1 + - "'baz' in result.removed_tags" + + - name: Don't remove a tag + ec2_tag: + resource: "{{ volume.volume_id }}" + state: absent + tags: + foo: baz + register: result + + - assert: + that: + - result is not changed + + - name: Remove a tag + ec2_tag: + resource: "{{ volume.volume_id }}" + state: absent + tags: + foo: foo + register: result + + - assert: + that: + - result is changed + - result.tags | length == 2 + - "'added_tags' not in result" + - result.removed_tags | length == 1 + - result.tags.Name == '{{ resource_prefix }} ec2_tag volume' + - result.tags.bar == 'baz' + + - name: Set an exclusive tag + ec2_tag: + resource: "{{ volume.volume_id }}" + purge_tags: true + tags: + baz: quux + register: result + + - assert: + that: + - result is changed + - result.tags | length == 1 + - result.added_tags | length == 1 + - result.removed_tags | length == 2 + - result.tags.baz == 'quux' + + - name: Remove all tags + ec2_tag: + resource: "{{ volume.volume_id }}" + purge_tags: true + tags: {} + register: result + + - assert: + that: + - result is changed + - result.tags | length == 0 + + always: + - name: Remove the volume + ec2_vol: + id: "{{ volume.volume_id }}" + state: absent + register: result + until: result is not failed + ignore_errors: yes + retries: 10 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/vars/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/vars/main.yml new file mode 100644 index 000000000..c2d0654ae --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for test_ec2_tag diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/aliases new file mode 100644 index 000000000..f348f79e6 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/aliases @@ -0,0 +1,4 @@ +time=10m + +cloud/aws +ec2_vol_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/defaults/main.yml new file mode 100644 index 000000000..ae86815c5 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/defaults/main.yml @@ -0,0 +1,8 @@ +availability_zone: '{{ ec2_availability_zone_names[0] }}' + +vpc_name: '{{ resource_prefix }}-vpc' +vpc_seed: '{{ resource_prefix }}' +vpc_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.0.0/16' +subnet_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.32.0/24' + +instance_name: '{{ resource_prefix }}-instance' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/meta/main.yml new file mode 100644 index 
000000000..2bff8543a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: +- role: setup_ec2_facts diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/tasks/main.yml new file mode 100644 index 000000000..0b77b1571 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/tasks/main.yml @@ -0,0 +1,1002 @@ +--- +- module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key | default(omit) }}' + aws_secret_key: '{{ aws_secret_key | default(omit) }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region | default(omit) }}' + + collections: + - amazon.aws + - community.aws + + block: + + - name: Create a test VPC + ec2_vpc_net: + name: "{{ vpc_name }}" + cidr_block: "{{ vpc_cidr }}" + tags: + Name: ec2_vol testing + ResourcePrefix: "{{ resource_prefix }}" + register: testing_vpc + + - name: Create a test subnet + ec2_vpc_subnet: + vpc_id: "{{ testing_vpc.vpc.id }}" + cidr: "{{ subnet_cidr }}" + tags: + Name: ec2_vol testing + ResourcePrefix: "{{ resource_prefix }}" + az: '{{ availability_zone }}' + register: testing_subnet + + - name: create an ec2 instance + ec2_instance: + name: "{{ instance_name }}" + vpc_subnet_id: "{{ testing_subnet.subnet.id }}" + instance_type: t3.nano + image_id: "{{ ec2_ami_id }}" + tags: + ResourcePrefix: "{{ resource_prefix }}" + register: test_instance + + - name: check task return attributes + assert: + that: + - test_instance.changed + + - name: create another ec2 instance + ec2_instance: + name: "{{ instance_name }}-2" + vpc_subnet_id: "{{ testing_subnet.subnet.id }}" + instance_type: t3.nano + image_id: "{{ ec2_ami_id }}" + tags: + ResourcePrefix: "{{ resource_prefix }}" + register: test_instance_2 + + - name: check task return attributes + assert: + that: + - test_instance_2.changed + + - name: create another ec2 instance + ec2_instance: + name: "{{ instance_name }}-3" + vpc_subnet_id: "{{ testing_subnet.subnet.id }}" + instance_type: t3.nano + image_id: "{{ ec2_ami_id }}" + tags: + ResourcePrefix: "{{ resource_prefix }}" + register: test_instance_3 + + - name: check task return attributes + assert: + that: + - test_instance_3.changed + + # # ==== ec2_vol tests =============================================== + + - name: create a volume (validate module defaults - check_mode) + ec2_vol: + volume_size: 1 + zone: "{{ availability_zone }}" + tags: + ResourcePrefix: "{{ resource_prefix }}" + check_mode: true + register: volume1_check_mode + + - assert: + that: + - volume1_check_mode is changed + + + - name: create a volume (validate module defaults) + ec2_vol: + volume_size: 1 + zone: "{{ availability_zone }}" + tags: + ResourcePrefix: "{{ resource_prefix }}" + register: volume1 + + - name: check task return attributes + assert: + that: + - volume1.changed + - "'volume' in volume1" + - "'volume_id' in volume1" + - "'volume_type' in volume1" + - "'device' in volume1" + - volume1.volume.status == 'available' + - volume1.volume_type == 'standard' + - "'attachment_set' in volume1.volume" + - volume1.volume.attachment_set | length == 0 + - not ("Name" in volume1.volume.tags) + - not volume1.volume.encrypted + - volume1.volume.tags.ResourcePrefix == "{{ resource_prefix }}" + + # no idempotency check needed here + + - name: create another volume (override module defaults) + ec2_vol: + encrypted: yes + volume_size: 4 + volume_type: 
io1 + iops: 101 + name: "{{ resource_prefix }}" + tags: + ResourcePrefix: "{{ resource_prefix }}" + zone: "{{ availability_zone }}" + register: volume2 + + - name: check task return attributes + assert: + that: + - volume2.changed + - "'volume' in volume2" + - "'volume_id' in volume2" + - "'volume_type' in volume2" + - "'device' in volume2" + - volume2.volume.status == 'available' + - volume2.volume_type == 'io1' + - volume2.volume.iops == 101 + - volume2.volume.size == 4 + - volume2.volume.tags.Name == "{{ resource_prefix }}" + - volume2.volume.encrypted + - volume2.volume.tags.ResourcePrefix == "{{ resource_prefix }}" + + - name: create another volume (override module defaults) (idempotent) + ec2_vol: + encrypted: yes + volume_size: 4 + volume_type: io1 + iops: 101 + name: "{{ resource_prefix }}" + tags: + ResourcePrefix: "{{ resource_prefix }}" + zone: "{{ availability_zone }}" + register: volume2_idem + + - name: check task return attributes + assert: + that: + - not volume2_idem.changed + + - name: create snapshot from volume + ec2_snapshot: + volume_id: "{{ volume1.volume_id }}" + description: "Resource Prefix - {{ resource_prefix }}" + snapshot_tags: + ResourcePrefix: "{{ resource_prefix }}" + register: vol1_snapshot + + - name: check task return attributes + assert: + that: + - vol1_snapshot.changed + + - name: create a volume from a snapshot (check_mode) + ec2_vol: + snapshot: "{{ vol1_snapshot.snapshot_id }}" + encrypted: yes + volume_type: gp2 + volume_size: 1 + zone: "{{ availability_zone }}" + tags: + ResourcePrefix: "{{ resource_prefix }}" + check_mode: true + register: volume3_check_mode + + - name: check task return attributes + assert: + that: + - volume3_check_mode.changed + + - name: create a volume from a snapshot + ec2_vol: + snapshot: "{{ vol1_snapshot.snapshot_id }}" + encrypted: yes + volume_type: gp2 + volume_size: 1 + zone: "{{ availability_zone }}" + tags: + ResourcePrefix: "{{ resource_prefix }}" + register: volume3 + + - name: check task return attributes + assert: + that: + - volume3.changed + - "volume3.volume.snapshot_id == vol1_snapshot.snapshot_id" + + - name: Wait for instance to start + ec2_instance: + state: running + instance_ids: "{{ test_instance.instance_ids }}" + wait: True + + - name: attach existing volume to an instance (check_mode) + ec2_vol: + id: "{{ volume1.volume_id }}" + instance: "{{ test_instance.instance_ids[0] }}" + device_name: /dev/sdg + delete_on_termination: no + check_mode: true + register: vol_attach_result_check_mode + + - assert: + that: + - vol_attach_result_check_mode is changed + + - name: attach existing volume to an instance + ec2_vol: + id: "{{ volume1.volume_id }}" + instance: "{{ test_instance.instance_ids[0] }}" + device_name: /dev/sdg + delete_on_termination: no + register: vol_attach_result + + - name: check task return attributes + assert: + that: + - vol_attach_result.changed + - "'device' in vol_attach_result and vol_attach_result.device == '/dev/sdg'" + - "'volume' in vol_attach_result" + + # There's a delay between the volume being "In Use", and the attachment being reported. This + # can result in a race condition on the results. 
(There's no clean waiter to use either) + - name: wait for volume to report attached/attaching + ec2_vol_info: + filters: + volume-id: '{{ volume1.volume_id }}' + register: vol_attach_info + until: + - vol_attach_info.volumes[0].attachment_set | length >=1 + retries: 5 + delay: 2 + + - assert: + that: + - vol_attach_info.volumes[0].attachment_set[0].status in ['attached', 'attaching'] + - vol_attach_info.volumes[0].attachment_set[0].instance_id == test_instance.instance_ids[0] + - vol_attach_info.volumes[0].attachment_set[0].device == '/dev/sdg' + - not vol_attach_info.volumes[0].attachment_set[0].delete_on_termination + + - name: attach existing volume to an instance (idempotent - check_mode) + ec2_vol: + id: "{{ volume1.volume_id }}" + instance: "{{ test_instance.instance_ids[0] }}" + device_name: /dev/sdg + delete_on_termination: no + check_mode: true + register: vol_attach_result_check_mode + + - assert: + that: + - vol_attach_result_check_mode is not changed + + - name: attach existing volume to an instance (idempotent) + ec2_vol: + id: "{{ volume1.volume_id }}" + instance: "{{ test_instance.instance_ids[0] }}" + device_name: /dev/sdg + delete_on_termination: no + register: vol_attach_result + + - name: check task return attributes + assert: + that: + - "not vol_attach_result.changed" + - vol_attach_result.volume.attachment_set[0].status in ['attached', 'attaching'] + + - name: attach a new volume to an instance (check_mode) + ec2_vol: + instance: "{{ test_instance.instance_ids[0] }}" + device_name: /dev/sdh + volume_size: 1 + volume_type: gp2 + name: '{{ resource_prefix }} - sdh' + tags: + "lowercase spaced": 'hello cruel world' + "Title Case": 'Hello Cruel World' + CamelCase: 'SimpleCamelCase' + snake_case: 'simple_snake_case' + ResourcePrefix: "{{ resource_prefix }}" + check_mode: true + register: new_vol_attach_result_check_mode + + - assert: + that: + - new_vol_attach_result_check_mode is changed + + - name: attach a new volume to an instance + ec2_vol: + instance: "{{ test_instance.instance_ids[0] }}" + device_name: /dev/sdh + volume_size: 1 + volume_type: standard + name: '{{ resource_prefix }} - sdh' + tags: + "lowercase spaced": 'hello cruel world' + "Title Case": 'Hello Cruel World' + CamelCase: 'SimpleCamelCase' + snake_case: 'simple_snake_case' + ResourcePrefix: "{{ resource_prefix }}" + register: new_vol_attach_result + + - name: check task return attributes + assert: + that: + - new_vol_attach_result.changed + - "'device' in new_vol_attach_result and new_vol_attach_result.device == '/dev/sdh'" + - "'volume' in new_vol_attach_result" + - new_vol_attach_result.volume.attachment_set[0].status in ['attached', 'attaching'] + - new_vol_attach_result.volume.attachment_set[0].instance_id == test_instance.instance_ids[0] + - new_vol_attach_result.volume.attachment_set[0].device == '/dev/sdh' + - new_vol_attach_result.volume.tags["lowercase spaced"] == 'hello cruel world' + - new_vol_attach_result.volume.tags["Title Case"] == 'Hello Cruel World' + - new_vol_attach_result.volume.tags["CamelCase"] == 'SimpleCamelCase' + - new_vol_attach_result.volume.tags["snake_case"] == 'simple_snake_case' + - new_vol_attach_result.volume.tags["Name"] == '{{ resource_prefix }} - sdh' + + - name: attach a new volume to an instance (idempotent - check_mode) + ec2_vol: + instance: "{{ test_instance.instance_ids[0] }}" + device_name: /dev/sdh + volume_size: 1 + volume_type: standard + tags: + ResourcePrefix: "{{ resource_prefix }}" + check_mode: true + register: new_vol_attach_result_idem_check_mode + 
ignore_errors: true + + - assert: + that: + - new_vol_attach_result_idem_check_mode is not changed + + - name: attach a new volume to an instance (idempotent) + ec2_vol: + instance: "{{ test_instance.instance_ids[0] }}" + device_name: /dev/sdh + volume_size: 1 + volume_type: standard + tags: + ResourcePrefix: "{{ resource_prefix }}" + register: new_vol_attach_result_idem + ignore_errors: true + + - name: check task return attributes + assert: + that: + - "not new_vol_attach_result_idem.changed" + - "'Volume mapping for /dev/sdh already exists' in new_vol_attach_result_idem.msg" + + - name: change some tag values + ec2_vol: + instance: "{{ test_instance.instance_ids[0] }}" + id: "{{ new_vol_attach_result.volume.id }}" + device_name: /dev/sdh + volume_size: 1 + volume_type: standard + tags: + "lowercase spaced": 'hello cruel world ❤️' + "Title Case": 'Hello Cruel World ❤️' + CamelCase: 'SimpleCamelCase ❤️' + snake_case: 'simple_snake_case ❤️' + purge_tags: false + register: new_vol_attach_result + + - name: check task return attributes + assert: + that: + - new_vol_attach_result.changed + - "'volume_id' in new_vol_attach_result" + - new_vol_attach_result.volume_id == "{{ new_vol_attach_result.volume_id }}" + - "'attachment_set' in new_vol_attach_result.volume" + - "'create_time' in new_vol_attach_result.volume" + - "'id' in new_vol_attach_result.volume" + - "'size' in new_vol_attach_result.volume" + - new_vol_attach_result.volume.size == 1 + - "'volume_type' in new_vol_attach_result" + - new_vol_attach_result.volume_type == 'standard' + - "'tags' in new_vol_attach_result.volume" + - (new_vol_attach_result.volume.tags | length) == 6 + - new_vol_attach_result.volume.tags["lowercase spaced"] == 'hello cruel world ❤️' + - new_vol_attach_result.volume.tags["Title Case"] == 'Hello Cruel World ❤️' + - new_vol_attach_result.volume.tags["CamelCase"] == 'SimpleCamelCase ❤️' + - new_vol_attach_result.volume.tags["snake_case"] == 'simple_snake_case ❤️' + - new_vol_attach_result.volume.tags["ResourcePrefix"] == resource_prefix + - new_vol_attach_result.volume.tags["Name"] == '{{ resource_prefix }} - sdh' + + - name: change some tag values + ec2_vol: + instance: "{{ test_instance.instance_ids[0] }}" + id: "{{ new_vol_attach_result.volume.id }}" + device_name: /dev/sdh + volume_size: 1 + volume_type: standard + tags: + "lowercase spaced": 'hello cruel world ❤️' + "Title Case": 'Hello Cruel World ❤️' + snake_case: 'simple_snake_case ❤️' + ResourcePrefix: "{{ resource_prefix }}" + purge_tags: true + register: new_vol_attach_result + + - name: check task return attributes + assert: + that: + - new_vol_attach_result.changed + - "'volume_id' in new_vol_attach_result" + - new_vol_attach_result.volume_id == "{{ new_vol_attach_result.volume_id }}" + - "'attachment_set' in new_vol_attach_result.volume" + - "'create_time' in new_vol_attach_result.volume" + - "'id' in new_vol_attach_result.volume" + - "'size' in new_vol_attach_result.volume" + - new_vol_attach_result.volume.size == 1 + - "'volume_type' in new_vol_attach_result" + - new_vol_attach_result.volume_type == 'standard' + - "'tags' in new_vol_attach_result.volume" + - (new_vol_attach_result.volume.tags | length) == 4 + - new_vol_attach_result.volume.tags["lowercase spaced"] == 'hello cruel world ❤️' + - new_vol_attach_result.volume.tags["Title Case"] == 'Hello Cruel World ❤️' + - new_vol_attach_result.volume.tags["snake_case"] == 'simple_snake_case ❤️' + - new_vol_attach_result.volume.tags["ResourcePrefix"] == resource_prefix + + - name: create a volume from 
a snapshot and attach to the instance (check_mode) + ec2_vol: + instance: "{{ test_instance.instance_ids[0] }}" + device_name: /dev/sdi + snapshot: "{{ vol1_snapshot.snapshot_id }}" + tags: + ResourcePrefix: "{{ resource_prefix }}" + check_mode: true + register: attach_new_vol_from_snapshot_result_check_mode + + - assert: + that: + - attach_new_vol_from_snapshot_result_check_mode is changed + + + - name: create a volume from a snapshot and attach to the instance + ec2_vol: + instance: "{{ test_instance.instance_ids[0] }}" + device_name: /dev/sdi + snapshot: "{{ vol1_snapshot.snapshot_id }}" + tags: + ResourcePrefix: "{{ resource_prefix }}" + register: attach_new_vol_from_snapshot_result + + - name: check task return attributes + assert: + that: + - attach_new_vol_from_snapshot_result.changed + - "'device' in attach_new_vol_from_snapshot_result and attach_new_vol_from_snapshot_result.device == '/dev/sdi'" + - "'volume' in attach_new_vol_from_snapshot_result" + - attach_new_vol_from_snapshot_result.volume.attachment_set[0].status in ['attached', 'attaching'] + - attach_new_vol_from_snapshot_result.volume.attachment_set[0].instance_id == test_instance.instance_ids[0] + + - name: get info on ebs volumes + ec2_vol_info: + register: ec2_vol_info + + - name: check task return attributes + assert: + that: + - not ec2_vol_info.failed + + - name: get info on ebs volumes + ec2_vol_info: + filters: + attachment.instance-id: "{{ test_instance.instance_ids[0] }}" + register: ec2_vol_info + + - name: check task return attributes + assert: + that: + - ec2_vol_info.volumes | length == 4 + + - name: must not change because of missing parameter modify_volume + ec2_vol: + id: "{{ new_vol_attach_result.volume_id }}" + zone: "{{ availability_zone }}" + volume_type: gp3 + register: changed_gp3_volume + + - name: volume must not be changed + assert: + that: + - not changed_gp3_volume.changed + + - name: change existing volume to gp3 (check_mode) + ec2_vol: + id: "{{ new_vol_attach_result.volume_id }}" + zone: "{{ availability_zone }}" + volume_type: gp3 + modify_volume: yes + check_mode: true + register: changed_gp3_volume_check_mode + + - assert: + that: + - changed_gp3_volume_check_mode is changed + + - name: change existing volume to gp3 + ec2_vol: + id: "{{ new_vol_attach_result.volume_id }}" + zone: "{{ availability_zone }}" + volume_type: gp3 + modify_volume: yes + register: changed_gp3_volume + + - name: check that volume_type has changed + assert: + that: + - changed_gp3_volume.changed + - "'volume_id' in changed_gp3_volume" + - changed_gp3_volume.volume_id == "{{ new_vol_attach_result.volume_id }}" + - "'attachment_set' in changed_gp3_volume.volume" + - "'create_time' in changed_gp3_volume.volume" + - "'id' in changed_gp3_volume.volume" + - "'size' in changed_gp3_volume.volume" + - "'volume_type' in changed_gp3_volume" + - changed_gp3_volume.volume_type == 'gp3' + - "'iops' in changed_gp3_volume.volume" + - changed_gp3_volume.volume.iops == 3000 + # Ensure our tags are still here + - "'tags' in changed_gp3_volume.volume" + - (changed_gp3_volume.volume.tags | length) == 4 + - new_vol_attach_result.volume.tags["lowercase spaced"] == 'hello cruel world ❤️' + - new_vol_attach_result.volume.tags["Title Case"] == 'Hello Cruel World ❤️' + - new_vol_attach_result.volume.tags["snake_case"] == 'simple_snake_case ❤️' + - new_vol_attach_result.volume.tags["ResourcePrefix"] == resource_prefix + + - name: volume must be of type gp3 (idempotent)
+ ec2_vol: + id: "{{ new_vol_attach_result.volume_id }}" + zone: "{{ availability_zone }}" + volume_type: gp3 + modify_volume: yes + register: changed_gp3_volume + retries: 10 + delay: 3 + until: not changed_gp3_volume.failed + # retry because the EBS modification is too slow + + - name: must not change (idempotent) + assert: + that: + - not changed_gp3_volume.changed + - "'volume_id' in changed_gp3_volume" + - changed_gp3_volume.volume_id == "{{ new_vol_attach_result.volume_id }}" + - "'attachment_set' in changed_gp3_volume.volume" + - "'create_time' in changed_gp3_volume.volume" + - "'id' in changed_gp3_volume.volume" + - "'size' in changed_gp3_volume.volume" + - "'volume_type' in changed_gp3_volume" + - changed_gp3_volume.volume_type == 'gp3' + - "'iops' in changed_gp3_volume.volume" + - changed_gp3_volume.volume.iops == 3000 + - "'throughput' in changed_gp3_volume.volume" + - "'tags' in changed_gp3_volume.volume" + - (changed_gp3_volume.volume.tags | length) == 4 + - new_vol_attach_result.volume.tags["lowercase spaced"] == 'hello cruel world ❤️' + - new_vol_attach_result.volume.tags["Title Case"] == 'Hello Cruel World ❤️' + - new_vol_attach_result.volume.tags["snake_case"] == 'simple_snake_case ❤️' + - new_vol_attach_result.volume.tags["ResourcePrefix"] == resource_prefix + + - name: re-read volume information to validate new volume_type + ec2_vol_info: + filters: + volume-id: "{{ changed_gp3_volume.volume_id }}" + register: verify_gp3_change + + - name: volume type must be gp3 + assert: + that: + - v.type == 'gp3' + vars: + v: "{{ verify_gp3_change.volumes[0] }}" + + - name: detach volume from the instance (check_mode) + ec2_vol: + id: "{{ new_vol_attach_result.volume_id }}" + instance: "" + check_mode: true + register: new_vol_attach_result_check_mode + + - assert: + that: + - new_vol_attach_result_check_mode is changed + + - name: detach volume from the instance + ec2_vol: + id: "{{ new_vol_attach_result.volume_id }}" + instance: "" + register: new_vol_attach_result + + - name: check task return attributes + assert: + that: + - new_vol_attach_result.changed + - new_vol_attach_result.volume.status == 'available' + + - name: detach volume from the instance (idempotent - check_mode) + ec2_vol: + id: "{{ new_vol_attach_result.volume_id }}" + instance: "" + register: new_vol_attach_result_idem_check_mode + + - name: check task return attributes + assert: + that: + - not new_vol_attach_result_idem_check_mode.changed + + - name: detach volume from the instance (idempotent) + ec2_vol: + id: "{{ new_vol_attach_result.volume_id }}" + instance: "" + register: new_vol_attach_result_idem + + - name: check task return attributes + assert: + that: + - not new_vol_attach_result_idem.changed + + - name: delete volume (check_mode) + ec2_vol: + id: "{{ volume2.volume_id }}" + state: absent + check_mode: true + register: delete_volume_result_check_mode + + - assert: + that: + - delete_volume_result_check_mode is changed + + - name: delete volume + ec2_vol: + id: "{{ volume2.volume_id }}" + state: absent + register: delete_volume_result + + - name: check task return attributes + assert: + that: + - "delete_volume_result.changed" + + - name: delete volume (idempotent - check_mode) + ec2_vol: + id: "{{ volume2.volume_id }}" + state: absent + check_mode: true + register: delete_volume_result_check_mode + + - assert: + that: + - delete_volume_result_check_mode is not changed + + - name: delete volume (idempotent) + ec2_vol: + id: "{{ volume2.volume_id }}" + state: absent + register: delete_volume_result_idem + + - name: check task return attributes + assert: + that: + - not delete_volume_result_idem.changed + - '"Volume {{ volume2.volume_id }} does not exist" in delete_volume_result_idem.msg'
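Deleting by ID is idempotent, as the checks above show: a second `state: absent` run reports no change and a "does not exist" message rather than failing. A sketch with an illustrative ID:

```yaml
- name: Delete a volume; a repeat run is a no-op
  amazon.aws.ec2_vol:
    id: vol-0123456789abcdef0   # illustrative volume ID
    state: absent
  register: delete_result
# On a second run delete_result.changed is false and
# delete_result.msg contains "does not exist"
```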
# Originally from ec2_vol_info + + - name: Create test volume with Destroy on Terminate + ec2_vol: + instance: "{{ test_instance.instance_ids[0] }}" + volume_size: 4 + name: "{{ resource_prefix }}_delete_on_terminate" + device_name: /dev/sdj + volume_type: io1 + iops: 100 + tags: + Tag Name with Space-and-dash: Tag Value with Space-and-dash + delete_on_termination: yes + register: dot_volume + + - name: check task return attributes + assert: + that: + - dot_volume.changed + - "'attachment_set' in dot_volume.volume" + - "'delete_on_termination' in dot_volume.volume.attachment_set[0]" + - "'create_time' in dot_volume.volume" + - "'id' in dot_volume.volume" + - "'size' in dot_volume.volume" + - dot_volume.volume.size == 4 + - "'volume_type' in dot_volume" + - dot_volume.volume_type == 'io1' + - "'iops' in dot_volume.volume" + - dot_volume.volume.iops == 100 + - "'tags' in dot_volume.volume" + - (dot_volume.volume.tags | length ) == 2 + - dot_volume.volume.tags["Name"] == "{{ resource_prefix }}_delete_on_terminate" + - dot_volume.volume.tags["Tag Name with Space-and-dash"] == 'Tag Value with Space-and-dash' + + - name: Gather volume info without any filters + ec2_vol_info: + register: volume_info_wo_filters + check_mode: no + + - name: Check that info is returned without filters + assert: + that: + - "volume_info_wo_filters.volumes is defined" + + - name: Gather volume info + ec2_vol_info: + filters: + "tag:Name": "{{ resource_prefix }}_delete_on_terminate" + register: volume_info + check_mode: no + + - name: Format check + assert: + that: + - "volume_info.volumes|length == 1" + - "v.attachment_set[0].attach_time is defined" + - "v.attachment_set[0].device is defined and v.attachment_set[0].device == dot_volume.device" + - "v.attachment_set[0].instance_id is defined and v.attachment_set[0].instance_id == test_instance.instance_ids[0]" + - "v.attachment_set[0].status is defined and v.attachment_set[0].status == 'attached'" + - "v.create_time is defined" + - "v.encrypted is defined and v.encrypted == false" + - "v.id is defined and v.id == dot_volume.volume_id" + - "v.iops is defined and v.iops == 100" + - "v.region is defined and v.region == aws_region" + - "v.size is defined and v.size == 4" + - "v.snapshot_id is defined and v.snapshot_id == ''" + - "v.status is defined and v.status == 'in-use'" + - "v.tags.Name is defined and v.tags.Name == resource_prefix + '_delete_on_terminate'" + - "v.tags['Tag Name with Space-and-dash'] == 'Tag Value with Space-and-dash'" + - "v.type is defined and v.type == 'io1'" + - "v.zone is defined and v.zone == test_instance.instances[0].placement.availability_zone" + vars: + v: "{{ volume_info.volumes[0] }}" + + - name: New format check + assert: + that: + - "v.attachment_set[0].delete_on_termination is defined" + vars: + v: "{{ volume_info.volumes[0] }}" + when: ansible_version.full is version('2.7', '>=') + + - name: test create a new gp3 volume + ec2_vol: + volume_size: 70 + zone: "{{ availability_zone }}" + volume_type: gp3 + throughput: 130 + iops: 3001 + name: "GP3-TEST-{{ resource_prefix }}" + tags: + ResourcePrefix: "{{ resource_prefix }}" + register: gp3_volume + + - name: check that volume_type is gp3 + assert: + that: + - gp3_volume.changed + - "'attachment_set' in gp3_volume.volume" + - "'create_time' in gp3_volume.volume" + - "'id' in gp3_volume.volume" + - "'size' in gp3_volume.volume" + - gp3_volume.volume.size == 70 + - "'volume_type' in gp3_volume" + - gp3_volume.volume_type == 'gp3' + - "'iops' in gp3_volume.volume" + - gp3_volume.volume.iops == 3001 + - "'throughput' in gp3_volume.volume" + - gp3_volume.volume.throughput == 130 + - "'tags' in gp3_volume.volume" + - (gp3_volume.volume.tags | length ) == 2 + - gp3_volume.volume.tags["ResourcePrefix"] == "{{ resource_prefix }}"
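These gp3 assertions hinge on `modify_volume`: without it the module refuses in-place changes, and with it type, IOPS, and throughput can all be adjusted on a live volume. A sketch (volume ID and zone illustrative):

```yaml
- name: Raise throughput on an existing gp3 volume in place
  amazon.aws.ec2_vol:
    id: vol-0123456789abcdef0   # illustrative volume ID
    zone: us-east-1a            # illustrative availability zone
    volume_type: gp3
    throughput: 131
    modify_volume: true
```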
"'volume_type' in gp3_volume" + - gp3_volume.volume_type == 'gp3' + - "'iops' in gp3_volume.volume" + - gp3_volume.volume.iops == 3001 + - "'throughput' in gp3_volume.volume" + - gp3_volume.volume.throughput == 130 + - "'tags' in gp3_volume.volume" + - (gp3_volume.volume.tags | length ) == 2 + - gp3_volume.volume.tags["ResourcePrefix"] == "{{ resource_prefix }}" + + - name: Read volume information to validate throughput + ec2_vol_info: + filters: + volume-id: "{{ gp3_volume.volume_id }}" + register: verify_throughput + + - name: throughput must be equal to 130 + assert: + that: + - v.throughput == 130 + vars: + v: "{{ verify_throughput.volumes[0] }}" + + - name: print out facts + debug: + var: vol_facts + + - name: Read volume information to validate throughput + ec2_vol_info: + filters: + volume-id: "{{ gp3_volume.volume_id }}" + register: verify_throughput + + - name: throughput must be equal to 130 + assert: + that: + - v.throughput == 130 + vars: + v: "{{ verify_throughput.volumes[0] }}" + + - name: print out facts + debug: + var: vol_facts + + - name: increase throughput + ec2_vol: + volume_size: 70 + zone: "{{ availability_zone }}" + volume_type: gp3 + throughput: 131 + modify_volume: yes + name: "GP3-TEST-{{ resource_prefix }}" + tags: + ResourcePrefix: "{{ resource_prefix }}" + register: gp3_volume + + - name: check that throughput has changed + assert: + that: + - gp3_volume.changed + - "'create_time' in gp3_volume.volume" + - "'id' in gp3_volume.volume" + - "'size' in gp3_volume.volume" + - gp3_volume.volume.size == 70 + - "'volume_type' in gp3_volume" + - gp3_volume.volume_type == 'gp3' + - "'iops' in gp3_volume.volume" + - gp3_volume.volume.iops == 3001 + - "'throughput' in gp3_volume.volume" + - gp3_volume.volume.throughput == 131 + + # Multi-Attach disk + - name: create disk with multi-attach enabled + ec2_vol: + volume_size: 4 + volume_type: io1 + iops: 102 + zone: "{{ availability_zone }}" + multi_attach: yes + tags: + ResourcePrefix: "{{ resource_prefix }}" + register: multi_attach_disk + + - name: check volume creation + assert: + that: + - multi_attach_disk.changed + - "'volume' in multi_attach_disk" + - multi_attach_disk.volume.multi_attach_enabled + + - name: attach existing volume to an instance + ec2_vol: + id: "{{ multi_attach_disk.volume_id }}" + instance: "{{ test_instance.instance_ids[0] }}" + device_name: /dev/sdk + delete_on_termination: no + register: vol_attach_result + + - name: Wait for instance to start + ec2_instance: + state: running + instance_ids: "{{ test_instance_2.instance_ids }}" + wait: True + + - name: attach existing volume to second instance + ec2_vol: + id: "{{ multi_attach_disk.volume_id }}" + instance: "{{ test_instance_2.instance_ids[0] }}" + device_name: /dev/sdg + delete_on_termination: no + register: vol_attach_result + + - name: check task return attributes + assert: + that: + - vol_attach_result.changed + - "'volume' in vol_attach_result" + - vol_attach_result.volume.attachment_set | length == 2 + - 'test_instance.instance_ids[0] in vol_attach_result.volume.attachment_set | map(attribute="instance_id") | list' + - 'test_instance_2.instance_ids[0] in vol_attach_result.volume.attachment_set | map(attribute="instance_id") | list' + + - name: create a volume without tags + ec2_vol: + volume_size: 5 + zone: "{{ availability_zone }}" + instance: "{{ test_instance_3.instance_ids[0] }}" + register: volume_without_tag + + - assert: + that: + - volume_without_tag.changed + + # idempotency check without tags + - name: create a volume without tags 
(idempotency check) + ec2_vol: + volume_size: 5 + zone: "{{ availability_zone }}" + instance: "{{ test_instance_3.instance_ids[0] }}" + register: volume_without_tag + + - assert: + that: + - not volume_without_tag.changed + # ==== Cleanup ============================================================ + + always: + - name: Describe the instance before we delete it + ec2_instance_info: + instance_ids: + - "{{ item }}" + ignore_errors: yes + with_items: + - "{{ test_instance.instance_ids[0] }}" + - "{{ test_instance_2.instance_ids[0] }}" + - "{{ test_instance_3.instance_ids[0] }}" + register: pre_delete + + - debug: + var: pre_delete + + - name: delete test instance + ec2_instance: + instance_ids: + - "{{ item }}" + state: terminated + wait: True + with_items: + - "{{ test_instance.instance_ids[0] }}" + - "{{ test_instance_2.instance_ids[0] }}" + - "{{ test_instance_3.instance_ids[0] }}" + ignore_errors: yes + + - name: delete volumes + ec2_vol: + id: "{{ item.volume_id }}" + state: absent + ignore_errors: yes + with_items: + - "{{ volume1 }}" + - "{{ volume2 }}" + - "{{ volume3 }}" + - "{{ new_vol_attach_result }}" + - "{{ attach_new_vol_from_snapshot_result }}" + - "{{ dot_volume }}" + - "{{ gp3_volume }}" + - "{{ multi_attach_disk }}" + - "{{ volume_without_tag }}" + + - name: delete snapshot + ec2_snapshot: + snapshot_id: "{{ vol1_snapshot.snapshot_id }}" + state: absent + ignore_errors: yes + + - name: delete test subnet + ec2_vpc_subnet: + vpc_id: "{{ testing_vpc.vpc.id }}" + cidr: "{{ subnet_cidr }}" + state: absent + ignore_errors: yes + + - name: delete test VPC + ec2_vpc_net: + name: "{{ vpc_name }}" + cidr_block: "{{ vpc_cidr }}" + state: absent + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/aliases new file mode 100644 index 000000000..4ef4b2067 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/aliases @@ -0,0 +1 @@ +cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/defaults/main.yml new file mode 100644 index 000000000..26403c17d --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/defaults/main.yml @@ -0,0 +1,5 @@ +--- +# defaults file for ec2_dhcp_option_info tests +vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/24' +# default option sets get an AWS domain_name, which is different in us-east-1 +aws_domain_name: "{{ (aws_region == 'us-east-1') | ternary('ec2.internal', aws_region + '.compute.internal') }}" \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/meta/main.yml new file mode 100644 index 000000000..32cf5dda7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/tasks/main.yml new file mode 100644 index 000000000..5441e4f7f --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/tasks/main.yml @@ -0,0 +1,948 @@ 
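The ec2_vpc_dhcp_option tests this file introduces revolve around two switches: `inherit_existing` (merge the new values with the option set currently attached to the VPC) and `delete_old` (clean up the superseded set). A minimal sketch, with an illustrative VPC ID:

```yaml
- name: Merge NTP servers into the VPC's current DHCP options
  amazon.aws.ec2_vpc_dhcp_option:
    state: present
    vpc_id: vpc-0123456789abcdef0   # illustrative VPC ID
    inherit_existing: true
    delete_old: false               # keep the old option set around
    ntp_servers:
      - 10.0.0.2
      - 10.0.1.2
```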
+--- +# ============================================================ +# Known issues: +# +# there is no way to associate the `default` option set in the module +# The module doesn't store/return tags in the new_options dictionary +# always reassociated (changed=True) when vpc_id is provided without options +# +# ============================================================ +- module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default('') }}" + region: "{{ aws_region }}" + + block: + + # DHCP option set can be attached to multiple VPCs, we don't want to use any that + # don't belong to this test run + - name: find all DHCP option sets that already exist before running tests + ec2_vpc_dhcp_option_info: + register: result + + - set_fact: + preexisting_option_sets: "{{ result.dhcp_options | map(attribute='dhcp_options_id') | list }}" + + - name: create a VPC with a default DHCP option set to test inheritance and delete_old + ec2_vpc_net: + name: "{{ resource_prefix }}" + cidr_block: "{{ vpc_cidr }}" + state: present + register: vpc + + - name: ensure a DHCP option set is attached to the VPC + assert: + that: + - vpc.vpc.dhcp_options_id is defined + + - set_fact: + vpc_id: "{{ vpc.vpc.id }}" + default_options_id: "{{ vpc.vpc.dhcp_options_id }}" + +## ============================================ + - name: Option Sets can be attached to multiple VPCs, create a new one if the test VPC is reusing a pre-existing one + when: vpc.vpc.dhcp_options_id in preexisting_option_sets + block: + - name: Create the new option set + ec2_vpc_dhcp_option: + state: present + domain_name: "{{ aws_domain_name }}" + dns_servers: + - AmazonProvidedDNS + delete_old: True + tags: + Name: "{{ resource_prefix }}" + register: new_dhcp_options + + - assert: + that: + - new_dhcp_options.dhcp_options_id not in preexisting_option_sets + + - name: Attach the new option set to the VPC + ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + purge_tags: False + dhcp_options_id: "{{ new_dhcp_options.dhcp_options_id }}" +## ============================================ + + - name: find the VPC's associated option set + ec2_vpc_net_info: + vpc_ids: "{{ vpc_id }}" + register: vpc_info + + - set_fact: + original_dhcp_options_id: "{{ vpc_info.vpcs[0].dhcp_options_id }}" + + - name: get information about the DHCP option + ec2_vpc_dhcp_option_info: + dhcp_options_ids: ["{{ original_dhcp_options_id }}"] + register: original_dhcp_options_info + + - set_fact: + original_config: "{{ original_dhcp_options_info.dhcp_options[0].dhcp_configurations | items2dict(key_name='key', value_name='values') }}" + + - assert: + that: + - original_dhcp_options_info.dhcp_options | length == 1 + - original_config.keys() | list | sort == ['domain-name', 'domain-name-servers'] + - original_config['domain-name'][0]['value'] == '{{ aws_domain_name }}' + - original_config['domain-name-servers'][0]['value'] == 'AmazonProvidedDNS' + - original_dhcp_options_id not in preexisting_option_sets + +## ============================================ + + # FIXME: always reassociated to lowest alphanum dhcp_options_id when vpc_id is provided without options, + # This task will return an unpredictable dhcp_option_id so we can't assert anything about the option's values + - name: test a DHCP option exists (check mode) + ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + domain_name: "{{ aws_domain_name }}" + dns_servers: + - AmazonProvidedDNS + tags: + Name: 
"{{ resource_prefix }}" + register: found_dhcp_options + check_mode: true + + - assert: + that: + - not found_dhcp_options.changed + + # FIXME: always reassociated when vpc_id is provided without options, so here we provide the default options + - name: test a DHCP option exists + ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + domain_name: "{{ aws_domain_name }}" + dns_servers: + - AmazonProvidedDNS + tags: + Name: "{{ resource_prefix }}" + register: found_dhcp_options + + - assert: + that: + - found_dhcp_options is not changed + - found_dhcp_options.dhcp_options_id is defined + - original_dhcp_options_id == found_dhcp_options.dhcp_options_id + + # Create a DHCP option set that inherits from the default set and does not delete the old set + - name: create a DHCP option set that inherits from the default set (check mode) + ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: True + ntp_servers: + - 10.0.0.2 + - 10.0.1.2 + netbios_name_servers: + - 10.0.0.1 + - 10.0.1.1 + netbios_node_type: 2 + delete_old: False + register: dhcp_options + check_mode: true + + - assert: + that: + - dhcp_options.changed + + - name: create a DHCP option set that inherits from the default set + ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: True + ntp_servers: + - 10.0.0.2 + - 10.0.1.2 + netbios_name_servers: + - 10.0.0.1 + - 10.0.1.1 + netbios_node_type: 2 + delete_old: False + register: dhcp_options + + - set_fact: + dhcp_options_config: "{{ dhcp_options.dhcp_options.dhcp_configurations | items2dict(key_name='key', value_name='values') }}" + + - assert: + that: + - dhcp_options.changed + - dhcp_options.new_options + - dhcp_options.new_options.keys() | list | sort == ['domain-name', 'domain-name-servers', 'netbios-name-servers', 'netbios-node-type', 'ntp-servers'] + - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2'] + - dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1'] + - dhcp_options.new_options['netbios-node-type'] == '2' + - dhcp_options.new_options['domain-name'] == ['{{ aws_domain_name }}'] + - dhcp_options.new_options['domain-name-servers'] == ['AmazonProvidedDNS'] + # We return the list of dicts that boto gives us, in addition to the user-friendly config dict + - dhcp_options_config['ntp-servers'] | map(attribute='value') | list | sort == ['10.0.0.2', '10.0.1.2'] + - dhcp_options_config['netbios-name-servers'] | map(attribute='value') | list | sort == ['10.0.0.1', '10.0.1.1'] + - dhcp_options_config['netbios-node-type'][0]['value'] == '2' + - dhcp_options_config['domain-name'][0]['value'] == '{{ aws_domain_name }}' + - dhcp_options_config['domain-name-servers'][0]['value'] == 'AmazonProvidedDNS' + - original_dhcp_options_id != dhcp_options.dhcp_options_id + + - set_fact: + new_dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}" + + - name: get information about the new DHCP option + ec2_vpc_dhcp_option_info: + dhcp_options_ids: ["{{ new_dhcp_options_id }}"] + register: new_dhcp_options + + - set_fact: + new_config: "{{ new_dhcp_options.dhcp_options[0].dhcp_configurations | items2dict(key_name='key', value_name='values') }}" + + - assert: + that: + - new_config.keys() | list | sort == ['domain-name', 'domain-name-servers', 'netbios-name-servers', 'netbios-node-type', 'ntp-servers'] + - new_config['domain-name'][0]['value'] == '{{ aws_domain_name }}' + - new_config['domain-name-servers'][0]['value'] == 'AmazonProvidedDNS' + - new_config['ntp-servers'] | 
map(attribute='value') | list | sort == ['10.0.0.2', '10.0.1.2'] + - new_config['netbios-name-servers'] | map(attribute='value') | list | sort == ['10.0.0.1', '10.0.1.1'] + - new_config['netbios-node-type'][0]['value'] == '2' + # We return the list of dicts that boto gives us, in addition to the user-friendly config dict + - new_dhcp_options.dhcp_config[0]['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2'] + - new_dhcp_options.dhcp_config[0]['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1'] + - new_dhcp_options.dhcp_config[0]['netbios-node-type'] == '2' + - new_dhcp_options.dhcp_config[0]['domain-name'] == ['{{ aws_domain_name }}'] + - new_dhcp_options.dhcp_config[0]['domain-name-servers'] == ['AmazonProvidedDNS'] + + + # FIXME: no way to associate `default` in the module + - name: Re-associate the default DHCP options set so that the new one can be deleted + ec2_vpc_dhcp_option: + vpc_id: '{{ vpc_id }}' + dhcp_options_id: '{{ default_options_id }}' + state: present + register: result + + - assert: + that: + - result.changed + - result is success + - result.dhcp_options_id == '{{ default_options_id }}' + + - name: delete it for the next test + ec2_vpc_dhcp_option: + dhcp_options_id: "{{ new_dhcp_options_id }}" + state: absent + + # Create a DHCP option set that does not inherit from the old set and doesn't delete the old set + + - name: create a DHCP option set that does not inherit from the default set (check mode) + ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: False + ntp_servers: + - 10.0.0.2 + - 10.0.1.2 + netbios_name_servers: + - 10.0.0.1 + - 10.0.1.1 + netbios_node_type: 2 + delete_old: False + register: dhcp_options + check_mode: true + + - assert: + that: + - dhcp_options.changed + + - name: create a DHCP option set that does not inherit from the default set + ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: False + ntp_servers: + - 10.0.0.2 + - 10.0.1.2 + netbios_name_servers: + - 10.0.0.1 + - 10.0.1.1 + netbios_node_type: 2 + delete_old: False + register: dhcp_options + + - set_fact: + dhcp_options_config: "{{ dhcp_options.dhcp_options.dhcp_configurations | items2dict(key_name='key', value_name='values') }}" + + - assert: + that: + - dhcp_options.changed + - dhcp_options.new_options + # FIXME extra keys are returned unpredictably + - dhcp_options.new_options.keys() | list | sort == ['netbios-name-servers', 'netbios-node-type', 'ntp-servers'] + - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2'] + - dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1'] + - dhcp_options.new_options['netbios-node-type'] == '2' + - original_dhcp_options_id != dhcp_options.dhcp_options_id + # We return the list of dicts that boto gives us, in addition to the user-friendly config dict + - new_dhcp_options.dhcp_config[0]['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2'] + - new_dhcp_options.dhcp_config[0]['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1'] + - new_dhcp_options.dhcp_config[0]['netbios-node-type'] == '2' + + - set_fact: + new_dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}" + + - name: get information about the new DHCP option + ec2_vpc_dhcp_option_info: + dhcp_options_ids: ["{{ new_dhcp_options_id }}"] + register: new_dhcp_options + + - set_fact: + new_config: "{{ new_dhcp_options.dhcp_options[0].dhcp_configurations | items2dict(key_name='key', value_name='values') }}" + + - assert: + that: + - new_config.keys() | list | sort == 
['netbios-name-servers', 'netbios-node-type', 'ntp-servers'] + - new_config['ntp-servers'] | map(attribute='value') | list | sort == ['10.0.0.2', '10.0.1.2'] + - new_config['netbios-name-servers'] | map(attribute='value') | list | sort == ['10.0.0.1', '10.0.1.1'] + - new_config['netbios-node-type'][0]['value'] == '2' + + - name: disassociate the new DHCP option set so it can be deleted + ec2_vpc_dhcp_option: + dhcp_options_id: "{{ original_dhcp_options_id }}" + vpc_id: "{{ vpc_id }}" + state: present + + - name: delete it for the next test + ec2_vpc_dhcp_option: + dhcp_options_id: "{{ new_dhcp_options_id }}" + state: absent + + # Create a DHCP option set that inherits from the default set, overwrites a default, and deletes the old set + - name: create a DHCP option set that inherits from the default set and deletes the original set (check mode) + ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: True + domain_name: us-west-2.compute.internal + ntp_servers: + - 10.0.0.2 + - 10.0.1.2 + netbios_name_servers: + - 10.0.0.1 + - 10.0.1.1 + netbios_node_type: 2 + delete_old: True + register: dhcp_options + check_mode: true + + - assert: + that: + - dhcp_options.changed + + - name: create a DHCP option set that inherits from the default set and deletes the original set + ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: True + domain_name: '{{ aws_domain_name }}' + ntp_servers: + - 10.0.0.2 + - 10.0.1.2 + netbios_name_servers: + - 10.0.0.1 + - 10.0.1.1 + netbios_node_type: 1 + delete_old: True + register: dhcp_options + + - assert: + that: + - dhcp_options.changed + - dhcp_options.new_options + - dhcp_options.new_options.keys() | list | sort == ['domain-name', 'domain-name-servers', 'netbios-name-servers', 'netbios-node-type', 'ntp-servers'] + - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2'] + - dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1'] + - dhcp_options.new_options['netbios-node-type'] == '1' + - dhcp_options.new_options['domain-name'] == ['{{ aws_domain_name }}'] + - original_dhcp_options_id != dhcp_options.dhcp_options_id + + - set_fact: + new_dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}" + + - name: get information about the new DHCP option + ec2_vpc_dhcp_option_info: + dhcp_options_ids: ["{{ new_dhcp_options_id }}"] + register: new_dhcp_options + + - set_fact: + new_config: "{{ new_dhcp_options.dhcp_options[0].dhcp_configurations | items2dict(key_name='key', value_name='values') }}" + + - assert: + that: + - new_config.keys() | list | sort == ['domain-name', 'domain-name-servers', 'netbios-name-servers', 'netbios-node-type', 'ntp-servers'] + - new_config['domain-name'][0]['value'] == '{{ aws_domain_name }}' + - new_config['ntp-servers'] | map(attribute='value') | list | sort == ['10.0.0.2', '10.0.1.2'] + - new_config['netbios-name-servers'] | map(attribute='value') | list | sort == ['10.0.0.1', '10.0.1.1'] + - new_config['netbios-node-type'][0]['value'] == '1' + + - name: verify the original set was deleted + ec2_vpc_dhcp_option_info: + dhcp_options_ids: ["{{ original_dhcp_options_id }}"] + register: dhcp_options + ignore_errors: yes + + - assert: + that: + - dhcp_options.failed + - '"does not exist" in 
dhcp_options.error.message' + + - set_fact: + original_dhcp_options_id: "{{ new_dhcp_options_id }}" + + # Create a DHCP option set that does not inherit from the old set and deletes the old set + + - name: create a DHCP option set that does not inherit from the default set and deletes the original set (check mode) + ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: False + domain_name: '{{ aws_domain_name }}' + dns_servers: + - AmazonProvidedDNS + delete_old: True + register: dhcp_options + check_mode: true + + - assert: + that: + - dhcp_options.changed + + - name: create a DHCP option set that does not inherit from the default set and deletes the original set + ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: False + domain_name: "{{ aws_domain_name }}" + dns_servers: + - AmazonProvidedDNS + delete_old: True + register: dhcp_options + + - assert: + that: + - dhcp_options.new_options + - dhcp_options.new_options.keys() | list | sort is superset(['domain-name', 'domain-name-servers']) + - dhcp_options.new_options['domain-name'] == ['{{ aws_domain_name }}'] + - dhcp_options.new_options['domain-name-servers'] == ['AmazonProvidedDNS'] + - original_dhcp_options_id != dhcp_options.dhcp_options_id + + - set_fact: + new_dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}" + + - name: get information about the new DHCP option + ec2_vpc_dhcp_option_info: + dhcp_options_ids: ["{{ new_dhcp_options_id }}"] + register: new_dhcp_options + + - set_fact: + new_config: "{{ new_dhcp_options.dhcp_options[0].dhcp_configurations | items2dict(key_name='key', value_name='values') }}" + + - assert: + that: + - new_config.keys() | list | sort == ['domain-name', 'domain-name-servers'] + - new_config['domain-name'][0]['value'] == '{{ aws_domain_name }}' + - new_config['domain-name-servers'][0]['value'] == 'AmazonProvidedDNS' + + - name: verify the original set was deleted + ec2_vpc_dhcp_option_info: + dhcp_options_ids: ["{{ original_dhcp_options_id }}"] + register: dhcp_options + ignore_errors: yes + + - assert: + that: + - dhcp_options.failed + - '"does not exist" in dhcp_options.error.message' + + - set_fact: + original_dhcp_options_id: "{{ new_dhcp_options_id }}" + + # Create a DHCP option set with tags + + - name: create a DHCP option set with tags (check mode) + ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: False + delete_old: True + ntp_servers: + - 10.0.0.2 + - 10.0.1.2 + netbios_name_servers: + - 10.0.0.1 + - 10.0.1.1 + tags: + CreatedBy: ansible-test + Collection: amazon.aws + register: dhcp_options + check_mode: true + ignore_errors: true + + - assert: + that: + - dhcp_options.changed + + - name: create a DHCP option set with tags + ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: False + delete_old: True + ntp_servers: + - 10.0.0.2 + - 10.0.1.2 + netbios_name_servers: + - 10.0.0.1 + - 10.0.1.1 + tags: + CreatedBy: ansible-test + Collection: amazon.aws + register: dhcp_options + + - set_fact: + dhcp_options_config: "{{ dhcp_options.dhcp_options.dhcp_configurations | items2dict(key_name='key', value_name='values') }}" + + - assert: + that: + - dhcp_options.changed + - dhcp_options.new_options.keys() | list | sort is superset(['ntp-servers', 'netbios-name-servers']) + - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2'] + - dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1'] + - original_dhcp_options_id != 
dhcp_options.dhcp_options_id + # We return the list of dicts that boto gives us, in addition to the user-friendly config dict + - dhcp_options_config['ntp-servers'] | map(attribute='value') | list | sort == ['10.0.0.2', '10.0.1.2'] + - dhcp_options_config['netbios-name-servers'] | map(attribute='value') | list | sort == ['10.0.0.1', '10.0.1.1'] + - dhcp_options.dhcp_options.tags.keys() | length == 2 + - dhcp_options.dhcp_options.tags['CreatedBy'] == 'ansible-test' + - dhcp_options.dhcp_options.tags['Collection'] == 'amazon.aws' + + - set_fact: + new_dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}" + + - name: check if the expected tags are associated + ec2_vpc_dhcp_option_info: + dhcp_options_ids: ["{{ new_dhcp_options_id }}"] + register: dhcp_options_info + + - assert: + that: + - dhcp_options_info.dhcp_options[0].tags is defined + - dhcp_options_info.dhcp_options[0].tags | length == 2 + - dhcp_options_info.dhcp_options[0].tags['Collection'] == 'amazon.aws' + - dhcp_options_info.dhcp_options[0].tags['CreatedBy'] == 'ansible-test' + + - name: test no changes with the same tags (check mode) + ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: False + delete_old: True + ntp_servers: + - 10.0.0.2 + - 10.0.1.2 + netbios_name_servers: + - 10.0.0.1 + - 10.0.1.1 + tags: + CreatedBy: ansible-test + Collection: amazon.aws + register: dhcp_options + check_mode: true + + - assert: + that: + - not dhcp_options.changed + - dhcp_options.new_options.keys() | list | sort == ['netbios-name-servers', 'ntp-servers'] + - dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1'] + - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2'] + + - name: test no changes with the same tags + ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: False + delete_old: True + ntp_servers: + - 10.0.0.2 + - 10.0.1.2 + netbios_name_servers: + - 10.0.0.1 + - 10.0.1.1 + tags: + CreatedBy: ansible-test + Collection: amazon.aws + register: dhcp_options + + - name: check if the expected tags are associated + ec2_vpc_dhcp_option_info: + dhcp_options_ids: ["{{ dhcp_options.dhcp_options_id }}"] + register: dhcp_options_info + + - assert: + that: + - not dhcp_options.changed + - dhcp_options.new_options.keys() | list | sort == ['netbios-name-servers', 'ntp-servers'] + - dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1'] + - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2'] + - new_dhcp_options_id == dhcp_options.dhcp_options_id + - dhcp_options.dhcp_options.tags.keys() | length == 2 + - dhcp_options.dhcp_options.tags['CreatedBy'] == 'ansible-test' + - dhcp_options.dhcp_options.tags['Collection'] == 'amazon.aws' + - dhcp_options_info.dhcp_options[0].tags is defined + - dhcp_options_info.dhcp_options[0].tags.keys() | length == 2 + - dhcp_options_info.dhcp_options[0].tags['Collection'] == 'amazon.aws' + - dhcp_options_info.dhcp_options[0].tags['CreatedBy'] == 'ansible-test' + + - name: test no changes without specifying tags (check mode) + ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: False + delete_old: True + ntp_servers: + - 10.0.0.2 + - 10.0.1.2 + netbios_name_servers: + - 10.0.0.1 + - 10.0.1.1 + purge_tags: False + register: dhcp_options + check_mode: true + + - assert: + that: + - not dhcp_options.changed + - dhcp_options.new_options.keys() | list | sort is superset(['netbios-name-servers', 'ntp-servers']) + - 
dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1'] + - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2'] + + - name: test no changes without specifying tags + ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: False + delete_old: True + ntp_servers: + - 10.0.0.2 + - 10.0.1.2 + netbios_name_servers: + - 10.0.0.1 + - 10.0.1.1 + purge_tags: False + register: dhcp_options + + - name: check if the expected tags are associated + ec2_vpc_dhcp_option_info: + dhcp_options_ids: ["{{ dhcp_options.dhcp_options_id }}"] + register: dhcp_options_info + + - assert: + that: + - not dhcp_options.changed + - dhcp_options.new_options.keys() | list | sort is superset(['netbios-name-servers', 'ntp-servers']) + - dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1'] + - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2'] + - new_dhcp_options_id == dhcp_options.dhcp_options_id + - dhcp_options_info.dhcp_options[0].tags is defined + - dhcp_options_info.dhcp_options[0].tags.keys() | length == 2 + - dhcp_options_info.dhcp_options[0].tags['Collection'] == 'amazon.aws' + - dhcp_options_info.dhcp_options[0].tags['CreatedBy'] == 'ansible-test' + + - name: add a tag without using dhcp_options_id + ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: False + delete_old: True + ntp_servers: + - 10.0.0.2 + - 10.0.1.2 + netbios_name_servers: + - 10.0.0.1 + - 10.0.1.1 + tags: + CreatedBy: ansible-test + Collection: amazon.aws + another: tag + register: dhcp_options + + - name: check if the expected tags are associated + ec2_vpc_dhcp_option_info: + dhcp_options_ids: ["{{ dhcp_options.dhcp_options_id }}"] + register: dhcp_options_info + + - assert: + that: + - dhcp_options.changed + - dhcp_options.new_options.keys() | list | sort is superset(['netbios-name-servers', 'ntp-servers']) + - dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1'] + - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2'] + - new_dhcp_options_id == dhcp_options.dhcp_options_id + - dhcp_options.dhcp_options.tags.keys() | length == 3 + - dhcp_options.dhcp_options.tags['another'] == 'tag' + - dhcp_options.dhcp_options.tags['CreatedBy'] == 'ansible-test' + - dhcp_options.dhcp_options.tags['Collection'] == 'amazon.aws' + - dhcp_options_info.dhcp_options[0].tags is defined + - dhcp_options_info.dhcp_options[0].tags.keys() | length == 3 + - dhcp_options_info.dhcp_options[0].tags['another'] == 'tag' + - dhcp_options_info.dhcp_options[0].tags['Collection'] == 'amazon.aws' + - dhcp_options_info.dhcp_options[0].tags['CreatedBy'] == 'ansible-test' + + - name: add and remove tags (check mode) + ec2_vpc_dhcp_option: + dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}" + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: False + delete_old: True + ntp_servers: + - 10.0.0.2 + - 10.0.1.2 + netbios_name_servers: + - 10.0.0.1 + - 10.0.1.1 + tags: + AnsibleTest: integration + Collection: amazon.aws + register: dhcp_options + check_mode: true + + - assert: + that: + - dhcp_options.changed + + - name: add and remove tags + ec2_vpc_dhcp_option: + dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}" + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: False + delete_old: True + ntp_servers: + - 10.0.0.2 + - 10.0.1.2 + netbios_name_servers: + - 10.0.0.1 + - 10.0.1.1 + tags: + AnsibleTest: integration + Collection: amazon.aws + 
register: dhcp_options + + - name: check if the expected tags are associated + ec2_vpc_dhcp_option_info: + dhcp_options_ids: ["{{ dhcp_options.dhcp_options_id }}"] + register: dhcp_options_info + + - assert: + that: + - dhcp_options.changed + - dhcp_options.dhcp_options.tags.keys() | length == 2 + - dhcp_options.dhcp_options.tags['AnsibleTest'] == 'integration' + - dhcp_options.dhcp_options.tags['Collection'] == 'amazon.aws' + - new_dhcp_options_id == dhcp_options.dhcp_options_id + - dhcp_options_info.dhcp_options[0].tags is defined + - dhcp_options_info.dhcp_options[0].tags.keys() | length == 2 + - dhcp_options_info.dhcp_options[0].tags['Collection'] == 'amazon.aws' + - dhcp_options_info.dhcp_options[0].tags['AnsibleTest'] == 'integration' + + - name: add tags with different cases + ec2_vpc_dhcp_option: + dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}" + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: False + delete_old: True + ntp_servers: + - 10.0.0.2 + - 10.0.1.2 + netbios_name_servers: + - 10.0.0.1 + - 10.0.1.1 + tags: + "lowercase spaced": 'hello cruel world' + "Title Case": 'Hello Cruel World' + CamelCase: 'SimpleCamelCase' + snake_case: 'simple_snake_case' + register: dhcp_options + + - name: check if the expected tags are associated + ec2_vpc_dhcp_option_info: + dhcp_options_ids: ["{{ dhcp_options.dhcp_options_id }}"] + register: dhcp_options_info + + - assert: + that: + - dhcp_options.changed + - new_dhcp_options_id == dhcp_options.dhcp_options_id + - dhcp_options.dhcp_options.tags.keys() | length == 4 + - dhcp_options.dhcp_options.tags['lowercase spaced'] == 'hello cruel world' + - dhcp_options.dhcp_options.tags['Title Case'] == 'Hello Cruel World' + - dhcp_options.dhcp_options.tags['CamelCase'] == 'SimpleCamelCase' + - dhcp_options.dhcp_options.tags['snake_case'] == 'simple_snake_case' + - dhcp_options_info.dhcp_options[0].tags is defined + - dhcp_options_info.dhcp_options[0].tags.keys() | length == 4 + - dhcp_options_info.dhcp_options[0].tags['lowercase spaced'] == 'hello cruel world' + - dhcp_options_info.dhcp_options[0].tags['Title Case'] == 'Hello Cruel World' + - dhcp_options_info.dhcp_options[0].tags['CamelCase'] == 'SimpleCamelCase' + - dhcp_options_info.dhcp_options[0].tags['snake_case'] == 'simple_snake_case' + + - name: test purging all tags + ec2_vpc_dhcp_option: + dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}" + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: False + delete_old: True + ntp_servers: + - 10.0.0.2 + - 10.0.1.2 + netbios_name_servers: + - 10.0.0.1 + - 10.0.1.1 + tags: {} + register: dhcp_options + + - name: check if the expected tags are associated + ec2_vpc_dhcp_option_info: + dhcp_options_ids: ["{{ dhcp_options.dhcp_options_id }}"] + register: dhcp_options_info + + - assert: + that: + - dhcp_options.changed + - new_dhcp_options_id == dhcp_options.dhcp_options_id + - not dhcp_options_info.dhcp_options[0].tags + + - name: test removing all tags + ec2_vpc_dhcp_option: + dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}" + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: False + delete_old: True + ntp_servers: + - 10.0.0.2 + - 10.0.1.2 + netbios_name_servers: + - 10.0.0.1 + - 10.0.1.1 + tags: {} + register: dhcp_options + + - name: check if the expected tags are associated + ec2_vpc_dhcp_option_info: + dhcp_options_ids: ["{{ dhcp_options.dhcp_options_id }}"] + register: dhcp_options_info + + - assert: + that: + - dhcp_options.changed + - new_dhcp_options_id == dhcp_options.dhcp_options_id + - not 
dhcp_options_info.dhcp_options[0].tags + + - name: remove the DHCP option set (check mode) + ec2_vpc_dhcp_option: + state: absent + vpc_id: "{{ vpc_id }}" + dhcp_options_id: "{{ new_dhcp_options_id }}" + register: dhcp_options + check_mode: true + +# - assert: +# that: +# - dhcp_options.changed + + # FIXME: does nothing - the module should associate "default" with the VPC provided but currently does not + - name: remove the DHCP option set + ec2_vpc_dhcp_option: + state: absent + vpc_id: "{{ vpc_id }}" + dhcp_options_id: "{{ new_dhcp_options_id }}" + register: dhcp_options + +# - assert: +# that: +# - dhcp_options.changed + + - name: remove the DHCP option set again (check mode) + ec2_vpc_dhcp_option: + state: absent + vpc_id: "{{ vpc_id }}" + dhcp_options_id: "{{ new_dhcp_options_id }}" + register: dhcp_options + check_mode: true + + - assert: + that: + - not dhcp_options.changed + + - name: remove the DHCP option set again + ec2_vpc_dhcp_option: + state: absent + vpc_id: "{{ vpc_id }}" + dhcp_options_id: "{{ new_dhcp_options_id }}" + register: dhcp_options + + - assert: + that: + - not dhcp_options.changed + + always: + + - name: Re-associate the default DHCP options set so that the new one(s) can be deleted + ec2_vpc_dhcp_option: + vpc_id: '{{ vpc_id }}' + dhcp_options_id: '{{ default_options_id }}' + state: present + register: result + when: vpc_id is defined + ignore_errors: yes + + - name: Query all option sets created by the test + ec2_vpc_dhcp_option_info: + filters: + "tag:Name": "*{{ resource_prefix }}*" + register: option_sets + + - name: clean up DHCP option sets + ec2_vpc_dhcp_option: + state: absent + dhcp_options_id: "{{ original_dhcp_options_id }}" + vpc_id: "{{ vpc_id }}" + when: original_dhcp_options_id is defined + ignore_errors: yes + + - name: clean up DHCP option sets + ec2_vpc_dhcp_option: + state: absent + dhcp_options_id: "{{ new_dhcp_options_id }}" + vpc_id: "{{ vpc_id }}" + when: new_dhcp_options_id is defined + ignore_errors: yes + + - name: Delete the VPC + ec2_vpc_net: + name: "{{ resource_prefix }}" + cidr_block: "{{ vpc_cidr }}" + state: absent diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/aliases new file mode 100644 index 000000000..506820fc1 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/aliases @@ -0,0 +1,3 @@ +cloud/aws +disabled +ec2_vpc_endpoint_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/defaults/main.yml new file mode 100644 index 000000000..3869e983b --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/defaults/main.yml @@ -0,0 +1,7 @@ +vpc_name: '{{ resource_prefix }}-vpc' +vpc_seed: '{{ resource_prefix }}' +vpc_cidr: 10.{{ 256 | random(seed=vpc_seed) }}.22.0/24 + +# S3 and EC2 should generally be available...
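+# The service names below follow the com.amazonaws.<region>.<service>
+# convention. If another service is ever added here, one way to confirm the
+# name exists in the target region (a sketch mirroring the lookup the tasks
+# themselves perform) would be:
+#   - ec2_vpc_endpoint_info:
+#       query: services
+#     register: available
+#   - assert:
+#       that:
+#         - endpoint_service_a in available.service_names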
+endpoint_service_a: com.amazonaws.{{ aws_region }}.s3 +endpoint_service_b: com.amazonaws.{{ aws_region }}.ec2 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/meta/main.yml new file mode 100644 index 000000000..32cf5dda7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/tasks/main.yml new file mode 100644 index 000000000..09e6908b0 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/tasks/main.yml @@ -0,0 +1,862 @@ +- name: ec2_vpc_endpoint tests + module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + # ============================================================ + # BEGIN PRE-TEST SETUP + - name: create a VPC + ec2_vpc_net: + state: present + name: '{{ vpc_name }}' + cidr_block: '{{ vpc_cidr }}' + tags: + AnsibleTest: ec2_vpc_endpoint + AnsibleRun: '{{ resource_prefix }}' + register: vpc_creation + - name: Assert success + assert: + that: + - vpc_creation is successful + + - name: Create an IGW + ec2_vpc_igw: + vpc_id: '{{ vpc_creation.vpc.id }}' + state: present + tags: + Name: '{{ resource_prefix }}' + AnsibleTest: ec2_vpc_endpoint + AnsibleRun: '{{ resource_prefix }}' + register: igw_creation + - name: Assert success + assert: + that: + - igw_creation is successful + + - name: Create a minimal route table (no routes) + ec2_vpc_route_table: + vpc_id: '{{ vpc_creation.vpc.id }}' + tags: + AnsibleTest: ec2_vpc_endpoint + AnsibleRun: '{{ resource_prefix }}' + Name: '{{ resource_prefix }}-empty' + subnets: [] + routes: [] + register: rtb_creation_empty + + - name: Create a minimal route table (with IGW) + ec2_vpc_route_table: + vpc_id: '{{ vpc_creation.vpc.id }}' + tags: + AnsibleTest: ec2_vpc_endpoint + AnsibleRun: '{{ resource_prefix }}' + Name: '{{ resource_prefix }}-igw' + subnets: [] + routes: + - dest: 0.0.0.0/0 + gateway_id: '{{ igw_creation.gateway_id }}' + register: rtb_creation_igw + + - name: Save VPC info in a fact + set_fact: + vpc_id: '{{ vpc_creation.vpc.id }}' + rtb_empty_id: '{{ rtb_creation_empty.route_table.id }}' + rtb_igw_id: '{{ rtb_creation_igw.route_table.id }}' + + # ============================================================ + # BEGIN TESTS + + # Minimal check_mode with _info + - name: Fetch Endpoints in check_mode + ec2_vpc_endpoint_info: + query: endpoints + register: endpoint_info + check_mode: true + - name: Assert success + assert: + that: + # May be run in parallel, the only thing we can guarantee is + # - we shouldn't error + # - we should return 'vpc_endpoints' (even if it's empty) + - endpoint_info is successful + - '"vpc_endpoints" in endpoint_info' + + - name: Fetch Services in check_mode + ec2_vpc_endpoint_info: + query: services + register: endpoint_info + check_mode: true + - name: Assert success + assert: + that: + - endpoint_info is successful + - '"service_names" in endpoint_info' + # This is just 2 arbitrary AWS services that should (generally) be + # available. 
The actual list will vary over time and between regions + - endpoint_service_a in endpoint_info.service_names + - endpoint_service_b in endpoint_info.service_names + + # Fetch services without check mode + # Note: Filters not supported on services via this module, this is all we can test for now + - name: Fetch Services + ec2_vpc_endpoint_info: + query: services + register: endpoint_info + - name: Assert success + assert: + that: + - endpoint_info is successful + - '"service_names" in endpoint_info' + # This is just 2 arbitrary AWS services that should (generally) be + # available. The actual list will vary over time and between regions + - endpoint_service_a in endpoint_info.service_names + - endpoint_service_b in endpoint_info.service_names + + # Attempt to create an endpoint + - name: Create minimal endpoint (check mode) + ec2_vpc_endpoint: + state: present + vpc_id: '{{ vpc_id }}' + service: '{{ endpoint_service_a }}' + register: create_endpoint_check + check_mode: true + - name: Assert changed + assert: + that: + - create_endpoint_check is changed + + - name: Create minimal endpoint + ec2_vpc_endpoint: + state: present + vpc_id: '{{ vpc_id }}' + service: '{{ endpoint_service_a }}' + wait: true + register: create_endpoint + - name: Check standard return values + assert: + that: + - create_endpoint is changed + - '"result" in create_endpoint' + - '"creation_timestamp" in create_endpoint.result' + - '"dns_entries" in create_endpoint.result' + - '"groups" in create_endpoint.result' + - '"network_interface_ids" in create_endpoint.result' + - '"owner_id" in create_endpoint.result' + - '"policy_document" in create_endpoint.result' + - '"private_dns_enabled" in create_endpoint.result' + - create_endpoint.result.private_dns_enabled == False + - '"requester_managed" in create_endpoint.result' + - create_endpoint.result.requester_managed == False + - '"service_name" in create_endpoint.result' + - create_endpoint.result.service_name == endpoint_service_a + - '"state" in create_endpoint.result' + - create_endpoint.result.state == "available" + - '"vpc_endpoint_id" in create_endpoint.result' + - create_endpoint.result.vpc_endpoint_id.startswith("vpce-") + - '"vpc_endpoint_type" in create_endpoint.result' + - create_endpoint.result.vpc_endpoint_type == "Gateway" + - '"vpc_id" in create_endpoint.result' + - create_endpoint.result.vpc_id == vpc_id + + - name: Save Endpoint info in a fact + set_fact: + endpoint_id: '{{ create_endpoint.result.vpc_endpoint_id }}' + + # Pull info about the endpoints + - name: Fetch Endpoints (all) + ec2_vpc_endpoint_info: + query: endpoints + register: endpoint_info + - name: Assert success + assert: + that: + # We're fetching all endpoints, there's no guarantee what the values + # will be + - endpoint_info is successful + - '"vpc_endpoints" in endpoint_info' + - '"creation_timestamp" in first_endpoint' + - '"policy_document" in first_endpoint' + - '"route_table_ids" in first_endpoint' + - '"service_name" in first_endpoint' + - '"state" in first_endpoint' + - '"vpc_endpoint_id" in first_endpoint' + - '"vpc_id" in first_endpoint' + # Not yet documented, but returned + - '"dns_entries" in first_endpoint' + - '"groups" in first_endpoint' + - '"network_interface_ids" in first_endpoint' + - '"owner_id" in first_endpoint' + - '"private_dns_enabled" in first_endpoint' + - '"requester_managed" in first_endpoint' + - '"subnet_ids" in first_endpoint' + - '"tags" in first_endpoint' + - '"vpc_endpoint_type" in first_endpoint' + # Make sure our endpoint is included + - endpoint_id in 
( endpoint_info | community.general.json_query("vpc_endpoints[*].vpc_endpoint_id") + | list | flatten ) + vars: + first_endpoint: '{{ endpoint_info.vpc_endpoints[0] }}' + + - name: Fetch Endpoints (targeted by ID) + ec2_vpc_endpoint_info: + query: endpoints + vpc_endpoint_ids: '{{ endpoint_id }}' + register: endpoint_info + - name: Assert success + assert: + that: + - endpoint_info is successful + - '"vpc_endpoints" in endpoint_info' + - '"creation_timestamp" in first_endpoint' + - '"policy_document" in first_endpoint' + - '"route_table_ids" in first_endpoint' + - first_endpoint.route_table_ids | length == 0 + - '"service_name" in first_endpoint' + - first_endpoint.service_name == endpoint_service_a + - '"state" in first_endpoint' + - first_endpoint.state == "available" + - '"vpc_endpoint_id" in first_endpoint' + - first_endpoint.vpc_endpoint_id == endpoint_id + - '"vpc_id" in first_endpoint' + - first_endpoint.vpc_id == vpc_id + # Not yet documented, but returned + - '"dns_entries" in first_endpoint' + - '"groups" in first_endpoint' + - '"network_interface_ids" in first_endpoint' + - '"owner_id" in first_endpoint' + - '"private_dns_enabled" in first_endpoint' + - first_endpoint.private_dns_enabled == False + - '"requester_managed" in first_endpoint' + - first_endpoint.requester_managed == False + - '"subnet_ids" in first_endpoint' + - '"tags" in first_endpoint' + - '"vpc_endpoint_type" in first_endpoint' + vars: + first_endpoint: '{{ endpoint_info.vpc_endpoints[0] }}' + + - name: Fetch Endpoints (targeted by VPC) + ec2_vpc_endpoint_info: + query: endpoints + filters: + vpc-id: + - '{{ vpc_id }}' + register: endpoint_info + - name: Assert success + assert: + that: + - endpoint_info is successful + - '"vpc_endpoints" in endpoint_info' + - '"creation_timestamp" in first_endpoint' + - '"policy_document" in first_endpoint' + - '"route_table_ids" in first_endpoint' + - '"service_name" in first_endpoint' + - first_endpoint.service_name == endpoint_service_a + - '"state" in first_endpoint' + - first_endpoint.state == "available" + - '"vpc_endpoint_id" in first_endpoint' + - first_endpoint.vpc_endpoint_id == endpoint_id + - '"vpc_id" in first_endpoint' + - first_endpoint.vpc_id == vpc_id + # Not yet documented, but returned + - '"dns_entries" in first_endpoint' + - '"groups" in first_endpoint' + - '"network_interface_ids" in first_endpoint' + - '"owner_id" in first_endpoint' + - '"private_dns_enabled" in first_endpoint' + - first_endpoint.private_dns_enabled == False + - '"requester_managed" in first_endpoint' + - first_endpoint.requester_managed == False + - '"subnet_ids" in first_endpoint' + - '"tags" in first_endpoint' + - '"vpc_endpoint_type" in first_endpoint' + vars: + first_endpoint: '{{ endpoint_info.vpc_endpoints[0] }}' + + + # matches on parameters without explicitly passing the endpoint ID + - name: Create minimal endpoint - idempotency (check mode) + ec2_vpc_endpoint: + state: present + vpc_id: '{{ vpc_id }}' + service: '{{ endpoint_service_a }}' + register: create_endpoint_idem_check + check_mode: true + - assert: + that: + - create_endpoint_idem_check is not changed + + - name: Create minimal endpoint - idempotency + ec2_vpc_endpoint: + state: present + vpc_id: '{{ vpc_id }}' + service: '{{ endpoint_service_a }}' + register: create_endpoint_idem + - assert: + that: + - create_endpoint_idem is not changed + + - name: Delete minimal endpoint by ID (check_mode) + ec2_vpc_endpoint: + state: absent + vpc_endpoint_id: '{{ endpoint_id }}' + check_mode: true + register: 
endpoint_delete_check + - assert: + that: + - endpoint_delete_check is changed + + + - name: Delete minimal endpoint by ID + ec2_vpc_endpoint: + state: absent + vpc_endpoint_id: '{{ endpoint_id }}' + register: endpoint_delete_check + - assert: + that: + - endpoint_delete_check is changed + + - name: Delete minimal endpoint by ID - idempotency (check_mode) + ec2_vpc_endpoint: + state: absent + vpc_endpoint_id: '{{ endpoint_id }}' + check_mode: true + register: endpoint_delete_check + - assert: + that: + - endpoint_delete_check is not changed + + - name: Delete minimal endpoint by ID - idempotency + ec2_vpc_endpoint: + state: absent + vpc_endpoint_id: '{{ endpoint_id }}' + register: endpoint_delete_check + - assert: + that: + - endpoint_delete_check is not changed + + - name: Fetch Endpoints by ID (expect failed) + ec2_vpc_endpoint_info: + query: endpoints + vpc_endpoint_ids: '{{ endpoint_id }}' + ignore_errors: true + register: endpoint_info + - name: Assert endpoint does not exist + assert: + that: + - endpoint_info is successful + - '"does not exist" in endpoint_info.msg' + - endpoint_info.vpc_endpoints | length == 0 + + # Attempt to create an endpoint with a route table + - name: Create an endpoint with route table (check mode) + ec2_vpc_endpoint: + state: present + vpc_id: '{{ vpc_id }}' + service: '{{ endpoint_service_a }}' + route_table_ids: + - '{{ rtb_empty_id }}' + register: create_endpoint_check + check_mode: true + - name: Assert changed + assert: + that: + - create_endpoint_check is changed + + - name: Create an endpoint with route table + ec2_vpc_endpoint: + state: present + vpc_id: '{{ vpc_id }}' + service: '{{ endpoint_service_a }}' + route_table_ids: + - '{{ rtb_empty_id }}' + wait: true + register: create_rtb_endpoint + - name: Check standard return values + assert: + that: + - create_rtb_endpoint is changed + - '"result" in create_rtb_endpoint' + - '"creation_timestamp" in create_rtb_endpoint.result' + - '"dns_entries" in create_rtb_endpoint.result' + - '"groups" in create_rtb_endpoint.result' + - '"network_interface_ids" in create_rtb_endpoint.result' + - '"owner_id" in create_rtb_endpoint.result' + - '"policy_document" in create_rtb_endpoint.result' + - '"private_dns_enabled" in create_rtb_endpoint.result' + - '"route_table_ids" in create_rtb_endpoint.result' + - create_rtb_endpoint.result.route_table_ids | length == 1 + - create_rtb_endpoint.result.route_table_ids[0] == '{{ rtb_empty_id }}' + - create_rtb_endpoint.result.private_dns_enabled == False + - '"requester_managed" in create_rtb_endpoint.result' + - create_rtb_endpoint.result.requester_managed == False + - '"service_name" in create_rtb_endpoint.result' + - create_rtb_endpoint.result.service_name == endpoint_service_a + - '"state" in create_rtb_endpoint.result' + - create_rtb_endpoint.result.state == "available" + - '"vpc_endpoint_id" in create_rtb_endpoint.result' + - create_rtb_endpoint.result.vpc_endpoint_id.startswith("vpce-") + - '"vpc_endpoint_type" in create_rtb_endpoint.result' + - create_rtb_endpoint.result.vpc_endpoint_type == "Gateway" + - '"vpc_id" in create_rtb_endpoint.result' + - create_rtb_endpoint.result.vpc_id == vpc_id + + - name: Save Endpoint info in a fact + set_fact: + rtb_endpoint_id: '{{ create_rtb_endpoint.result.vpc_endpoint_id }}' + + - name: Create an endpoint with route table - idempotency (check mode) + ec2_vpc_endpoint: + state: present + vpc_id: '{{ vpc_id }}' + service: '{{ endpoint_service_a }}' + route_table_ids: + - '{{ rtb_empty_id }}' + register: create_endpoint_check + 
check_mode: true + - name: Assert not changed + assert: + that: + - create_endpoint_check is not changed + + - name: Create an endpoint with route table - idempotency + ec2_vpc_endpoint: + state: present + vpc_id: '{{ vpc_id }}' + service: '{{ endpoint_service_a }}' + route_table_ids: + - '{{ rtb_empty_id }}' + register: create_endpoint_check + - name: Assert not changed + assert: + that: + - create_endpoint_check is not changed + +# # Endpoint modifications are not yet supported by the module +# # Change the route table for the endpoint +# - name: Change the route table for the endpoint (check_mode) +# ec2_vpc_endpoint: +# state: present +# vpc_id: '{{ vpc_id }}' +# vpc_endpoint_id: "{{ rtb_endpoint_id }}" +# service: '{{ endpoint_service_a }}' +# route_table_ids: +# - '{{ rtb_igw_id }}' +# check_mode: True +# register: check_two_rtbs_endpoint +# +# - name: Assert second route table would be added +# assert: +# that: +# - check_two_rtbs_endpoint.changed +# +# - name: Change the route table for the endpoint +# ec2_vpc_endpoint: +# state: present +# vpc_id: '{{ vpc_id }}' +# vpc_endpoint_id: "{{ rtb_endpoint_id }}" +# service: '{{ endpoint_service_a }}' +# route_table_ids: +# - '{{ rtb_igw_id }}' +# register: two_rtbs_endpoint +# +# - name: Assert second route table would be added +# assert: +# that: +# - check_two_rtbs_endpoint.changed +# - two_rtbs_endpoint.result.route_table_ids | length == 1 +# - two_rtbs_endpoint.result.route_table_ids[0] == '{{ rtb_igw_id }}' +# +# - name: Change the route table for the endpoint - idempotency (check_mode) +# ec2_vpc_endpoint: +# state: present +# vpc_id: '{{ vpc_id }}' +# vpc_endpoint_id: "{{ rtb_endpoint_id }}" +# service: '{{ endpoint_service_a }}' +# route_table_ids: +# - '{{ rtb_igw_id }}' +# check_mode: True +# register: check_two_rtbs_endpoint +# +# - name: Assert route table would not change +# assert: +# that: +# - not check_two_rtbs_endpoint.changed +# +# - name: Change the route table for the endpoint - idempotency +# ec2_vpc_endpoint: +# state: present +# vpc_id: '{{ vpc_id }}' +# vpc_endpoint_id: "{{ rtb_endpoint_id }}" +# service: '{{ endpoint_service_a }}' +# route_table_ids: +# - '{{ rtb_igw_id }}' +# register: two_rtbs_endpoint +# +# - name: Assert route table would not change +# assert: +# that: +# - not check_two_rtbs_endpoint.changed + + - name: Tag the endpoint (check_mode) + ec2_vpc_endpoint: + state: present + vpc_id: '{{ vpc_id }}' + vpc_endpoint_id: '{{ rtb_endpoint_id }}' + service: '{{ endpoint_service_a }}' + route_table_ids: + - '{{ rtb_empty_id }}' + tags: + camelCase: helloWorld + PascalCase: HelloWorld + snake_case: hello_world + Title Case: Hello World + lowercase spaced: hello world + check_mode: true + register: check_tag_vpc_endpoint + + - name: Assert tags would have changed + assert: + that: + - check_tag_vpc_endpoint.changed + + - name: Tag the endpoint + ec2_vpc_endpoint: + state: present + vpc_id: '{{ vpc_id }}' + vpc_endpoint_id: '{{ rtb_endpoint_id }}' + service: '{{ endpoint_service_a }}' + route_table_ids: + - '{{ rtb_igw_id }}' + tags: + testPrefix: '{{ resource_prefix }}' + camelCase: helloWorld + PascalCase: HelloWorld + snake_case: hello_world + Title Case: Hello World + lowercase spaced: hello world + register: tag_vpc_endpoint + + - name: Assert tags are successful + assert: + that: + - tag_vpc_endpoint.changed + - tag_vpc_endpoint.result.tags | length == 6 + - endpoint_tags["testPrefix"] == resource_prefix + - endpoint_tags["camelCase"] == "helloWorld" + - endpoint_tags["PascalCase"] == 
"HelloWorld" + - endpoint_tags["snake_case"] == "hello_world" + - endpoint_tags["Title Case"] == "Hello World" + - endpoint_tags["lowercase spaced"] == "hello world" + vars: + endpoint_tags: "{{ tag_vpc_endpoint.result.tags | items2dict(key_name='Key',\ + \ value_name='Value') }}" + + - name: Query by tag + ec2_vpc_endpoint_info: + query: endpoints + filters: + tag:testPrefix: + - '{{ resource_prefix }}' + register: tag_result + + - name: Assert tag lookup found endpoint + assert: + that: + - tag_result is successful + - '"vpc_endpoints" in tag_result' + - first_endpoint.vpc_endpoint_id == rtb_endpoint_id + vars: + first_endpoint: '{{ tag_result.vpc_endpoints[0] }}' + + - name: Tag the endpoint - idempotency (check_mode) + ec2_vpc_endpoint: + state: present + vpc_id: '{{ vpc_id }}' + vpc_endpoint_id: '{{ rtb_endpoint_id }}' + service: '{{ endpoint_service_a }}' + route_table_ids: + - '{{ rtb_igw_id }}' + tags: + testPrefix: '{{ resource_prefix }}' + camelCase: helloWorld + PascalCase: HelloWorld + snake_case: hello_world + Title Case: Hello World + lowercase spaced: hello world + register: tag_vpc_endpoint_again + + - name: Assert tags would not change + assert: + that: + - not tag_vpc_endpoint_again.changed + + - name: Tag the endpoint - idempotency + ec2_vpc_endpoint: + state: present + vpc_id: '{{ vpc_id }}' + vpc_endpoint_id: '{{ rtb_endpoint_id }}' + service: '{{ endpoint_service_a }}' + route_table_ids: + - '{{ rtb_igw_id }}' + tags: + testPrefix: '{{ resource_prefix }}' + camelCase: helloWorld + PascalCase: HelloWorld + snake_case: hello_world + Title Case: Hello World + lowercase spaced: hello world + register: tag_vpc_endpoint_again + + - name: Assert tags would not change + assert: + that: + - not tag_vpc_endpoint_again.changed + + - name: Add a tag (check_mode) + ec2_vpc_endpoint: + state: present + vpc_id: '{{ vpc_id }}' + vpc_endpoint_id: '{{ rtb_endpoint_id }}' + service: '{{ endpoint_service_a }}' + route_table_ids: + - '{{ rtb_igw_id }}' + tags: + new_tag: ANewTag + check_mode: true + register: check_tag_vpc_endpoint + + - name: Assert tags would have changed + assert: + that: + - check_tag_vpc_endpoint.changed + + - name: Add a tag (purge_tags=False) + ec2_vpc_endpoint: + state: present + vpc_id: '{{ vpc_id }}' + vpc_endpoint_id: '{{ rtb_endpoint_id }}' + service: '{{ endpoint_service_a }}' + route_table_ids: + - '{{ rtb_igw_id }}' + tags: + new_tag: ANewTag + register: add_tag_vpc_endpoint + + - name: Assert tags changed + assert: + that: + - add_tag_vpc_endpoint.changed + - add_tag_vpc_endpoint.result.tags | length == 7 + - endpoint_tags["testPrefix"] == resource_prefix + - endpoint_tags["camelCase"] == "helloWorld" + - endpoint_tags["PascalCase"] == "HelloWorld" + - endpoint_tags["snake_case"] == "hello_world" + - endpoint_tags["Title Case"] == "Hello World" + - endpoint_tags["lowercase spaced"] == "hello world" + - endpoint_tags["new_tag"] == "ANewTag" + vars: + endpoint_tags: "{{ add_tag_vpc_endpoint.result.tags | items2dict(key_name='Key',\ + \ value_name='Value') }}" + + - name: Add a tag (purge_tags=True) + ec2_vpc_endpoint: + state: present + vpc_id: '{{ vpc_id }}' + vpc_endpoint_id: '{{ rtb_endpoint_id }}' + service: '{{ endpoint_service_a }}' + route_table_ids: + - '{{ rtb_igw_id }}' + tags: + another_new_tag: AnotherNewTag + purge_tags: true + register: purge_tag_vpc_endpoint + + - name: Assert tags changed + assert: + that: + - purge_tag_vpc_endpoint.changed + - purge_tag_vpc_endpoint.result.tags | length == 1 + - endpoint_tags["another_new_tag"] == 
"AnotherNewTag" + vars: + endpoint_tags: "{{ purge_tag_vpc_endpoint.result.tags | items2dict(key_name='Key',\ + \ value_name='Value') }}" + + - name: Delete minimal route table (no routes) + ec2_vpc_route_table: + state: absent + lookup: id + route_table_id: '{{ rtb_empty_id }}' + register: rtb_delete + - assert: + that: + - rtb_delete is changed + + - name: Delete minimal route table (IGW route) + ec2_vpc_route_table: + state: absent + lookup: id + route_table_id: '{{ rtb_igw_id }}' + - assert: + that: + - rtb_delete is changed + + - name: Delete route table endpoint by ID + ec2_vpc_endpoint: + state: absent + vpc_endpoint_id: '{{ rtb_endpoint_id }}' + register: endpoint_delete_check + - assert: + that: + - endpoint_delete_check is changed + + - name: Delete minimal endpoint by ID - idempotency (check_mode) + ec2_vpc_endpoint: + state: absent + vpc_endpoint_id: '{{ rtb_endpoint_id }}' + check_mode: true + register: endpoint_delete_check + - assert: + that: + - endpoint_delete_check is not changed + + - name: Delete endpoint by ID - idempotency + ec2_vpc_endpoint: + state: absent + vpc_endpoint_id: '{{ endpoint_id }}' + register: endpoint_delete_check + - assert: + that: + - endpoint_delete_check is not changed + + - name: Create interface endpoint + ec2_vpc_endpoint: + state: present + vpc_id: '{{ vpc_id }}' + service: '{{ endpoint_service_a }}' + vpc_endpoint_type: Interface + register: create_interface_endpoint + - name: Check that the interface endpoint was created properly + assert: + that: + - create_interface_endpoint is changed + - create_interface_endpoint.result.vpc_endpoint_type == "Interface" + - name: Delete interface endpoint + ec2_vpc_endpoint: + state: absent + vpc_endpoint_id: '{{ create_interface_endpoint.result.vpc_endpoint_id }}' + register: interface_endpoint_delete_check + - assert: + that: + - interface_endpoint_delete_check is changed + + - name: Create a subnet + ec2_vpc_subnet: + state: present + vpc_id: '{{ vpc_id }}' + az: "{{ aws_region}}a" + cidr: "{{ vpc_cidr }}" + register: interface_endpoint_create_subnet_check + + - name: Create a security group + ec2_group: + name: securitygroup-prodext + description: "security group for Ansible interface endpoint" + state: present + vpc_id: "{{ vpc.vpc.id }}" + rules: + - proto: tcp + from_port: 1 + to_port: 65535 + cidr_ip: 0.0.0.0/0 + register: interface_endpoint_create_sg_check + + - name: Create interface endpoint attached to a subnet + ec2_vpc_endpoint: + state: present + vpc_id: '{{ vpc_id }}' + service: '{{ endpoint_service_a }}' + vpc_endpoint_type: Interface + vpc_endpoint_subnets: "{{ interface_endpoint_create_subnet_check.subnet.id') }}" + vpc_endpoint_security_groups: "{{ interface_endpoint_create_sg_check.group_id }}" + register: create_interface_endpoint_with_sg_subnets + - name: Check that the interface endpoint was created properly + assert: + that: + - create_interface_endpoint_with_sg_subnets is changed + - create_interface_endpoint_with_sg_subnets.result.vpc_endpoint_type == "Interface" + + - name: Delete interface endpoint + ec2_vpc_endpoint: + state: absent + vpc_endpoint_id: "{{ create_interface_endpoint_with_sg_subnets.result.vpc_endpoint_id }}" + register: create_interface_endpoint_with_sg_subnets_delete_check + - assert: + that: + - create_interface_endpoint_with_sg_subnets_delete_check is changed + + # ============================================================ + # BEGIN POST-TEST CLEANUP + always: + # Delete the routes first - you can't delete an endpoint with a route + # attached. 
+ - name: Delete minimal route table (no routes) + ec2_vpc_route_table: + state: absent + lookup: id + route_table_id: '{{ rtb_creation_empty.route_table.id }}' + ignore_errors: true + + - name: Delete minimal route table (IGW route) + ec2_vpc_route_table: + state: absent + lookup: id + route_table_id: '{{ rtb_creation_igw.route_table.id }}' + ignore_errors: true + + - name: Delete endpoint + ec2_vpc_endpoint: + state: absent + vpc_endpoint_id: '{{ create_endpoint.result.vpc_endpoint_id }}' + ignore_errors: true + + - name: Delete endpoint + ec2_vpc_endpoint: + state: absent + vpc_endpoint_id: '{{ create_rtb_endpoint.result.vpc_endpoint_id }}' + ignore_errors: true + + - name: Query any remaining endpoints we created (idempotency work is ongoing) # FIXME + ec2_vpc_endpoint_info: + query: endpoints + filters: + vpc-id: + - '{{ vpc_id }}' + register: test_endpoints + + - name: Delete all endpoints + ec2_vpc_endpoint: + state: absent + vpc_endpoint_id: '{{ item.vpc_endpoint_id }}' + with_items: '{{ test_endpoints.vpc_endpoints }}' + ignore_errors: true + + - name: Remove IGW + ec2_vpc_igw: + state: absent + vpc_id: '{{ vpc_id }}' + register: igw_deletion + retries: 10 + delay: 5 + until: igw_deletion is success + ignore_errors: yes + + - name: Remove VPC + ec2_vpc_net: + state: absent + name: '{{ vpc_name }}' + cidr_block: '{{ vpc_cidr }}' + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/aliases new file mode 100644 index 000000000..760a04f5d --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/aliases @@ -0,0 +1,2 @@ +cloud/aws +ec2_vpc_endpoint_service_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/defaults/main.yml new file mode 100644 index 000000000..445cc7f3c --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/defaults/main.yml @@ -0,0 +1,3 @@ +search_service_names: +- 'com.amazonaws.{{ aws_region }}.s3' +- 'com.amazonaws.{{ aws_region }}.ec2' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/meta/main.yml new file mode 100644 index 000000000..32cf5dda7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/tasks/main.yml new file mode 100644 index 000000000..22b290a34 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/tasks/main.yml @@ -0,0 +1,135 @@ +--- +- module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + collections: + - amazon.aws + - community.aws + block: + + - name: 'List all available services (Check Mode)' + ec2_vpc_endpoint_service_info: + check_mode: True + register: services_check + + - name: 
'Verify services (Check Mode)' + vars: + first_service: '{{ services_check.service_details[0] }}' + assert: + that: + - services_check is successful + - services_check is not changed + - '"service_names" in services_check' + - '"service_details" in services_check' + - '"acceptance_required" in first_service' + - '"availability_zones" in first_service' + - '"base_endpoint_dns_names" in first_service' + - '"manages_vpc_endpoints" in first_service' + - '"owner" in first_service' + - '"private_dns_name" in first_service' + - '"private_dns_name_verification_state" in first_service' + - '"service_id" in first_service' + - '"service_name" in first_service' + - '"service_type" in first_service' + - '"tags" in first_service' + - '"vpc_endpoint_policy_supported" in first_service' + + - name: 'List all available services' + ec2_vpc_endpoint_service_info: + register: services_info + + - name: 'Verify services' + vars: + first_service: '{{ services_info.service_details[0] }}' + assert: + that: + - services_info is successful + - services_info is not changed + - '"service_names" in services_info' + - '"service_details" in services_info' + - '"acceptance_required" in first_service' + - '"availability_zones" in first_service' + - '"base_endpoint_dns_names" in first_service' + - '"manages_vpc_endpoints" in first_service' + - '"owner" in first_service' + - '"private_dns_name" in first_service' + - '"private_dns_name_verification_state" in first_service' + - '"service_id" in first_service' + - '"service_name" in first_service' + - '"service_type" in first_service' + - '"tags" in first_service' + - '"vpc_endpoint_policy_supported" in first_service' + + - name: 'Limit services by name' + ec2_vpc_endpoint_service_info: + service_names: '{{ search_service_names }}' + register: services_info + + - name: 'Verify services' + vars: + first_service: '{{ services_info.service_details[0] }}' + # The same service sometimes pops up twice. 
s3 for example has + # s3.us-east-1.amazonaws.com and s3.us-east-1.vpce.amazonaws.com which are + # part of com.amazonaws.us-east-1.s3 so we need to run the results through + # the unique filter to know if we've got what we think we have + unique_names: '{{ services_info.service_names | unique | list }}' + unique_detail_names: '{{ services_info.service_details | map(attribute="service_name") | unique | list }}' + assert: + that: + - services_info is successful + - services_info is not changed + - '"service_names" in services_info' + - (unique_names | length) == (search_service_names | length) + - (unique_detail_names | length ) == (search_service_names | length) + - (unique_names | difference(search_service_names) | length) == 0 + - (unique_detail_names | difference(search_service_names) | length) == 0 + - '"service_details" in services_info' + - '"acceptance_required" in first_service' + - '"availability_zones" in first_service' + - '"base_endpoint_dns_names" in first_service' + - '"manages_vpc_endpoints" in first_service' + - '"owner" in first_service' + - '"private_dns_name" in first_service' + - '"private_dns_name_verification_state" in first_service' + - '"service_id" in first_service' + - '"service_name" in first_service' + - '"service_type" in first_service' + - '"tags" in first_service' + - '"vpc_endpoint_policy_supported" in first_service' + + - name: 'Grab single service details to test filters' + set_fact: + example_service: '{{ services_info.service_details[0] }}' + + - name: 'Limit services by filter' + ec2_vpc_endpoint_service_info: + filters: + service-name: '{{ example_service.service_name }}' + register: filtered_service + + - name: 'Verify services' + vars: + first_service: '{{ filtered_service.service_details[0] }}' + assert: + that: + - filtered_service is successful + - filtered_service is not changed + - '"service_names" in filtered_service' + - filtered_service.service_names | length == 1 + - '"service_details" in filtered_service' + - filtered_service.service_details | length == 1 + - '"acceptance_required" in first_service' + - '"availability_zones" in first_service' + - '"base_endpoint_dns_names" in first_service' + - '"manages_vpc_endpoints" in first_service' + - '"owner" in first_service' + - '"private_dns_name" in first_service' + - '"private_dns_name_verification_state" in first_service' + - '"service_id" in first_service' + - '"service_name" in first_service' + - '"service_type" in first_service' + - '"tags" in first_service' + - '"vpc_endpoint_policy_supported" in first_service' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/aliases new file mode 100644 index 000000000..877a442d7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/aliases @@ -0,0 +1,3 @@ +cloud/aws + +ec2_vpc_igw_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/defaults/main.yml new file mode 100644 index 000000000..a4590b4c0 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/defaults/main.yml @@ -0,0 +1,3 @@ +vpc_name: '{{ resource_prefix }}-vpc' +vpc_seed: '{{ resource_prefix }}' +vpc_cidr: 10.{{ 256 | random(seed=vpc_seed) }}.0.0/16 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/meta/main.yml 
b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/meta/main.yml new file mode 100644 index 000000000..32cf5dda7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/tasks/main.yml new file mode 100644 index 000000000..05b15d0b7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/tasks/main.yml @@ -0,0 +1,550 @@ +- name: ec2_vpc_igw tests + module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + # ============================================================ + - name: Fetch IGWs in check_mode + ec2_vpc_igw_info: + register: igw_info + check_mode: true + - name: Assert success + assert: + that: + - igw_info is successful + - '"internet_gateways" in igw_info' + + # ============================================================ + - name: Create a VPC + ec2_vpc_net: + name: '{{ vpc_name }}' + state: present + cidr_block: '{{ vpc_cidr }}' + tags: + Name: '{{ resource_prefix }}-vpc' + Description: Created by ansible-test + register: vpc_result + - name: Assert success + assert: + that: + - vpc_result is successful + - '"vpc" in vpc_result' + - '"id" in vpc_result.vpc' + - vpc_result.vpc.state == 'available' + - '"tags" in vpc_result.vpc' + - vpc_result.vpc.tags | length == 2 + - vpc_result.vpc.tags["Name"] == "{{ resource_prefix }}-vpc" + - vpc_result.vpc.tags["Description"] == "Created by ansible-test" + + # ============================================================ + - name: Search for internet gateway by VPC - no matches + ec2_vpc_igw_info: + filters: + attachment.vpc-id: '{{ vpc_result.vpc.id }}' + register: igw_info + + - name: Assert success + assert: + that: + - igw_info is successful + - '"internet_gateways" in igw_info' + - (igw_info.internet_gateways | length) == 0 + + # ============================================================ + - name: Create internet gateway (expected changed=true) - CHECK_MODE + ec2_vpc_igw: + state: present + vpc_id: '{{ vpc_result.vpc.id }}' + tags: + tag_one: '{{ resource_prefix }} One' + Tag Two: two {{ resource_prefix }} + register: vpc_igw_create + check_mode: yes + + - name: Assert creation would happen (expected changed=true) - CHECK_MODE + assert: + that: + - vpc_igw_create is changed + + - name: Create internet gateway (expected changed=true) + ec2_vpc_igw: + state: present + vpc_id: '{{ vpc_result.vpc.id }}' + tags: + tag_one: '{{ resource_prefix }} One' + Tag Two: two {{ resource_prefix }} + register: vpc_igw_create + + - name: Assert creation happened (expected changed=true) + assert: + that: + - vpc_igw_create is changed + - vpc_igw_create.gateway_id.startswith("igw-") + - vpc_igw_create.vpc_id == vpc_result.vpc.id + - '"tags" in vpc_igw_create' + - vpc_igw_create.tags | length == 2 + - vpc_igw_create.tags["tag_one"] == '{{ resource_prefix }} One' + - vpc_igw_create.tags["Tag Two"] == 'two {{ resource_prefix }}' + - '"gateway_id" in vpc_igw_create' + + # ============================================================ + - name: Save IDs for later + set_fact: + igw_id: '{{ vpc_igw_create.gateway_id }}' + vpc_id: '{{ vpc_result.vpc.id }}' + + - name: Search for internet gateway by VPC + ec2_vpc_igw_info: 
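+ # 'convert_tags: false' below requests the tags in raw boto3 form, a list of + # {key: ..., value: ...} dicts rather than the usual dict; the assertions that + # follow check that list layout explicitly.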
+ filters: + attachment.vpc-id: '{{ vpc_id }}' + convert_tags: false + register: igw_info + + - name: Check standard IGW details + assert: + that: + - '"internet_gateways" in igw_info' + - igw_info.internet_gateways | length == 1 + - '"attachments" in current_igw' + - current_igw.attachments | length == 1 + - '"state" in current_igw.attachments[0]' + - current_igw.attachments[0].state == "available" + - '"vpc_id" in current_igw.attachments[0]' + - current_igw.attachments[0].vpc_id == vpc_id + - '"internet_gateway_id" in current_igw' + - current_igw.internet_gateway_id == igw_id + - '"tags" in current_igw' + - current_igw.tags | length == 2 + - '"key" in current_igw.tags[0]' + - '"value" in current_igw.tags[0]' + - '"key" in current_igw.tags[1]' + - '"value" in current_igw.tags[1]' + # Order isn't guaranteed in boto3 style, so just check the keys and + # values we expect are in there. + - current_igw.tags[0].key in ["tag_one", "Tag Two"] + - current_igw.tags[1].key in ["tag_one", "Tag Two"] + - current_igw.tags[0].value in [resource_prefix + " One", "two " + resource_prefix] + - current_igw.tags[1].value in [resource_prefix + " One", "two " + resource_prefix] + vars: + current_igw: '{{ igw_info.internet_gateways[0] }}' + + - name: Fetch IGW by ID + ec2_vpc_igw_info: + internet_gateway_ids: '{{ igw_id }}' + register: igw_info + + - name: Check standard IGW details + assert: + that: + - '"internet_gateways" in igw_info' + - igw_info.internet_gateways | length == 1 + - '"attachments" in current_igw' + - current_igw.attachments | length == 1 + - '"state" in current_igw.attachments[0]' + - current_igw.attachments[0].state == "available" + - '"vpc_id" in current_igw.attachments[0]' + - current_igw.attachments[0].vpc_id == vpc_id + - '"internet_gateway_id" in current_igw' + - current_igw.internet_gateway_id == igw_id + - '"tags" in current_igw' + - current_igw.tags | length == 2 + - '"tag_one" in current_igw.tags' + - '"Tag Two" in current_igw.tags' + - current_igw.tags["tag_one"] == '{{ resource_prefix }} One' + - current_igw.tags["Tag Two"] == 'two {{ resource_prefix }}' + vars: + current_igw: '{{ igw_info.internet_gateways[0] }}' + + - name: Fetch IGW by ID (list) + ec2_vpc_igw_info: + internet_gateway_ids: + - '{{ igw_id }}' + register: igw_info + + - name: Check standard IGW details + assert: + that: + - '"internet_gateways" in igw_info' + - igw_info.internet_gateways | length == 1 + - '"attachments" in current_igw' + - current_igw.attachments | length == 1 + - '"state" in current_igw.attachments[0]' + - current_igw.attachments[0].state == "available" + - '"vpc_id" in current_igw.attachments[0]' + - current_igw.attachments[0].vpc_id == vpc_id + - '"internet_gateway_id" in current_igw' + - current_igw.internet_gateway_id == igw_id + - '"tags" in current_igw' + vars: + current_igw: '{{ igw_info.internet_gateways[0] }}' + + - name: Attempt to recreate internet gateway on VPC (expected changed=false) - CHECK_MODE + ec2_vpc_igw: + state: present + vpc_id: '{{ vpc_result.vpc.id }}' + register: vpc_igw_recreate + check_mode: yes + + - name: Assert recreation would do nothing (expected changed=false) - CHECK_MODE + assert: + that: + - vpc_igw_recreate is not changed + - vpc_igw_recreate.gateway_id == igw_id + - vpc_igw_recreate.vpc_id == vpc_id + - '"tags" in vpc_igw_create' + - vpc_igw_create.tags | length == 2 + - vpc_igw_create.tags["tag_one"] == '{{ resource_prefix }} One' + - vpc_igw_create.tags["Tag Two"] == 'two {{ resource_prefix }}' + + - name: Attempt to recreate internet gateway on VPC 
(expected changed=false) + ec2_vpc_igw: + state: present + vpc_id: '{{ vpc_result.vpc.id }}' + register: vpc_igw_recreate + + - name: Assert recreation did nothing (expected changed=false) + assert: + that: + - vpc_igw_recreate is not changed + - vpc_igw_recreate.gateway_id == igw_id + - vpc_igw_recreate.vpc_id == vpc_id + - '"tags" in vpc_igw_create' + - vpc_igw_create.tags | length == 2 + - vpc_igw_create.tags["tag_one"] == '{{ resource_prefix }} One' + - vpc_igw_create.tags["Tag Two"] == 'two {{ resource_prefix }}' + + # ============================================================ + - name: Update the tags (no change) - CHECK_MODE + ec2_vpc_igw: + state: present + vpc_id: '{{ vpc_result.vpc.id }}' + tags: + tag_one: '{{ resource_prefix }} One' + Tag Two: two {{ resource_prefix }} + register: vpc_igw_recreate + check_mode: yes + + - name: Assert tag update would do nothing (expected changed=false) - CHECK_MODE + assert: + that: + - vpc_igw_recreate is not changed + - vpc_igw_recreate.gateway_id == igw_id + - vpc_igw_recreate.vpc_id == vpc_id + - '"tags" in vpc_igw_recreate' + - vpc_igw_recreate.tags | length == 2 + - vpc_igw_recreate.tags["tag_one"] == '{{ resource_prefix }} One' + - vpc_igw_recreate.tags["Tag Two"] == 'two {{ resource_prefix }}' + + - name: Update the tags (no change) + ec2_vpc_igw: + state: present + vpc_id: '{{ vpc_result.vpc.id }}' + tags: + tag_one: '{{ resource_prefix }} One' + Tag Two: two {{ resource_prefix }} + register: vpc_igw_recreate + + - name: Assert tag update did nothing (expected changed=false) + assert: + that: + - vpc_igw_recreate is not changed + - vpc_igw_recreate.gateway_id == igw_id + - vpc_igw_recreate.vpc_id == vpc_id + - '"tags" in vpc_igw_recreate' + - vpc_igw_recreate.tags | length == 2 + - vpc_igw_recreate.tags["tag_one"] == '{{ resource_prefix }} One' + - vpc_igw_recreate.tags["Tag Two"] == 'two {{ resource_prefix }}' + + # ============================================================ + - name: Update the tags (remove and add) - CHECK_MODE + ec2_vpc_igw: + state: present + vpc_id: '{{ vpc_result.vpc.id }}' + tags: + tag_three: '{{ resource_prefix }} Three' + Tag Two: two {{ resource_prefix }} + register: vpc_igw_update + check_mode: yes + + - name: Assert tag update would happen (expected changed=true) - CHECK_MODE + assert: + that: + - vpc_igw_update is changed + - vpc_igw_update.gateway_id == igw_id + - vpc_igw_update.vpc_id == vpc_id + - '"tags" in vpc_igw_update' + - vpc_igw_update.tags | length == 2 + + - name: Update the tags (remove and add) + ec2_vpc_igw: + state: present + vpc_id: '{{ vpc_result.vpc.id }}' + tags: + tag_three: '{{ resource_prefix }} Three' + Tag Two: two {{ resource_prefix }} + register: vpc_igw_update + + - name: Assert tags are updated (expected changed=true) + assert: + that: + - vpc_igw_update is changed + - vpc_igw_update.gateway_id == igw_id + - vpc_igw_update.vpc_id == vpc_id + - '"tags" in vpc_igw_update' + - vpc_igw_update.tags | length == 2 + - vpc_igw_update.tags["tag_three"] == '{{ resource_prefix }} Three' + - vpc_igw_update.tags["Tag Two"] == 'two {{ resource_prefix }}' + + # ============================================================ + - name: Update the tags add without purge - CHECK_MODE + ec2_vpc_igw: + state: present + vpc_id: '{{ vpc_result.vpc.id }}' + purge_tags: no + tags: + tag_one: '{{ resource_prefix }} One' + register: vpc_igw_update + check_mode: yes + + - name: Assert tags would be added - CHECK_MODE + assert: + that: + - vpc_igw_update is changed + - vpc_igw_update.gateway_id == igw_id 
+ - vpc_igw_update.vpc_id == vpc_id + - '"tags" in vpc_igw_update' + + - name: Update the tags add without purge + ec2_vpc_igw: + state: present + vpc_id: '{{ vpc_result.vpc.id }}' + purge_tags: no + tags: + tag_one: '{{ resource_prefix }} One' + register: vpc_igw_update + + - name: Assert tags added + assert: + that: + - vpc_igw_update is changed + - vpc_igw_update.gateway_id == igw_id + - vpc_igw_update.vpc_id == vpc_id + - '"tags" in vpc_igw_update' + - vpc_igw_update.tags | length == 3 + - vpc_igw_update.tags["tag_one"] == '{{ resource_prefix }} One' + - vpc_igw_update.tags["tag_three"] == '{{ resource_prefix }} Three' + - vpc_igw_update.tags["Tag Two"] == 'two {{ resource_prefix }}' + + + # ============================================================ + - name: Update with CamelCase tags - CHECK_MODE + ec2_vpc_igw: + state: present + vpc_id: '{{ vpc_result.vpc.id }}' + tags: + lowercase spaced: "hello cruel world" + Title Case: "Hello Cruel World" + CamelCase: "SimpleCamelCase" + snake_case: "simple_snake_case" + register: vpc_igw_update + check_mode: yes + + - name: Assert tag update would happen (expected changed=true) - CHECK_MODE + assert: + that: + - vpc_igw_update is changed + - vpc_igw_update.gateway_id == igw_id + - vpc_igw_update.vpc_id == vpc_id + - '"tags" in vpc_igw_update' + + - name: Update the tags - remove and add + ec2_vpc_igw: + state: present + vpc_id: '{{ vpc_result.vpc.id }}' + tags: + lowercase spaced: "hello cruel world" + Title Case: "Hello Cruel World" + CamelCase: "SimpleCamelCase" + snake_case: "simple_snake_case" + register: vpc_igw_update + + - name: assert tags are updated (expected changed=true) + assert: + that: + - vpc_igw_update is changed + - vpc_igw_update.gateway_id == igw_id + - vpc_igw_update.vpc_id == vpc_id + - '"tags" in vpc_igw_update' + - vpc_igw_update.tags | length == 4 + - vpc_igw_update.tags["lowercase spaced"] == 'hello cruel world' + - vpc_igw_update.tags["Title Case"] == 'Hello Cruel World' + - vpc_igw_update.tags["CamelCase"] == 'SimpleCamelCase' + - vpc_igw_update.tags["snake_case"] == 'simple_snake_case' + + # ============================================================ + - name: Gather information about a filtered list of Internet Gateways using tags + ec2_vpc_igw_info: + filters: + tag:Title Case: "Hello Cruel World" + register: igw_info + + - name: Assert success + assert: + that: + - igw_info is successful + - '"internet_gateways" in igw_info' + - igw_info.internet_gateways | selectattr("internet_gateway_id",'equalto',"{{ + igw_id }}") + + - name: Gather information about a filtered list of Internet Gateways using tags - CHECK_MODE + ec2_vpc_igw_info: + filters: + tag:Title Case: "Hello Cruel World" + register: igw_info + check_mode: yes + + - name: Assert success - CHECK_MODE + assert: + that: + - igw_info is successful + - '"internet_gateways" in igw_info' + - igw_info.internet_gateways | selectattr("internet_gateway_id",'equalto',"{{ + igw_id }}") + + # ============================================================ + - name: Gather information about a filtered list of Internet Gateways using tags (no match) + ec2_vpc_igw_info: + filters: + tag:tag_one: '{{ resource_prefix }} One' + register: igw_info + + - name: Assert success + assert: + that: + - igw_info is successful + - '"internet_gateways" in igw_info' + - igw_info.internet_gateways | length == 0 + + - name: Gather information about a filtered list of Internet Gateways using tags (no match) - CHECK_MODE + ec2_vpc_igw_info: + filters: + tag:tag_one: '{{ resource_prefix }} 
One' + register: igw_info + check_mode: yes + + - name: Assert success - CHECK_MODE + assert: + that: + - igw_info is successful + - '"internet_gateways" in igw_info' + - igw_info.internet_gateways | length == 0 + + # ============================================================ + - name: Remove all tags - CHECK_MODE + ec2_vpc_igw: + state: present + vpc_id: '{{ vpc_result.vpc.id }}' + tags: {} + register: vpc_igw_update + check_mode: yes + + - name: Assert tags would be removed - CHECK_MODE + assert: + that: + - vpc_igw_update is changed + + - name: Remove all tags + ec2_vpc_igw: + state: present + vpc_id: '{{ vpc_result.vpc.id }}' + tags: {} + register: vpc_igw_update + + - name: Assert tags removed + assert: + that: + - vpc_igw_update is changed + - vpc_igw_update.gateway_id == igw_id + - vpc_igw_update.vpc_id == vpc_id + - '"tags" in vpc_igw_update' + - vpc_igw_update.tags | length == 0 + + # ============================================================ + - name: Test state=absent (expected changed=true) - CHECK_MODE + ec2_vpc_igw: + state: absent + vpc_id: '{{ vpc_result.vpc.id }}' + register: vpc_igw_delete + check_mode: yes + + - name: Assert state=absent (expected changed=true) - CHECK_MODE + assert: + that: + - vpc_igw_delete is changed + + - name: Test state=absent (expected changed=true) + ec2_vpc_igw: + state: absent + vpc_id: '{{ vpc_result.vpc.id }}' + register: vpc_igw_delete + + - name: Assert state=absent (expected changed=true) + assert: + that: + - vpc_igw_delete is changed + + # ============================================================ + - name: Fetch IGW by ID (list) + ec2_vpc_igw_info: + internet_gateway_ids: + - '{{ igw_id }}' + register: igw_info + ignore_errors: true + + - name: Check IGW does not exist + assert: + that: + # Deliberate choice not to change behaviour when searching by ID + - igw_info is failed + + # ============================================================ + - name: Test state=absent when already deleted (expected changed=false) - CHECK_MODE + ec2_vpc_igw: + state: absent + vpc_id: '{{ vpc_result.vpc.id }}' + register: vpc_igw_delete + check_mode: yes + + - name: Assert state=absent (expected changed=false) - CHECK_MODE + assert: + that: + - vpc_igw_delete is not changed + + - name: Test state=absent when already deleted (expected changed=false) + ec2_vpc_igw: + state: absent + vpc_id: '{{ vpc_result.vpc.id }}' + register: vpc_igw_delete + + - name: Assert state=absent (expected changed=false) + assert: + that: + - vpc_igw_delete is not changed + + always: + # ============================================================ + - name: Tidy up IGW + ec2_vpc_igw: + state: absent + vpc_id: '{{ vpc_result.vpc.id }}' + ignore_errors: true + + - name: Tidy up VPC + ec2_vpc_net: + name: '{{ vpc_name }}' + state: absent + cidr_block: '{{ vpc_cidr }}' + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/aliases new file mode 100644 index 000000000..5a9dd5bcd --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/aliases @@ -0,0 +1,5 @@ +time=10m + +cloud/aws + +ec2_vpc_nat_gateway_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/defaults/main.yml new file mode 100644 index 000000000..3794da102 --- /dev/null +++
b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/defaults/main.yml @@ -0,0 +1,4 @@ +vpc_name: '{{ resource_prefix }}-vpc' +vpc_seed: '{{ resource_prefix }}' +vpc_cidr: 10.0.0.0/16 +subnet_cidr: 10.0.{{ 256 | random(seed=vpc_seed) }}.0/24 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/meta/main.yml new file mode 100644 index 000000000..32cf5dda7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/tasks/main.yml new file mode 100644 index 000000000..501cccaf9 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/tasks/main.yml @@ -0,0 +1,978 @@ +- name: ec2_vpc_nat_gateway tests + module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + # ============================================================ + - name: Create a VPC + ec2_vpc_net: + name: '{{ vpc_name }}' + state: present + cidr_block: '{{ vpc_cidr }}' + register: vpc_result + + - name: Assert success + assert: + that: + - vpc_result is successful + - '"vpc" in vpc_result' + - '"cidr_block" in vpc_result.vpc' + - vpc_result.vpc.cidr_block == vpc_cidr + - '"id" in vpc_result.vpc' + - vpc_result.vpc.id.startswith("vpc-") + - '"state" in vpc_result.vpc' + - vpc_result.vpc.state == 'available' + - '"tags" in vpc_result.vpc' + + - name: 'Set fact: VPC ID' + set_fact: + vpc_id: '{{ vpc_result.vpc.id }}' + + + # ============================================================ + - name: Allocate a new EIP + ec2_eip: + in_vpc: true + reuse_existing_ip_allowed: true + tag_name: FREE + register: eip_result + + - name: Assert success + assert: + that: + - eip_result is successful + - '"allocation_id" in eip_result' + - eip_result.allocation_id.startswith("eipalloc-") + - '"public_ip" in eip_result' + + - name: 'set fact: EIP allocation ID and EIP public IP' + set_fact: + eip_address: '{{ eip_result.public_ip }}' + allocation_id: '{{ eip_result.allocation_id }}' + + + # ============================================================ + - name: Create subnet and associate to the VPC + ec2_vpc_subnet: + state: present + vpc_id: '{{ vpc_id }}' + cidr: '{{ subnet_cidr }}' + register: subnet_result + + - name: Assert success + assert: + that: + - subnet_result is successful + - '"subnet" in subnet_result' + - '"cidr_block" in subnet_result.subnet' + - subnet_result.subnet.cidr_block == subnet_cidr + - '"id" in subnet_result.subnet' + - subnet_result.subnet.id.startswith("subnet-") + - '"state" in subnet_result.subnet' + - subnet_result.subnet.state == 'available' + - '"tags" in subnet_result.subnet' + - subnet_result.subnet.vpc_id == vpc_id + + - name: 'set fact: VPC subnet ID' + set_fact: + subnet_id: '{{ subnet_result.subnet.id }}' + + + # ============================================================ + - name: Search for NAT gateways by subnet (no matches) - CHECK_MODE + ec2_vpc_nat_gateway_info: + filters: + subnet-id: '{{ subnet_id }}' + state: [available] + register: existing_ngws + check_mode: yes + + - name: Assert no NAT gateway 
found - CHECK_MODE + assert: + that: + - existing_ngws is successful + - (existing_ngws.result|length) == 0 + + - name: Search for NAT gateways by subnet - no matches + ec2_vpc_nat_gateway_info: + filters: + subnet-id: '{{ subnet_id }}' + state: [available] + register: existing_ngws + + - name: Assert no NAT gateway found + assert: + that: + - existing_ngws is successful + - (existing_ngws.result|length) == 0 + + + # ============================================================ + - name: Create IGW + ec2_vpc_igw: + vpc_id: '{{ vpc_id }}' + register: create_igw + + - name: Assert success + assert: + that: + - create_igw is successful + - create_igw.gateway_id.startswith("igw-") + - create_igw.vpc_id == vpc_id + - '"gateway_id" in create_igw' + + + # ============================================================ + - name: Create new NAT gateway with eip allocation-id - CHECK_MODE + ec2_vpc_nat_gateway: + subnet_id: '{{ subnet_id }}' + allocation_id: '{{ allocation_id }}' + wait: yes + register: create_ngw + check_mode: yes + + - name: Assert creation happened (expected changed=true) - CHECK_MODE + assert: + that: + - create_ngw.changed + + - name: Create new NAT gateway with eip allocation-id + ec2_vpc_nat_gateway: + subnet_id: '{{ subnet_id }}' + allocation_id: '{{ allocation_id }}' + wait: yes + register: create_ngw + + - name: Assert creation happened (expected changed=true) + assert: + that: + - create_ngw.changed + - '"create_time" in create_ngw' + - '"nat_gateway_addresses" in create_ngw' + - '"nat_gateway_id" in create_ngw' + - create_ngw.nat_gateway_addresses[0].allocation_id == allocation_id + - create_ngw.nat_gateway_id.startswith("nat-") + - '"state" in create_ngw' + - create_ngw.state == 'available' + - '"subnet_id" in create_ngw' + - create_ngw.subnet_id == subnet_id + - '"tags" in create_ngw' + - '"vpc_id" in create_ngw' + - create_ngw.vpc_id == vpc_id + + - name: 'Set facts: NAT gateway ID' + set_fact: + nat_gateway_id: '{{ create_ngw.nat_gateway_id }}' + network_interface_id: '{{ create_ngw.nat_gateway_addresses[0].network_interface_id }}' + + + # ============================================================ + - name: Get NAT gateway with specific filters (state and subnet) + ec2_vpc_nat_gateway_info: + filters: + subnet-id: '{{ subnet_id }}' + state: [available] + register: available_ngws + + - name: Assert success + assert: + that: + - available_ngws is successful + - available_ngws.result | length == 1 + - '"create_time" in first_ngw' + - '"nat_gateway_addresses" in first_ngw' + - '"nat_gateway_id" in first_ngw' + - first_ngw.nat_gateway_id == nat_gateway_id + - '"state" in first_ngw' + - first_ngw.state == 'available' + - '"subnet_id" in first_ngw' + - first_ngw.subnet_id == subnet_id + - '"tags" in first_ngw' + - '"vpc_id" in first_ngw' + - first_ngw.vpc_id == vpc_id + vars: + first_ngw: '{{ available_ngws.result[0] }}' + + # ============================================================ + - name: Trying this again for idempotency - create new NAT gateway with eip allocation-id + - CHECK_MODE + ec2_vpc_nat_gateway: + subnet_id: '{{ subnet_id }}' + allocation_id: '{{ allocation_id }}' + wait: yes + register: create_ngw + check_mode: yes + + - name: Assert recreation would do nothing (expected changed=false) - CHECK_MODE + assert: + that: + - not create_ngw.changed + - '"create_time" in create_ngw' + - '"nat_gateway_addresses" in create_ngw' + - '"nat_gateway_id" in create_ngw' + - create_ngw.nat_gateway_addresses[0].allocation_id == allocation_id + -
create_ngw.nat_gateway_id.startswith("nat-") + - '"state" in create_ngw' + - create_ngw.state == 'available' + - '"subnet_id" in create_ngw' + - create_ngw.subnet_id == subnet_id + - '"tags" in create_ngw' + - '"vpc_id" in create_ngw' + - create_ngw.vpc_id == vpc_id + + - name: Trying this again for idempotency - create new NAT gateway with eip allocation-id + ec2_vpc_nat_gateway: + subnet_id: '{{ subnet_id }}' + allocation_id: '{{ allocation_id }}' + wait: yes + register: create_ngw + + - name: Assert recreation would do nothing (expected changed=false) + assert: + that: + - not create_ngw.changed + - '"create_time" in create_ngw' + - '"nat_gateway_addresses" in create_ngw' + - '"nat_gateway_id" in create_ngw' + - create_ngw.nat_gateway_addresses[0].allocation_id == allocation_id + - create_ngw.nat_gateway_id.startswith("nat-") + - '"state" in create_ngw' + - create_ngw.state == 'available' + - '"subnet_id" in create_ngw' + - create_ngw.subnet_id == subnet_id + - '"tags" in create_ngw' + - '"vpc_id" in create_ngw' + - create_ngw.vpc_id == vpc_id + + + # ============================================================ + - name: Create new NAT gateway only if one does not exist already - CHECK_MODE + ec2_vpc_nat_gateway: + if_exist_do_not_create: yes + subnet_id: '{{ subnet_id }}' + wait: yes + register: create_ngw + check_mode: yes + + - name: Assert recreation would do nothing (expected changed=false) - CHECK_MODE + assert: + that: + - not create_ngw.changed + - '"create_time" in create_ngw' + - '"nat_gateway_addresses" in create_ngw' + - '"nat_gateway_id" in create_ngw' + - create_ngw.nat_gateway_addresses[0].allocation_id == allocation_id + - create_ngw.nat_gateway_id.startswith("nat-") + - '"state" in create_ngw' + - create_ngw.state == 'available' + - '"subnet_id" in create_ngw' + - create_ngw.subnet_id == subnet_id + - '"tags" in create_ngw' + - '"vpc_id" in create_ngw' + - create_ngw.vpc_id == vpc_id + + - name: Create new NAT gateway only if one does not exist already + ec2_vpc_nat_gateway: + if_exist_do_not_create: yes + subnet_id: '{{ subnet_id }}' + wait: yes + register: create_ngw + + - name: Assert recreation would do nothing (expected changed=false) + assert: + that: + - not create_ngw.changed + - '"create_time" in create_ngw' + - '"nat_gateway_addresses" in create_ngw' + - '"nat_gateway_id" in create_ngw' + - create_ngw.nat_gateway_addresses[0].allocation_id == allocation_id + - create_ngw.nat_gateway_id.startswith("nat-") + - '"state" in create_ngw' + - create_ngw.state == 'available' + - '"subnet_id" in create_ngw' + - create_ngw.subnet_id == subnet_id + - '"tags" in create_ngw' + - '"vpc_id" in create_ngw' + - create_ngw.vpc_id == vpc_id + + + # ============================================================ + - name: Allocate a new EIP + ec2_eip: + in_vpc: true + reuse_existing_ip_allowed: true + tag_name: FREE + register: eip_result + + - name: Assert success + assert: + that: + - eip_result is successful + - '"allocation_id" in eip_result' + - eip_result.allocation_id.startswith("eipalloc-") + - '"public_ip" in eip_result' + + - name: 'Set fact: EIP allocation ID and EIP public IP' + set_fact: + second_eip_address: '{{ eip_result.public_ip }}' + second_allocation_id: '{{ eip_result.allocation_id }}' + + + # ============================================================ + - name: Create new nat gateway with eip address - CHECK_MODE + ec2_vpc_nat_gateway: + subnet_id: '{{ subnet_id }}' + eip_address: '{{ second_eip_address }}' + wait: yes + register: create_ngw + check_mode: yes 
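+ # Task-level 'check_mode: yes' forces a dry run for this task only; the module + # still returns the result it would have produced, so the registered variable + # can be asserted on below even though no NAT gateway is actually created.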
+ + - name: Assert creation happened (expected changed=true) - CHECK_MODE + assert: + that: + - create_ngw.changed + + - name: Create new NAT gateway with eip address + ec2_vpc_nat_gateway: + subnet_id: '{{ subnet_id }}' + eip_address: '{{ second_eip_address }}' + wait: yes + register: create_ngw + + - name: Assert creation happened (expected changed=true) + assert: + that: + - create_ngw.changed + - '"create_time" in create_ngw' + - '"nat_gateway_addresses" in create_ngw' + - '"nat_gateway_id" in create_ngw' + - create_ngw.nat_gateway_addresses[0].allocation_id == second_allocation_id + - create_ngw.nat_gateway_id.startswith("nat-") + - '"state" in create_ngw' + - create_ngw.state == 'available' + - '"subnet_id" in create_ngw' + - create_ngw.subnet_id == subnet_id + - '"tags" in create_ngw' + - '"vpc_id" in create_ngw' + - create_ngw.vpc_id == vpc_id + + + # ============================================================ + - name: Trying this again for idempotency - create new NAT gateway with eip address - CHECK_MODE + ec2_vpc_nat_gateway: + subnet_id: '{{ subnet_id }}' + eip_address: '{{ second_eip_address }}' + wait: yes + register: create_ngw + check_mode: yes + + - name: Assert recreation would do nothing (expected changed=false) - CHECK_MODE + assert: + that: + - not create_ngw.changed + - '"create_time" in create_ngw' + - '"nat_gateway_addresses" in create_ngw' + - '"nat_gateway_id" in create_ngw' + - create_ngw.nat_gateway_addresses[0].allocation_id == second_allocation_id + - create_ngw.nat_gateway_id.startswith("nat-") + - '"state" in create_ngw' + - create_ngw.state == 'available' + - '"subnet_id" in create_ngw' + - create_ngw.subnet_id == subnet_id + - '"tags" in create_ngw' + - '"vpc_id" in create_ngw' + - create_ngw.vpc_id == vpc_id + + - name: Trying this again for idempotency - create new NAT gateway with eip address + ec2_vpc_nat_gateway: + subnet_id: '{{ subnet_id }}' + eip_address: '{{ second_eip_address }}' + wait: yes + register: create_ngw + + - name: Assert recreation would do nothing (expected changed=false) + assert: + that: + - not create_ngw.changed + - '"create_time" in create_ngw' + - '"nat_gateway_addresses" in create_ngw' + - '"nat_gateway_id" in create_ngw' + - create_ngw.nat_gateway_addresses[0].allocation_id == second_allocation_id + - create_ngw.nat_gateway_id.startswith("nat-") + - '"state" in create_ngw' + - create_ngw.state == 'available' + - '"subnet_id" in create_ngw' + - create_ngw.subnet_id == subnet_id + - '"tags" in create_ngw' + - '"vpc_id" in create_ngw' + - create_ngw.vpc_id == vpc_id + + + # ============================================================ + - name: Fetch NAT gateway by ID (list) + ec2_vpc_nat_gateway_info: + nat_gateway_ids: + - '{{ nat_gateway_id }}' + register: ngw_info + + - name: Check NAT gateway exists + assert: + that: + - ngw_info is successful + - ngw_info.result | length == 1 + - '"create_time" in first_ngw' + - '"nat_gateway_addresses" in first_ngw' + - '"nat_gateway_id" in first_ngw' + - first_ngw.nat_gateway_id == nat_gateway_id + - '"state" in first_ngw' + - first_ngw.state == 'available' + - '"subnet_id" in first_ngw' + - first_ngw.subnet_id == subnet_id + - '"tags" in first_ngw' + - '"vpc_id" in first_ngw' + - first_ngw.vpc_id == vpc_id + vars: + first_ngw: '{{ ngw_info.result[0] }}' + + + # ============================================================ + - name: Delete NAT gateway - CHECK_MODE + ec2_vpc_nat_gateway: + nat_gateway_id: '{{ nat_gateway_id }}' + state: absent + wait: yes + register: delete_nat_gateway + 
check_mode: yes + + - name: Assert state=absent (expected changed=true) - CHECK_MODE + assert: + that: + - delete_nat_gateway.changed + + - name: Delete NAT gateway + ec2_vpc_nat_gateway: + nat_gateway_id: '{{ nat_gateway_id }}' + state: absent + wait: yes + register: delete_nat_gateway + + - name: Assert state=absent (expected changed=true) + assert: + that: + - delete_nat_gateway.changed + - '"delete_time" in delete_nat_gateway' + - '"nat_gateway_addresses" in delete_nat_gateway' + - '"nat_gateway_id" in delete_nat_gateway' + - delete_nat_gateway.nat_gateway_id == nat_gateway_id + - '"state" in delete_nat_gateway' + - delete_nat_gateway.state in ['deleted', 'deleting'] + - '"subnet_id" in delete_nat_gateway' + - delete_nat_gateway.subnet_id == subnet_id + - '"tags" in delete_nat_gateway' + - '"vpc_id" in delete_nat_gateway' + - delete_nat_gateway.vpc_id == vpc_id + + + # ============================================================ + - name: Create new NAT gateway with eip allocation-id and tags - CHECK_MODE + ec2_vpc_nat_gateway: + subnet_id: '{{ subnet_id }}' + allocation_id: '{{ allocation_id }}' + tags: + tag_one: '{{ resource_prefix }} One' + Tag Two: two {{ resource_prefix }} + wait: yes + register: create_ngw + check_mode: yes + + - name: Assert creation happened (expected changed=true) - CHECK_MODE + assert: + that: + - create_ngw.changed + + - name: Create new NAT gateway with eip allocation-id and tags + ec2_vpc_nat_gateway: + subnet_id: '{{ subnet_id }}' + allocation_id: '{{ allocation_id }}' + tags: + tag_one: '{{ resource_prefix }} One' + Tag Two: two {{ resource_prefix }} + wait: yes + register: create_ngw + + - name: Assert creation happened (expected changed=true) + assert: + that: + - create_ngw.changed + - '"create_time" in create_ngw' + - create_ngw.nat_gateway_addresses[0].allocation_id == allocation_id + - '"nat_gateway_id" in create_ngw' + - create_ngw.nat_gateway_id.startswith("nat-") + - '"state" in create_ngw' + - create_ngw.state == 'available' + - '"subnet_id" in create_ngw' + - create_ngw.subnet_id == subnet_id + - '"tags" in create_ngw' + - create_ngw.tags | length == 2 + - create_ngw.tags["tag_one"] == '{{ resource_prefix }} One' + - create_ngw.tags["Tag Two"] == 'two {{ resource_prefix }}' + - '"vpc_id" in create_ngw' + - create_ngw.vpc_id == vpc_id + - create_ngw.connectivity_type == 'public' + + - name: 'Set facts: NAT gateway ID' + set_fact: + ngw_id: '{{ create_ngw.nat_gateway_id }}' + + + # ============================================================ + - name: Update the tags (no change) - CHECK_MODE + ec2_vpc_nat_gateway: + subnet_id: '{{ subnet_id }}' + allocation_id: '{{ allocation_id }}' + tags: + tag_one: '{{ resource_prefix }} One' + Tag Two: two {{ resource_prefix }} + wait: yes + register: update_tags_ngw + check_mode: yes + + - name: Assert tag update would do nothing (expected changed=false) - CHECK_MODE + assert: + that: + - not update_tags_ngw.changed + - '"nat_gateway_id" in update_tags_ngw' + - update_tags_ngw.nat_gateway_id == ngw_id + - '"subnet_id" in update_tags_ngw' + - update_tags_ngw.subnet_id == subnet_id + - '"tags" in update_tags_ngw' + - update_tags_ngw.tags | length == 2 + - update_tags_ngw.tags["tag_one"] == '{{ resource_prefix }} One' + - update_tags_ngw.tags["Tag Two"] == 'two {{ resource_prefix }}' + - '"vpc_id" in update_tags_ngw' + - update_tags_ngw.vpc_id == vpc_id + + - name: Update the tags (no change) + ec2_vpc_nat_gateway: + subnet_id: '{{ subnet_id }}' + allocation_id: '{{ allocation_id }}' + tags: + tag_one: '{{ 
resource_prefix }} One' + Tag Two: two {{ resource_prefix }} + wait: yes + register: update_tags_ngw + + - name: Assert tag update would do nothing (expected changed=false) + assert: + that: + - not update_tags_ngw.changed + - '"nat_gateway_id" in update_tags_ngw' + - update_tags_ngw.nat_gateway_id == ngw_id + - '"subnet_id" in update_tags_ngw' + - update_tags_ngw.subnet_id == subnet_id + - '"tags" in update_tags_ngw' + - update_tags_ngw.tags | length == 2 + - update_tags_ngw.tags["tag_one"] == '{{ resource_prefix }} One' + - update_tags_ngw.tags["Tag Two"] == 'two {{ resource_prefix }}' + - '"vpc_id" in update_tags_ngw' + - update_tags_ngw.vpc_id == vpc_id + + + # ============================================================ + - name: Gather information about a filtered list of NAT Gateways using tags and state - CHECK_MODE + ec2_vpc_nat_gateway_info: + filters: + tag:Tag Two: two {{ resource_prefix }} + state: [available] + register: ngw_info + check_mode: yes + + - name: Assert success - CHECK_MODE + assert: + that: + - ngw_info is successful + - ngw_info.result | length == 1 + - '"create_time" in second_ngw' + - '"nat_gateway_addresses" in second_ngw' + - '"nat_gateway_id" in second_ngw' + - second_ngw.nat_gateway_id == ngw_id + - '"state" in second_ngw' + - second_ngw.state == 'available' + - '"subnet_id" in second_ngw' + - second_ngw.subnet_id == subnet_id + - '"tags" in second_ngw' + - second_ngw.tags | length == 2 + - '"tag_one" in second_ngw.tags' + - '"Tag Two" in second_ngw.tags' + - second_ngw.tags["tag_one"] == '{{ resource_prefix }} One' + - second_ngw.tags["Tag Two"] == 'two {{ resource_prefix }}' + - '"vpc_id" in second_ngw' + - second_ngw.vpc_id == vpc_id + vars: + second_ngw: '{{ ngw_info.result[0] }}' + + - name: Gather information about a filtered list of NAT Gateways using tags and state + ec2_vpc_nat_gateway_info: + filters: + tag:Tag Two: two {{ resource_prefix }} + state: [available] + register: ngw_info + + - name: Assert success + assert: + that: + - ngw_info is successful + - ngw_info.result | length == 1 + - '"create_time" in second_ngw' + - '"nat_gateway_addresses" in second_ngw' + - '"nat_gateway_id" in second_ngw' + - second_ngw.nat_gateway_id == ngw_id + - '"state" in second_ngw' + - second_ngw.state == 'available' + - '"subnet_id" in second_ngw' + - second_ngw.subnet_id == subnet_id + - '"tags" in second_ngw' + - second_ngw.tags | length == 2 + - '"tag_one" in second_ngw.tags' + - '"Tag Two" in second_ngw.tags' + - second_ngw.tags["tag_one"] == '{{ resource_prefix }} One' + - second_ngw.tags["Tag Two"] == 'two {{ resource_prefix }}' + - '"vpc_id" in second_ngw' + - second_ngw.vpc_id == vpc_id + vars: + second_ngw: '{{ ngw_info.result[0] }}' + + + # ============================================================ + - name: Update the tags (remove and add) - CHECK_MODE + ec2_vpc_nat_gateway: + subnet_id: '{{ subnet_id }}' + allocation_id: '{{ allocation_id }}' + tags: + tag_three: '{{ resource_prefix }} Three' + Tag Two: two {{ resource_prefix }} + wait: yes + register: update_tags_ngw + check_mode: yes + + - name: Assert tag update would happen (expected changed=true) - CHECK_MODE + assert: + that: + - update_tags_ngw.changed + - '"nat_gateway_id" in update_tags_ngw' + - update_tags_ngw.nat_gateway_id == ngw_id + - '"subnet_id" in update_tags_ngw' + - update_tags_ngw.subnet_id == subnet_id + - '"tags" in update_tags_ngw' + - '"vpc_id" in update_tags_ngw' + - update_tags_ngw.vpc_id == vpc_id + + - name: Update the tags (remove and add) + ec2_vpc_nat_gateway: + 
subnet_id: '{{ subnet_id }}' + allocation_id: '{{ allocation_id }}' + tags: + tag_three: '{{ resource_prefix }} Three' + Tag Two: two {{ resource_prefix }} + wait: yes + register: update_tags_ngw + + - name: Assert tag update would happen (expected changed=true) + assert: + that: + - update_tags_ngw.changed + - '"nat_gateway_id" in update_tags_ngw' + - update_tags_ngw.nat_gateway_id == ngw_id + - '"subnet_id" in update_tags_ngw' + - update_tags_ngw.subnet_id == subnet_id + - '"tags" in update_tags_ngw' + - update_tags_ngw.tags | length == 2 + - update_tags_ngw.tags["tag_three"] == '{{ resource_prefix }} Three' + - update_tags_ngw.tags["Tag Two"] == 'two {{ resource_prefix }}' + - '"vpc_id" in update_tags_ngw' + - update_tags_ngw.vpc_id == vpc_id + + + # ============================================================ + - name: Gather information about a filtered list of NAT Gateways using tags and state (no match) - CHECK_MODE + ec2_vpc_nat_gateway_info: + filters: + tag:tag_one: '{{ resource_prefix }} One' + state: [available] + register: ngw_info + check_mode: yes + + - name: Assert success - CHECK_MODE + assert: + that: + - ngw_info is successful + - ngw_info.result | length == 0 + + - name: Gather information about a filtered list of NAT Gateways using tags and + state (no match) + ec2_vpc_nat_gateway_info: + filters: + tag:tag_one: '{{ resource_prefix }} One' + state: [available] + register: ngw_info + + - name: Assert success + assert: + that: + - ngw_info is successful + - ngw_info.result | length == 0 + + + # ============================================================ + - name: Update the tags add without purge - CHECK_MODE + ec2_vpc_nat_gateway: + if_exist_do_not_create: yes + subnet_id: '{{ subnet_id }}' + allocation_id: '{{ allocation_id }}' + purge_tags: no + tags: + tag_one: '{{ resource_prefix }} One' + wait: yes + register: update_tags_ngw + check_mode: yes + + - name: Assert tags would be added - CHECK_MODE + assert: + that: + - update_tags_ngw.changed + - '"nat_gateway_id" in update_tags_ngw' + - update_tags_ngw.nat_gateway_id == ngw_id + - '"subnet_id" in update_tags_ngw' + - update_tags_ngw.subnet_id == subnet_id + - '"tags" in update_tags_ngw' + - '"vpc_id" in update_tags_ngw' + - update_tags_ngw.vpc_id == vpc_id + + - name: Update the tags add without purge + ec2_vpc_nat_gateway: + if_exist_do_not_create: yes + subnet_id: '{{ subnet_id }}' + allocation_id: '{{ allocation_id }}' + purge_tags: no + tags: + tag_one: '{{ resource_prefix }} One' + wait: yes + register: update_tags_ngw + + - name: Assert tags would be added + assert: + that: + - update_tags_ngw.changed + - '"nat_gateway_id" in update_tags_ngw' + - update_tags_ngw.nat_gateway_id == ngw_id + - '"subnet_id" in update_tags_ngw' + - update_tags_ngw.subnet_id == subnet_id + - '"tags" in update_tags_ngw' + - update_tags_ngw.tags | length == 3 + - update_tags_ngw.tags["tag_one"] == '{{ resource_prefix }} One' + - update_tags_ngw.tags["tag_three"] == '{{ resource_prefix }} Three' + - update_tags_ngw.tags["Tag Two"] == 'two {{ resource_prefix }}' + - '"vpc_id" in update_tags_ngw' + - update_tags_ngw.vpc_id == vpc_id + + + # ============================================================ + - name: Remove all tags - CHECK_MODE + ec2_vpc_nat_gateway: + subnet_id: '{{ subnet_id }}' + allocation_id: '{{ allocation_id }}' + tags: {} + register: delete_tags_ngw + check_mode: yes + + - name: assert tags would be removed - CHECK_MODE + assert: + that: + - delete_tags_ngw.changed + - '"nat_gateway_id" in delete_tags_ngw' + - 
delete_tags_ngw.nat_gateway_id == ngw_id + - '"subnet_id" in delete_tags_ngw' + - delete_tags_ngw.subnet_id == subnet_id + - '"tags" in delete_tags_ngw' + - '"vpc_id" in delete_tags_ngw' + - delete_tags_ngw.vpc_id == vpc_id + + - name: Remove all tags + ec2_vpc_nat_gateway: + subnet_id: '{{ subnet_id }}' + allocation_id: '{{ allocation_id }}' + tags: {} + register: delete_tags_ngw + + - name: Assert tags removed + assert: + that: + - delete_tags_ngw.changed + - '"nat_gateway_id" in delete_tags_ngw' + - delete_tags_ngw.nat_gateway_id == ngw_id + - '"subnet_id" in delete_tags_ngw' + - delete_tags_ngw.subnet_id == subnet_id + - '"tags" in delete_tags_ngw' + - delete_tags_ngw.tags | length == 0 + - '"vpc_id" in delete_tags_ngw' + - delete_tags_ngw.vpc_id == vpc_id + + + # ============================================================ + - name: Update with CamelCase tags - CHECK_MODE + ec2_vpc_nat_gateway: + if_exist_do_not_create: yes + subnet_id: '{{ subnet_id }}' + allocation_id: '{{ allocation_id }}' + purge_tags: no + tags: + lowercase spaced: "hello cruel world" + Title Case: "Hello Cruel World" + CamelCase: "SimpleCamelCase" + snake_case: "simple_snake_case" + wait: yes + register: update_tags_ngw + check_mode: yes + + - name: Assert tags would be added - CHECK_MODE + assert: + that: + - update_tags_ngw.changed + - '"nat_gateway_id" in update_tags_ngw' + - update_tags_ngw.nat_gateway_id == ngw_id + - '"subnet_id" in update_tags_ngw' + - update_tags_ngw.subnet_id == subnet_id + - '"tags" in update_tags_ngw' + - '"vpc_id" in update_tags_ngw' + - update_tags_ngw.vpc_id == vpc_id + + - name: Update with CamelCase tags + ec2_vpc_nat_gateway: + if_exist_do_not_create: yes + subnet_id: '{{ subnet_id }}' + allocation_id: '{{ allocation_id }}' + purge_tags: no + tags: + lowercase spaced: "hello cruel world" + Title Case: "Hello Cruel World" + CamelCase: "SimpleCamelCase" + snake_case: "simple_snake_case" + wait: yes + register: update_tags_ngw + + - name: Assert tags added + assert: + that: + - update_tags_ngw.changed + - '"nat_gateway_id" in update_tags_ngw' + - update_tags_ngw.nat_gateway_id == ngw_id + - '"subnet_id" in update_tags_ngw' + - update_tags_ngw.subnet_id == subnet_id + - '"tags" in update_tags_ngw' + - update_tags_ngw.tags | length == 4 + - update_tags_ngw.tags["lowercase spaced"] == 'hello cruel world' + - update_tags_ngw.tags["Title Case"] == 'Hello Cruel World' + - update_tags_ngw.tags["CamelCase"] == 'SimpleCamelCase' + - update_tags_ngw.tags["snake_case"] == 'simple_snake_case' + - '"vpc_id" in update_tags_ngw' + - update_tags_ngw.vpc_id == vpc_id + + + # ============================================================ + + - name: Delete NAT gateway + ec2_vpc_nat_gateway: + nat_gateway_id: '{{ nat_gateway_id }}' + state: absent + wait: yes + register: delete_nat_gateway + + # ============================================================ + + - name: Create new NAT gateway with connectivity_type = private - CHECK_MODE + ec2_vpc_nat_gateway: + subnet_id: '{{ subnet_id }}' + connectivity_type: 'private' + wait: yes + register: create_ngw + check_mode: yes + + - name: Assert creation happened (expected changed=true) - CHECK_MODE + assert: + that: + - create_ngw.changed + - '"ec2:CreateNatGateway" not in create_ngw.resource_actions' + + - name: Create new NAT gateway with connectivity_type = private + ec2_vpc_nat_gateway: + subnet_id: '{{ subnet_id }}' + connectivity_type: 'private' + wait: yes + register: create_ngw + + - name: Assert creation happened (expected
changed=true) + assert: + that: + - create_ngw.changed + - create_ngw.connectivity_type == 'private' + - '"create_time" in create_ngw' + + - name: 'set facts: NAT gateway ID' + set_fact: + nat_gateway_id: '{{ create_ngw.nat_gateway_id }}' + network_interface_id: '{{ create_ngw.nat_gateway_addresses[0].network_interface_id }}' + + # ============================================================ + + + always: + - name: Get NAT gateways + ec2_vpc_nat_gateway_info: + filters: + vpc-id: '{{ vpc_id }}' + state: [available] + register: existing_ngws + ignore_errors: true + + - name: Tidy up NAT gateway + ec2_vpc_nat_gateway: + subnet_id: '{{ item.subnet_id }}' + nat_gateway_id: '{{ item.nat_gateway_id }}' + connectivity_type: '{{ item.connectivity_type }}' + release_eip: yes + state: absent + wait: yes + with_items: '{{ existing_ngws.result }}' + ignore_errors: true + + - name: Delete IGW + ec2_vpc_igw: + vpc_id: '{{ vpc_id }}' + state: absent + ignore_errors: true + + - name: Remove subnet + ec2_vpc_subnet: + state: absent + cidr: '{{ subnet_cidr }}' + vpc_id: '{{ vpc_id }}' + ignore_errors: true + + - name: Ensure EIP is actually released + ec2_eip: + state: absent + device_id: '{{ item.nat_gateway_addresses[0].network_interface_id }}' + in_vpc: yes + with_items: '{{ existing_ngws.result }}' + ignore_errors: yes + + - name: Delete VPC + ec2_vpc_net: + name: '{{ vpc_name }}' + cidr_block: '{{ vpc_cidr }}' + state: absent + purge_cidrs: yes + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/aliases new file mode 100644 index 000000000..92bd4d57f --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/aliases @@ -0,0 +1,2 @@ +ec2_vpc_net_info +cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/defaults/main.yml new file mode 100644 index 000000000..f35d4cb87 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/defaults/main.yml @@ -0,0 +1,8 @@ +--- +# defaults file for ec2_vpc_net +vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/24' +vpc_cidr_a: '10.{{ 256 | random(seed=resource_prefix) }}.1.0/24' +vpc_cidr_b: '10.{{ 256 | random(seed=resource_prefix) }}.2.0/24' + +vpc_name: '{{ resource_prefix }}-vpc-net' +vpc_name_updated: '{{ resource_prefix }}-updated-vpc-net' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/meta/main.yml new file mode 100644 index 000000000..32cf5dda7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/tasks/main.yml new file mode 100644 index 000000000..da40c16f6 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/tasks/main.yml @@ -0,0 +1,1560 @@ +--- +- name: Setup AWS Environment + module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + vars: + first_tags: + 'Key with Spaces': Value with spaces + 
CamelCaseKey: CamelCaseValue + pascalCaseKey: pascalCaseValue + snake_case_key: snake_case_value + second_tags: + 'New Key with Spaces': Value with spaces + NewCamelCaseKey: CamelCaseValue + newPascalCaseKey: pascalCaseValue + new_snake_case_key: snake_case_value + third_tags: + 'Key with Spaces': Value with spaces + CamelCaseKey: CamelCaseValue + pascalCaseKey: pascalCaseValue + snake_case_key: snake_case_value + 'New Key with Spaces': Updated Value with spaces + final_tags: + 'Key with Spaces': Value with spaces + CamelCaseKey: CamelCaseValue + pascalCaseKey: pascalCaseValue + snake_case_key: snake_case_value + 'New Key with Spaces': Updated Value with spaces + NewCamelCaseKey: CamelCaseValue + newPascalCaseKey: pascalCaseValue + new_snake_case_key: snake_case_value + name_tags: + Name: "{{ vpc_name }}" + block: + + # ============================================================ + + - name: Get the current caller identity facts + aws_caller_info: + register: caller_facts + + - name: run the module without parameters + ec2_vpc_net: + ignore_errors: yes + register: result + + - name: assert failure + assert: + that: + - result is failed + #- result.msg.startswith("missing required arguments") + - result.msg.startswith("one of") + + # ============================================================ + + - name: Fetch existing VPC info + ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + - name: Check no-one is using the Prefix before we start + assert: + that: + - vpc_info.vpcs | length == 0 + + - name: test check mode creating a VPC + ec2_vpc_net: + state: present + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + check_mode: true + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: check for a change + assert: + that: + - result is changed + - vpc_info.vpcs | length == 0 + + # ============================================================ + + - name: create a VPC + ec2_vpc_net: + state: present + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + ipv6_cidr: True + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert the VPC was created successfully + assert: + that: + - result is successful + - result is changed + - vpc_info.vpcs | length == 1 + + - name: assert the output + assert: + that: + - '"cidr_block" in result.vpc' + - result.vpc.cidr_block == vpc_cidr + - result.vpc.cidr_block_association_set | length == 1 + - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-") + - result.vpc.cidr_block_association_set[0].cidr_block == vpc_cidr + - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"] + - '"classic_link_enabled" in result.vpc' + - result.vpc.dhcp_options_id.startswith("dopt-") + - result.vpc.id.startswith("vpc-") + - '"instance_tenancy" in result.vpc' + - result.vpc.ipv6_cidr_block_association_set | length == 1 + - result.vpc.ipv6_cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-") + - result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block | ansible.utils.ipv6 + - result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block_state.state in ["associated", "associating"] + - '"is_default" in result.vpc' + - '"state" in result.vpc' + - result.vpc.tags.keys() | length == 1 + - result.vpc.tags.Name == vpc_name + + - name: set the first VPC's details as facts for comparison and cleanup + set_fact: + vpc_1_result: "{{ 
result }}" + vpc_1: "{{ result.vpc.id }}" + vpc_1_ipv6_cidr: "{{ result.vpc.ipv6_cidr_block_association_set.0.ipv6_cidr_block }}" + default_dhcp_options_id: "{{ result.vpc.dhcp_options_id }}" + + - name: create a VPC (retry) + ec2_vpc_net: + state: present + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + ipv6_cidr: True + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert nothing changed + assert: + that: + - result is successful + - result is not changed + - vpc_info.vpcs | length == 1 + - '"cidr_block" in result.vpc' + - result.vpc.cidr_block == vpc_cidr + - result.vpc.cidr_block_association_set | length == 1 + - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-") + - result.vpc.cidr_block_association_set[0].cidr_block == vpc_cidr + - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"] + - '"classic_link_enabled" in result.vpc' + - result.vpc.dhcp_options_id.startswith("dopt-") + - result.vpc.id.startswith("vpc-") + - '"instance_tenancy" in result.vpc' + - result.vpc.ipv6_cidr_block_association_set | length == 1 + - result.vpc.ipv6_cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-") + - result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block | ansible.utils.ipv6 + - result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block_state.state in ["associated", "associating"] + - '"is_default" in result.vpc' + - '"state" in result.vpc' + - result.vpc.tags.keys() | length == 1 + - result.vpc.tags.Name == vpc_name + - result.vpc.id == vpc_1 + + - name: No-op VPC configuration, missing ipv6_cidr property + ec2_vpc_net: + state: present + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + # Intentionally commenting out 'ipv6_cidr' + # When the 'ipv6_cidr' property is missing, the VPC should retain its configuration. + # Omitting it should not cause the module to apply the default value 'false' and disassociate the IPv6 block.
+ #ipv6_cidr: True + register: result + - name: assert configuration did not change + assert: + that: + - result is successful + - result is not changed + + # ============================================================ + + - name: VPC info (no filters) + ec2_vpc_net_info: + register: vpc_info + retries: 3 + delay: 3 + until: '"InvalidVpcID.NotFound" not in ( vpc_info.msg | default("") )' + + - name: Test that our new VPC shows up in the results + assert: + that: + - vpc_1 in ( vpc_info.vpcs | map(attribute="vpc_id") | list ) + + - name: VPC info (Simple tag filter) + ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: Test vpc_info results + assert: + that: + - vpc_info.vpcs[0].cidr_block == vpc_cidr + - vpc_info.vpcs[0].cidr_block_association_set | length == 1 + - vpc_info.vpcs[0].cidr_block_association_set[0].association_id == result.vpc.cidr_block_association_set[0].association_id + - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block == result.vpc.cidr_block_association_set[0].cidr_block + - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"] + - '"classic_link_dns_supported" in vpc_info.vpcs[0]' + - '"classic_link_enabled" in vpc_info.vpcs[0]' + - vpc_info.vpcs[0].dhcp_options_id == result.vpc.dhcp_options_id + - ( vpc_info.vpcs[0].enable_dns_hostnames | bool ) == True + - ( vpc_info.vpcs[0].enable_dns_support | bool ) == True + - vpc_info.vpcs[0].id == result.vpc.id + - '"instance_tenancy" in vpc_info.vpcs[0]' + - vpc_info.vpcs[0].ipv6_cidr_block_association_set | length == 1 + - vpc_info.vpcs[0].ipv6_cidr_block_association_set[0].association_id == result.vpc.ipv6_cidr_block_association_set[0].association_id + - vpc_info.vpcs[0].ipv6_cidr_block_association_set[0].ipv6_cidr_block == result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block + - vpc_info.vpcs[0].ipv6_cidr_block_association_set[0].ipv6_cidr_block_state.state in ["associated", "associating"] + - '"is_default" in vpc_info.vpcs[0]' + - vpc_info.vpcs[0].owner_id == caller_facts.account + - '"state" in vpc_info.vpcs[0]' + - vpc_info.vpcs[0].vpc_id == result.vpc.id + + # ============================================================ + + - name: Try to add IPv6 CIDR when one already exists + ec2_vpc_net: + state: present + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + ipv6_cidr: True + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: Assert no changes made + assert: + that: + - result is not changed + - vpc_info.vpcs | length == 1 + + # ============================================================ + + - name: test check mode creating an identical VPC (multi_ok) + ec2_vpc_net: + state: present + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + ipv6_cidr: True + multi_ok: yes + check_mode: true + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert a change would be made + assert: + that: + - result is changed + - name: assert a change was not actually made + assert: + that: + - vpc_info.vpcs | length == 1 + + # ============================================================ + + - name: create a VPC with a dedicated tenancy using the same CIDR and name + ec2_vpc_net: + state: present + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + ipv6_cidr: True + tenancy: dedicated + multi_ok: yes + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: 
vpc_info + + - name: assert a new VPC was created + assert: + that: + - result is successful + - result is changed + - result.vpc.instance_tenancy == "dedicated" + - result.vpc.id != vpc_1 + - vpc_info.vpcs | length == 2 + + - name: set the second VPC's details as facts for comparison and cleanup + set_fact: + vpc_2_result: "{{ result }}" + vpc_2: "{{ result.vpc.id }}" + + # ============================================================ + + - name: VPC info (Simple VPC-ID filter) + ec2_vpc_net_info: + filters: + "vpc-id": "{{ vpc_2 }}" + register: vpc_info + + - name: Test vpc_info results + assert: + that: + - vpc_info.vpcs[0].cidr_block == vpc_cidr + - vpc_info.vpcs[0].cidr_block_association_set | length == 1 + - vpc_info.vpcs[0].cidr_block_association_set[0].association_id == result.vpc.cidr_block_association_set[0].association_id + - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block == result.vpc.cidr_block_association_set[0].cidr_block + - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"] + - '"classic_link_dns_supported" in vpc_info.vpcs[0]' + - '"classic_link_enabled" in vpc_info.vpcs[0]' + - vpc_info.vpcs[0].dhcp_options_id == result.vpc.dhcp_options_id + - ( vpc_info.vpcs[0].enable_dns_hostnames | bool ) == True + - ( vpc_info.vpcs[0].enable_dns_support | bool ) == True + - vpc_info.vpcs[0].id == vpc_2 + - '"instance_tenancy" in vpc_info.vpcs[0]' + - vpc_info.vpcs[0].ipv6_cidr_block_association_set | length == 1 + - vpc_info.vpcs[0].ipv6_cidr_block_association_set[0].association_id == result.vpc.ipv6_cidr_block_association_set[0].association_id + - vpc_info.vpcs[0].ipv6_cidr_block_association_set[0].ipv6_cidr_block == result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block + - vpc_info.vpcs[0].ipv6_cidr_block_association_set[0].ipv6_cidr_block_state.state in ["associated", "associating"] + - '"is_default" in vpc_info.vpcs[0]' + - vpc_info.vpcs[0].owner_id == caller_facts.account + - '"state" in vpc_info.vpcs[0]' + - vpc_info.vpcs[0].vpc_id == vpc_2 + + # ============================================================ + + # This will only fail if there are already *2* VPCs; otherwise ec2_vpc_net + # assumes you want to update your existing VPC...
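+ # A minimal sketch of that matching logic (hypothetical name and CIDR): with + # one VPC matching the given name and CIDR, ec2_vpc_net updates it in place; + # with two or more matches it cannot tell which VPC is meant and fails + # unless multi_ok is set, e.g.: + # + # - ec2_vpc_net: + #     state: present + #     name: shared-name          # matches more than one existing VPC + #     cidr_block: 10.99.0.0/16 + #     multi_ok: yes              # create yet another VPC instead of failing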
+ - name: attempt to create another VPC with the same CIDR and name without multi_ok + ec2_vpc_net: + state: present + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + ipv6_cidr: True + tenancy: dedicated + multi_ok: no + register: new_result + ignore_errors: yes + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert failure + assert: + that: + - new_result is failed + - '"If you would like to create the VPC anyway please pass True to the multi_ok param" in new_result.msg' + - vpc_info.vpcs | length == 2 + + # ============================================================ + + - name: Set new name for second VPC + ec2_vpc_net: + state: present + vpc_id: "{{ vpc_2 }}" + name: "{{ vpc_name_updated }}" + cidr_block: "{{ vpc_cidr }}" + register: result + + - name: assert name changed + assert: + that: + - '"cidr_block" in result.vpc' + - result.vpc.cidr_block == vpc_cidr + - result.vpc.cidr_block_association_set | length == 1 + - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-") + - result.vpc.cidr_block_association_set[0].cidr_block == vpc_cidr + - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"] + - '"classic_link_enabled" in result.vpc' + - result.vpc.dhcp_options_id.startswith("dopt-") + - '"instance_tenancy" in result.vpc' + - result.vpc.ipv6_cidr_block_association_set | length == 1 + - result.vpc.ipv6_cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-") + - result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block | ansible.utils.ipv6 + - result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block_state.state in ["associated", "associating"] + - '"is_default" in result.vpc' + - '"state" in result.vpc' + - result.vpc.tags.keys() | length == 1 + - result.vpc.tags.Name == vpc_name_updated + - result.vpc.id == vpc_2 + + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert success + assert: + that: + - result is changed + - vpc_info.vpcs | length == 1 + - vpc_info.vpcs[0].vpc_id == vpc_1 + + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name_updated }}" + register: vpc_info + + - name: assert success + assert: + that: + - result is changed + - vpc_info.vpcs | length == 1 + - vpc_info.vpcs[0].vpc_id == vpc_2 + + - name: delete second VPC (by id) + ec2_vpc_net: + vpc_id: "{{ vpc_2 }}" + state: absent + cidr_block: "{{ vpc_cidr }}" + register: result + + # ============================================================ + + - name: attempt to delete a VPC that doesn't exist + ec2_vpc_net: + state: absent + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}-does-not-exist" + register: result + + - name: assert no changes were made + assert: + that: + - result is not changed + - not result.vpc + + # ============================================================ + + - name: create a DHCP option set to use in next test + ec2_vpc_dhcp_option: + dns_servers: + - 4.4.4.4 + - 8.8.8.8 + tags: + Name: "{{ vpc_name }}" + register: new_dhcp + - name: assert the DHCP option set was successfully created + assert: + that: + - new_dhcp is changed + + - name: modify the DHCP options set for a VPC (check_mode) + ec2_vpc_net: + state: present + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + dhcp_opts_id: "{{ new_dhcp.dhcp_options_id }}" + register: result + check_mode: True + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert the DHCP option 
set changed but didn't update + assert: + that: + - result is changed + - result.vpc.id == vpc_1 + - vpc_info.vpcs | length == 1 + - vpc_info.vpcs[0].dhcp_options_id == default_dhcp_options_id + + - name: modify the DHCP options set for a VPC + ec2_vpc_net: + state: present + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + dhcp_opts_id: "{{ new_dhcp.dhcp_options_id }}" + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert the DHCP option set changed + assert: + that: + - result is changed + - result.vpc.id == vpc_1 + - default_dhcp_options_id != result.vpc.dhcp_options_id + - result.vpc.dhcp_options_id == new_dhcp.dhcp_options_id + - vpc_info.vpcs | length == 1 + - vpc_info.vpcs[0].dhcp_options_id == new_dhcp.dhcp_options_id + + - name: modify the DHCP options set for a VPC (retry) + ec2_vpc_net: + state: present + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + dhcp_opts_id: "{{ new_dhcp.dhcp_options_id }}" + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert the DHCP option set was not changed + assert: + that: + - result is not changed + - result.vpc.id == vpc_1 + - result.vpc.dhcp_options_id == new_dhcp.dhcp_options_id + - vpc_info.vpcs | length == 1 + - vpc_info.vpcs[0].dhcp_options_id == new_dhcp.dhcp_options_id + + # ============================================================ + + - name: disable dns_hostnames (check mode) + ec2_vpc_net: + state: present + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + dns_hostnames: False + register: result + check_mode: True + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert changed was set but not made + assert: + that: + - result is successful + - result is changed + - vpc_info.vpcs | length == 1 + - vpc_info.vpcs[0].enable_dns_hostnames | bool == True + - vpc_info.vpcs[0].enable_dns_support | bool == True + + - name: disable dns_hostnames + ec2_vpc_net: + state: present + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + dns_hostnames: False + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert a change was made + assert: + that: + - result is successful + - result is changed + - result.vpc.id == vpc_1 + - vpc_info.vpcs | length == 1 + - vpc_info.vpcs[0].enable_dns_hostnames | bool == False + - vpc_info.vpcs[0].enable_dns_support | bool == True + + - name: disable dns_hostnames (retry) + ec2_vpc_net: + state: present + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + dns_hostnames: False + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert a change was not made + assert: + that: + - result is successful + - result is not changed + - result.vpc.id == vpc_1 + - vpc_info.vpcs | length == 1 + - vpc_info.vpcs[0].enable_dns_hostnames | bool == False + - vpc_info.vpcs[0].enable_dns_support | bool == True + + - name: disable dns_support (check mode) + ec2_vpc_net: + state: present + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + dns_hostnames: False + dns_support: False + check_mode: True + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert changed was set but not made + assert: + that: + - result is successful + - result is changed + - result.vpc.id == vpc_1 + - vpc_info.vpcs | length == 1 + -
vpc_info.vpcs[0].enable_dns_hostnames | bool == False + - vpc_info.vpcs[0].enable_dns_support | bool == True + + - name: disable dns_support + ec2_vpc_net: + state: present + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + dns_hostnames: False + dns_support: False + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert a change was made + assert: + that: + - result is successful + - result is changed + - result.vpc.id == vpc_1 + - vpc_info.vpcs | length == 1 + - vpc_info.vpcs[0].enable_dns_hostnames | bool == False + - vpc_info.vpcs[0].enable_dns_support | bool == False + + - name: disable dns_support (retry) + ec2_vpc_net: + state: present + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + dns_hostnames: False + dns_support: False + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert a change was not made + assert: + that: + - result is successful + - result is not changed + - result.vpc.id == vpc_1 + - vpc_info.vpcs | length == 1 + - vpc_info.vpcs[0].enable_dns_hostnames | bool == False + - vpc_info.vpcs[0].enable_dns_support | bool == False + + - name: re-enable dns_support (check mode) + ec2_vpc_net: + state: present + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + dns_hostnames: True + dns_support: True + register: result + check_mode: True + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert a change would be made but has not been + assert: + that: + - result is successful + - result is changed + - result.vpc.id == vpc_1 + - vpc_info.vpcs | length == 1 + - vpc_info.vpcs[0].enable_dns_hostnames | bool == False + - vpc_info.vpcs[0].enable_dns_support | bool == False + + - name: re-enable dns_support + ec2_vpc_net: + state: present + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + dns_hostnames: True + dns_support: True + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert a change was made + assert: + that: + - result is successful + - result is changed + - result.vpc.id == vpc_1 + - vpc_info.vpcs | length == 1 + - vpc_info.vpcs[0].enable_dns_hostnames | bool == True + - vpc_info.vpcs[0].enable_dns_support | bool == True + + - name: re-enable dns_support (retry) + ec2_vpc_net: + state: present + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + dns_hostnames: True + dns_support: True + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert a change was not made + assert: + that: + - result is successful + - result is not changed + - result.vpc.id == vpc_1 + - vpc_info.vpcs | length == 1 + - vpc_info.vpcs[0].enable_dns_hostnames | bool == True + - vpc_info.vpcs[0].enable_dns_support | bool == True + + # ============================================================ + + - name: add tags (check mode) + ec2_vpc_net: + state: present + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + tags: "{{ first_tags }}" + check_mode: true + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert the VPC has Name but not Ansible tag + assert: + that: + - result is successful + - result is changed + - result.vpc.id == vpc_1 + - result.vpc.tags | length == 1 + - result.vpc.tags.Name == vpc_name + - vpc_info.vpcs | length == 1 + - vpc_info.vpcs[0].tags == name_tags + + - name: add 
tags + ec2_vpc_net: + state: present + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + tags: "{{ first_tags }}" + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert the VPC has Name and Ansible tags + assert: + that: + - result is successful + - result is changed + - result.vpc.id == vpc_1 + - result.vpc.tags == (first_tags | combine(name_tags)) + - vpc_info.vpcs | length == 1 + - vpc_info.vpcs[0].tags == (first_tags | combine(name_tags)) + + - name: add tags (no change) + ec2_vpc_net: + state: present + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + tags: "{{ first_tags }}" + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert the VPC has Name and Ansible tags + assert: + that: + - result is successful + - result is not changed + - result.vpc.id == vpc_1 + - result.vpc.tags == (first_tags | combine(name_tags)) + - vpc_info.vpcs | length == 1 + - vpc_info.vpcs[0].tags == (first_tags | combine(name_tags)) + + # ============================================================ + + - name: modify tags with purge (check mode) + ec2_vpc_net: + state: present + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + tags: "{{ second_tags }}" + purge_tags: true + check_mode: true + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert the VPC tags were not changed + assert: + that: + - result is successful + - result is changed + - result.vpc.id == vpc_1 + - result.vpc.tags == (first_tags | combine(name_tags)) + - vpc_info.vpcs | length == 1 + - vpc_info.vpcs[0].tags == (first_tags | combine(name_tags)) + + - name: modify tags with purge + ec2_vpc_net: + state: present + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + tags: "{{ second_tags }}" + purge_tags: true + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert the VPC has Name and Ansible tags + assert: + that: + - result is successful + - result is changed + - result.vpc.id == vpc_1 + - result.vpc.tags == (second_tags | combine(name_tags)) + - vpc_info.vpcs | length == 1 + - vpc_info.vpcs[0].tags == (second_tags | combine(name_tags)) + + - name: modify tags with purge (no change) + ec2_vpc_net: + state: present + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + tags: "{{ second_tags }}" + purge_tags: true + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert the VPC has Name and Ansible tags + assert: + that: + - result is successful + - result is not changed + - result.vpc.id == vpc_1 + - result.vpc.tags == (second_tags | combine(name_tags)) + - vpc_info.vpcs | length == 1 + - vpc_info.vpcs[0].tags == (second_tags | combine(name_tags)) + + # ============================================================ + + - name: modify tags without purge (check mode) + ec2_vpc_net: + state: present + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + tags: "{{ third_tags }}" + purge_tags: false + check_mode: true + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert the VPC tags were not changed + assert: + that: + - result is successful + - result is changed + - result.vpc.id == vpc_1 + - result.vpc.tags == (second_tags | combine(name_tags)) + - vpc_info.vpcs | length == 1 + - vpc_info.vpcs[0].tags ==
(second_tags | combine(name_tags)) + + - name: modify tags without purge + ec2_vpc_net: + state: present + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + tags: "{{ third_tags }}" + purge_tags: false + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert the VPC has Name and Ansible tags + assert: + that: + - result is successful + - result is changed + - result.vpc.id == vpc_1 + - result.vpc.tags == (final_tags | combine(name_tags)) + - vpc_info.vpcs | length == 1 + - vpc_info.vpcs[0].tags == (final_tags | combine(name_tags)) + + - name: modify tags without purge (no change) + ec2_vpc_net: + state: present + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + tags: "{{ third_tags }}" + purge_tags: false + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert the VPC has Name and Ansible tags + assert: + that: + - result is successful + - result is not changed + - result.vpc.id == vpc_1 + - result.vpc.tags == (final_tags | combine(name_tags)) + - vpc_info.vpcs | length == 1 + - vpc_info.vpcs[0].tags == (final_tags | combine(name_tags)) + + # ============================================================ + + - name: modify CIDR (check mode) + ec2_vpc_net: + state: present + cidr_block: + - "{{ vpc_cidr }}" + - "{{ vpc_cidr_a }}" + name: "{{ vpc_name }}" + check_mode: true + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: Check the CIDRs weren't changed + assert: + that: + - result is successful + - result is changed + - result.vpc.id == vpc_1 + - vpc_info.vpcs | length == 1 + - vpc_info.vpcs[0].cidr_block == vpc_cidr + - vpc_cidr in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_a not in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_b not in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_info.vpcs[0].cidr_block_association_set | length == 1 + - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-") + - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"] + - vpc_cidr in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_a not in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_b not in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) + + - name: modify CIDR + ec2_vpc_net: + state: present + cidr_block: + - "{{ vpc_cidr }}" + - "{{ vpc_cidr_a }}" + name: "{{ vpc_name }}" + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert the CIDRs changed + assert: + that: + - result is successful + - result is changed + - result.vpc.id == vpc_1 + - vpc_info.vpcs | length == 1 + - result.vpc.cidr_block == vpc_cidr + - vpc_info.vpcs[0].cidr_block == vpc_cidr + - result.vpc.cidr_block_association_set | length == 2 + - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-") + - result.vpc.cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-") + - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"] + - result.vpc.cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"] + - vpc_cidr in
(result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_a in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_b not in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_info.vpcs[0].cidr_block_association_set | length == 2 + - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-") + - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-") + - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"] + - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"] + - vpc_cidr in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_a in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_b not in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) + + - name: modify CIDR (no change) + ec2_vpc_net: + state: present + cidr_block: + - "{{ vpc_cidr }}" + - "{{ vpc_cidr_a }}" + name: "{{ vpc_name }}" + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert the CIDRs didn't change + assert: + that: + - result is successful + - result is not changed + - result.vpc.id == vpc_1 + - vpc_info.vpcs | length == 1 + - result.vpc.cidr_block == vpc_cidr + - vpc_info.vpcs[0].cidr_block == vpc_cidr + - result.vpc.cidr_block_association_set | length == 2 + - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-") + - result.vpc.cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-") + - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"] + - result.vpc.cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"] + - vpc_cidr in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_a in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_b not in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_info.vpcs[0].cidr_block_association_set | length == 2 + - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-") + - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-") + - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"] + - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"] + - vpc_cidr in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_a in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_b not in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) + + - name: modify CIDR - no purge (check mode) + ec2_vpc_net: + state: present + cidr_block: + - "{{ vpc_cidr }}" + - "{{ vpc_cidr_b }}" + name: "{{ vpc_name }}" + check_mode: true + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: Check the CIDRs weren't changed + assert: + that: + - result is successful + - result is changed + - vpc_info.vpcs | length == 1 + - vpc_info.vpcs[0].cidr_block == vpc_cidr + - vpc_cidr in 
(result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_a in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_b not in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_info.vpcs[0].cidr_block_association_set | length == 2 + - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-") + - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-") + - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"] + - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"] + - vpc_cidr in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_a in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_b not in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) + + - name: modify CIDR - no purge + ec2_vpc_net: + state: present + cidr_block: + - "{{ vpc_cidr }}" + - "{{ vpc_cidr_b }}" + name: "{{ vpc_name }}" + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert the CIDRs changed + assert: + that: + - result is successful + - result is changed + - result.vpc.id == vpc_1 + - vpc_info.vpcs | length == 1 + - result.vpc.cidr_block == vpc_cidr + - vpc_info.vpcs[0].cidr_block == vpc_cidr + - result.vpc.cidr_block_association_set | length == 3 + - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-") + - result.vpc.cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-") + - result.vpc.cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-") + - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"] + - result.vpc.cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"] + - result.vpc.cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"] + - vpc_cidr in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_a in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_b in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_info.vpcs[0].cidr_block_association_set | length == 3 + - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-") + - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-") + - vpc_info.vpcs[0].cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-") + - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"] + - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"] + - vpc_info.vpcs[0].cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"] + - vpc_cidr in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_a in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_b in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) + + - name: modify CIDR - no purge (no change) + ec2_vpc_net: + state: present + cidr_block: + - "{{ vpc_cidr }}" + - "{{ 
vpc_cidr_b }}" + name: "{{ vpc_name }}" + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert the CIDRs didn't change + assert: + that: + - result is successful + - result is not changed + - vpc_info.vpcs | length == 1 + - result.vpc.cidr_block == vpc_cidr + - vpc_info.vpcs[0].cidr_block == vpc_cidr + - result.vpc.cidr_block_association_set | length == 3 + - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-") + - result.vpc.cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-") + - result.vpc.cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-") + - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"] + - result.vpc.cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"] + - result.vpc.cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"] + - vpc_cidr in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_a in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_b in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_info.vpcs[0].cidr_block_association_set | length == 3 + - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-") + - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-") + - vpc_info.vpcs[0].cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-") + - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"] + - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"] + - vpc_info.vpcs[0].cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"] + - vpc_cidr in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_a in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_b in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) + + - name: modify CIDR - no purge (no change - list all - check mode) + ec2_vpc_net: + state: present + cidr_block: + - "{{ vpc_cidr }}" + - "{{ vpc_cidr_a }}" + - "{{ vpc_cidr_b }}" + name: "{{ vpc_name }}" + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert the CIDRs didn't change + assert: + that: + - result is successful + - result is not changed + - vpc_info.vpcs | length == 1 + - result.vpc.cidr_block == vpc_cidr + - vpc_info.vpcs[0].cidr_block == vpc_cidr + - result.vpc.cidr_block_association_set | length == 3 + - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-") + - result.vpc.cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-") + - result.vpc.cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-") + - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"] + - result.vpc.cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"] + - result.vpc.cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"] + - vpc_cidr in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_a in 
(result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_b in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_info.vpcs[0].cidr_block_association_set | length == 3 + - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-") + - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-") + - vpc_info.vpcs[0].cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-") + - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"] + - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"] + - vpc_info.vpcs[0].cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"] + - vpc_cidr in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_a in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_b in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) + + - name: modify CIDR - no purge (no change - list all) + ec2_vpc_net: + state: present + cidr_block: + - "{{ vpc_cidr }}" + - "{{ vpc_cidr_a }}" + - "{{ vpc_cidr_b }}" + name: "{{ vpc_name }}" + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert the CIDRs didn't change + assert: + that: + - result is successful + - result is not changed + - vpc_info.vpcs | length == 1 + - result.vpc.cidr_block == vpc_cidr + - vpc_info.vpcs[0].cidr_block == vpc_cidr + - result.vpc.cidr_block_association_set | length == 3 + - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-") + - result.vpc.cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-") + - result.vpc.cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-") + - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"] + - result.vpc.cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"] + - result.vpc.cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"] + - vpc_cidr in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_a in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_b in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_info.vpcs[0].cidr_block_association_set | length == 3 + - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-") + - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-") + - vpc_info.vpcs[0].cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-") + - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"] + - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"] + - vpc_info.vpcs[0].cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"] + - vpc_cidr in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_a in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_b in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) 
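+ # The module compares the cidr_block list as a set, so re-listing the same + # CIDRs in a different order must also be a no-op; the next two tasks verify + # that. A minimal sketch (hypothetical CIDRs): + # + # - ec2_vpc_net: + #     state: present + #     name: example-vpc + #     cidr_block: + #       - 10.1.0.0/16    # same CIDRs as already associated, + #       - 10.0.0.0/16    # only the order differs -> not changed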
+ + - name: modify CIDR - no purge (no change - different order - check mode) + ec2_vpc_net: + state: present + cidr_block: + - "{{ vpc_cidr }}" + - "{{ vpc_cidr_b }}" + - "{{ vpc_cidr_a }}" + name: "{{ vpc_name }}" + check_mode: true + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert the CIDRs didn't change + assert: + that: + - result is successful + - result is not changed + - vpc_info.vpcs | length == 1 + - result.vpc.cidr_block == vpc_cidr + - vpc_info.vpcs[0].cidr_block == vpc_cidr + - result.vpc.cidr_block_association_set | length == 3 + - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-") + - result.vpc.cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-") + - result.vpc.cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-") + - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"] + - result.vpc.cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"] + - result.vpc.cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"] + - vpc_cidr in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_a in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_b in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_info.vpcs[0].cidr_block_association_set | length == 3 + - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-") + - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-") + - vpc_info.vpcs[0].cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-") + - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"] + - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"] + - vpc_info.vpcs[0].cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"] + - vpc_cidr in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_a in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_b in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) + + - name: modify CIDR - no purge (no change - different order) + ec2_vpc_net: + state: present + cidr_block: + - "{{ vpc_cidr }}" + - "{{ vpc_cidr_b }}" + - "{{ vpc_cidr_a }}" + name: "{{ vpc_name }}" + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert the CIDRs didn't change + assert: + that: + - result is successful + - result is not changed + - vpc_info.vpcs | length == 1 + - result.vpc.cidr_block == vpc_cidr + - vpc_info.vpcs[0].cidr_block == vpc_cidr + - result.vpc.cidr_block_association_set | length == 3 + - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-") + - result.vpc.cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-") + - result.vpc.cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-") + - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"] + - result.vpc.cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"] + -
result.vpc.cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"] + - vpc_cidr in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_a in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_b in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_info.vpcs[0].cidr_block_association_set | length == 3 + - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-") + - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-") + - vpc_info.vpcs[0].cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-") + - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"] + - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"] + - vpc_info.vpcs[0].cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"] + - vpc_cidr in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_a in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_b in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) + + - name: modify CIDR - purge (check mode) + ec2_vpc_net: + state: present + cidr_block: + - "{{ vpc_cidr }}" + - "{{ vpc_cidr_b }}" + name: "{{ vpc_name }}" + purge_cidrs: yes + check_mode: true + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: Check the CIDRs weren't changed + assert: + that: + - result is successful + - result is changed + - vpc_info.vpcs | length == 1 + - vpc_info.vpcs[0].cidr_block == vpc_cidr + - vpc_info.vpcs[0].cidr_block_association_set | length == 3 + - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-") + - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-") + - vpc_info.vpcs[0].cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-") + - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"] + - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"] + - vpc_info.vpcs[0].cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"] + - vpc_cidr in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_a in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) + - vpc_cidr_b in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) + + - name: modify CIDR - purge + ec2_vpc_net: + state: present + cidr_block: + - "{{ vpc_cidr }}" + - "{{ vpc_cidr_b }}" + name: "{{ vpc_name }}" + purge_cidrs: yes + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert the CIDRs changed + assert: + that: + - result is successful + - result is changed + - result.vpc.id == vpc_1 + - vpc_info.vpcs | length == 1 + - result.vpc.cidr_block == vpc_cidr + - vpc_info.vpcs[0].cidr_block == vpc_cidr + - result.vpc.cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list | length == 2 + - vpc_cidr in (result.vpc.cidr_block_association_set | 
selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list) + - vpc_cidr_a not in (result.vpc.cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block')) + - vpc_cidr_b in (result.vpc.cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block')) + - vpc_info.vpcs[0].cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list | length == 2 + - vpc_cidr in (vpc_info.vpcs[0].cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list) + - vpc_cidr_a not in (vpc_info.vpcs[0].cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list) + - vpc_cidr_b in (vpc_info.vpcs[0].cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list) + + - name: modify CIDR - purge (no change) + ec2_vpc_net: + state: present + cidr_block: + - "{{ vpc_cidr }}" + - "{{ vpc_cidr_b }}" + name: "{{ vpc_name }}" + purge_cidrs: yes + register: result + - ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_info + + - name: assert the CIDRs didn't change + assert: + that: + - result is successful + - result is not changed + - result.vpc.id == vpc_1 + - vpc_info.vpcs | length == 1 + - result.vpc.cidr_block == vpc_cidr + - vpc_info.vpcs[0].cidr_block == vpc_cidr + - result.vpc.cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list | length == 2 + - vpc_cidr in (result.vpc.cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list) + - vpc_cidr_a not in (result.vpc.cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list) + - vpc_cidr_b in (result.vpc.cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list) + - vpc_info.vpcs[0].cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list | length == 2 + - vpc_cidr in (vpc_info.vpcs[0].cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list) + - vpc_cidr_a not in (vpc_info.vpcs[0].cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list) + - vpc_cidr_b in (vpc_info.vpcs[0].cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list) + + # ============================================================ + + - name: Remove IPv6 CIDR association from VPC in check mode + ec2_vpc_net: + state: present + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + ipv6_cidr: False + check_mode: true + register: result + - name: assert configuration would change + assert: + that: + - result is successful + - result is changed + + - name: Set IPv6 CIDR association to VPC, no change expected + # I.e. assert the previous ec2_vpc_net task in check_mode did not + # mistakenly modify the VPC configuration. 
+ ec2_vpc_net: + state: present + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + ipv6_cidr: True + register: result + - name: assert configuration did not change + assert: + that: + - result is successful + - result is not changed + + - name: Remove IPv6 CIDR association from VPC + ec2_vpc_net: + state: present + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + ipv6_cidr: False + register: result + - name: assert IPv6 CIDR association removed from VPC + assert: + that: + - result is successful + - result is changed + - result.vpc.ipv6_cidr_block_association_set | length == 1 + - result.vpc.ipv6_cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-") + - result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block | ansible.utils.ipv6 + - result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block_state.state in ["disassociated"] + + - name: Add IPv6 CIDR association to VPC again + ec2_vpc_net: + state: present + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + ipv6_cidr: True + register: result + - name: assert configuration change + assert: + that: + - result is successful + - result is changed + # Because the IPv6 CIDR was associated, then disassociated, then reassociated, + # now there should be one disassociated block and one associated block. + - result.vpc.ipv6_cidr_block_association_set | length == 2 + - result.vpc.ipv6_cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-") + - result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block | ansible.utils.ipv6 + - result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block_state.state in ["disassociated", "disassociating"] + - result.vpc.ipv6_cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-") + - result.vpc.ipv6_cidr_block_association_set[1].ipv6_cidr_block | ansible.utils.ipv6 + - result.vpc.ipv6_cidr_block_association_set[1].ipv6_cidr_block_state.state in ["associated", "associating"] + + + # ============================================================ + + - name: test check mode to delete a VPC + ec2_vpc_net: + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + state: absent + check_mode: true + register: result + + - name: assert that a change would have been made + assert: + that: + - result is changed + + # ============================================================ + + always: + + - name: Describe VPCs before deleting them (for debugging) + ec2_vpc_net_info: + ignore_errors: true + + - name: replace the DHCP options set so the new one can be deleted + ec2_vpc_net: + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + state: present + multi_ok: no + dhcp_opts_id: "{{ default_dhcp_options_id }}" + ignore_errors: true + + - name: remove the DHCP option set + ec2_vpc_dhcp_option: + dhcp_options_id: "{{ new_dhcp.dhcp_options_id }}" + state: absent + ignore_errors: true + + - name: remove the VPC + ec2_vpc_net: + cidr_block: "{{ vpc_cidr }}" + name: "{{ vpc_name }}" + state: absent + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/aliases new file mode 100644 index 000000000..4b396a8bb --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/aliases @@ -0,0 +1,5 @@ +time=15m + +cloud/aws + +ec2_vpc_route_table_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/defaults/main.yml
b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/defaults/main.yml new file mode 100644 index 000000000..111510850 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/defaults/main.yml @@ -0,0 +1,4 @@ +--- +availability_zone_a: '{{ ec2_availability_zone_names[0] }}' +availability_zone_b: '{{ ec2_availability_zone_names[1] }}' +vpc_cidr: 10.228.224.0/21 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/meta/main.yml new file mode 100644 index 000000000..1d40168d0 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: +- setup_ec2_facts diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/tasks/main.yml new file mode 100644 index 000000000..f5fa7c740 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/tasks/main.yml @@ -0,0 +1,1499 @@ +- name: ec2_vpc_route_table integration tests + module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + + - name: create VPC + ec2_vpc_net: + cidr_block: '{{ vpc_cidr }}' + name: '{{ resource_prefix }}_vpc' + state: present + register: vpc + - name: assert that VPC has an id + assert: + that: + - vpc.vpc.id is defined + - vpc.changed + - name: Assign IPv6 CIDR block to existing VPC, check mode + ec2_vpc_net: + cidr_block: '{{ vpc_cidr }}' + name: '{{ resource_prefix }}_vpc' + ipv6_cidr: true + check_mode: true + register: vpc_update + - name: assert that VPC would be changed + assert: + that: + - vpc_update.changed + - name: Assign Amazon-provided IPv6 CIDR block to existing VPC + ec2_vpc_net: + cidr_block: '{{ vpc_cidr }}' + name: '{{ resource_prefix }}_vpc' + ipv6_cidr: true + register: vpc_update + - name: assert that VPC was changed, IPv6 CIDR is configured + assert: + that: + - vpc_update.vpc.id == vpc.vpc.id + - vpc_update.changed + - vpc_update.vpc.ipv6_cidr_block_association_set | length == 1 + - name: Fetch existing VPC info + ec2_vpc_net_info: + filters: + "tag:Name": "{{ resource_prefix }}_vpc" + register: vpc_info + - name: assert vpc net info after configuring IPv6 CIDR + assert: + that: + - vpc_info.vpcs | length == 1 + - vpc_info.vpcs[0].id == vpc.vpc.id + - vpc_info.vpcs[0].ipv6_cidr_block_association_set | length == 1 + - vpc_info.vpcs[0].ipv6_cidr_block_association_set[0].ipv6_cidr_block_state.state == "associated" + - name: get Amazon-provided IPv6 CIDR associated with the VPC + set_fact: + # Example value: 2600:1f1c:1b3:8f00::/56 + vpc_ipv6_cidr_block: '{{ vpc_info.vpcs[0].ipv6_cidr_block_association_set[0].ipv6_cidr_block }}' + - name: create subnets + ec2_vpc_subnet: + cidr: '{{ item.cidr }}' + az: '{{ item.zone }}' + assign_instances_ipv6: '{{ item.assign_instances_ipv6 }}' + ipv6_cidr: '{{ item.ipv6_cidr }}' + vpc_id: '{{ vpc.vpc.id }}' + state: present + tags: + Public: '{{ item.public | string }}' + Name: "{{ (item.public | bool) | ternary('public', 'private') }}-{{ item.zone }}" + with_items: + - cidr: 10.228.224.0/24 + zone: '{{ availability_zone_a }}' + public: 'True' + assign_instances_ipv6: false + ipv6_cidr: null + -
cidr: 10.228.225.0/24 + zone: '{{ availability_zone_b }}' + public: 'True' + assign_instances_ipv6: false + ipv6_cidr: null + - cidr: 10.228.226.0/24 + zone: '{{ availability_zone_a }}' + public: 'False' + assign_instances_ipv6: false + ipv6_cidr: null + - cidr: 10.228.227.0/24 + zone: '{{ availability_zone_b }}' + public: 'False' + assign_instances_ipv6: false + ipv6_cidr: null + - cidr: 10.228.228.0/24 + zone: '{{ availability_zone_a }}' + public: 'False' + assign_instances_ipv6: true + # Carve first /64 subnet of the Amazon-provided CIDR for the VPC. + ipv6_cidr: "{{ vpc_ipv6_cidr_block | ansible.netcommon.ipsubnet(64, 1) }}" + - cidr: 10.228.229.0/24 + zone: '{{ availability_zone_a }}' + public: 'True' + assign_instances_ipv6: true + ipv6_cidr: "{{ vpc_ipv6_cidr_block | ansible.netcommon.ipsubnet(64, 2) }}" + - cidr: 10.228.230.0/24 + zone: '{{ availability_zone_b }}' + public: 'False' + assign_instances_ipv6: true + ipv6_cidr: "{{ vpc_ipv6_cidr_block | ansible.netcommon.ipsubnet(64, 3) }}" + register: subnets + - ec2_vpc_subnet_info: + filters: + vpc-id: '{{ vpc.vpc.id }}' + register: vpc_subnets + - set_fact: + public_subnets: "{{ (vpc_subnets.subnets | selectattr('tags.Public', 'equalto',\ + \ 'True') | map(attribute='id') | list) }}" + public_cidrs: "{{ (vpc_subnets.subnets | selectattr('tags.Public', 'equalto',\ + \ 'True') | map(attribute='cidr_block') | list) }}" + private_subnets: "{{ (vpc_subnets.subnets | selectattr('tags.Public', 'equalto',\ + \ 'False') | map(attribute='id') | list) }}" + - name: create IGW + ec2_vpc_igw: + vpc_id: '{{ vpc.vpc.id }}' + register: vpc_igw + - name: create NAT GW + ec2_vpc_nat_gateway: + if_exist_do_not_create: yes + wait: yes + subnet_id: '{{ subnets.results[0].subnet.id }}' + register: nat_gateway + - name: CHECK MODE - route table should be created + ec2_vpc_route_table: + vpc_id: '{{ vpc.vpc.id }}' + tags: + Public: 'true' + Name: Public route table + check_mode: true + register: check_mode_results + - name: assert that the public route table would be created + assert: + that: + - check_mode_results.changed + + - name: create public route table + ec2_vpc_route_table: + vpc_id: '{{ vpc.vpc.id }}' + tags: + Public: 'true' + Name: Public route table + register: create_public_table + - name: assert that public route table has an id + assert: + that: + - create_public_table.changed + - create_public_table.route_table.id.startswith('rtb-') + - "'Public' in create_public_table.route_table.tags" + - create_public_table.route_table.tags['Public'] == 'true' + - create_public_table.route_table.associations | length == 0 + - create_public_table.route_table.vpc_id == "{{ vpc.vpc.id }}" + - create_public_table.route_table.propagating_vgws | length == 0 + # One route for IPv4, one route for IPv6 + - create_public_table.route_table.routes | length == 2 + + - name: CHECK MODE - route table should already exist + ec2_vpc_route_table: + vpc_id: '{{ vpc.vpc.id }}' + tags: + Public: 'true' + Name: Public route table + check_mode: true + register: check_mode_results + - name: assert the table already exists + assert: + that: + - not check_mode_results.changed + + - name: recreate public route table + ec2_vpc_route_table: + vpc_id: '{{ vpc.vpc.id }}' + tags: + Public: 'true' + Name: Public route table + register: recreate_public_route_table + - name: assert that public route table did not change + assert: + that: + - not recreate_public_route_table.changed + - create_public_table.route_table.id.startswith('rtb-') + - "'Public' in 
create_public_table.route_table.tags" + - create_public_table.route_table.tags['Public'] == 'true' + - create_public_table.route_table.associations | length == 0 + - create_public_table.route_table.vpc_id == "{{ vpc.vpc.id }}" + - create_public_table.route_table.propagating_vgws | length == 0 + - create_public_table.route_table.routes | length == 2 + + - name: CHECK MODE - add route to public route table + ec2_vpc_route_table: + vpc_id: '{{ vpc.vpc.id }}' + tags: + Public: 'true' + Name: Public route table + routes: + - dest: 0.0.0.0/0 + gateway_id: igw + - dest: ::/0 + gateway_id: igw + check_mode: true + register: check_mode_results + - name: assert a route would be added + assert: + that: + - check_mode_results.changed + + - name: add a route to public route table + ec2_vpc_route_table: + vpc_id: '{{ vpc.vpc.id }}' + tags: + Public: 'true' + Name: Public route table + routes: + - dest: 0.0.0.0/0 + gateway_id: igw + - dest: ::/0 + gateway_id: igw + register: add_routes + - name: assert route table contains new route + assert: + that: + - add_routes.changed + - add_routes.route_table.id.startswith('rtb-') + - "'Public' in add_routes.route_table.tags" + - add_routes.route_table.tags['Public'] == 'true' + # 10.228.224.0/21 + # 0.0.0.0/0 + # ::/0 + # Amazon-provided IPv6 block + - add_routes.route_table.routes | length == 4 + - add_routes.route_table.associations | length == 0 + - add_routes.route_table.vpc_id == "{{ vpc.vpc.id }}" + - add_routes.route_table.propagating_vgws | length == 0 + + - name: CHECK MODE - re-add route to public route table + ec2_vpc_route_table: + vpc_id: '{{ vpc.vpc.id }}' + tags: + Public: 'true' + Name: Public route table + routes: + - dest: 0.0.0.0/0 + gateway_id: igw + check_mode: true + register: check_mode_results + - name: assert a route would not be added + assert: + that: + - check_mode_results is not changed + + - name: re-add a route to public route table + ec2_vpc_route_table: + vpc_id: '{{ vpc.vpc.id }}' + tags: + Public: 'true' + Name: Public route table + routes: + - dest: 0.0.0.0/0 + gateway_id: igw + register: add_routes + - name: assert route table contains route + assert: + that: + - add_routes is not changed + - add_routes.route_table.routes | length == 4 + + - name: CHECK MODE - add subnets to public route table + ec2_vpc_route_table: + vpc_id: '{{ vpc.vpc.id }}' + tags: + Public: 'true' + Name: Public route table + routes: + - dest: 0.0.0.0/0 + gateway_id: igw + subnets: '{{ public_subnets }}' + check_mode: true + register: check_mode_results + - name: assert the subnets would be added to the route table + assert: + that: + - check_mode_results.changed + + - name: add subnets to public route table + ec2_vpc_route_table: + vpc_id: '{{ vpc.vpc.id }}' + tags: + Public: 'true' + Name: Public route table + routes: + - dest: 0.0.0.0/0 + gateway_id: igw + subnets: '{{ public_subnets }}' + register: add_subnets + - name: assert route table contains subnets + assert: + that: + - add_subnets.changed + - add_subnets.route_table.associations | length == 3 + + - name: CHECK MODE - no routes but purge_routes set to false + ec2_vpc_route_table: + vpc_id: '{{ vpc.vpc.id }}' + tags: + Public: 'true' + Name: Public route table + purge_routes: no + subnets: '{{ public_subnets }}' + check_mode: true + register: check_mode_results + - name: assert no routes would be removed + assert: + that: + - not check_mode_results.changed + + - name: rerun with purge_routes set to false + ec2_vpc_route_table: + vpc_id: '{{ vpc.vpc.id }}' + tags: + Public: 'true' + Name: Public
route table + purge_routes: no + subnets: '{{ public_subnets }}' + register: no_purge_routes + - name: assert route table still has routes + assert: + that: + - not no_purge_routes.changed + - no_purge_routes.route_table.routes | length == 4 + - no_purge_routes.route_table.associations | length == 3 + + - name: rerun with purge_subnets set to false + ec2_vpc_route_table: + vpc_id: '{{ vpc.vpc.id }}' + tags: + Public: 'true' + Name: Public route table + purge_subnets: no + routes: + - dest: 0.0.0.0/0 + gateway_id: igw + register: no_purge_subnets + - name: assert route table still has subnets + assert: + that: + - not no_purge_subnets.changed + - no_purge_subnets.route_table.routes | length == 4 + - no_purge_subnets.route_table.associations | length == 3 + + - name: rerun with purge_tags not set (implicitly false) + ec2_vpc_route_table: + vpc_id: '{{ vpc.vpc.id }}' + routes: + - dest: 0.0.0.0/0 + gateway_id: igw + lookup: id + route_table_id: '{{ create_public_table.route_table.id }}' + subnets: '{{ public_subnets }}' + register: no_purge_tags + - name: assert route table still has tags + assert: + that: + - not no_purge_tags.changed + - "'Public' in no_purge_tags.route_table.tags" + - no_purge_tags.route_table.tags['Public'] == 'true' + + - name: CHECK MODE - purge subnets + ec2_vpc_route_table: + vpc_id: '{{ vpc.vpc.id }}' + routes: + - dest: 0.0.0.0/0 + gateway_id: igw + subnets: [] + tags: + Public: 'true' + Name: Public route table + check_mode: true + register: check_mode_results + - name: assert subnets would be removed + assert: + that: + - check_mode_results.changed + + - name: purge subnets + ec2_vpc_route_table: + vpc_id: '{{ vpc.vpc.id }}' + routes: + - dest: 0.0.0.0/0 + gateway_id: igw + subnets: [] + tags: + Public: 'true' + Name: Public route table + register: purge_subnets + - name: assert purge subnets worked + assert: + that: + - purge_subnets.changed + - purge_subnets.route_table.associations | length == 0 + - purge_subnets.route_table.id == create_public_table.route_table.id + + - name: CHECK MODE - purge routes + ec2_vpc_route_table: + vpc_id: '{{ vpc.vpc.id }}' + tags: + Public: 'true' + Name: Public route table + routes: [] + check_mode: true + register: check_mode_results + - name: assert routes would be removed + assert: + that: + - check_mode_results.changed + + - name: add subnets by cidr to public route table + ec2_vpc_route_table: + vpc_id: '{{ vpc.vpc.id }}' + routes: + - dest: 0.0.0.0/0 + gateway_id: igw + subnets: '{{ public_cidrs }}' + lookup: id + route_table_id: '{{ create_public_table.route_table.id }}' + register: add_subnets_cidr + - name: assert route table contains subnets added by cidr + assert: + that: + - add_subnets_cidr.changed + - add_subnets_cidr.route_table.associations | length == 3 + + - name: purge subnets added by cidr + ec2_vpc_route_table: + vpc_id: '{{ vpc.vpc.id }}' + routes: + - dest: 0.0.0.0/0 + gateway_id: igw + subnets: [] + lookup: id + route_table_id: '{{ create_public_table.route_table.id }}' + register: purge_subnets_cidr + - name: assert purge subnets added by cidr worked + assert: + that: + - purge_subnets_cidr.changed + - purge_subnets_cidr.route_table.associations | length == 0 + + - name: add subnets by name to public route table + ec2_vpc_route_table: + vpc_id: '{{ vpc.vpc.id }}' + routes: + - dest: 0.0.0.0/0 + gateway_id: igw + subnets: '{{ public_subnets }}' + lookup: id + route_table_id: '{{ create_public_table.route_table.id }}' + register: add_subnets_name + - name: assert route table contains subnets added by name + 
assert: + that: + - add_subnets_name.changed + - add_subnets_name.route_table.associations | length == 3 + + - name: purge subnets added by name + ec2_vpc_route_table: + vpc_id: '{{ vpc.vpc.id }}' + routes: + - dest: 0.0.0.0/0 + gateway_id: igw + subnets: [] + lookup: id + route_table_id: '{{ create_public_table.route_table.id }}' + register: purge_subnets_name + - name: assert purge subnets added by name worked + assert: + that: + - purge_subnets_name.changed + - purge_subnets_name.route_table.associations | length == 0 + + - name: purge routes + ec2_vpc_route_table: + vpc_id: '{{ vpc.vpc.id }}' + tags: + Public: 'true' + Name: Public route table + routes: [] + register: purge_routes + - name: assert purge routes worked + assert: + that: + - purge_routes.changed + - purge_routes.route_table.routes | length == 3 + - purge_routes.route_table.id == create_public_table.route_table.id + + - name: CHECK MODE - update tags + ec2_vpc_route_table: + vpc_id: '{{ vpc.vpc.id }}' + route_table_id: '{{ create_public_table.route_table.id }}' + lookup: id + purge_tags: yes + tags: + Name: Public route table + Updated: new_tag + check_mode: true + register: check_mode_results + - name: assert tags would be changed + assert: + that: + - check_mode_results.changed + + - name: update tags + ec2_vpc_route_table: + vpc_id: '{{ vpc.vpc.id }}' + route_table_id: '{{ create_public_table.route_table.id }}' + lookup: id + purge_tags: yes + tags: + Name: Public route table + Updated: new_tag + register: update_tags + - name: assert update tags worked + assert: + that: + - update_tags.changed + - "'Updated' in update_tags.route_table.tags" + - update_tags.route_table.tags['Updated'] == 'new_tag' + - "'Public' not in update_tags.route_table.tags" + + - name: create NAT GW + ec2_vpc_nat_gateway: + if_exist_do_not_create: yes + wait: yes + subnet_id: '{{ subnets.results[0].subnet.id }}' + register: nat_gateway + - name: CHECK MODE - create private route table + ec2_vpc_route_table: + vpc_id: '{{ vpc.vpc.id }}' + tags: + Public: 'false' + Name: Private route table + routes: + - gateway_id: '{{ nat_gateway.nat_gateway_id }}' + dest: 0.0.0.0/0 + subnets: '{{ private_subnets }}' + check_mode: true + register: check_mode_results + - name: assert the route table would be created + assert: + that: + - check_mode_results.changed + + - name: create private route table + ec2_vpc_route_table: + vpc_id: '{{ vpc.vpc.id }}' + tags: + Public: 'false' + Name: Private route table + routes: + - gateway_id: '{{ nat_gateway.nat_gateway_id }}' + dest: 0.0.0.0/0 + subnets: '{{ private_subnets }}' + register: create_private_table + - name: assert creating private route table worked + assert: + that: + - create_private_table.changed + - create_private_table.route_table.id != create_public_table.route_table.id + - "'Public' in create_private_table.route_table.tags" + + - name: CHECK MODE - destroy public route table by tags + ec2_vpc_route_table: + vpc_id: '{{ vpc.vpc.id }}' + state: absent + tags: + Updated: new_tag + Name: Public route table + check_mode: true + register: check_mode_results + - name: assert the route table would be deleted + assert: + that: check_mode_results.changed + - name: destroy public route table by tags + ec2_vpc_route_table: + vpc_id: '{{ vpc.vpc.id }}' + state: absent + tags: + Updated: new_tag + Name: Public route table + register: destroy_table + - name: assert destroy table worked + assert: + that: + - destroy_table.changed + + - name: CHECK MODE - redestroy public route table + ec2_vpc_route_table: + 
route_table_id: '{{ create_public_table.route_table.id }}' + lookup: id + state: absent + check_mode: true + register: check_mode_results + - name: assert the public route table does not exist + assert: + that: + - not check_mode_results.changed + + - name: redestroy public route table + ec2_vpc_route_table: + route_table_id: '{{ create_public_table.route_table.id }}' + lookup: id + state: absent + register: redestroy_table + - name: assert redestroy table worked + assert: + that: + - not redestroy_table.changed + + - name: destroy NAT GW + ec2_vpc_nat_gateway: + state: absent + wait: yes + release_eip: yes + subnet_id: '{{ subnets.results[0].subnet.id }}' + nat_gateway_id: '{{ nat_gateway.nat_gateway_id }}' + register: nat_gateway + - name: show route table info, get table using route-table-id + ec2_vpc_route_table_info: + filters: + route-table-id: '{{ create_private_table.route_table.id }}' + register: route_table_info + - name: assert route_table_info has correct attributes + assert: + that: + - '"route_tables" in route_table_info' + - route_table_info.route_tables | length == 1 + - '"id" in route_table_info.route_tables[0]' + - '"routes" in route_table_info.route_tables[0]' + - '"associations" in route_table_info.route_tables[0]' + - '"tags" in route_table_info.route_tables[0]' + - '"vpc_id" in route_table_info.route_tables[0]' + - route_table_info.route_tables[0].id == create_private_table.route_table.id + - '"propagating_vgws" in route_table_info.route_tables[0]' + + - name: show route table info, get table using tags + ec2_vpc_route_table_info: + filters: + tag:Public: 'false' + tag:Name: Private route table + vpc-id: '{{ vpc.vpc.id }}' + register: route_table_info + - name: assert route_table_info has correct tags + assert: + that: + - route_table_info.route_tables | length == 1 + - '"tags" in route_table_info.route_tables[0]' + - '"Public" in route_table_info.route_tables[0].tags' + - route_table_info.route_tables[0].tags["Public"] == "false" + - '"Name" in route_table_info.route_tables[0].tags' + - route_table_info.route_tables[0].tags["Name"] == "Private route table" + + - name: create NAT GW + ec2_vpc_nat_gateway: + if_exist_do_not_create: yes + wait: yes + subnet_id: '{{ subnets.results[0].subnet.id }}' + register: nat_gateway + - name: show route table info + ec2_vpc_route_table_info: + filters: + route-table-id: '{{ create_private_table.route_table.id }}' + - name: recreate private route table with new NAT GW + ec2_vpc_route_table: + vpc_id: '{{ vpc.vpc.id }}' + tags: + Public: 'false' + Name: Private route table + routes: + - nat_gateway_id: '{{ nat_gateway.nat_gateway_id }}' + dest: 0.0.0.0/0 + subnets: '{{ private_subnets }}' + register: recreate_private_table + - name: assert recreating private route table worked + assert: + that: + - recreate_private_table.changed + - recreate_private_table.route_table.id != create_public_table.route_table.id + + - name: create a VPC endpoint to test that ec2_vpc_route_table ignores it + ec2_vpc_endpoint: + state: present + vpc_id: '{{ vpc.vpc.id }}' + service: com.amazonaws.{{ aws_region }}.s3 + route_table_ids: + - '{{ recreate_private_table.route_table.route_table_id }}' + wait: True + register: vpc_endpoint + - name: purge routes + ec2_vpc_route_table: + vpc_id: '{{ vpc.vpc.id }}' + tags: + Public: 'false' + Name: Private route table + routes: + - nat_gateway_id: '{{ nat_gateway.nat_gateway_id }}' + dest: 0.0.0.0/0 + subnets: '{{ private_subnets }}' + purge_routes: true + register: result + - name: Get endpoint info to verify that it
wasn't purged from the route table + ec2_vpc_endpoint_info: + query: endpoints + vpc_endpoint_ids: + - '{{ vpc_endpoint.result.vpc_endpoint_id }}' + register: endpoint_details + - name: assert the route table is associated with the VPC endpoint + assert: + that: + - endpoint_details.vpc_endpoints[0].route_table_ids[0] == recreate_private_table.route_table.route_table_id + + # ------------------------------------------------------------------------------------------ + + - name: Create gateway route table - check_mode + ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: 'true' + Name: Gateway route table + gateway_id: "{{ vpc_igw.gateway_id }}" + register: create_gateway_table + check_mode: yes + + - assert: + that: + - create_gateway_table is changed + + - name: Create gateway route table + ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: 'true' + Name: Gateway route table + gateway_id: "{{ vpc_igw.gateway_id }}" + register: create_gateway_table + + - assert: + that: + - create_gateway_table is changed + - create_gateway_table.route_table.id.startswith('rtb-') + - "'Public' in create_gateway_table.route_table.tags" + - create_gateway_table.route_table.tags['Public'] == 'true' + - create_gateway_table.route_table.routes | length == 2 + - create_gateway_table.route_table.associations | length == 1 + - create_gateway_table.route_table.vpc_id == vpc.vpc.id + - create_gateway_table.route_table.propagating_vgws | length == 0 + + - name: Create gateway route table (idempotence) - check_mode + ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: 'true' + Name: Gateway route table + gateway_id: "{{ vpc_igw.gateway_id }}" + register: create_gateway_table + check_mode: yes + + - assert: + that: + - create_gateway_table is not changed + + - name: Create gateway route table (idempotence) + ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: 'true' + Name: Gateway route table + gateway_id: "{{ vpc_igw.gateway_id }}" + register: create_gateway_table + + - assert: + that: + - create_gateway_table is not changed + - create_gateway_table.route_table.id.startswith('rtb-') + - "'Public' in create_gateway_table.route_table.tags" + - create_gateway_table.route_table.tags['Public'] == 'true' + - create_gateway_table.route_table.routes | length == 2 + - create_gateway_table.route_table.associations | length == 1 + - create_gateway_table.route_table.vpc_id == vpc.vpc.id + - create_gateway_table.route_table.propagating_vgws | length == 0 + + # ------------------------------------------------------------------------------------------ + + - name: Create ENI for gateway route table + ec2_eni: + subnet_id: '{{ public_subnets[0] }}' + register: eni + + - name: Replace route to gateway route table - check_mode + ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: 'true' + Name: Gateway route table + gateway_id: "{{ vpc_igw.gateway_id }}" + routes: + - dest: "{{ vpc_cidr }}" + network_interface_id: "{{ eni.interface.id }}" + purge_routes: no + register: create_gateway_table + check_mode: yes + + - assert: + that: + - create_gateway_table is changed + + - name: Replace route to gateway route table + ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: 'true' + Name: Gateway route table + gateway_id: "{{ vpc_igw.gateway_id }}" + routes: + - dest: "{{ vpc_cidr }}" + network_interface_id: "{{ eni.interface.id }}" + purge_routes: no + register: create_gateway_table + + - assert: + that: + - create_gateway_table is changed + - 
create_gateway_table.route_table.id.startswith('rtb-') + - "'Public' in create_gateway_table.route_table.tags" + - create_gateway_table.route_table.tags['Public'] == 'true' + - create_gateway_table.route_table.routes | length == 2 + - create_gateway_table.route_table.associations | length == 1 + - create_gateway_table.route_table.vpc_id == vpc.vpc.id + - create_gateway_table.route_table.propagating_vgws | length == 0 + - create_gateway_table.route_table.routes[0].destination_cidr_block == vpc_cidr + - create_gateway_table.route_table.routes[0].network_interface_id == eni.interface.id + + - name: Replace route to gateway route table (idempotence) - check_mode + ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: 'true' + Name: Gateway route table + gateway_id: "{{ vpc_igw.gateway_id }}" + routes: + - dest: "{{ vpc_cidr }}" + network_interface_id: "{{ eni.interface.id }}" + purge_routes: no + register: create_gateway_table + check_mode: yes + + - assert: + that: + - create_gateway_table is not changed + + - name: Replace route to gateway route table (idempotence) + ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: 'true' + Name: Gateway route table + gateway_id: "{{ vpc_igw.gateway_id }}" + routes: + - dest: "{{ vpc_cidr }}" + network_interface_id: "{{ eni.interface.id }}" + purge_routes: no + register: create_gateway_table + + - assert: + that: + - create_gateway_table is not changed + - create_gateway_table.route_table.id.startswith('rtb-') + - "'Public' in create_gateway_table.route_table.tags" + - create_gateway_table.route_table.tags['Public'] == 'true' + - create_gateway_table.route_table.routes | length == 2 + - create_gateway_table.route_table.associations | length == 1 + - create_gateway_table.route_table.vpc_id == vpc.vpc.id + - create_gateway_table.route_table.propagating_vgws | length == 0 + - create_gateway_table.route_table.routes[0].destination_cidr_block == vpc_cidr + - create_gateway_table.route_table.routes[0].network_interface_id == eni.interface.id + + # ------------------------------------------------------------------------------------------ + + - name: Add route to gateway route table - check_mode + ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: 'true' + Name: Gateway route table + gateway_id: "{{ vpc_igw.gateway_id }}" + routes: + - dest: "10.228.228.0/24" + network_interface_id: "{{ eni.interface.id }}" + purge_routes: no + register: create_gateway_table + check_mode: yes + + - assert: + that: + - create_gateway_table is changed + + - name: Add route to gateway route table + ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: 'true' + Name: Gateway route table + gateway_id: "{{ vpc_igw.gateway_id }}" + routes: + - dest: "10.228.228.0/24" + network_interface_id: "{{ eni.interface.id }}" + purge_routes: no + register: create_gateway_table + + - assert: + that: + - create_gateway_table is changed + - create_gateway_table.route_table.id.startswith('rtb-') + - "'Public' in create_gateway_table.route_table.tags" + - create_gateway_table.route_table.tags['Public'] == 'true' + - create_gateway_table.route_table.routes | length == 3 + - create_gateway_table.route_table.associations | length == 1 + - create_gateway_table.route_table.vpc_id == vpc.vpc.id + - create_gateway_table.route_table.propagating_vgws | length == 0 + + - name: Add route to gateway route table (idempotence) - check_mode + ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: 'true' + Name: Gateway route table + gateway_id: 
"{{ vpc_igw.gateway_id }}" + routes: + - dest: "10.228.228.0/24" + network_interface_id: "{{ eni.interface.id }}" + purge_routes: no + register: create_gateway_table + check_mode: yes + + - assert: + that: + - create_gateway_table is not changed + + - name: Add route to gateway route table (idempotence) + ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: 'true' + Name: Gateway route table + gateway_id: "{{ vpc_igw.gateway_id }}" + routes: + - dest: "10.228.228.0/24" + network_interface_id: "{{ eni.interface.id }}" + purge_routes: no + register: create_gateway_table + + - assert: + that: + - create_gateway_table is not changed + - create_gateway_table.route_table.id.startswith('rtb-') + - "'Public' in create_gateway_table.route_table.tags" + - create_gateway_table.route_table.tags['Public'] == 'true' + - create_gateway_table.route_table.routes | length == 3 + - create_gateway_table.route_table.associations | length == 1 + - create_gateway_table.route_table.vpc_id == vpc.vpc.id + - create_gateway_table.route_table.propagating_vgws | length == 0 + + # ------------------------------------------------------------------------------------------ + + - name: Ensure gateway doesn't disassociate when not passed in - check_mode + ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: 'true' + Name: Gateway route table + routes: + - dest: "10.228.228.0/24" + network_interface_id: "{{ eni.interface.id }}" + purge_routes: no + register: create_gateway_table + check_mode: yes + + - assert: + that: + - create_gateway_table is not changed + + - name: Ensure gateway doesn't disassociate when not passed in + ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: 'true' + Name: Gateway route table + routes: + - dest: "10.228.228.0/24" + network_interface_id: "{{ eni.interface.id }}" + purge_routes: no + register: create_gateway_table + + - assert: + that: + - create_gateway_table is not changed + - create_gateway_table.route_table.id.startswith('rtb-') + - "'Public' in create_gateway_table.route_table.tags" + - create_gateway_table.route_table.tags['Public'] == 'true' + - create_gateway_table.route_table.routes | length == 3 + - "{{ create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 1 }}" + - create_gateway_table.route_table.vpc_id == vpc.vpc.id + - create_gateway_table.route_table.propagating_vgws | length == 0 + + # ------------------------------------------------------------------------------------------ + + - name: Disassociate gateway when gateway_id is 'None' - check_mode + ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: 'true' + Name: Gateway route table + gateway_id: None + routes: + - dest: "10.228.228.0/24" + network_interface_id: "{{ eni.interface.id }}" + purge_routes: no + register: create_gateway_table + check_mode: yes + + - assert: + that: + - create_gateway_table is changed + + - name: Disassociate gateway when gateway_id is 'None' + ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: 'true' + Name: Gateway route table + gateway_id: None + routes: + - dest: "10.228.228.0/24" + network_interface_id: "{{ eni.interface.id }}" + purge_routes: no + register: create_gateway_table + + - assert: + that: + - create_gateway_table is changed + - create_gateway_table.route_table.id.startswith('rtb-') + - "'Public' in create_gateway_table.route_table.tags" + - create_gateway_table.route_table.tags['Public'] == 'true' + - 
create_gateway_table.route_table.routes | length == 3 + - "{{ create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 0 }}" + - create_gateway_table.route_table.vpc_id == vpc.vpc.id + - create_gateway_table.route_table.propagating_vgws | length == 0 + + - name: Disassociate gateway when gateway_id is 'None' (idempotence) - check_mode + ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: 'true' + Name: Gateway route table + gateway_id: None + routes: + - dest: "10.228.228.0/24" + network_interface_id: "{{ eni.interface.id }}" + purge_routes: no + register: create_gateway_table + check_mode: yes + + - assert: + that: + - create_gateway_table is not changed + + - name: Disassociate gateway when gateway_id is 'None' (idempotence) + ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: 'true' + Name: Gateway route table + gateway_id: None + routes: + - dest: "10.228.228.0/24" + network_interface_id: "{{ eni.interface.id }}" + purge_routes: no + register: create_gateway_table + + - assert: + that: + - create_gateway_table is not changed + - create_gateway_table.route_table.id.startswith('rtb-') + - "'Public' in create_gateway_table.route_table.tags" + - create_gateway_table.route_table.tags['Public'] == 'true' + - create_gateway_table.route_table.routes | length == 3 + - "{{ create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 0 }}" + - create_gateway_table.route_table.vpc_id == vpc.vpc.id + - create_gateway_table.route_table.propagating_vgws | length == 0 + + # ------------------------------------------------------------------------------------------ + + - name: Associate gateway with route table - check_mode + ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: 'true' + Name: Gateway route table + gateway_id: "{{ vpc_igw.gateway_id }}" + purge_routes: no + register: create_gateway_table + check_mode: yes + + - assert: + that: + - create_gateway_table is changed + + - name: Associate gateway with route table + ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: 'true' + Name: Gateway route table + gateway_id: "{{ vpc_igw.gateway_id }}" + purge_routes: no + register: create_gateway_table + + - assert: + that: + - create_gateway_table is changed + - create_gateway_table.route_table.id.startswith('rtb-') + - "'Public' in create_gateway_table.route_table.tags" + - create_gateway_table.route_table.tags['Public'] == 'true' + - create_gateway_table.route_table.routes | length == 3 + - create_gateway_table.route_table.associations | length == 1 + - "{{ create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 1 }}" + - create_gateway_table.route_table.vpc_id == vpc.vpc.id + - create_gateway_table.route_table.propagating_vgws | length == 0 + + - name: Associate gateway with route table (idempotence) - check_mode + ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: 'true' + Name: Gateway route table + gateway_id: "{{ vpc_igw.gateway_id }}" + purge_routes: no + register: create_gateway_table + check_mode: yes + + - assert: + that: + - create_gateway_table is not changed + + - name: Associate gateway with route table (idempotence) + ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: 'true' + Name: Gateway route table + gateway_id: "{{ vpc_igw.gateway_id }}" + purge_routes: 
no + register: create_gateway_table + + - assert: + that: + - create_gateway_table is not changed + - create_gateway_table.route_table.id.startswith('rtb-') + - "'Public' in create_gateway_table.route_table.tags" + - create_gateway_table.route_table.tags['Public'] == 'true' + - create_gateway_table.route_table.routes | length == 3 + - create_gateway_table.route_table.associations | length == 1 + - "{{ create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 1 }}" + - create_gateway_table.route_table.vpc_id == vpc.vpc.id + - create_gateway_table.route_table.propagating_vgws | length == 0 + + # ------------------------------------------------------------------------------------------ + + - name: Disassociate gateway when gateway_id is '' - check_mode + ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: 'true' + Name: Gateway route table + gateway_id: '' + routes: + - dest: "10.228.228.0/24" + network_interface_id: "{{ eni.interface.id }}" + purge_routes: no + register: create_gateway_table + check_mode: yes + + - assert: + that: + - create_gateway_table is changed + + - name: Disassociate gateway when gateway_id is '' + ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: 'true' + Name: Gateway route table + gateway_id: '' + routes: + - dest: "10.228.228.0/24" + network_interface_id: "{{ eni.interface.id }}" + purge_routes: no + register: create_gateway_table + + - assert: + that: + - create_gateway_table is changed + - create_gateway_table.route_table.id.startswith('rtb-') + - "'Public' in create_gateway_table.route_table.tags" + - create_gateway_table.route_table.tags['Public'] == 'true' + - create_gateway_table.route_table.routes | length == 3 + - "{{ create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 0 }}" + - create_gateway_table.route_table.vpc_id == vpc.vpc.id + - create_gateway_table.route_table.propagating_vgws | length == 0 + + - name: Disassociate gateway when gateway_id is '' (idempotence) - check_mode + ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: 'true' + Name: Gateway route table + gateway_id: '' + routes: + - dest: "10.228.228.0/24" + network_interface_id: "{{ eni.interface.id }}" + purge_routes: no + register: create_gateway_table + check_mode: yes + + - assert: + that: + - create_gateway_table is not changed + + - name: Disassociate gateway when gateway_id is '' (idempotence) + ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: 'true' + Name: Gateway route table + gateway_id: '' + routes: + - dest: "10.228.228.0/24" + network_interface_id: "{{ eni.interface.id }}" + purge_routes: no + register: create_gateway_table + + - assert: + that: + - create_gateway_table is not changed + - create_gateway_table.route_table.id.startswith('rtb-') + - "'Public' in create_gateway_table.route_table.tags" + - create_gateway_table.route_table.tags['Public'] == 'true' + - create_gateway_table.route_table.routes | length == 3 + - "{{ create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 0 }}" + - create_gateway_table.route_table.vpc_id == vpc.vpc.id + - create_gateway_table.route_table.propagating_vgws | length == 0 + + # ------------------------------------------------------------------------------------------ + + - name: Create vgw for gateway route table + ec2_vpc_vgw: + 
state: present + vpc_id: "{{ vpc.vpc.id }}" + type: ipsec.1 + name: '{{ resource_prefix }}_vpc' + register: vgw + + - name: Associate vgw with route table - check_mode + ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: 'true' + Name: Gateway route table + gateway_id: "{{ vgw.vgw.id }}" + purge_routes: no + register: create_gateway_table + check_mode: yes + + - assert: + that: + - create_gateway_table is changed + + - name: Associate vgw with route table + ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: 'true' + Name: Gateway route table + gateway_id: "{{ vgw.vgw.id }}" + purge_routes: no + register: create_gateway_table + + - assert: + that: + - create_gateway_table is changed + - create_gateway_table.route_table.id.startswith('rtb-') + - "'Public' in create_gateway_table.route_table.tags" + - create_gateway_table.route_table.tags['Public'] == 'true' + - create_gateway_table.route_table.routes | length == 3 + - create_gateway_table.route_table.associations | length == 2 + - "{{ create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 1 }}" + - create_gateway_table.route_table.vpc_id == vpc.vpc.id + - create_gateway_table.route_table.propagating_vgws | length == 0 + + - name: Associate vgw with route table (idempotence) - check_mode + ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: 'true' + Name: Gateway route table + gateway_id: "{{ vgw.vgw.id }}" + purge_routes: no + register: create_gateway_table + check_mode: yes + + - assert: + that: + - create_gateway_table is not changed + + - name: Associate vgw with route table (idempotence) + ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: 'true' + Name: Gateway route table + gateway_id: "{{ vgw.vgw.id }}" + purge_routes: no + register: create_gateway_table + + - assert: + that: + - create_gateway_table is not changed + - create_gateway_table.route_table.id.startswith('rtb-') + - "'Public' in create_gateway_table.route_table.tags" + - create_gateway_table.route_table.tags['Public'] == 'true' + - create_gateway_table.route_table.routes | length == 3 + - create_gateway_table.route_table.associations | length == 2 + - "{{ create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 1 }}" + - create_gateway_table.route_table.vpc_id == vpc.vpc.id + - create_gateway_table.route_table.propagating_vgws | length == 0 + + # ------------------------------------------------------------------------------------------ + + - name: Get route table info + ec2_vpc_route_table_info: + filters: + route-table-id: "{{ create_gateway_table.route_table.id }}" + register: rt_info + + - name: Assert route table exists prior to deletion + assert: + that: + - rt_info.route_tables | length == 1 + + - name: Delete gateway route table - check_mode + ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: 'true' + Name: Gateway route table + state: absent + register: delete_gateway_table + check_mode: yes + + - assert: + that: + - delete_gateway_table is changed + + - name: Delete gateway route table + ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: 'true' + Name: Gateway route table + state: absent + register: delete_gateway_table + + - name: Get route table info + ec2_vpc_route_table_info: + filters: + route-table-id: "{{ create_gateway_table.route_table.id }}" + register: rt_info + + - name: Assert route table was 
deleted + assert: + that: + - delete_gateway_table is changed + - rt_info.route_tables | length == 0 + + - name: Delete gateway route table (idempotence) - check_mode + ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: 'true' + Name: Gateway route table + state: absent + register: delete_gateway_table + check_mode: yes + + - assert: + that: + - delete_gateway_table is not changed + + - name: Delete gateway route table (idempotence) + ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: 'true' + Name: Gateway route table + state: absent + register: delete_gateway_table + + - name: Get route table info + ec2_vpc_route_table_info: + filters: + route-table-id: "{{ create_gateway_table.route_table.id }}" + register: rt_info + + - name: Assert route table was deleted + assert: + that: + - delete_gateway_table is not changed + - rt_info.route_tables | length == 0 + + always: + ############################################################################# + # TEAR DOWN STARTS HERE + ############################################################################# + - name: remove the VPC endpoint + ec2_vpc_endpoint: + state: absent + vpc_endpoint_id: '{{ vpc_endpoint.result.vpc_endpoint_id }}' + when: vpc_endpoint is defined + ignore_errors: yes + - name: destroy route tables + ec2_vpc_route_table: + route_table_id: '{{ item.route_table.id }}' + lookup: id + state: absent + with_items: + - '{{ create_public_table | default() }}' + - '{{ create_private_table | default() }}' + - '{{ create_gateway_table | default() }}' + when: item and not item.failed + ignore_errors: yes + - name: destroy NAT GW + ec2_vpc_nat_gateway: + state: absent + wait: yes + release_eip: yes + subnet_id: '{{ subnets.results[0].subnet.id }}' + nat_gateway_id: '{{ nat_gateway.nat_gateway_id }}' + ignore_errors: yes + - name: destroy IGW + ec2_vpc_igw: + vpc_id: '{{ vpc.vpc.id }}' + state: absent + ignore_errors: yes + - name: destroy VGW + ec2_vpc_vgw: + state: absent + type: ipsec.1 + name: '{{ resource_prefix }}_vpc' + vpc_id: "{{ vpc.vpc.id }}" + ignore_errors: yes + - name: destroy ENI + ec2_eni: + state: absent + eni_id: '{{ eni.interface.id }}' + ignore_errors: yes + - name: destroy subnets + ec2_vpc_subnet: + cidr: '{{ item.cidr }}' + vpc_id: '{{ vpc.vpc.id }}' + state: absent + with_items: + - cidr: 10.228.224.0/24 + - cidr: 10.228.225.0/24 + - cidr: 10.228.226.0/24 + - cidr: 10.228.227.0/24 + - cidr: 10.228.228.0/24 + - cidr: 10.228.229.0/24 + - cidr: 10.228.230.0/24 + ignore_errors: yes + - name: destroy VPC + ec2_vpc_net: + cidr_block: 10.228.224.0/21 + name: '{{ resource_prefix }}_vpc' + state: absent + ignore_errors: yes \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/aliases new file mode 100644 index 000000000..5dcc9055d --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/aliases @@ -0,0 +1,2 @@ +cloud/aws +ec2_vpc_subnet_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/defaults/main.yml new file mode 100644 index 000000000..75ff93f1b --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/defaults/main.yml @@ -0,0 +1,9 @@ +--- +availability_zone: '{{ ec2_availability_zone_names[0] }}' + +# defaults file for ec2_vpc_subnet 
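+# The CIDR defaults below use Jinja2's random filter with a seed. Once
+# seeded, the filter is deterministic: every evaluation within one test run
+# (same resource_prefix) yields the same second octet, so the subnets always
+# land inside that run's VPC, while concurrent CI runs draw different
+# 10.x.0.0/16 ranges and are unlikely to collide. Illustration only; the
+# octet value 93 is hypothetical and depends on the run's resource_prefix:
+#   vpc_cidr    -> 10.93.0.0/16
+#   subnet_cidr -> 10.93.1.0/24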
+ec2_vpc_subnet_name: '{{resource_prefix}}' +ec2_vpc_subnet_description: 'Created by ansible integration tests' +vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/16' +subnet_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.1.0/24' +subnet_cidr_b: '10.{{ 256 | random(seed=resource_prefix) }}.2.0/24' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/meta/main.yml new file mode 100644 index 000000000..1471b11f6 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_ec2_facts diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/tasks/main.yml new file mode 100644 index 000000000..fd367f0c3 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/tasks/main.yml @@ -0,0 +1,683 @@ +--- +- module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + # ============================================================ + - name: create a VPC + ec2_vpc_net: + name: "{{ resource_prefix }}-vpc" + state: present + cidr_block: "{{ vpc_cidr }}" + ipv6_cidr: True + tags: + Name: "{{ resource_prefix }}-vpc" + Description: "Created by ansible-test" + register: vpc_result + + - set_fact: + vpc_ipv6_cidr: "{{ vpc_result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block }}" + + - set_fact: + subnet_ipv6_cidr: "{{ vpc_ipv6_cidr | regex_replace('::/.*', '::/64') }}" + + # ============================================================ + - name: check subnet does not exist + ec2_vpc_subnet_info: + filters: + "tag:Name": '{{ec2_vpc_subnet_name}}' + register: vpc_subnet_info + + - name: Assert info result is zero + assert: + that: + - (vpc_subnet_info.subnets|length) == 0 + + - name: create subnet (expected changed=true) (CHECK MODE) + ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + az: "{{ availability_zone }}" + vpc_id: "{{ vpc_result.vpc.id }}" + tags: + Name: '{{ec2_vpc_subnet_name}}' + Description: '{{ec2_vpc_subnet_description}}' + state: present + check_mode: true + register: vpc_subnet_create + + - name: assert creation would happen + assert: + that: + - vpc_subnet_create is changed + + - name: create subnet (expected changed=true) + ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + az: "{{ availability_zone }}" + vpc_id: "{{ vpc_result.vpc.id }}" + tags: + Name: '{{ec2_vpc_subnet_name}}' + Description: '{{ec2_vpc_subnet_description}}' + state: present + register: vpc_subnet_create + + - name: assert creation happened (expected changed=true) + assert: + that: + - 'vpc_subnet_create' + - 'vpc_subnet_create.subnet.id.startswith("subnet-")' + - '"Name" in vpc_subnet_create.subnet.tags and vpc_subnet_create.subnet.tags["Name"] == ec2_vpc_subnet_name' + - '"Description" in vpc_subnet_create.subnet.tags and vpc_subnet_create.subnet.tags["Description"] == ec2_vpc_subnet_description' + + - name: get info about the subnet + ec2_vpc_subnet_info: + subnet_ids: '{{ vpc_subnet_create.subnet.id }}' + register: vpc_subnet_info + + - name: Assert info result matches create result + assert: + that: + - 'vpc_subnet_info.subnets | length == 1' + - '"assign_ipv6_address_on_creation" in subnet_info' + - 
'subnet_info.assign_ipv6_address_on_creation == False' + - '"availability_zone" in subnet_info' + - 'subnet_info.availability_zone == availability_zone' + - '"available_ip_address_count" in subnet_info' + - '"cidr_block" in subnet_info' + - 'subnet_info.cidr_block == subnet_cidr' + - '"default_for_az" in subnet_info' + - '"id" in subnet_info' + - 'subnet_info.id == vpc_subnet_create.subnet.id' + - '"map_public_ip_on_launch" in subnet_info' + - 'subnet_info.map_public_ip_on_launch == False' + - '"state" in subnet_info' + - '"subnet_id" in subnet_info' + - 'subnet_info.subnet_id == vpc_subnet_create.subnet.id' + - '"tags" in subnet_info' + - 'subnet_info.tags["Description"] == ec2_vpc_subnet_description' + - 'subnet_info.tags["Name"] == vpc_subnet_create.subnet.tags["Name"]' + - '"vpc_id" in subnet_info' + - 'subnet_info.vpc_id == vpc_result.vpc.id' + vars: + subnet_info: '{{ vpc_subnet_info.subnets[0] }}' + + # ============================================================ + - name: recreate subnet (expected changed=false) (CHECK MODE) + ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + az: "{{ availability_zone }}" + vpc_id: "{{ vpc_result.vpc.id }}" + tags: + Name: '{{ec2_vpc_subnet_name}}' + Description: '{{ec2_vpc_subnet_description}}' + state: present + check_mode: true + register: vpc_subnet_recreate + + - name: assert recreation changed nothing (expected changed=false) + assert: + that: + - vpc_subnet_recreate is not changed + + - name: recreate subnet (expected changed=false) + ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + az: "{{ availability_zone }}" + vpc_id: "{{ vpc_result.vpc.id }}" + tags: + Name: '{{ec2_vpc_subnet_name}}' + Description: '{{ec2_vpc_subnet_description}}' + state: present + register: vpc_subnet_recreate + + - name: assert recreation changed nothing (expected changed=false) + assert: + that: + - vpc_subnet_recreate is not changed + - 'vpc_subnet_recreate.subnet == vpc_subnet_create.subnet' + + # ============================================================ + - name: update subnet so instances launched in it are assigned an IP (CHECK MODE) + ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + az: "{{ availability_zone }}" + vpc_id: "{{ vpc_result.vpc.id }}" + tags: + Name: '{{ec2_vpc_subnet_name}}' + Description: '{{ec2_vpc_subnet_description}}' + state: present + map_public: true + check_mode: true + register: vpc_subnet_modify + + - name: assert subnet changed + assert: + that: + - vpc_subnet_modify is changed + + - name: update subnet so instances launched in it are assigned an IP + ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + az: "{{ availability_zone }}" + vpc_id: "{{ vpc_result.vpc.id }}" + tags: + Name: '{{ec2_vpc_subnet_name}}' + Description: '{{ec2_vpc_subnet_description}}' + state: present + map_public: true + register: vpc_subnet_modify + + - name: assert subnet changed + assert: + that: + - vpc_subnet_modify is changed + - vpc_subnet_modify.subnet.map_public_ip_on_launch + + # ============================================================ + - name: add invalid ipv6 block to subnet (expected failed) + ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + az: "{{ availability_zone }}" + vpc_id: "{{ vpc_result.vpc.id }}" + ipv6_cidr: 2001:db8::/64 + tags: + Name: '{{ec2_vpc_subnet_name}}' + Description: '{{ec2_vpc_subnet_description}}' + state: present + register: vpc_subnet_ipv6_failed + ignore_errors: yes + + - name: assert failure happened (expected failed) + assert: + that: + - vpc_subnet_ipv6_failed is failed + - "'Couldn\\'t associate ipv6 cidr' in 
vpc_subnet_ipv6_failed.msg" + + # ============================================================ + - name: add a tag (expected changed=true) (CHECK MODE) + ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + az: "{{ availability_zone }}" + vpc_id: "{{ vpc_result.vpc.id }}" + tags: + Name: '{{ec2_vpc_subnet_name}}' + Description: '{{ec2_vpc_subnet_description}}' + AnotherTag: SomeValue + state: present + check_mode: true + register: vpc_subnet_add_a_tag + + - name: assert tag addition happened (expected changed=true) + assert: + that: + - vpc_subnet_add_a_tag is changed + + - name: add a tag (expected changed=true) + ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + az: "{{ availability_zone }}" + vpc_id: "{{ vpc_result.vpc.id }}" + tags: + Name: '{{ec2_vpc_subnet_name}}' + Description: '{{ec2_vpc_subnet_description}}' + AnotherTag: SomeValue + state: present + register: vpc_subnet_add_a_tag + + - name: assert tag addition happened (expected changed=true) + assert: + that: + - vpc_subnet_add_a_tag is changed + - '"Name" in vpc_subnet_add_a_tag.subnet.tags and vpc_subnet_add_a_tag.subnet.tags["Name"] == ec2_vpc_subnet_name' + - '"Description" in vpc_subnet_add_a_tag.subnet.tags and vpc_subnet_add_a_tag.subnet.tags["Description"] == ec2_vpc_subnet_description' + - '"AnotherTag" in vpc_subnet_add_a_tag.subnet.tags and vpc_subnet_add_a_tag.subnet.tags["AnotherTag"] == "SomeValue"' + + - name: Get info by tag + ec2_vpc_subnet_info: + filters: + "tag:Name": '{{ec2_vpc_subnet_name}}' + register: vpc_subnet_info_by_tag + + - name: assert info matches expected output + assert: + that: + - 'vpc_subnet_info_by_tag.subnets[0].id == vpc_subnet_add_a_tag.subnet.id' + - (vpc_subnet_info_by_tag.subnets[0].tags|length) == 3 + - '"Description" in vpc_subnet_info_by_tag.subnets[0].tags and vpc_subnet_info_by_tag.subnets[0].tags["Description"] == ec2_vpc_subnet_description' + - '"AnotherTag" in vpc_subnet_info_by_tag.subnets[0].tags and vpc_subnet_info_by_tag.subnets[0].tags["AnotherTag"] == "SomeValue"' + + # ============================================================ + - name: remove tags with default purge_tags=true (expected changed=true) (CHECK MODE) + ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + az: "{{ availability_zone }}" + vpc_id: "{{ vpc_result.vpc.id }}" + tags: + AnotherTag: SomeValue + state: present + check_mode: true + register: vpc_subnet_remove_tags + + - name: assert tag removal happened (expected changed=true) + assert: + that: + - vpc_subnet_remove_tags is changed + + - name: remove tags with default purge_tags=true (expected changed=true) + ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + az: "{{ availability_zone }}" + vpc_id: "{{ vpc_result.vpc.id }}" + tags: + AnotherTag: SomeValue + state: present + register: vpc_subnet_remove_tags + + - name: assert tag removal happened (expected changed=true) + assert: + that: + - vpc_subnet_remove_tags is changed + - '"Name" not in vpc_subnet_remove_tags.subnet.tags' + - '"Description" not in vpc_subnet_remove_tags.subnet.tags' + - '"AnotherTag" in vpc_subnet_remove_tags.subnet.tags and vpc_subnet_remove_tags.subnet.tags["AnotherTag"] == "SomeValue"' + + - name: Check tags by info + ec2_vpc_subnet_info: + subnet_id: '{{ vpc_subnet_remove_tags.subnet.id }}' + register: vpc_subnet_info_removed_tags + + - name: assert info matches expected output + assert: + that: + - '"Name" not in vpc_subnet_info_removed_tags.subnets[0].tags' + - '"Description" not in vpc_subnet_info_removed_tags.subnets[0].tags' + - '"AnotherTag" in vpc_subnet_info_removed_tags.subnets[0].tags 
and vpc_subnet_info_removed_tags.subnets[0].tags["AnotherTag"] == "SomeValue"' + + + # ============================================================ + - name: change tags with purge_tags=false (expected changed=true) (CHECK MODE) + ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + az: "{{ availability_zone }}" + vpc_id: "{{ vpc_result.vpc.id }}" + tags: + Name: '{{ec2_vpc_subnet_name}}' + Description: '{{ec2_vpc_subnet_description}}' + state: present + purge_tags: false + check_mode: true + register: vpc_subnet_change_tags + + - name: assert tag addition happened (expected changed=true) + assert: + that: + - vpc_subnet_change_tags is changed + + - name: change tags with purge_tags=false (expected changed=true) + ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + az: "{{ availability_zone }}" + vpc_id: "{{ vpc_result.vpc.id }}" + tags: + Name: '{{ec2_vpc_subnet_name}}' + Description: '{{ec2_vpc_subnet_description}}' + state: present + purge_tags: false + register: vpc_subnet_change_tags + + - name: assert tag addition happened (expected changed=true) + assert: + that: + - vpc_subnet_change_tags is changed + - '"Name" in vpc_subnet_change_tags.subnet.tags and vpc_subnet_change_tags.subnet.tags["Name"] == ec2_vpc_subnet_name' + - '"Description" in vpc_subnet_change_tags.subnet.tags and vpc_subnet_change_tags.subnet.tags["Description"] == ec2_vpc_subnet_description' + - '"AnotherTag" in vpc_subnet_change_tags.subnet.tags and vpc_subnet_change_tags.subnet.tags["AnotherTag"] == "SomeValue"' + + # ============================================================ + - name: test state=absent (expected changed=true) (CHECK MODE) + ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + vpc_id: "{{ vpc_result.vpc.id }}" + state: absent + check_mode: true + register: result + + - name: assert state=absent (expected changed=true) + assert: + that: + - result is changed + + - name: test state=absent (expected changed=true) + ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + vpc_id: "{{ vpc_result.vpc.id }}" + state: absent + register: result + + - name: assert state=absent (expected changed=true) + assert: + that: + - result is changed + + # ============================================================ + - name: test state=absent (expected changed=false) (CHECK MODE) + ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + vpc_id: "{{ vpc_result.vpc.id }}" + state: absent + check_mode: true + register: result + + - name: assert state=absent (expected changed=false) + assert: + that: + - result is not changed + + - name: test state=absent (expected changed=false) + ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + vpc_id: "{{ vpc_result.vpc.id }}" + state: absent + register: result + + - name: assert state=absent (expected changed=false) + assert: + that: + - result is not changed + + # ============================================================ + - name: create subnet without AZ (CHECK MODE) + ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + vpc_id: "{{ vpc_result.vpc.id }}" + state: present + check_mode: true + register: subnet_without_az + + - name: check that subnet without AZ works fine + assert: + that: + - subnet_without_az is changed + + - name: create subnet without AZ + ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + vpc_id: "{{ vpc_result.vpc.id }}" + state: present + register: subnet_without_az + + - name: check that subnet without AZ works fine + assert: + that: + - subnet_without_az is changed + + # ============================================================ + - name: remove subnet without AZ (CHECK MODE) + ec2_vpc_subnet: + cidr: "{{ 
subnet_cidr }}" + vpc_id: "{{ vpc_result.vpc.id }}" + state: absent + check_mode: true + register: result + + - name: assert state=absent (expected changed=true) + assert: + that: + - result is changed + + - name: remove subnet without AZ + ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + vpc_id: "{{ vpc_result.vpc.id }}" + state: absent + register: result + + - name: assert state=absent (expected changed=true) + assert: + that: + - result is changed + + + # ============================================================ + - name: create subnet with IPv6 (expected changed=true) (CHECK MODE) + ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + vpc_id: "{{ vpc_result.vpc.id }}" + ipv6_cidr: "{{ subnet_ipv6_cidr }}" + assign_instances_ipv6: true + state: present + tags: + Name: '{{ec2_vpc_subnet_name}}' + Description: '{{ec2_vpc_subnet_description}}' + check_mode: true + register: vpc_subnet_ipv6_create + + - name: assert creation with IPv6 happened (expected changed=true) + assert: + that: + - vpc_subnet_ipv6_create is changed + + - name: create subnet with IPv6 (expected changed=true) + ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + vpc_id: "{{ vpc_result.vpc.id }}" + ipv6_cidr: "{{ subnet_ipv6_cidr }}" + assign_instances_ipv6: true + state: present + tags: + Name: '{{ec2_vpc_subnet_name}}' + Description: '{{ec2_vpc_subnet_description}}' + register: vpc_subnet_ipv6_create + + - name: assert creation with IPv6 happened (expected changed=true) + assert: + that: + - vpc_subnet_ipv6_create is changed + - 'vpc_subnet_ipv6_create.subnet.id.startswith("subnet-")' + - "vpc_subnet_ipv6_create.subnet.ipv6_cidr_block == subnet_ipv6_cidr" + - '"Name" in vpc_subnet_ipv6_create.subnet.tags and vpc_subnet_ipv6_create.subnet.tags["Name"] == ec2_vpc_subnet_name' + - '"Description" in vpc_subnet_ipv6_create.subnet.tags and vpc_subnet_ipv6_create.subnet.tags["Description"] == ec2_vpc_subnet_description' + - 'vpc_subnet_ipv6_create.subnet.assign_ipv6_address_on_creation' + + # ============================================================ + - name: recreate subnet (expected changed=false) (CHECK MODE) + ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + vpc_id: "{{ vpc_result.vpc.id }}" + ipv6_cidr: "{{ subnet_ipv6_cidr }}" + assign_instances_ipv6: true + state: present + tags: + Name: '{{ec2_vpc_subnet_name}}' + Description: '{{ec2_vpc_subnet_description}}' + check_mode: true + register: vpc_subnet_ipv6_recreate + + - name: assert recreation changed nothing (expected changed=false) + assert: + that: + - vpc_subnet_ipv6_recreate is not changed + + - name: recreate subnet (expected changed=false) + ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + vpc_id: "{{ vpc_result.vpc.id }}" + ipv6_cidr: "{{ subnet_ipv6_cidr }}" + assign_instances_ipv6: true + state: present + tags: + Name: '{{ec2_vpc_subnet_name}}' + Description: '{{ec2_vpc_subnet_description}}' + register: vpc_subnet_ipv6_recreate + + - name: assert recreation changed nothing (expected changed=false) + assert: + that: + - vpc_subnet_ipv6_recreate is not changed + - 'vpc_subnet_ipv6_recreate.subnet == vpc_subnet_ipv6_create.subnet' + + # ============================================================ + - name: change subnet assign_instances_ipv6 attribute (expected changed=true) (CHECK MODE) + ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + vpc_id: "{{ vpc_result.vpc.id }}" + ipv6_cidr: "{{ subnet_ipv6_cidr }}" + assign_instances_ipv6: false + state: present + purge_tags: false + check_mode: true + register: vpc_change_attribute + + - name: assert assign_instances_ipv6 attribute 
changed (expected changed=true) + assert: + that: + - vpc_change_attribute is changed + + - name: change subnet assign_instances_ipv6 attribute (expected changed=true) + ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + vpc_id: "{{ vpc_result.vpc.id }}" + ipv6_cidr: "{{ subnet_ipv6_cidr }}" + assign_instances_ipv6: false + state: present + purge_tags: false + register: vpc_change_attribute + + - name: assert assign_instances_ipv6 attribute changed (expected changed=true) + assert: + that: + - vpc_change_attribute is changed + - 'not vpc_change_attribute.subnet.assign_ipv6_address_on_creation' + + # ============================================================ + - name: add second subnet with duplicate ipv6 cidr (expected failure) + ec2_vpc_subnet: + cidr: "{{ subnet_cidr_b }}" + vpc_id: "{{ vpc_result.vpc.id }}" + ipv6_cidr: "{{ subnet_ipv6_cidr }}" + state: present + purge_tags: false + register: vpc_add_duplicate_ipv6 + ignore_errors: true + + - name: assert graceful failure (expected failed) + assert: + that: + - vpc_add_duplicate_ipv6 is failed + - "'The IPv6 CIDR \\'{{ subnet_ipv6_cidr }}\\' conflicts with another subnet' in vpc_add_duplicate_ipv6.msg" + + # ============================================================ + - name: remove subnet ipv6 cidr (expected changed=true) (CHECK MODE) + ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + vpc_id: "{{ vpc_result.vpc.id }}" + state: present + purge_tags: false + check_mode: true + register: vpc_remove_ipv6_cidr + + - name: assert subnet ipv6 cidr removed (expected changed=true) + assert: + that: + - vpc_remove_ipv6_cidr is changed + + - name: remove subnet ipv6 cidr (expected changed=true) + ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + vpc_id: "{{ vpc_result.vpc.id }}" + state: present + purge_tags: false + register: vpc_remove_ipv6_cidr + + - name: assert subnet ipv6 cidr removed (expected changed=true) + assert: + that: + - vpc_remove_ipv6_cidr is changed + - "vpc_remove_ipv6_cidr.subnet.ipv6_cidr_block == ''" + - 'not vpc_remove_ipv6_cidr.subnet.assign_ipv6_address_on_creation' + + # ============================================================ + - name: test adding a tag that looks like a boolean to the subnet (CHECK MODE) + ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + vpc_id: "{{ vpc_result.vpc.id }}" + state: present + purge_tags: false + tags: + looks_like_boolean: true + check_mode: true + register: vpc_subnet_info + + - name: assert a tag was added + assert: + that: + - vpc_subnet_info is changed + + - name: test adding a tag that looks like a boolean to the subnet + ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + vpc_id: "{{ vpc_result.vpc.id }}" + state: present + purge_tags: false + tags: + looks_like_boolean: true + register: vpc_subnet_info + + - name: assert a tag was added + assert: + that: + - vpc_subnet_info is changed + - 'vpc_subnet_info.subnet.tags.looks_like_boolean == "True"' + + # ============================================================ + - name: test idempotence adding a tag that looks like a boolean (CHECK MODE) + ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + vpc_id: "{{ vpc_result.vpc.id }}" + state: present + purge_tags: false + tags: + looks_like_boolean: true + check_mode: true + register: vpc_subnet_info + + - name: assert tags haven't changed + assert: + that: + - vpc_subnet_info is not changed + + - name: test idempotence adding a tag that looks like a boolean + ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + vpc_id: "{{ vpc_result.vpc.id }}" + state: present + purge_tags: false + tags: + looks_like_boolean: 
true + register: vpc_subnet_info + + - name: assert tags haven't changed + assert: + that: + - vpc_subnet_info is not changed + + always: + + ################################################ + # TEARDOWN STARTS HERE + ################################################ + + - name: tidy up subnet + ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + vpc_id: "{{ vpc_result.vpc.id }}" + state: absent + + - name: tidy up VPC + ec2_vpc_net: + name: "{{ resource_prefix }}-vpc" + state: absent + cidr_block: "{{ vpc_cidr }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/aliases b/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/aliases new file mode 100644 index 000000000..948352f20 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/aliases @@ -0,0 +1,3 @@ +cloud/aws +slow +elb_application_lb_info \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/defaults/main.yml new file mode 100644 index 000000000..719851924 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/defaults/main.yml @@ -0,0 +1,28 @@ +# defaults file for elb_application_lb + +resource_short: "{{ '%0.8x'%((16**8) | random(seed=resource_prefix)) }}" +alb_name: alb-test-{{ resource_short }} +alb_2_name: alb-test-2-{{ resource_short }} +tg_name: alb-test-{{ resource_short }} +tg_2_name: alb-test-2-{{ resource_short }} +vpc_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.0.0/16 +private_subnet_cidr_1: 10.{{ 256 | random(seed=resource_prefix) }}.1.0/24 +private_subnet_cidr_2: 10.{{ 256 | random(seed=resource_prefix) }}.2.0/24 +public_subnet_cidr_1: 10.{{ 256 | random(seed=resource_prefix) }}.3.0/24 +public_subnet_cidr_2: 10.{{ 256 | random(seed=resource_prefix) }}.4.0/24 +s3_bucket_name: alb-test-{{ resource_short }} + +# Amazon's SDKs don't provide the list of account IDs; Amazon only provides a +# web page. If you want to run the tests outside the US regions, you'll need to +# update this.
+# https://docs.aws.amazon.com/elasticloadbalancing/latest/application/enable-access-logging.html +elb_access_log_account_id_map: + us-east-1: "127311923021" + us-east-2: "033677994240" + us-west-1: "027434742980" + us-west-2: "797873946194" + us-gov-east-1: "190560391635" + us-gov-west-1: "048591011584" + + +elb_account_id: '{{ elb_access_log_account_id_map[aws_region] }}' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/tasks/main.yml new file mode 100644 index 000000000..2a0cab761 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/tasks/main.yml @@ -0,0 +1,1558 @@ +- name: elb_application_lb integration tests + module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + - name: Create a test VPC + ec2_vpc_net: + cidr_block: '{{ vpc_cidr }}' + name: '{{ resource_prefix }}_vpc' + state: present + ipv6_cidr: true + tags: + Name: elb_application_lb testing + ResourcePrefix: '{{ resource_prefix }}' + register: vpc + - name: 'Set fact: VPC ID' + set_fact: + vpc_id: '{{ vpc.vpc.id }}' + - name: Get VPC's default security group + ec2_group_info: + filters: + vpc-id: '{{ vpc_id }}' + register: default_sg + - name: Create an internet gateway + ec2_vpc_igw: + vpc_id: '{{ vpc_id }}' + state: present + tags: + Name: '{{ resource_prefix }}' + register: igw + - name: Create private subnets + ec2_vpc_subnet: + cidr: '{{ item.cidr }}' + az: '{{ aws_region }}{{ item.az }}' + vpc_id: '{{ vpc_id }}' + state: present + tags: + Public: 'False' + Name: private-{{ item.az }} + with_items: + - cidr: '{{ private_subnet_cidr_1 }}' + az: a + - cidr: '{{ private_subnet_cidr_2 }}' + az: b + register: private_subnets + - name: Create public subnets with ipv6 + ec2_vpc_subnet: + cidr: '{{ item.cidr }}' + az: '{{ aws_region }}{{ item.az }}' + vpc_id: '{{ vpc_id }}' + state: present + ipv6_cidr: '{{ item.vpc_ipv6_cidr }}' + tags: + Public: 'True' + Name: public-{{ item.az }} + with_items: + - cidr: '{{ public_subnet_cidr_1 }}' + az: a + vpc_ipv6_cidr: "{{ vpc.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block\ + \ | replace('0::/56','0::/64') }}" + - cidr: '{{ public_subnet_cidr_2 }}' + az: b + vpc_ipv6_cidr: "{{ vpc.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block\ + \ | replace('0::/56','1::/64') }}" + register: public_subnets + - name: Create list of subnet ids + set_fact: + public_subnets: "{{ public_subnets.results | map(attribute='subnet') | map(attribute='id')\ + \ }}" + private_subnets: "{{ private_subnets.results | map(attribute='subnet') | map(attribute='id')\ + \ }}" + - name: Create a route table + ec2_vpc_route_table: + vpc_id: '{{ vpc_id }}' + tags: + Name: igw-route + Created: '{{ resource_prefix }}' + subnets: '{{ public_subnets + private_subnets }}' + routes: + - dest: 0.0.0.0/0 + gateway_id: '{{ igw.gateway_id }}' + register: route_table + - name: Create a security group for Ansible ALB integration tests + ec2_group: + name: '{{ resource_prefix }}' + description: security group for Ansible ALB integration tests + state: present + vpc_id: '{{ vpc_id }}' + rules: + - proto: tcp + from_port: 1 + to_port: 65535 + cidr_ip: 0.0.0.0/0 + register: sec_group + - name: Create another security group for Ansible ALB integration tests + ec2_group: + name: '{{ resource_prefix }}-2' + 
description: security group for Ansible ALB integration tests + state: present + vpc_id: '{{ vpc_id }}' + rules: + - proto: tcp + from_port: 1 + to_port: 65535 + cidr_ip: 0.0.0.0/0 + register: sec_group2 + - name: Create a target group for testing + elb_target_group: + name: '{{ tg_name }}' + protocol: http + port: 80 + vpc_id: '{{ vpc_id }}' + state: present + register: tg + - name: Create a second target group for testing + community.aws.elb_target_group: + name: '{{ tg_2_name }}' + protocol: http + port: 80 + vpc_id: '{{ vpc_id }}' + state: present + register: tg_2 + - name: Get ARN of calling user + amazon.aws.aws_caller_info: + register: aws_caller_info + - name: Register account id + ansible.builtin.set_fact: + aws_account: "{{ aws_caller_info.account }}" + - name: Create S3 bucket for testing + amazon.aws.s3_bucket: + name: "{{ s3_bucket_name }}" + state: present + encryption: "aws:kms" + policy: "{{ lookup('template', 'policy.json') }}" + + - name: Create an ALB (invalid - SslPolicy is required when Protocol == HTTPS) + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - Protocol: HTTPS + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_name }}' + ignore_errors: yes + register: alb + - assert: + that: + - alb is failed + - alb.msg is match("'SslPolicy' is a required listener dict key when Protocol + = HTTPS") + + - name: Create an ALB (invalid - didn't provide required listener options) + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - Port: 80 + ignore_errors: yes + register: alb + - assert: + that: + - alb is failed + - alb.msg is match("missing required arguments:\ DefaultActions, Protocol found + in listeners") + + - name: Create an ALB (invalid - invalid listener option type) + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - Protocol: HTTP + Port: bad type + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_name }}' + ignore_errors: yes + register: alb + - assert: + that: + - alb is failed + - "'unable to convert to int' in alb.msg" + + - name: Create an ALB (invalid - invalid ip address type) + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_name }}' + ip_address_type: ip_addr_v4_v6 + ignore_errors: yes + register: alb + - assert: + that: + - alb is failed + + # ------------------------------------------------------------------------------------------ + + - name: Create an ALB with defaults - check_mode + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: [] + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_name }}' + register: alb + check_mode: yes + - assert: + that: + - alb is changed + - alb.msg is match('Would have created ALB if not in check mode.') + + - name: Create an ALB with defaults + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: [] + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: 
'{{ tg_name }}' + register: alb + - assert: + that: + - alb is changed + - alb.listeners[0].rules | length == 1 + - alb.security_groups | length == 1 + - alb.security_groups[0] == default_sg.security_groups[0].group_id + + - name: Create an ALB with defaults (idempotence) - check_mode + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: [] + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_name }}' + register: alb + check_mode: yes + - assert: + that: + - alb is not changed + - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Create an ALB with defaults (idempotence) + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: [] + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_name }}' + register: alb + - assert: + that: + - alb is not changed + - alb.listeners[0].rules | length == 1 + - alb.security_groups[0] == default_sg.security_groups[0].group_id + + # ------------------------------------------------------------------------------------------ + + - name: Create an ALB with attributes - check_mode + amazon.aws.elb_application_lb: + name: '{{ alb_2_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_2_name }}' + access_logs_enabled: true + access_logs_s3_bucket: "{{ s3_bucket_name }}" + access_logs_s3_prefix: "alb-logs" + ip_address_type: dualstack + http2: false + http_desync_mitigation_mode: monitor + http_drop_invalid_header_fields: true + http_x_amzn_tls_version_and_cipher_suite: true + http_xff_client_port: true + waf_fail_open: true + register: alb_2 + check_mode: true + + - name: Verify check mode response + ansible.builtin.assert: + that: + - alb_2 is changed + - alb_2.msg is match('Would have created ALB if not in check mode.') + + - name: Create an ALB with attributes + amazon.aws.elb_application_lb: + name: '{{ alb_2_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_2_name }}' + access_logs_enabled: true + access_logs_s3_bucket: "{{ s3_bucket_name }}" + access_logs_s3_prefix: "alb-logs" + http2: false + http_desync_mitigation_mode: monitor + http_drop_invalid_header_fields: true + http_x_amzn_tls_version_and_cipher_suite: true + http_xff_client_port: true + idle_timeout: 120 + ip_address_type: dualstack + waf_fail_open: true + register: alb_2 + + - name: Verify ALB was created with correct attributes + ansible.builtin.assert: + that: + - alb_2 is changed + - alb_2.listeners[0].rules | length == 1 + - alb_2.security_groups | length == 1 + - alb_2.security_groups[0] == sec_group.group_id + - alb_2.ip_address_type == 'dualstack' + - alb_2.access_logs_s3_enabled | bool + - alb_2.access_logs_s3_bucket == "{{ s3_bucket_name }}" + - alb_2.access_logs_s3_prefix == "alb-logs" + - not alb_2.routing_http2_enabled | bool + - alb_2.routing_http_desync_mitigation_mode == 'monitor' + - alb_2.routing_http_drop_invalid_header_fields_enabled | bool + - alb_2.routing_http_x_amzn_tls_version_and_cipher_suite_enabled | bool + - alb_2.routing_http_xff_client_port_enabled | bool + - alb_2.idle_timeout_timeout_seconds == 
"120" + - alb_2.waf_fail_open_enabled | bool + + - name: Create an ALB with attributes (idempotence) - check_mode + amazon.aws.elb_application_lb: + name: '{{ alb_2_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_2_name }}' + access_logs_enabled: true + access_logs_s3_bucket: "{{ s3_bucket_name }}" + access_logs_s3_prefix: "alb-logs" + ip_address_type: dualstack + http2: false + http_desync_mitigation_mode: monitor + http_drop_invalid_header_fields: true + http_x_amzn_tls_version_and_cipher_suite: true + http_xff_client_port: true + waf_fail_open: true + register: alb_2 + check_mode: true + + - name: Verify idempotence check mode response + ansible.builtin.assert: + that: + - alb_2 is not changed + - alb_2.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Create an ALB with attributes (idempotence) + amazon.aws.elb_application_lb: + name: '{{ alb_2_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_2_name }}' + access_logs_enabled: true + access_logs_s3_bucket: "{{ s3_bucket_name }}" + access_logs_s3_prefix: "alb-logs" + ip_address_type: dualstack + http2: false + http_desync_mitigation_mode: monitor + http_drop_invalid_header_fields: true + http_x_amzn_tls_version_and_cipher_suite: true + http_xff_client_port: true + waf_fail_open: true + register: alb_2 + + - name: Verify ALB was not changed + ansible.builtin.assert: + that: + - alb_2 is not changed + - alb_2.listeners[0].rules | length == 1 + - alb_2.security_groups | length == 1 + - alb_2.security_groups[0] == sec_group.group_id + - alb_2.ip_address_type == 'dualstack' + - alb_2.access_logs_s3_enabled | bool + - alb_2.access_logs_s3_bucket == "{{ s3_bucket_name }}" + - alb_2.access_logs_s3_prefix == "alb-logs" + - not alb_2.routing_http2_enabled | bool + - alb_2.routing_http_desync_mitigation_mode == 'monitor' + - alb_2.routing_http_drop_invalid_header_fields_enabled | bool + - alb_2.routing_http_x_amzn_tls_version_and_cipher_suite_enabled | bool + - alb_2.routing_http_xff_client_port_enabled | bool + - alb_2.idle_timeout_timeout_seconds == "120" + - alb_2.waf_fail_open_enabled | bool + + # ------------------------------------------------------------------------------------------ + + - name: Update an ALB with ip address type - check_mode + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_name }}' + ip_address_type: dualstack + register: alb + check_mode: yes + - assert: + that: + - alb is changed + - alb.msg is match('Would have updated ALB if not in check mode.') + + - name: Update an ALB with ip address type + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_name }}' + ip_address_type: dualstack + register: alb + - assert: + that: + - alb is changed + - alb.ip_address_type == 'dualstack' + - alb.listeners[0].rules | length == 1 + - alb.routing_http2_enabled | bool + - 
alb.routing_http_desync_mitigation_mode == 'defensive' + - not alb.routing_http_drop_invalid_header_fields_enabled | bool + - not alb.routing_http_x_amzn_tls_version_and_cipher_suite_enabled | bool + - not alb.routing_http_xff_client_port_enabled | bool + - not alb.waf_fail_open_enabled | bool + + - name: Create an ALB with ip address type (idempotence) - check_mode + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_name }}' + ip_address_type: dualstack + register: alb + check_mode: yes + - assert: + that: + - alb is not changed + - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Create an ALB with ip address type (idempotence) + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_name }}' + ip_address_type: dualstack + register: alb + - assert: + that: + - alb is not changed + - alb.ip_address_type == 'dualstack' + - alb.routing_http2_enabled | bool + - alb.routing_http_desync_mitigation_mode == 'defensive' + - not alb.routing_http_drop_invalid_header_fields_enabled | bool + - not alb.routing_http_x_amzn_tls_version_and_cipher_suite_enabled | bool + - not alb.routing_http_xff_client_port_enabled | bool + - not alb.waf_fail_open_enabled | bool + + # ------------------------------------------------------------------------------------------ + + - name: Update an ALB with different attributes - check_mode + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_name }}' + ip_address_type: dualstack + http2: no + http_desync_mitigation_mode: monitor + http_drop_invalid_header_fields: yes + http_x_amzn_tls_version_and_cipher_suite: yes + http_xff_client_port: yes + waf_fail_open: yes + register: alb + check_mode: yes + - assert: + that: + - alb is changed + - alb.msg is match('Would have updated ALB if not in check mode.') + + - name: Update an ALB with different attributes + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_name }}' + ip_address_type: dualstack + http2: no + http_desync_mitigation_mode: monitor + http_drop_invalid_header_fields: yes + http_x_amzn_tls_version_and_cipher_suite: yes + http_xff_client_port: yes + waf_fail_open: yes + register: alb + - assert: + that: + - alb is changed + - alb.ip_address_type == 'dualstack' + - not alb.routing_http2_enabled | bool + - alb.routing_http_desync_mitigation_mode == 'monitor' + - alb.routing_http_drop_invalid_header_fields_enabled | bool + - alb.routing_http_x_amzn_tls_version_and_cipher_suite_enabled | bool + - alb.routing_http_xff_client_port_enabled | bool + - alb.waf_fail_open_enabled | bool + + - name: Update an ALB with different attributes (idempotence) - check_mode + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - 
Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_name }}' + ip_address_type: dualstack + http2: no + http_desync_mitigation_mode: monitor + http_drop_invalid_header_fields: yes + http_x_amzn_tls_version_and_cipher_suite: yes + http_xff_client_port: yes + waf_fail_open: yes + register: alb + check_mode: yes + - assert: + that: + - alb is not changed + - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Update an ALB with different attributes (idempotence) + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_name }}' + ip_address_type: dualstack + http2: no + http_desync_mitigation_mode: monitor + http_drop_invalid_header_fields: yes + http_x_amzn_tls_version_and_cipher_suite: yes + http_xff_client_port: yes + waf_fail_open: yes + register: alb + - assert: + that: + - alb is not changed + - alb.ip_address_type == 'dualstack' + - not alb.routing_http2_enabled | bool + - alb.routing_http_desync_mitigation_mode == 'monitor' + - alb.routing_http_drop_invalid_header_fields_enabled | bool + - alb.routing_http_x_amzn_tls_version_and_cipher_suite_enabled | bool + - alb.routing_http_xff_client_port_enabled | bool + - alb.waf_fail_open_enabled | bool + + # ------------------------------------------------------------------------------------------ + + - name: Update an ALB with different ip address type - check_mode + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_name }}' + ip_address_type: ipv4 + http2: no + http_desync_mitigation_mode: monitor + http_drop_invalid_header_fields: yes + http_x_amzn_tls_version_and_cipher_suite: yes + http_xff_client_port: yes + waf_fail_open: yes + register: alb + check_mode: yes + - assert: + that: + - alb is changed + - alb.msg is match('Would have updated ALB if not in check mode.') + + - name: Update an ALB with different ip address type + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_name }}' + ip_address_type: ipv4 + http2: no + http_desync_mitigation_mode: monitor + http_drop_invalid_header_fields: yes + http_x_amzn_tls_version_and_cipher_suite: yes + http_xff_client_port: yes + waf_fail_open: yes + register: alb + - assert: + that: + - alb is changed + - alb.ip_address_type == 'ipv4' + - not alb.routing_http2_enabled | bool + - alb.routing_http_desync_mitigation_mode == 'monitor' + - alb.routing_http_drop_invalid_header_fields_enabled | bool + - alb.routing_http_x_amzn_tls_version_and_cipher_suite_enabled | bool + - alb.routing_http_xff_client_port_enabled | bool + - alb.waf_fail_open_enabled | bool + + - name: Update an ALB with different ip address type (idempotence) - check_mode + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_name }}' + ip_address_type: ipv4 + http2: no + http_desync_mitigation_mode: 
monitor + http_drop_invalid_header_fields: yes + http_x_amzn_tls_version_and_cipher_suite: yes + http_xff_client_port: yes + waf_fail_open: yes + register: alb + check_mode: yes + - assert: + that: + - alb is not changed + - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Update an ALB with different ip address type (idempotence) + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_name }}' + ip_address_type: ipv4 + http2: no + http_desync_mitigation_mode: monitor + http_drop_invalid_header_fields: yes + http_x_amzn_tls_version_and_cipher_suite: yes + http_xff_client_port: yes + waf_fail_open: yes + register: alb + - assert: + that: + - alb is not changed + - alb.ip_address_type == 'ipv4' + - not alb.routing_http2_enabled | bool + - alb.routing_http_desync_mitigation_mode == 'monitor' + - alb.routing_http_drop_invalid_header_fields_enabled | bool + - alb.routing_http_x_amzn_tls_version_and_cipher_suite_enabled | bool + - alb.routing_http_xff_client_port_enabled | bool + - alb.waf_fail_open_enabled | bool + + # ------------------------------------------------------------------------------------------ + + - name: Update an ALB with different listener by adding rule - check_mode + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_name }}' + Rules: + - Conditions: + - Field: path-pattern + Values: + - /test + Priority: '1' + Actions: + - TargetGroupName: '{{ tg_name }}' + Type: forward + register: alb + check_mode: yes + - assert: + that: + - alb is changed + - alb.msg is match('Would have updated ALB if not in check mode.') + + - name: Update an ALB with different listener by adding rule + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_name }}' + Rules: + - Conditions: + - Field: path-pattern + Values: + - /test + Priority: '1' + Actions: + - TargetGroupName: '{{ tg_name }}' + Type: forward + register: alb + - assert: + that: + - alb is changed + - alb.listeners[0].rules | length == 2 + - "'1' in {{ alb.listeners[0].rules | map(attribute='priority') }}" + + - name: Update an ALB with different listener by adding rule (idempotence) - check_mode + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_name }}' + Rules: + - Conditions: + - Field: path-pattern + Values: + - /test + Priority: '1' + Actions: + - TargetGroupName: '{{ tg_name }}' + Type: forward + register: alb + check_mode: yes + - assert: + that: + - alb is not changed + - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Update an ALB with different listener by adding rule (idempotence) + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + 
- Type: forward + TargetGroupName: '{{ tg_name }}' + Rules: + - Conditions: + - Field: path-pattern + Values: + - /test + Priority: '1' + Actions: + - TargetGroupName: '{{ tg_name }}' + Type: forward + register: alb + - assert: + that: + - alb is not changed + - alb.listeners[0].rules | length == 2 + - "'1' in {{ alb.listeners[0].rules | map(attribute='priority') }}" + + # ------------------------------------------------------------------------------------------ + + - name: Update an ALB with different listener by modifying rule - check_mode + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_name }}' + Rules: + - Conditions: + - Field: path-pattern + Values: + - /test + Priority: '2' + Actions: + - TargetGroupName: '{{ tg_name }}' + Type: forward + register: alb + check_mode: yes + - assert: + that: + - alb is changed + - alb.msg is match('Would have updated ALB if not in check mode.') + + - name: Update an ALB with different listener by modifying rule + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_name }}' + Rules: + - Conditions: + - Field: path-pattern + Values: + - /test + Priority: '2' + Actions: + - TargetGroupName: '{{ tg_name }}' + Type: forward + register: alb + - assert: + that: + - alb is changed + - alb.listeners[0].rules | length == 2 + - "'2' in {{ alb.listeners[0].rules | map(attribute='priority') }}" + + - name: Update an ALB with different listener by modifying rule (idempotence) - + check_mode + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_name }}' + Rules: + - Conditions: + - Field: path-pattern + Values: + - /test + Priority: '2' + Actions: + - TargetGroupName: '{{ tg_name }}' + Type: forward + register: alb + check_mode: yes + - assert: + that: + - alb is not changed + - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Update an ALB with different listener by modifying rule (idempotence) + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_name }}' + Rules: + - Conditions: + - Field: path-pattern + Values: + - /test + Priority: '2' + Actions: + - TargetGroupName: '{{ tg_name }}' + Type: forward + register: alb + - assert: + that: + - alb is not changed + - alb.listeners[0].rules | length == 2 + - "'2' in {{ alb.listeners[0].rules | map(attribute='priority') }}" + + # ------------------------------------------------------------------------------------------ + + - name: Update an ALB with different listener by deleting rule - check_mode + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_name }}' + Rules: [] + register: alb + check_mode: yes + - assert: + 
that: + - alb is changed + - alb.msg is match('Would have updated ALB if not in check mode.') + + - name: Update an ALB with different listener by deleting rule + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_name }}' + Rules: [] + register: alb + - assert: + that: + - alb is changed + - alb.listeners[0].rules | length == 1 + - "'2' not in {{ alb.listeners[0].rules | map(attribute='priority') }}" + + - name: Update an ALB with different listener by deleting rule (idempotence) - check_mode + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_name }}' + Rules: [] + register: alb + check_mode: yes + - assert: + that: + - alb is not changed + - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Update an ALB with different listener by deleting rule (idempotence) + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_name }}' + Rules: [] + register: alb + - assert: + that: + - alb is not changed + - alb.listeners[0].rules | length == 1 + - "'2' not in {{ alb.listeners[0].rules | map(attribute='priority') }}" + + # ------------------------------------------------------------------------------------------ + + - name: Update an ALB by deleting listener - check_mode + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: [] + register: alb + check_mode: yes + - assert: + that: + - alb is changed + - alb.msg is match('Would have updated ALB if not in check mode.') + + - name: Update an ALB by deleting listener + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: [] + register: alb + - assert: + that: + - alb is changed + - not alb.listeners + + - name: Update an ALB by deleting listener (idempotence) - check_mode + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: [] + register: alb + check_mode: yes + - assert: + that: + - alb is not changed + - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Update an ALB by deleting listener (idempotence) + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: [] + register: alb + - assert: + that: + - alb is not changed + - not alb.listeners + + # ------------------------------------------------------------------------------------------ + + - name: Update an ALB by adding tags - check_mode + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + tags: + created_by: ALB test {{ resource_prefix }} + register: alb + check_mode: yes + - assert: + that: + - alb is changed + - alb.msg is match('Would have updated ALB if not in 
check mode.') + + - name: Update an ALB by adding tags + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + tags: + created_by: ALB test {{ resource_prefix }} + register: alb + - assert: + that: + - alb is changed + - 'alb.tags == {"created_by": "ALB test {{ resource_prefix }}"}' + + - name: Update an ALB by adding tags (idempotence) - check_mode + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + tags: + created_by: ALB test {{ resource_prefix }} + register: alb + check_mode: yes + - assert: + that: + - alb is not changed + - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Update an ALB by adding tags (idempotence) + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + tags: + created_by: ALB test {{ resource_prefix }} + register: alb + - assert: + that: + - alb is not changed + - 'alb.tags == {"created_by": "ALB test {{ resource_prefix }}"}' + + # ------------------------------------------------------------------------------------------ + + - name: Update an ALB by modifying tags - check_mode + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + tags: + created_by: ALB test {{ resource_prefix }}-2 + register: alb + check_mode: yes + - assert: + that: + - alb is changed + - alb.msg is match('Would have updated ALB if not in check mode.') + + - name: Update an ALB by modifying tags + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + tags: + created_by: ALB test {{ resource_prefix }}-2 + register: alb + - assert: + that: + - alb is changed + - 'alb.tags == {"created_by": "ALB test {{ resource_prefix }}-2"}' + + - name: Update an ALB by modifying tags (idempotence) - check_mode + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + tags: + created_by: ALB test {{ resource_prefix }}-2 + register: alb + check_mode: yes + - assert: + that: + - alb is not changed + - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Update an ALB by modifying tags (idempotence) + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + tags: + created_by: ALB test {{ resource_prefix }}-2 + register: alb + - assert: + that: + - alb is not changed + - 'alb.tags == {"created_by": "ALB test {{ resource_prefix }}-2"}' + + # ------------------------------------------------------------------------------------------ + + - name: Update an ALB by removing tags - check_mode + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + tags: {} + register: alb + check_mode: yes + - assert: + that: + - alb is changed + - alb.msg is match('Would have updated ALB if not in check mode.') + + - name: Update an ALB by removing tags + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + tags: {} + register: alb + - assert: + that: + - alb is changed + - not 
alb.tags + + - name: Update an ALB by removing tags (idempotence) - check_mode + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + tags: {} + register: alb + check_mode: yes + - assert: + that: + - alb is not changed + - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Update an ALB by removing tags (idempotence) + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + tags: {} + register: alb + - assert: + that: + - alb is not changed + - not alb.tags + + # ------------------------------------------------------------------------------------------ + + - name: Update an ALB by changing security group - check_mode + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group2.group_id }}' + state: present + register: alb + check_mode: yes + - assert: + that: + - alb is changed + - alb.msg is match('Would have updated ALB if not in check mode.') + + - name: Update an ALB by changing security group + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group2.group_id }}' + state: present + register: alb + - assert: + that: + - alb is changed + - alb.security_groups[0] == sec_group2.group_id + + - name: Update an ALB by changing security group (idempotence) - check_mode + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group2.group_id }}' + state: present + register: alb + check_mode: yes + - assert: + that: + - alb is not changed + - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Update an ALB by changing security group (idempotence) + elb_application_lb: + name: '{{ alb_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group2.group_id }}' + state: present + register: alb + - assert: + that: + - alb is not changed + - alb.security_groups[0] == sec_group2.group_id + + # ------------------------------------------------------------------------------------------ + + - name: Ensure elb_application_lb_info supports check_mode + elb_application_lb_info: + register: alb_info + check_mode: yes + - assert: + that: + - alb_info.load_balancers | length > 0 + + - name: Get ALB application info using no args + elb_application_lb_info: + register: alb_info + - assert: + that: + - alb_info.load_balancers | length > 0 + + - name: Get ALB application info using load balancer arn + elb_application_lb_info: + load_balancer_arns: + - '{{ alb.load_balancer_arn }}' + register: alb_info + - assert: + that: + - alb_info.load_balancers[0].security_groups[0] == sec_group2.group_id + + - name: Get ALB application info using load balancer name + elb_application_lb_info: + names: + - '{{ alb.load_balancer_name }}' + register: alb_info + - assert: + that: + - alb_info.load_balancers[0].security_groups[0] == sec_group2.group_id + + # ------------------------------------------------------------------------------------------ + + - name: Delete an ALB - check_mode + elb_application_lb: + name: '{{ alb_name }}' + state: absent + register: alb + check_mode: yes + - assert: + that: + - alb is changed + - alb.msg is match('Would have deleted ALB if not in check mode.') + + - name: Delete an ALB + elb_application_lb: + name: '{{ alb_name }}' + state: absent + register: alb + - assert: + that: + - alb 
is changed + + - name: Delete an ALB (idempotence) - check_mode + elb_application_lb: + name: '{{ alb_name }}' + state: absent + register: alb + check_mode: yes + - assert: + that: + - alb is not changed + - alb.msg is match('IN CHECK MODE - ALB already absent.') + + - name: Delete an ALB (idempotence) + elb_application_lb: + name: '{{ alb_name }}' + state: absent + register: alb + - assert: + that: + - alb is not changed + + # ----- Cleanup ------------------------------------------------------------------------------ + + always: + - name: Destroy ALB + elb_application_lb: + name: '{{ alb_name }}' + state: absent + wait: true + wait_timeout: 600 + ignore_errors: true + - name: Destroy ALB 2 + amazon.aws.elb_application_lb: + name: '{{ alb_2_name }}' + state: absent + wait: true + wait_timeout: 600 + ignore_errors: true + - name: Destroy target group if it was created + elb_target_group: + name: '{{ tg_name }}' + protocol: http + port: 80 + vpc_id: '{{ vpc_id }}' + state: absent + wait: true + wait_timeout: 600 + register: remove_tg + retries: 5 + delay: 3 + until: remove_tg is success + when: tg is defined + ignore_errors: true + - name: Destroy target group 2 if it was created + community.aws.elb_target_group: + name: '{{ tg_2_name }}' + protocol: http + port: 80 + vpc_id: '{{ vpc_id }}' + state: absent + wait: true + wait_timeout: 600 + register: remove_tg_2 + retries: 5 + delay: 3 + until: remove_tg_2 is success + when: tg_2 is defined + ignore_errors: true + - name: Destroy sec groups + ec2_group: + name: '{{ item }}' + description: security group for Ansible ALB integration tests + state: absent + vpc_id: '{{ vpc_id }}' + register: remove_sg + retries: 10 + delay: 5 + until: remove_sg is success + ignore_errors: true + with_items: + - '{{ resource_prefix }}' + - '{{ resource_prefix }}-2' + + - name: Destroy route table + ec2_vpc_route_table: + vpc_id: '{{ vpc_id }}' + route_table_id: '{{ route_table.route_table.route_table_id }}' + lookup: id + state: absent + register: remove_rt + retries: 10 + delay: 5 + until: remove_rt is success + ignore_errors: true + - name: Destroy subnets + ec2_vpc_subnet: + cidr: '{{ item }}' + vpc_id: '{{ vpc_id }}' + state: absent + register: remove_subnet + retries: 10 + delay: 5 + until: remove_subnet is success + with_items: + - '{{ private_subnet_cidr_1 }}' + - '{{ private_subnet_cidr_2 }}' + - '{{ public_subnet_cidr_1 }}' + - '{{ public_subnet_cidr_2 }}' + ignore_errors: true + - name: Destroy internet gateway + ec2_vpc_igw: + vpc_id: '{{ vpc_id }}' + tags: + Name: '{{ resource_prefix }}' + state: absent + register: remove_igw + retries: 10 + delay: 5 + until: remove_igw is success + ignore_errors: true + - name: Destroy VPC + ec2_vpc_net: + cidr_block: '{{ vpc_cidr }}' + name: '{{ resource_prefix }}_vpc' + state: absent + register: remove_vpc + retries: 10 + delay: 5 + until: remove_vpc is success + ignore_errors: true + - name: Destroy ELB access log test file + amazon.aws.s3_object: + bucket: "{{ s3_bucket_name }}" + mode: delobj + object: "alb-logs/AWSLogs/{{ aws_account }}/ELBAccessLogTestFile" + - name: Destroy S3 bucket + amazon.aws.s3_bucket: + name: "{{ s3_bucket_name }}" + state: absent + force: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/templates/policy.json b/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/templates/policy.json new file mode 100644 index 000000000..aa6ebf9b6 --- /dev/null +++ 
b/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/templates/policy.json @@ -0,0 +1,13 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam::{{ elb_account_id }}:root" + }, + "Action": "s3:PutObject", + "Resource": "arn:aws:s3:::{{ s3_bucket_name }}/alb-logs/AWSLogs/{{ aws_account }}/*" + } + ] +} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/aliases b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/aliases new file mode 100644 index 000000000..8e0974e45 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/aliases @@ -0,0 +1,4 @@ +# 20+ minutes +slow + +cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/defaults/main.yml new file mode 100644 index 000000000..42339f0b8 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/defaults/main.yml @@ -0,0 +1,170 @@ +--- +# defaults file for elb_classic_lb +elb_name: 'ansible-test-{{ tiny_prefix }}' + +vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/16' +subnet_cidr_1: '10.{{ 256 | random(seed=resource_prefix) }}.1.0/24' +subnet_cidr_2: '10.{{ 256 | random(seed=resource_prefix) }}.2.0/24' +subnet_cidr_3: '10.{{ 256 | random(seed=resource_prefix) }}.3.0/24' +subnet_cidr_4: '10.{{ 256 | random(seed=resource_prefix) }}.4.0/24' + +default_tags: + snake_case_key: snake_case_value + camelCaseKey: camelCaseValue + PascalCaseKey: PascalCaseValue + "key with spaces": value with spaces + "Upper With Spaces": Upper With Spaces + +partial_tags: + snake_case_key: snake_case_value + camelCaseKey: camelCaseValue + +updated_tags: + updated_snake_case_key: updated_snake_case_value + updatedCamelCaseKey: updatedCamelCaseValue + UpdatedPascalCaseKey: UpdatedPascalCaseValue + "updated key with spaces": updated value with spaces + "updated Upper With Spaces": Updated Upper With Spaces + +default_listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + - protocol: http + load_balancer_port: 8080 + instance_port: 8080 + instance_protocol: http +default_listener_tuples: + - [80, 80, "HTTP", "HTTP"] + - [8080, 8080, "HTTP", "HTTP"] + +purged_listeners: + - protocol: http + load_balancer_port: 8080 + instance_port: 8080 + instance_protocol: http +purged_listener_tuples: + - [8080, 8080, "HTTP", "HTTP"] + +updated_listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 8181 + - protocol: http + load_balancer_port: 8080 + instance_port: 8080 + instance_protocol: http +updated_listener_tuples: + - [80, 8181, "HTTP", "HTTP"] + - [8080, 8080, "HTTP", "HTTP"] + +unproxied_listener: + - protocol: http + load_balancer_port: 80 + instance_port: 8181 + proxy_protocol: False +unproxied_listener_tuples: + - [80, 8181, "HTTP", "HTTP"] + +proxied_listener: + - protocol: http + load_balancer_port: 80 + instance_port: 8181 + proxy_protocol: True +proxied_listener_tuples: + - [80, 8181, "HTTP", "HTTP"] + +ssh_listeners: + - protocol: tcp + load_balancer_port: 22 + instance_port: 22 + instance_protocol: tcp +ssh_listener_tuples: + - [22, 22, "TCP", "TCP"] + +default_health_check: + ping_protocol: http + ping_port: 80 + ping_path: "/index.html" + response_timeout: 5 + interval: 30 + unhealthy_threshold: 2 + healthy_threshold: 10 +default_health_check_target: "HTTP:80/index.html" + 
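+# Each *_health_check_target value pairs its health-check dict with the Classic +# ELB health check "Target" string, in the form "PROTOCOL:PORT[/PATH]" (the path +# component is only valid for HTTP/HTTPS checks); e.g. default_health_check above +# yields "HTTP:80/index.html" and nonhttp_health_check below yields "TCP:8282". + 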
+updated_health_check: + ping_protocol: http + ping_port: 8181 + ping_path: "/healthz" + response_timeout: 15 + interval: 42 + unhealthy_threshold: 7 + healthy_threshold: 6 +updated_health_check_target: "HTTP:8181/healthz" + +nonhttp_health_check: + ping_protocol: tcp + ping_port: 8282 + response_timeout: 16 + interval: 43 + unhealthy_threshold: 8 + healthy_threshold: 2 +nonhttp_health_check_target: "TCP:8282" + +ssh_health_check: + ping_protocol: tcp + ping_port: 22 + response_timeout: 5 + interval: 10 + unhealthy_threshold: 2 + healthy_threshold: 2 +ssh_health_check_target: "TCP:22" + +default_idle_timeout: 25 +updated_idle_timeout: 50 +default_drain_timeout: 15 +updated_drain_timeout: 25 + +app_stickiness: + type: application + cookie: MyCookie + enabled: true + +updated_app_stickiness: + type: application + cookie: AnotherCookie + +lb_stickiness: + type: loadbalancer + +updated_lb_stickiness: + type: loadbalancer + expiration: 600 + +# Amazon's SDKs don't provide the list of account IDs; Amazon only provides a +# web page. If you want to run the tests outside the US regions, you'll need to +# update this. +# https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-access-logs.html +access_log_account_id_map: + us-east-1: '127311923021' + us-east-2: '033677994240' + us-west-1: '027434742980' + us-west-2: '797873946194' + us-gov-west-1: '048591011584' + us-gov-east-1: '190560391635' + +access_log_account_id: '{{ access_log_account_id_map[aws_region] }}' + +s3_logging_bucket_a: 'ansible-test-{{ tiny_prefix }}-a' +s3_logging_bucket_b: 'ansible-test-{{ tiny_prefix }}-b' +default_logging_prefix: 'logs' +updated_logging_prefix: 'mylogs' +default_logging_interval: 5 +updated_logging_interval: 60 + +local_certs: + - priv_key: "{{ remote_tmp_dir }}/private-1.pem" + cert: "{{ remote_tmp_dir }}/public-1.pem" + csr: "{{ remote_tmp_dir }}/csr-1.csr" + domain: "elb-classic.{{ tiny_prefix }}.ansible.test" + name: "{{ resource_prefix }}_{{ resource_prefix }}_1" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/meta/main.yml new file mode 100644 index 000000000..fd89b0e4f --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - setup_ec2_facts + - setup_remote_tmp_dir diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/basic_internal.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/basic_internal.yml new file mode 100644 index 000000000..28207ba69 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/basic_internal.yml @@ -0,0 +1,292 @@ +--- +- block: + # For creation, test some basic behaviour + - module_defaults: + elb_classic_lb: + # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}'] + listeners: '{{ default_listeners }}' + wait: true + scheme: 'internal' + subnets: ['{{ subnet_a }}', '{{ subnet_b }}'] + block: + # ============================================================ + # create test elb with listeners, certificate, and health check + + - name: Create internal ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + register: result + check_mode: true + + - assert: + that: + - result is changed + - result.elb.status == "created" + + - name: Create ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + register: 
result + + - assert: + that: + - result is changed + - result.elb.status == "created" + - availability_zone_a in result.elb.zones + - availability_zone_b in result.elb.zones + - subnet_a in result.elb.subnets + - subnet_b in result.elb.subnets + + - name: Create internal ELB idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + register: result + check_mode: true + + - assert: + that: + - result is not changed + - result.elb.status == "exists" + + - name: Create internal ELB idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + register: result + + - assert: + that: + - result is not changed + - result.elb.status == "exists" + - availability_zone_a in result.elb.zones + - availability_zone_b in result.elb.zones + - subnet_a in result.elb.subnets + - subnet_b in result.elb.subnets + + - ec2_eni_info: + filters: + description: 'ELB {{ elb_name }}' + register: info + + - assert: + that: + - info.network_interfaces | length > 0 + + - elb_classic_lb_info: + names: ['{{ elb_name }}'] + register: info + + - assert: + that: + - info.elbs | length > 0 + + # ============================================================ + # Now that we're outside of the creation tests, we drop the defaults + # ============================================================ + + - name: Add a subnet - no purge (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + subnets: ['{{ subnet_c }}'] + register: result + check_mode: true + + - assert: + that: + - result is changed + - result.elb.status == "exists" + + - name: Add a subnet - no purge + elb_classic_lb: + name: "{{ elb_name }}" + state: present + subnets: ['{{ subnet_c }}'] + register: result + + - assert: + that: + - result is changed + - result.elb.status == "exists" + - availability_zone_a in result.elb.zones + - availability_zone_b in result.elb.zones + - availability_zone_c in result.elb.zones + - subnet_a in result.elb.subnets + - subnet_b in result.elb.subnets + - subnet_c in result.elb.subnets + + - name: Add a subnet - no purge - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + subnets: ['{{ subnet_c }}'] + register: result + check_mode: true + + - assert: + that: + - result is not changed + - result.elb.status == "exists" + + - name: Add a subnet - no purge - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + subnets: ['{{ subnet_c }}'] + register: result + + - assert: + that: + - result is not changed + - result.elb.status == "exists" + - availability_zone_a in result.elb.zones + - availability_zone_b in result.elb.zones + - availability_zone_c in result.elb.zones + - subnet_a in result.elb.subnets + - subnet_b in result.elb.subnets + - subnet_c in result.elb.subnets + + # While purging, try adding a subnet from the same AZ as one we're purging. + # This is important because you can't have two subnets from the same AZ + # attached to an ELB at the same time.
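+ # For illustration: starting from subnets [subnet_a, subnet_b], a request for + # subnets: ['{{ subnet_c }}', '{{ subnet_a2 }}'] with purge_subnets: true means the + # module is expected to detach subnet_a before attaching subnet_a2, since both + # sit in availability_zone_a.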
+ - name: Add a subnet - purge (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + subnets: ['{{ subnet_c }}', '{{ subnet_a2 }}'] + purge_subnets: true + register: result + check_mode: true + + - assert: + that: + - result is changed + - result.elb.status == "exists" + + - name: Add a subnet - purge + elb_classic_lb: + name: "{{ elb_name }}" + state: present + subnets: ['{{ subnet_c }}', '{{ subnet_a2 }}'] + purge_subnets: true + register: result + + - assert: + that: + - result is changed + - result.elb.status == "exists" + - availability_zone_a in result.elb.zones + - availability_zone_b not in result.elb.zones + - availability_zone_c in result.elb.zones + - subnet_a not in result.elb.subnets + - subnet_b not in result.elb.subnets + - subnet_c in result.elb.subnets + - subnet_a2 in result.elb.subnets + + - name: Add a subnet - purge - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + subnets: ['{{ subnet_c }}', '{{ subnet_a2 }}'] + purge_subnets: true + register: result + check_mode: true + + - assert: + that: + - result is not changed + - result.elb.status == "exists" + + - name: Add a subnet - purge - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + subnets: ['{{ subnet_c }}', '{{ subnet_a2 }}'] + purge_subnets: true + register: result + + - assert: + that: + - result is not changed + - result.elb.status == "exists" + - availability_zone_a in result.elb.zones + - availability_zone_b not in result.elb.zones + - availability_zone_c in result.elb.zones + - subnet_a not in result.elb.subnets + - subnet_b not in result.elb.subnets + - subnet_c in result.elb.subnets + - subnet_a2 in result.elb.subnets + + # ============================================================ + + - name: remove the test load balancer completely (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: absent + wait: true + register: result + check_mode: true + + - name: assert the load balancer would be removed + assert: + that: + - result is changed + - 'result.elb.name == "{{ elb_name }}"' + - 'result.elb.status == "deleted"' + + - name: remove the test load balancer completely + elb_classic_lb: + name: "{{ elb_name }}" + state: absent + wait: true + register: result + + - name: assert the load balancer was removed + assert: + that: + - result is changed + - 'result.elb.name == "{{ elb_name }}"' + - 'result.elb.status == "deleted"' + + - name: remove the test load balancer completely (idempotency) (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: absent + wait: true + register: result + check_mode: true + + - name: assert the load balancer is gone + assert: + that: + - result is not changed + - 'result.elb.name == "{{ elb_name }}"' + - 'result.elb.status == "gone"' + + - name: remove the test load balancer completely (idempotency) + elb_classic_lb: + name: "{{ elb_name }}" + state: absent + wait: true + register: result + + - name: assert the load balancer is gone + assert: + that: + - result is not changed + - 'result.elb.name == "{{ elb_name }}"' + - 'result.elb.status == "gone"' + + always: + + # ============================================================ + - name: remove the test load balancer + elb_classic_lb: + name: "{{ elb_name }}" + state: absent + wait: true + register: result + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/basic_public.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/basic_public.yml 
new file mode 100644 index 000000000..d76f62be8 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/basic_public.yml @@ -0,0 +1,273 @@ +--- +- block: + # For creation test some basic behaviour + - module_defaults: + elb_classic_lb: + zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}'] + listeners: '{{ default_listeners }}' + wait: true + scheme: 'internet-facing' + # subnets: ['{{ subnet_a }}', '{{ subnet_b }}'] + block: + # ============================================================ + # create test elb with listeners, certificate, and health check + + - name: Create public ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + register: result + check_mode: true + + - assert: + that: + - result is changed + - result.elb.status == "created" + + - name: Create public ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + register: result + + - assert: + that: + - result is changed + - result.elb.status == "created" + - availability_zone_a in result.elb.zones + - availability_zone_b in result.elb.zones + + - name: Create public ELB idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + register: result + check_mode: true + + - assert: + that: + - result is not changed + - result.elb.status == "exists" + + - name: Create public ELB idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + register: result + + - assert: + that: + - result is not changed + - result.elb.status == "exists" + - availability_zone_a in result.elb.zones + - availability_zone_b in result.elb.zones + + - ec2_eni_info: + filters: + description: 'ELB {{ elb_name }}' + register: info + + - assert: + that: + - info.network_interfaces | length > 0 + + - elb_classic_lb_info: + names: ['{{ elb_name }}'] + register: info + + - assert: + that: + - info.elbs | length > 0 + + # ============================================================ + # Now that we're outside of the creation tests, we drop the defaults + # ============================================================ + + - name: Add a zone - no purge (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + zones: ['{{ availability_zone_c }}'] + register: result + check_mode: true + + - assert: + that: + - result is changed + - result.elb.status == "exists" + + - name: Add a zone - no purge + elb_classic_lb: + name: "{{ elb_name }}" + state: present + zones: ['{{ availability_zone_c }}'] + register: result + + - assert: + that: + - result is changed + - result.elb.status == "exists" + - availability_zone_a in result.elb.zones + - availability_zone_b in result.elb.zones + - availability_zone_c in result.elb.zones + + - name: Add a zone - no purge - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + zones: ['{{ availability_zone_c }}'] + register: result + check_mode: true + + - assert: + that: + - result is not changed + - result.elb.status == "exists" + + - name: Add a zone - no purge - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + zones: ['{{ availability_zone_c }}'] + register: result + + - assert: + that: + - result is not changed + - result.elb.status == "exists" + - availability_zone_a in result.elb.zones + - availability_zone_b in result.elb.zones + - availability_zone_c in result.elb.zones + + # ============================================================ + + - name: Remove a zone - purge (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present
+ zones: ['{{ availability_zone_c }}'] + purge_zones: true + register: result + check_mode: true + + - assert: + that: + - result is changed + - result.elb.status == "exists" + + - name: Remove a zone - purge + elb_classic_lb: + name: "{{ elb_name }}" + state: present + zones: ['{{ availability_zone_c }}'] + purge_zones: true + register: result + + - assert: + that: + - result is changed + - result.elb.status == "exists" + - availability_zone_a not in result.elb.zones + - availability_zone_b not in result.elb.zones + - availability_zone_c in result.elb.zones + + - name: Remove a zone - purge - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + zones: ['{{ availability_zone_c }}'] + purge_zones: true + register: result + check_mode: true + + - assert: + that: + - result is not changed + - result.elb.status == "exists" + + - name: Remove a zone - purge - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + zones: ['{{ availability_zone_c }}'] + purge_zones: true + register: result + + - assert: + that: + - result is not changed + - result.elb.status == "exists" + - availability_zone_a not in result.elb.zones + - availability_zone_b not in result.elb.zones + - availability_zone_c in result.elb.zones + + # ============================================================ + + - name: remove the test load balancer completely (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: absent + wait: true + register: result + check_mode: true + + - name: assert the load balancer would be removed + assert: + that: + - result is changed + - 'result.elb.name == "{{ elb_name }}"' + - 'result.elb.status == "deleted"' + + - name: remove the test load balancer completely + elb_classic_lb: + name: "{{ elb_name }}" + state: absent + wait: true + register: result + + - name: assert the load balancer was removed + assert: + that: + - result is changed + - 'result.elb.name == "{{ elb_name }}"' + - 'result.elb.status == "deleted"' + + - name: remove the test load balancer completely (idempotency) (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: absent + wait: true + register: result + check_mode: true + + - name: assert the load balancer is gone + assert: + that: + - result is not changed + - 'result.elb.name == "{{ elb_name }}"' + - 'result.elb.status == "gone"' + + - name: remove the test load balancer completely (idempotency) + elb_classic_lb: + name: "{{ elb_name }}" + state: absent + wait: true + register: result + + - name: assert the load balancer is gone + assert: + that: + - result is not changed + - 'result.elb.name == "{{ elb_name }}"' + - 'result.elb.status == "gone"' + + always: + + # ============================================================ + - name: remove the test load balancer + elb_classic_lb: + name: "{{ elb_name }}" + state: absent + wait: true + register: result + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_instances.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_instances.yml new file mode 100644 index 000000000..92f253959 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_instances.yml @@ -0,0 +1,9 @@ +--- +- name: Delete instance + ec2_instance: + instance_ids: + - '{{ instance_a }}' + - '{{ instance_b }}' + state: absent + wait: true + ignore_errors: true diff --git 
a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_s3.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_s3.yml new file mode 100644 index 000000000..955f3da62 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_s3.yml @@ -0,0 +1,32 @@ +--- +- name: Create empty temporary directory + tempfile: + state: directory + register: tmpdir + ignore_errors: true + +- name: Empty S3 buckets before deletion + s3_sync: + bucket: '{{ item }}' + delete: true + file_root: '{{ tmpdir.path }}' + ignore_errors: true + loop: + - '{{ s3_logging_bucket_a }}' + - '{{ s3_logging_bucket_b }}' + +- name: Delete S3 bucket for access logs + s3_bucket: + name: '{{ item }}' + state: absent + register: logging_bucket + ignore_errors: true + loop: + - '{{ s3_logging_bucket_a }}' + - '{{ s3_logging_bucket_b }}' + +- name: Remove temporary directory + file: + state: absent + path: "{{ tmpdir.path }}" + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_vpc.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_vpc.yml new file mode 100644 index 000000000..fd7ee965f --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_vpc.yml @@ -0,0 +1,29 @@ +--- +- name: delete security groups + ec2_group: + name: '{{ item }}' + state: absent + ignore_errors: true + loop: + - '{{ resource_prefix }}-a' + - '{{ resource_prefix }}-b' + - '{{ resource_prefix }}-c' + +- name: delete subnets + ec2_vpc_subnet: + vpc_id: '{{ setup_vpc.vpc.id }}' + cidr: '{{ item }}' + state: absent + ignore_errors: true + loop: + - '{{ subnet_cidr_1 }}' + - '{{ subnet_cidr_2 }}' + - '{{ subnet_cidr_3 }}' + - '{{ subnet_cidr_4 }}' + +- name: delete VPC + ec2_vpc_net: + cidr_block: '{{ vpc_cidr }}' + state: absent + name: '{{ resource_prefix }}' + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/complex_changes.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/complex_changes.yml new file mode 100644 index 000000000..5f75f84d3 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/complex_changes.yml @@ -0,0 +1,330 @@ +--- +- block: + - name: Create ELB for testing complex updates (CHECK) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}'] + listeners: '{{ default_listeners }}' + health_check: '{{ default_health_check }}' + wait: true + scheme: 'internal' + subnets: ['{{ subnet_a }}', '{{ subnet_b }}'] + security_group_names: ['{{ resource_prefix }}-a', '{{ resource_prefix }}-b'] + tags: '{{ default_tags }}' + cross_az_load_balancing: True + idle_timeout: '{{ default_idle_timeout }}' + connection_draining_timeout: '{{ default_drain_timeout }}' + access_logs: + interval: '{{ default_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + enabled: true + register: result + check_mode: True + + - name: Verify that we expect to change + assert: + that: + - result is changed + + - name: Create ELB for testing complex updates + elb_classic_lb: + name: "{{ elb_name }}" + state: present + # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}'] + listeners: '{{ default_listeners }}' + health_check: '{{ 
default_health_check }}' + wait: true + scheme: 'internal' + subnets: ['{{ subnet_a }}', '{{ subnet_b }}'] + security_group_names: ['{{ resource_prefix }}-a', '{{ resource_prefix }}-b'] + tags: '{{ default_tags }}' + cross_az_load_balancing: True + idle_timeout: '{{ default_idle_timeout }}' + connection_draining_timeout: '{{ default_drain_timeout }}' + access_logs: + interval: '{{ default_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + enabled: true + register: result + + - name: Verify that simple parameters were set + assert: + that: + - result is changed + - result.elb.status == "created" + - availability_zone_a in result.elb.zones + - availability_zone_b in result.elb.zones + - subnet_a in result.elb.subnets + - subnet_b in result.elb.subnets + - default_listener_tuples[0] in result.elb.listeners + - default_listener_tuples[1] in result.elb.listeners + - sg_a in result.elb.security_group_ids + - sg_b in result.elb.security_group_ids + - sg_c not in result.elb.security_group_ids + - result.elb.health_check.healthy_threshold == default_health_check['healthy_threshold'] + - result.elb.health_check.interval == default_health_check['interval'] + - result.elb.health_check.target == default_health_check_target + - result.elb.health_check.timeout == default_health_check['response_timeout'] + - result.elb.health_check.unhealthy_threshold == default_health_check['unhealthy_threshold'] + - result.elb.tags == default_tags + - result.elb.cross_az_load_balancing == 'yes' + - result.elb.idle_timeout == default_idle_timeout + - result.elb.connection_draining_timeout == default_drain_timeout + - result.elb.proxy_policy == None + - result.load_balancer.load_balancer_attributes.access_log.emit_interval == default_logging_interval + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_a + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == default_logging_prefix + - result.load_balancer.load_balancer_attributes.access_log.enabled == True + + - name: Create ELB for testing complex updates - idempotency (CHECK) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}'] + listeners: '{{ default_listeners }}' + health_check: '{{ default_health_check }}' + wait: true + scheme: 'internal' + subnets: ['{{ subnet_a }}', '{{ subnet_b }}'] + security_group_names: ['{{ resource_prefix }}-a', '{{ resource_prefix }}-b'] + tags: '{{ default_tags }}' + cross_az_load_balancing: True + idle_timeout: '{{ default_idle_timeout }}' + connection_draining_timeout: '{{ default_drain_timeout }}' + access_logs: + interval: '{{ default_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + enabled: true + register: result + check_mode: True + + - name: Verify that we expect to not change + assert: + that: + - result is not changed + + - name: Create ELB for testing complex updates - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}'] + listeners: '{{ default_listeners }}' + health_check: '{{ default_health_check }}' + wait: true + scheme: 'internal' + subnets: ['{{ subnet_a }}', '{{ subnet_b }}'] + security_group_names: ['{{ resource_prefix }}-a', '{{ resource_prefix }}-b'] + tags: '{{ default_tags }}' + cross_az_load_balancing: True + idle_timeout: '{{ default_idle_timeout }}' + 
connection_draining_timeout: '{{ default_drain_timeout }}' + access_logs: + interval: '{{ default_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + enabled: true + register: result + + - name: Verify that simple parameters were set + assert: + that: + - result is not changed + - result.elb.status == "exists" + - availability_zone_a in result.elb.zones + - availability_zone_b in result.elb.zones + - subnet_a in result.elb.subnets + - subnet_b in result.elb.subnets + - default_listener_tuples[0] in result.elb.listeners + - default_listener_tuples[1] in result.elb.listeners + - sg_a in result.elb.security_group_ids + - sg_b in result.elb.security_group_ids + - sg_c not in result.elb.security_group_ids + - result.elb.health_check.healthy_threshold == default_health_check['healthy_threshold'] + - result.elb.health_check.interval == default_health_check['interval'] + - result.elb.health_check.target == default_health_check_target + - result.elb.health_check.timeout == default_health_check['response_timeout'] + - result.elb.health_check.unhealthy_threshold == default_health_check['unhealthy_threshold'] + - result.elb.tags == default_tags + - result.elb.cross_az_load_balancing == 'yes' + - result.elb.idle_timeout == default_idle_timeout + - result.elb.connection_draining_timeout == default_drain_timeout + - result.elb.proxy_policy == None + - result.load_balancer.load_balancer_attributes.access_log.emit_interval == default_logging_interval + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_a + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == default_logging_prefix + - result.load_balancer.load_balancer_attributes.access_log.enabled == True + + ### + + - name: Perform complex update (CHECK) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}'] + listeners: '{{ updated_listeners }}' + health_check: '{{ updated_health_check }}' + wait: true + scheme: 'internal' + subnets: ['{{ subnet_a }}', '{{ subnet_b }}'] + security_group_names: ['{{ resource_prefix }}-c', '{{ resource_prefix }}-b'] + tags: '{{ updated_tags }}' + cross_az_load_balancing: False + idle_timeout: '{{ updated_idle_timeout }}' + connection_draining_timeout: '{{ default_drain_timeout }}' + access_logs: + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + enabled: true + register: result + check_mode: True + + - name: Verify that we expect to change + assert: + that: + - result is changed + + - name: Perform complex update + elb_classic_lb: + name: "{{ elb_name }}" + state: present + # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}'] + listeners: '{{ updated_listeners }}' + health_check: '{{ updated_health_check }}' + wait: true + scheme: 'internal' + subnets: ['{{ subnet_a }}', '{{ subnet_b }}'] + security_group_names: ['{{ resource_prefix }}-c', '{{ resource_prefix }}-b'] + tags: '{{ updated_tags }}' + cross_az_load_balancing: False + idle_timeout: '{{ updated_idle_timeout }}' + connection_draining_timeout: '{{ default_drain_timeout }}' + access_logs: + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + enabled: true + register: result + + - name: Verify that simple parameters were set + assert: + that: + - result is changed + - result.elb.status == 
"exists" + - availability_zone_a in result.elb.zones + - availability_zone_b in result.elb.zones + - subnet_a in result.elb.subnets + - subnet_b in result.elb.subnets + - updated_listener_tuples[0] in result.elb.listeners + - updated_listener_tuples[1] in result.elb.listeners + - sg_a not in result.elb.security_group_ids + - sg_b in result.elb.security_group_ids + - sg_c in result.elb.security_group_ids + - result.elb.health_check.healthy_threshold == updated_health_check['healthy_threshold'] + - result.elb.health_check.interval == updated_health_check['interval'] + - result.elb.health_check.target == updated_health_check_target + - result.elb.health_check.timeout == updated_health_check['response_timeout'] + - result.elb.health_check.unhealthy_threshold == updated_health_check['unhealthy_threshold'] + - result.elb.tags == updated_tags + - result.elb.cross_az_load_balancing == 'no' + - result.elb.idle_timeout == updated_idle_timeout + - result.elb.connection_draining_timeout == default_drain_timeout + - result.elb.proxy_policy == None + - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_a + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == default_logging_prefix + - result.load_balancer.load_balancer_attributes.access_log.enabled == True + + - name: Perform complex update idempotency (CHECK) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}'] + listeners: '{{ updated_listeners }}' + health_check: '{{ updated_health_check }}' + wait: true + scheme: 'internal' + subnets: ['{{ subnet_a }}', '{{ subnet_b }}'] + security_group_names: ['{{ resource_prefix }}-c', '{{ resource_prefix }}-b'] + tags: '{{ updated_tags }}' + cross_az_load_balancing: False + idle_timeout: '{{ updated_idle_timeout }}' + connection_draining_timeout: '{{ default_drain_timeout }}' + access_logs: + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + enabled: true + register: result + check_mode: True + + - name: Verify we expect to not change + assert: + that: + - result is not changed + + - name: Perform complex update - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}'] + listeners: '{{ updated_listeners }}' + health_check: '{{ updated_health_check }}' + wait: true + scheme: 'internal' + subnets: ['{{ subnet_a }}', '{{ subnet_b }}'] + security_group_names: ['{{ resource_prefix }}-c', '{{ resource_prefix }}-b'] + tags: '{{ updated_tags }}' + cross_az_load_balancing: False + idle_timeout: '{{ updated_idle_timeout }}' + connection_draining_timeout: '{{ default_drain_timeout }}' + access_logs: + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + enabled: true + register: result + + - name: Verify that simple parameters were set + assert: + that: + - result is not changed + - result.elb.status == "exists" + - availability_zone_a in result.elb.zones + - availability_zone_b in result.elb.zones + - subnet_a in result.elb.subnets + - subnet_b in result.elb.subnets + - updated_listener_tuples[0] in result.elb.listeners + - updated_listener_tuples[1] in result.elb.listeners + - sg_a not in result.elb.security_group_ids + - sg_b in 
result.elb.security_group_ids + - sg_c in result.elb.security_group_ids + - result.elb.health_check.healthy_threshold == updated_health_check['healthy_threshold'] + - result.elb.health_check.interval == updated_health_check['interval'] + - result.elb.health_check.target == updated_health_check_target + - result.elb.health_check.timeout == updated_health_check['response_timeout'] + - result.elb.health_check.unhealthy_threshold == updated_health_check['unhealthy_threshold'] + - result.elb.tags == updated_tags + - result.elb.cross_az_load_balancing == 'no' + - result.elb.idle_timeout == updated_idle_timeout + - result.elb.connection_draining_timeout == default_drain_timeout + - result.elb.proxy_policy == None + - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_a + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == default_logging_prefix + - result.load_balancer.load_balancer_attributes.access_log.enabled == True + + always: + + # ============================================================ + - name: remove the test load balancer + elb_classic_lb: + name: "{{ elb_name }}" + state: absent + wait: true + register: result + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/describe_region.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/describe_region.yml new file mode 100644 index 000000000..50679a8c1 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/describe_region.yml @@ -0,0 +1,10 @@ +--- +- name: list available AZs + aws_az_info: + register: region_azs + +- name: pick AZs for testing + set_fact: + availability_zone_a: "{{ region_azs.availability_zones[0].zone_name }}" + availability_zone_b: "{{ region_azs.availability_zones[1].zone_name }}" + availability_zone_c: "{{ region_azs.availability_zones[2].zone_name }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/https_listeners.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/https_listeners.yml new file mode 100644 index 000000000..1b29347f4 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/https_listeners.yml @@ -0,0 +1,132 @@ +# Create an SSL certificate to use in the test + +- name: Generate private key for local certs + with_items: '{{ local_certs }}' + community.crypto.openssl_privatekey: + path: '{{ item.priv_key }}' + type: RSA + size: 2048 + +- name: Generate an OpenSSL Certificate Signing Request for own certs + with_items: '{{ local_certs }}' + community.crypto.openssl_csr: + path: '{{ item.csr }}' + privatekey_path: '{{ item.priv_key }}' + common_name: '{{ item.domain }}' + +- name: Generate a Self Signed OpenSSL certificate for own certs + with_items: '{{ local_certs }}' + community.crypto.x509_certificate: + provider: selfsigned + path: '{{ item.cert }}' + csr_path: '{{ item.csr }}' + privatekey_path: '{{ item.priv_key }}' + selfsigned_digest: sha256 + register: cert_create_result + +- name: upload certificates first time + aws_acm: + name_tag: '{{ item.name }}' + certificate: '{{ lookup(''file'', item.cert ) }}' + private_key: '{{ lookup(''file'', item.priv_key ) }}' + state: present + tags: + Application: search + Environment: development + purge_tags: false + register: upload + with_items: '{{
local_certs }}' + until: upload is succeeded + retries: 5 + delay: 10 + +- set_fact: + cert_arn: '{{ upload.results[0].certificate.arn }}' + +# Create ELB definition + +- name: Create elb definition + set_fact: + elb_definition: + connection_draining_timeout: 5 + listeners: + - instance_port: 8080 + instance_protocol: http + load_balancer_port: 443 + protocol: https + ssl_certificate_id: "{{ cert_arn }}" + zones: ['{{ availability_zone_a }}'] + name: "{{ tiny_prefix }}-integration-test-lb" + region: "{{ aws_region }}" + state: present + tags: + TestId: "{{ tiny_prefix }}" + +# Test creating ELB + +- name: Create a classic ELB with https method listeners - check_mode + amazon.aws.elb_classic_lb: "{{ elb_definition }}" + register: elb_create_result + check_mode: true +- assert: + that: + - elb_create_result is changed + - elb_create_result.elb.status == "created" + - elb_create_result.load_balancer | length == 0 + - "'elasticloadbalancing:CreateLoadBalancer' not in {{ elb_create_result.resource_actions }}" + +- name: Create a classic ELB with https method listeners + amazon.aws.elb_classic_lb: "{{ elb_definition }}" + register: elb_create_result +- assert: + that: + - elb_create_result is changed + - elb_create_result.elb.status == "created" + - elb_create_result.load_balancer | length != 0 + - "'elasticloadbalancing:CreateLoadBalancer' in {{ elb_create_result.resource_actions }}" + +- name: Create a classic ELB with https method listeners - idempotency - check_mode + amazon.aws.elb_classic_lb: "{{ elb_definition }}" + register: elb_create_result + check_mode: true +- assert: + that: + - elb_create_result is not changed + - elb_create_result.elb.status != "created" + - elb_create_result.elb.status == "exists" + - elb_create_result.load_balancer | length != 0 + - "'elasticloadbalancing:CreateLoadBalancer' not in {{ elb_create_result.resource_actions }}" + +- name: Create a classic ELB with https method listeners - idempotency + amazon.aws.elb_classic_lb: "{{ elb_definition }}" + register: elb_create_result +- assert: + that: + - elb_create_result is not changed + - elb_create_result.elb.status != "created" + - elb_create_result.elb.status == "exists" + - elb_create_result.load_balancer | length != 0 + - "'elasticloadbalancing:CreateLoadBalancer' not in {{ elb_create_result.resource_actions }}" + +# Remove ELB and certificate created during this test + +- name: Delete the ELB created during the test + amazon.aws.elb_classic_lb: + name: "{{ tiny_prefix }}-integration-test-lb" + state: absent + +- name: Delete the certificate created in this test + community.aws.aws_acm: + certificate_arn: '{{ cert_arn }}' + state: absent + # AWS doesn't always cleanup the associations properly + # https://repost.aws/questions/QU63csgGNEQl2M--xCdy-oxw/cant-delete-certificate-because-there-are-dangling-load-balancer-resources + ignore_errors: True + register: delete_result +- assert: + that: + - delete_result is changed + - delete_result is not failed + # AWS doesn't always cleanup the associations properly + # https://repost.aws/questions/QU63csgGNEQl2M--xCdy-oxw/cant-delete-certificate-because-there-are-dangling-load-balancer-resources + ignore_errors: True diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/main.yml new file mode 100644 index 000000000..e8acba10e --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/main.yml @@ -0,0 +1,58 @@ 
+--- +# __Test Info__ +# Create a self-signed cert and upload it to AWS +# http://www.akadia.com/services/ssh_test_certificate.html +# http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/ssl-server-cert.html + +# __Test Outline__ +# +# __elb_classic_lb__ +# create test elb with listeners and certificate +# change AZs +# change listeners +# remove listeners +# remove elb + +- module_defaults: + group/aws: + region: "{{ aws_region }}" + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + collections: + - amazon.aws + - community.aws + - community.crypto + block: + + - include_tasks: missing_params.yml + + - include_tasks: describe_region.yml + - include_tasks: setup_vpc.yml + - include_tasks: setup_instances.yml + - include_tasks: setup_s3.yml + + - include_tasks: basic_public.yml + - include_tasks: basic_internal.yml + - include_tasks: schema_change.yml + + - include_tasks: https_listeners.yml + + - include_tasks: simple_changes.yml + - include_tasks: complex_changes.yml + + always: + + # ============================================================ + # ELB should already be gone, but double-check + - name: remove the test load balancer + elb_classic_lb: + name: "{{ elb_name }}" + state: absent + wait: true + register: result + ignore_errors: true + + - include_tasks: cleanup_s3.yml + - include_tasks: cleanup_instances.yml + - include_tasks: cleanup_vpc.yml diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/missing_params.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/missing_params.yml new file mode 100644 index 000000000..74779e32c --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/missing_params.yml @@ -0,0 +1,203 @@ +--- +# Test behaviour when mandatory params aren't passed +- block: + # ============================================================ + + - name: test with no name + elb_classic_lb: + state: present + register: result + ignore_errors: true + + - name: assert failure when called with no parameters + assert: + that: + - 'result.failed' + - '"missing required arguments" in result.msg' + - '"name" in result.msg' + + - name: test with only name (state missing) + elb_classic_lb: + name: "{{ elb_name }}" + register: result + ignore_errors: true + + - name: assert failure when called with only name + assert: + that: + - 'result.failed' + - '"missing required arguments" in result.msg' + - '"state" in result.msg' + + - elb_classic_lb: + name: "{{ elb_name }}" + state: present + scheme: 'internal' + listeners: + - load_balancer_port: 80 + instance_port: 80 + protocol: http + register: result + ignore_errors: true + + - name: assert failure when neither subnets nor AZs are provided on creation + assert: + that: + - 'result.failed' + - '"subnets" in result.msg' + - '"zones" in result.msg' + + - elb_classic_lb: + name: "{{ elb_name }}" + state: present + scheme: 'internal' + subnets: ['subnet-123456789'] + register: result + ignore_errors: true + + - name: assert failure when listeners not provided on creation + assert: + that: + - 'result.failed' + - '"listeners" in result.msg' + + - elb_classic_lb: + name: "{{ elb_name }}" + state: present + scheme: 'internal' + subnets: ['subnet-123456789'] + listeners: + - load_balancer_port: 80 + instance_port: 80 + protocol: junk + register: result + ignore_errors: true + + - name: assert failure when listeners contains
invalid protocol + assert: + that: + - 'result.failed' + - '"protocol" in result.msg' + - '"junk" in result.msg' + + - elb_classic_lb: + name: "{{ elb_name }}" + state: present + scheme: 'internal' + subnets: ['subnet-123456789'] + listeners: + - load_balancer_port: 80 + instance_port: 80 + protocol: http + instance_protocol: junk + register: result + ignore_errors: true + + - name: assert failure when listeners contains invalid instance_protocol + assert: + that: + - 'result.failed' + - '"protocol" in result.msg' + - '"junk" in result.msg' + + - elb_classic_lb: + name: "{{ elb_name }}" + state: present + scheme: 'internal' + subnets: ['subnet-123456789'] + listeners: + - load_balancer_port: 80 + instance_port: 80 + protocol: http + health_check: + ping_protocol: junk + ping_port: 80 + interval: 5 + timeout: 5 + unhealthy_threshold: 5 + healthy_threshold: 5 + register: result + ignore_errors: true + + - name: assert failure when healthcheck ping_protocol is invalid + assert: + that: + - 'result.failed' + - '"protocol" in result.msg' + - '"junk" in result.msg' + + - elb_classic_lb: + name: "{{ elb_name }}" + state: present + scheme: 'internal' + subnets: ['subnet-123456789'] + listeners: + - load_balancer_port: 80 + instance_port: 80 + protocol: http + health_check: + ping_protocol: http + ping_port: 80 + interval: 5 + timeout: 5 + unhealthy_threshold: 5 + healthy_threshold: 5 + register: result + ignore_errors: true + + - name: assert failure when HTTP healthcheck missing a ping_path + assert: + that: + - 'result.failed' + - '"ping_path" in result.msg' + + - elb_classic_lb: + name: "{{ elb_name }}" + state: present + scheme: 'internal' + subnets: ['subnet-123456789'] + listeners: + - load_balancer_port: 80 + instance_port: 80 + protocol: http + stickiness: + type: application + register: result + ignore_errors: true + + - name: assert failure when app stickiness policy missing cookie name + assert: + that: + - 'result.failed' + - '"cookie" in result.msg' + + - elb_classic_lb: + name: "{{ elb_name }}" + state: present + scheme: 'internal' + subnets: ['subnet-123456789'] + listeners: + - load_balancer_port: 80 + instance_port: 80 + protocol: http + access_logs: + interval: 60 + register: result + ignore_errors: true + + - name: assert failure when access log is missing a bucket + assert: + that: + - 'result.failed' + - '"s3_location" in result.msg' + + always: + + # ============================================================ + - name: remove the test load balancer + elb_classic_lb: + name: "{{ elb_name }}" + state: absent + wait: true + register: result + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/schema_change.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/schema_change.yml new file mode 100644 index 000000000..cc667bef2 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/schema_change.yml @@ -0,0 +1,189 @@ +--- +- block: + # For creation test some basic behaviour + - module_defaults: + elb_classic_lb: + zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}'] + listeners: '{{ default_listeners }}' + wait: true + scheme: 'internet-facing' + # subnets: ['{{ subnet_a }}', '{{ subnet_b }}'] + block: + - name: Create ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + register: result + + - assert: + that: + - result is changed + - result.elb.status == 'created' + - result.elb.scheme == 'internet-facing' + + - 
module_defaults: + elb_classic_lb: + # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}'] + listeners: '{{ default_listeners }}' + wait: true + scheme: 'internal' + subnets: ['{{ subnet_a }}', '{{ subnet_b }}'] + block: + + - name: Change scheme to internal (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + register: result + check_mode: true + + - assert: + that: + - result is changed + + - name: Change scheme to internal + elb_classic_lb: + name: "{{ elb_name }}" + state: present + register: result + + - assert: + that: + - result is changed + - result.elb.scheme == 'internal' + + - name: Change scheme to internal idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + register: result + check_mode: true + + - assert: + that: + - result is not changed + + - name: Change scheme to internal idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + register: result + + - assert: + that: + - result is not changed + - result.elb.scheme == 'internal' + + - name: No scheme specified (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + scheme: '{{ omit }}' + register: result + check_mode: true + + - assert: + that: + - result is not changed + + - name: No scheme specified + elb_classic_lb: + name: "{{ elb_name }}" + state: present + scheme: '{{ omit }}' + register: result + + - assert: + that: + - result is not changed + - result.elb.scheme == 'internal' + + # For creation test some basic behaviour + - module_defaults: + elb_classic_lb: + zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}'] + listeners: '{{ default_listeners }}' + health_check: '{{ default_health_check }}' + wait: true + scheme: 'internet-facing' + # subnets: ['{{ subnet_a }}', '{{ subnet_b }}'] + block: + + - name: Change scheme to internet-facing (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + register: result + check_mode: true + + - assert: + that: + - result is changed + + - name: Change scheme to internet-facing + elb_classic_lb: + name: "{{ elb_name }}" + state: present + register: result + + - assert: + that: + - result is changed + - result.elb.scheme == 'internet-facing' + + - name: Change scheme to internet-facing idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + register: result + check_mode: true + + - assert: + that: + - result is not changed + + - name: Change scheme to internet-facing idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + register: result + + - assert: + that: + - result is not changed + - result.elb.scheme == 'internet-facing' + + - name: No scheme specified (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + scheme: '{{ omit }}' + register: result + check_mode: true + + - assert: + that: + - result is not changed + + - name: No scheme specified + elb_classic_lb: + name: "{{ elb_name }}" + state: present + scheme: '{{ omit }}' + register: result + + - assert: + that: + - result is not changed + - result.elb.scheme == 'internet-facing' + + always: + + # ============================================================ + - name: remove the test load balancer + elb_classic_lb: + name: "{{ elb_name }}" + state: absent + wait: true + register: result + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_instances.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_instances.yml new
file mode 100644 index 000000000..712ba351d --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_instances.yml @@ -0,0 +1,25 @@ +--- +- name: Create instance a + ec2_instance: + name: "ansible-test-{{ tiny_prefix }}-elb-a" + image_id: "{{ ec2_ami_id }}" + vpc_subnet_id: "{{ subnet_a }}" + instance_type: t2.micro + wait: false + security_group: "{{ sg_a }}" + register: ec2_instance_a + +- name: Create instance b + ec2_instance: + name: "ansible-test-{{ tiny_prefix }}-elb-b" + image_id: "{{ ec2_ami_id }}" + vpc_subnet_id: "{{ subnet_b }}" + instance_type: t2.micro + wait: false + security_group: "{{ sg_b }}" + register: ec2_instance_b + +- name: store the Instance IDs + set_fact: + instance_a: "{{ ec2_instance_a.instance_ids[0] }}" + instance_b: "{{ ec2_instance_b.instance_ids[0] }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_s3.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_s3.yml new file mode 100644 index 000000000..60e9c73cc --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_s3.yml @@ -0,0 +1,26 @@ +--- +- name: Create S3 bucket for access logs + vars: + s3_logging_bucket: '{{ s3_logging_bucket_a }}' + s3_bucket: + name: '{{ s3_logging_bucket_a }}' + state: present + policy: "{{ lookup('template','s3_policy.j2') }}" + register: logging_bucket + +- assert: + that: + - logging_bucket is changed + +- name: Create S3 bucket for access logs + vars: + s3_logging_bucket: '{{ s3_logging_bucket_b }}' + s3_bucket: + name: '{{ s3_logging_bucket_b }}' + state: present + policy: "{{ lookup('template','s3_policy.j2') }}" + register: logging_bucket + +- assert: + that: + - logging_bucket is changed diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_vpc.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_vpc.yml new file mode 100644 index 000000000..7e35e1d9e --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_vpc.yml @@ -0,0 +1,103 @@ +--- +# SETUP: vpc, subnet, security group +- name: create a VPC to work in + ec2_vpc_net: + cidr_block: '{{ vpc_cidr }}' + state: present + name: '{{ resource_prefix }}' + resource_tags: + Name: '{{ resource_prefix }}' + register: setup_vpc + +- name: create a subnet + ec2_vpc_subnet: + az: '{{ availability_zone_a }}' + tags: '{{ resource_prefix }}' + vpc_id: '{{ setup_vpc.vpc.id }}' + cidr: '{{ subnet_cidr_1 }}' + state: present + resource_tags: + Name: '{{ resource_prefix }}-a' + register: setup_subnet_1 + +- name: create a subnet + ec2_vpc_subnet: + az: '{{ availability_zone_b }}' + tags: '{{ resource_prefix }}' + vpc_id: '{{ setup_vpc.vpc.id }}' + cidr: '{{ subnet_cidr_2 }}' + state: present + resource_tags: + Name: '{{ resource_prefix }}-b' + register: setup_subnet_2 + +- name: create a subnet + ec2_vpc_subnet: + az: '{{ availability_zone_c }}' + tags: '{{ resource_prefix }}' + vpc_id: '{{ setup_vpc.vpc.id }}' + cidr: '{{ subnet_cidr_3 }}' + state: present + resource_tags: + Name: '{{ resource_prefix }}-c' + register: setup_subnet_3 + +- name: create a subnet + ec2_vpc_subnet: + az: '{{ availability_zone_a }}' + tags: '{{ resource_prefix }}' + vpc_id: '{{ setup_vpc.vpc.id }}' + cidr: '{{ subnet_cidr_4 }}' + state: present + resource_tags: + Name: '{{ resource_prefix }}-a2' + register: setup_subnet_4 + +- name: create a security group + 
ec2_group: + name: '{{ resource_prefix }}-a' + description: 'created by Ansible integration tests' + state: present + vpc_id: '{{ setup_vpc.vpc.id }}' + rules: + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: '{{ vpc_cidr }}' + register: setup_sg_1 + +- name: create a security group + ec2_group: + name: '{{ resource_prefix }}-b' + description: 'created by Ansible integration tests' + state: present + vpc_id: '{{ setup_vpc.vpc.id }}' + rules: + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: '{{ vpc_cidr }}' + register: setup_sg_2 + +- name: create a security group + ec2_group: + name: '{{ resource_prefix }}-c' + description: 'created by Ansible integration tests' + state: present + vpc_id: '{{ setup_vpc.vpc.id }}' + rules: + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: '{{ vpc_cidr }}' + register: setup_sg_3 + +- name: store the IDs + set_fact: + subnet_a: "{{ setup_subnet_1.subnet.id }}" + subnet_b: "{{ setup_subnet_2.subnet.id }}" + subnet_c: "{{ setup_subnet_3.subnet.id }}" + subnet_a2: "{{ setup_subnet_4.subnet.id }}" + sg_a: "{{ setup_sg_1.group_id }}" + sg_b: "{{ setup_sg_2.group_id }}" + sg_c: "{{ setup_sg_3.group_id }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_changes.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_changes.yml new file mode 100644 index 000000000..6644cf983 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_changes.yml @@ -0,0 +1,79 @@ +--- +- block: + ## Setup an ELB for testing changing one thing at a time + - name: Create ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}'] + listeners: '{{ default_listeners }}' + health_check: '{{ default_health_check }}' + wait: true + scheme: 'internal' + subnets: ['{{ subnet_a }}', '{{ subnet_b }}'] + security_group_ids: ['{{ sg_a }}'] + tags: '{{ default_tags }}' + cross_az_load_balancing: True + idle_timeout: '{{ default_idle_timeout }}' + connection_draining_timeout: '{{ default_drain_timeout }}' + access_logs: + interval: '{{ default_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + enabled: true + register: result + + - name: Verify that simple parameters were set + assert: + that: + - result is changed + - result.elb.status == "created" + - availability_zone_a in result.elb.zones + - availability_zone_b in result.elb.zones + - subnet_a in result.elb.subnets + - subnet_b in result.elb.subnets + - default_listener_tuples[0] in result.elb.listeners + - default_listener_tuples[1] in result.elb.listeners + - sg_a in result.elb.security_group_ids + - sg_b not in result.elb.security_group_ids + - sg_c not in result.elb.security_group_ids + - result.elb.health_check.healthy_threshold == default_health_check['healthy_threshold'] + - result.elb.health_check.interval == default_health_check['interval'] + - result.elb.health_check.target == default_health_check_target + - result.elb.health_check.timeout == default_health_check['response_timeout'] + - result.elb.health_check.unhealthy_threshold == default_health_check['unhealthy_threshold'] + - result.elb.tags == default_tags + - result.elb.cross_az_load_balancing == 'yes' + - result.elb.idle_timeout == default_idle_timeout + - result.elb.connection_draining_timeout == default_drain_timeout + - result.elb.proxy_policy == None + - 
result.load_balancer.load_balancer_attributes.access_log.emit_interval == default_logging_interval + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_a + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == default_logging_prefix + - result.load_balancer.load_balancer_attributes.access_log.enabled == True + + ## AZ / Subnet changes are tested within the public/internal tests + ## because they depend on the scheme of the LB + + - include_tasks: 'simple_securitygroups.yml' + - include_tasks: 'simple_listeners.yml' + - include_tasks: 'simple_healthcheck.yml' + - include_tasks: 'simple_tags.yml' + - include_tasks: 'simple_cross_az.yml' + - include_tasks: 'simple_idle_timeout.yml' + - include_tasks: 'simple_draining_timeout.yml' + - include_tasks: 'simple_proxy_policy.yml' + - include_tasks: 'simple_stickiness.yml' + - include_tasks: 'simple_instances.yml' + - include_tasks: 'simple_logging.yml' + + always: + + # ============================================================ + - name: remove the test load balancer + elb_classic_lb: + name: "{{ elb_name }}" + state: absent + wait: true + register: result + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_cross_az.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_cross_az.yml new file mode 100644 index 000000000..104b0afb5 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_cross_az.yml @@ -0,0 +1,100 @@ +--- +# =========================================================== + +- name: disable cross-az balancing on ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + cross_az_load_balancing: False + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: disable cross-az balancing on ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + cross_az_load_balancing: False + register: result + +- assert: + that: + - result is changed + - result.elb.cross_az_load_balancing == 'no' + +- name: disable cross-az balancing on ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + cross_az_load_balancing: False + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: disable cross-az balancing on ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + cross_az_load_balancing: False + register: result + +- assert: + that: + - result is not changed + - result.elb.cross_az_load_balancing == 'no' + +# =========================================================== + +- name: re-enable cross-az balancing on ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + cross_az_load_balancing: True + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: re-enable cross-az balancing on ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + cross_az_load_balancing: True + register: result + +- assert: + that: + - result is changed + - result.elb.cross_az_load_balancing == 'yes' + +- name: re-enable cross-az balancing on ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + cross_az_load_balancing: True + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: re-enable cross-az balancing on ELB - idempotency + elb_classic_lb: + name: "{{
elb_name }}" + state: present + cross_az_load_balancing: True + register: result + +- assert: + that: + - result is not changed + - result.elb.cross_az_load_balancing == 'yes' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_draining_timeout.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_draining_timeout.yml new file mode 100644 index 000000000..825ce2185 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_draining_timeout.yml @@ -0,0 +1,148 @@ +--- +# =========================================================== + +- name: disable connection draining on ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + connection_draining_timeout: 0 + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: disable connection draining on ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + connection_draining_timeout: 0 + register: result + +- assert: + that: + - result is changed + +- name: disable connection draining on ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + connection_draining_timeout: 0 + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: disable connection draining on ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + connection_draining_timeout: 0 + register: result + +- assert: + that: + - result is not changed + +# =========================================================== + +- name: re-enable connection draining on ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + connection_draining_timeout: '{{ default_drain_timeout }}' + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: re-enable connection draining on ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + connection_draining_timeout: '{{ default_drain_timeout }}' + register: result + +- assert: + that: + - result is changed + - result.elb.connection_draining_timeout == default_drain_timeout + +- name: re-enable connection draining on ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + connection_draining_timeout: '{{ default_drain_timeout }}' + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: re-enable connection draining on ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + connection_draining_timeout: '{{ default_drain_timeout }}' + register: result + +- assert: + that: + - result is not changed + - result.elb.connection_draining_timeout == default_drain_timeout + +# =========================================================== + +- name: update connection draining timout on ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + connection_draining_timeout: '{{ updated_drain_timeout }}' + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: update connection draining timout on ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + connection_draining_timeout: '{{ updated_drain_timeout }}' + register: result + +- assert: + that: + - result is changed + - result.elb.connection_draining_timeout == updated_drain_timeout + +- name: update connection draining timout on ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + 
state: present + connection_draining_timeout: '{{ updated_drain_timeout }}' + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: update connection draining timout on ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + connection_draining_timeout: '{{ updated_drain_timeout }}' + register: result + +- assert: + that: + - result is not changed + - result.elb.connection_draining_timeout == updated_drain_timeout diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_healthcheck.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_healthcheck.yml new file mode 100644 index 000000000..179e8cb80 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_healthcheck.yml @@ -0,0 +1,116 @@ +--- +# Note: AWS doesn't support disabling health checks +# ============================================================== +- name: Non-HTTP Healthcheck (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + health_check: '{{ nonhttp_health_check }}' + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Non-HTTP Healthcheck + elb_classic_lb: + name: "{{ elb_name }}" + state: present + health_check: '{{ nonhttp_health_check }}' + register: result + +- assert: + that: + - result is changed + - result.elb.health_check.healthy_threshold == nonhttp_health_check['healthy_threshold'] + - result.elb.health_check.interval == nonhttp_health_check['interval'] + - result.elb.health_check.target == nonhttp_health_check_target + - result.elb.health_check.timeout == nonhttp_health_check['response_timeout'] + - result.elb.health_check.unhealthy_threshold == nonhttp_health_check['unhealthy_threshold'] + +- name: Non-HTTP Healthcheck - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + health_check: '{{ nonhttp_health_check }}' + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Non-HTTP Healthcheck - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + health_check: '{{ nonhttp_health_check }}' + register: result + +- assert: + that: + - result is not changed + - result.elb.health_check.healthy_threshold == nonhttp_health_check['healthy_threshold'] + - result.elb.health_check.interval == nonhttp_health_check['interval'] + - result.elb.health_check.target == nonhttp_health_check_target + - result.elb.health_check.timeout == nonhttp_health_check['response_timeout'] + - result.elb.health_check.unhealthy_threshold == nonhttp_health_check['unhealthy_threshold'] + +# ============================================================== + +- name: Update Healthcheck (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + health_check: '{{ updated_health_check }}' + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Update Healthcheck + elb_classic_lb: + name: "{{ elb_name }}" + state: present + health_check: '{{ updated_health_check }}' + register: result + +- assert: + that: + - result is changed + - result.elb.health_check.healthy_threshold == updated_health_check['healthy_threshold'] + - result.elb.health_check.interval == updated_health_check['interval'] + - result.elb.health_check.target == updated_health_check_target + - result.elb.health_check.timeout == updated_health_check['response_timeout'] + - 
result.elb.health_check.unhealthy_threshold == updated_health_check['unhealthy_threshold'] + +- name: Update Healthcheck - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + health_check: '{{ updated_health_check }}' + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Update Healthcheck - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + health_check: '{{ updated_health_check }}' + register: result + +- assert: + that: + - result is not changed + - result.elb.health_check.healthy_threshold == updated_health_check['healthy_threshold'] + - result.elb.health_check.interval == updated_health_check['interval'] + - result.elb.health_check.target == updated_health_check_target + - result.elb.health_check.timeout == updated_health_check['response_timeout'] + - result.elb.health_check.unhealthy_threshold == updated_health_check['unhealthy_threshold'] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_idle_timeout.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_idle_timeout.yml new file mode 100644 index 000000000..e89dd25f1 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_idle_timeout.yml @@ -0,0 +1,50 @@ +--- +# =========================================================== + +- name: update idle connection timeout on ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + idle_timeout: "{{ updated_idle_timeout }}" + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: update idle connection timeout on ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + idle_timeout: "{{ updated_idle_timeout }}" + register: result + +- assert: + that: + - result is changed + - result.elb.idle_timeout == updated_idle_timeout + +- name: update idle connection timeout on ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + idle_timeout: "{{ updated_idle_timeout }}" + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: update idle connection timeout on ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + idle_timeout: "{{ updated_idle_timeout }}" + register: result + +- assert: + that: + - result is not changed + - result.elb.idle_timeout == updated_idle_timeout diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_instances.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_instances.yml new file mode 100644 index 000000000..8c27bc27f --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_instances.yml @@ -0,0 +1,415 @@ +--- +- name: Add SSH listener and health check to ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ ssh_listeners }}" + health_check: "{{ ssh_health_check }}" + purge_listeners: false + register: result + +- assert: + that: + - result is changed + - ssh_listener_tuples[0] in result.elb.listeners + +# Make sure that the instances are 'OK' + +- name: Wait for instance a + ec2_instance: + name: "ansible-test-{{ tiny_prefix }}-elb-a" + instance_ids: + - "{{ instance_a }}" + vpc_subnet_id: "{{ subnet_a }}" + instance_type: t2.micro + wait: true + security_group: "{{ sg_a }}" + register: ec2_instance_a + +- name: Wait for 
instance b + ec2_instance: + name: "ansible-test-{{ tiny_prefix }}-elb-b" + instance_ids: + - "{{ instance_b }}" + vpc_subnet_id: "{{ subnet_b }}" + instance_type: t2.micro + wait: true + security_group: "{{ sg_b }}" + register: ec2_instance_b + +- assert: + that: + - ec2_instance_a is successful + - ec2_instance_b is successful + +# ============================================================== + +- name: Add an instance to the LB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_a }}' + wait: true + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Add an instance to the LB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_a }}' + wait: true + register: result + +- assert: + that: + - result is changed + - instance_a in result.elb.instances + - instance_b not in result.elb.instances + +- name: Add an instance to the LB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_a }}' + wait: true + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Add an instance to the LB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_a }}' + wait: true + register: result + +- assert: + that: + - result is not changed + - instance_a in result.elb.instances + - instance_b not in result.elb.instances + +# ============================================================== + +- name: Add second instance to the LB without purge (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_b }}' + purge_instance_ids: false + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Add second instance to the LB without purge + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_b }}' + purge_instance_ids: false + register: result + +- assert: + that: + - result is changed + - instance_a in result.elb.instances + - instance_b in result.elb.instances + +- name: Add second instance to the LB without purge - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_b }}' + purge_instance_ids: false + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Add second instance to the LB without purge - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_b }}' + purge_instance_ids: false + register: result + +- assert: + that: + - result is not changed + - instance_a in result.elb.instances + - instance_b in result.elb.instances + +# ============================================================== + +- name: Both instances with purge - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_a }}' + - '{{ instance_b }}' + purge_instance_ids: true + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Both instances with purge - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_a }}' + - '{{ instance_b }}' + purge_instance_ids: true + register: result + +- assert: + that: + - result is not changed + - instance_a in result.elb.instances + - instance_b in result.elb.instances + +- name: Both instances with purge 
- different order - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_b }}' + - '{{ instance_a }}' + purge_instance_ids: true + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Both instances with purge - different order - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_b }}' + - '{{ instance_a }}' + purge_instance_ids: true + register: result + +- assert: + that: + - result is not changed + - instance_a in result.elb.instances + - instance_b in result.elb.instances + +# ============================================================== + +- name: Remove first instance from LB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_b }}' + purge_instance_ids: true + wait: true + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Remove first instance from LB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_b }}' + purge_instance_ids: true + wait: true + register: result + +- assert: + that: + - result is changed + - instance_a not in result.elb.instances + - instance_b in result.elb.instances + +- name: Remove first instance from LB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_b }}' + purge_instance_ids: true + wait: true + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Remove first instance from LB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_b }}' + purge_instance_ids: true + wait: true + register: result + +- assert: + that: + - result is not changed + - instance_a not in result.elb.instances + - instance_b in result.elb.instances + +# ============================================================== + +- name: Switch instances in LB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_a }}' + purge_instance_ids: true + wait: true + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Switch instances in LB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_a }}' + purge_instance_ids: true + wait: true + register: result + +- assert: + that: + - result is changed + - instance_a in result.elb.instances + - instance_b not in result.elb.instances + +- name: Switch instances in LB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_a }}' + purge_instance_ids: true + wait: true + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Switch instances in LB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_a }}' + purge_instance_ids: true + wait: true + register: result + +- assert: + that: + - result is not changed + - instance_a in result.elb.instances + - instance_b not in result.elb.instances + +# ============================================================== + +- name: Switch instances in LB - no wait (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_b }}' + purge_instance_ids: true + register: result + check_mode: true + +- assert: + that: + - result is changed + +- 
name: Switch instances in LB - no wait + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_b }}' + purge_instance_ids: true + register: result + +- assert: + that: + - result is changed + - instance_a not in result.elb.instances + - instance_b in result.elb.instances + +- name: Switch instances in LB - no wait - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_b }}' + purge_instance_ids: true + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Switch instances in LB - no wait - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_b }}' + purge_instance_ids: true + register: result + +- assert: + that: + - result is not changed + - instance_a not in result.elb.instances + - instance_b in result.elb.instances diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_listeners.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_listeners.yml new file mode 100644 index 000000000..8edb96543 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_listeners.yml @@ -0,0 +1,196 @@ +--- +# =========================================================== +# remove a listener (no purge) +# remove a listener (purge) +# add a listener +# update a listener (same port) +# =========================================================== +# Test passing only one of the listeners +# Without purge +- name: Test partial Listener to ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ purged_listeners }}" + purge_listeners: false + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Test partial Listener to ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ purged_listeners }}" + purge_listeners: false + register: result + +- assert: + that: + - result is not changed + - default_listener_tuples[0] in result.elb.listeners + - default_listener_tuples[1] in result.elb.listeners + +# With purge +- name: Test partial Listener with purge to ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ purged_listeners }}" + purge_listeners: true + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Test partial Listener with purge to ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ purged_listeners }}" + purge_listeners: true + register: result + +- assert: + that: + - result is changed + - purged_listener_tuples[0] in result.elb.listeners + +- name: Test partial Listener with purge to ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ purged_listeners }}" + purge_listeners: true + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Test partial Listener with purge to ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ purged_listeners }}" + purge_listeners: true + register: result + +- assert: + that: + - result is not changed + - purged_listener_tuples[0] in result.elb.listeners + +# =========================================================== +# Test re-adding a listener +- name: Test re-adding listener to ELB (check_mode) + elb_classic_lb: + 
name: "{{ elb_name }}" + state: present + listeners: "{{ default_listeners }}" + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Test re-adding listener to ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ default_listeners }}" + register: result + +- assert: + that: + - result is changed + - default_listener_tuples[0] in result.elb.listeners + - default_listener_tuples[1] in result.elb.listeners + +- name: Test re-adding listener to ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ default_listeners }}" + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Test re-adding listener to ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ default_listeners }}" + register: result + +- assert: + that: + - result is not changed + - default_listener_tuples[0] in result.elb.listeners + - default_listener_tuples[1] in result.elb.listeners + +# =========================================================== +# Test passing an updated listener +- name: Test updated listener to ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ updated_listeners }}" + purge_listeners: false + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Test updated listener to ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ updated_listeners }}" + purge_listeners: false + register: result + +- assert: + that: + - result is changed + - updated_listener_tuples[0] in result.elb.listeners + - updated_listener_tuples[1] in result.elb.listeners + +- name: Test updated listener to ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ updated_listeners }}" + purge_listeners: false + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Test updated listener to ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ updated_listeners }}" + purge_listeners: false + register: result + +- assert: + that: + - result is not changed + - updated_listener_tuples[0] in result.elb.listeners + - updated_listener_tuples[1] in result.elb.listeners diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_logging.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_logging.yml new file mode 100644 index 000000000..5e489eaf0 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_logging.yml @@ -0,0 +1,587 @@ +--- +# =========================================================== + +- name: S3 logging for ELB - implied enabled (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + interval: '{{ default_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: S3 logging for ELB - implied enabled + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + interval: '{{ default_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + +- assert: + that: + - result is not changed + - 
result.load_balancer.load_balancer_attributes.access_log.emit_interval == default_logging_interval + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_a + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == default_logging_prefix + - result.load_balancer.load_balancer_attributes.access_log.enabled == True + +# =========================================================== + +- name: Disable S3 logging for ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: false + interval: '{{ default_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Disable S3 logging for ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: false + interval: '{{ default_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + +- assert: + that: + - result is changed + - result.load_balancer.load_balancer_attributes.access_log.enabled == False + +- name: Disable S3 logging for ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: false + interval: '{{ default_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Disable S3 logging for ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: false + interval: '{{ default_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + +- assert: + that: + - result is not changed + - result.load_balancer.load_balancer_attributes.access_log.enabled == False + +# =========================================================== + +- name: Disable S3 logging for ELB - ignore extras (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: false + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_b }}' + s3_prefix: '{{ updated_logging_prefix }}' + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Disable S3 logging for ELB - ignore extras + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: false + interval: '{{ default_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + +- assert: + that: + - result is not changed + - result.load_balancer.load_balancer_attributes.access_log.enabled == False + +- name: Disable S3 logging for ELB - no extras (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: false + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Disable S3 logging for ELB - no extras + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: false + register: result + +- assert: + that: + - result is not changed + - result.load_balancer.load_balancer_attributes.access_log.enabled == False + +# =========================================================== + +- name: Re-enable S3 logging for ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: 
present + access_logs: + enabled: true + interval: '{{ default_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Re-enable S3 logging for ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ default_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + +- assert: + that: + - result is changed + - result.load_balancer.load_balancer_attributes.access_log.emit_interval == default_logging_interval + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_a + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == default_logging_prefix + - result.load_balancer.load_balancer_attributes.access_log.enabled == True + +- name: Re-enable S3 logging for ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ default_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Re-enable S3 logging for ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ default_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + +- assert: + that: + - result is not changed + - result.load_balancer.load_balancer_attributes.access_log.emit_interval == default_logging_interval + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_a + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == default_logging_prefix + - result.load_balancer.load_balancer_attributes.access_log.enabled == True + +# =========================================================== + +- name: Update ELB Log delivery interval for ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Update ELB Log delivery interval for ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + +- assert: + that: + - result is changed + - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_a + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == default_logging_prefix + - result.load_balancer.load_balancer_attributes.access_log.enabled == True + +- name: Update ELB Log delivery interval for ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + check_mode: true 
+ +- assert: + that: + - result is not changed + +- name: Update ELB Log delivery interval for ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + +- assert: + that: + - result is not changed + - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_a + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == default_logging_prefix + - result.load_balancer.load_balancer_attributes.access_log.enabled == True + +# =========================================================== + +- name: Update S3 Logging Location for ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_b }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Update S3 Logging Location for ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_b }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + +- assert: + that: + - result is changed + - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_b + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == default_logging_prefix + - result.load_balancer.load_balancer_attributes.access_log.enabled == True + +- name: Update S3 Logging Location for ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_b }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Update S3 Logging Location for ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_b }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + +- assert: + that: + - result is not changed + - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_b + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == default_logging_prefix + - result.load_balancer.load_balancer_attributes.access_log.enabled == True + +# =========================================================== + +- name: Update S3 Logging Prefix for ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_b }}' + s3_prefix: '{{ updated_logging_prefix }}' + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Update S3 Logging Prefix for ELB + elb_classic_lb: + name: "{{ elb_name }}" + 
state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_b }}' + s3_prefix: '{{ updated_logging_prefix }}' + register: result + +- assert: + that: + - result is changed + - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_b + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == updated_logging_prefix + - result.load_balancer.load_balancer_attributes.access_log.enabled == True + +- name: Update S3 Logging Prefix for ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_b }}' + s3_prefix: '{{ updated_logging_prefix }}' + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Update S3 Logging Prefix for ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_b }}' + s3_prefix: '{{ updated_logging_prefix }}' + register: result + +- assert: + that: + - result is not changed + - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_b + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == updated_logging_prefix + - result.load_balancer.load_balancer_attributes.access_log.enabled == True + +# =========================================================== + +- name: Empty S3 Logging Prefix for ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_b }}' + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Empty S3 Logging Prefix for ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_b }}' + register: result + +- assert: + that: + - result is changed + - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_b + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == '' + - result.load_balancer.load_balancer_attributes.access_log.enabled == True + +- name: Empty S3 Logging Prefix for ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_b }}' + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Empty S3 Logging Prefix for ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_b }}' + register: result + +- assert: + that: + - result is not changed + - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval + - 
result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_b + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == '' + - result.load_balancer.load_balancer_attributes.access_log.enabled == True + +- name: Empty string S3 Logging Prefix for ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval }}' + s3_prefix: '' + s3_location: '{{ s3_logging_bucket_b }}' + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Empty string S3 Logging Prefix for ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval }}' + s3_prefix: '' + s3_location: '{{ s3_logging_bucket_b }}' + register: result + +- assert: + that: + - result is not changed + - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_b + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == '' + - result.load_balancer.load_balancer_attributes.access_log.enabled == True + +# =========================================================== + +- name: Update S3 Logging interval for ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + s3_location: '{{ s3_logging_bucket_b }}' + s3_prefix: '' + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Update S3 Logging interval for ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + s3_location: '{{ s3_logging_bucket_b }}' + s3_prefix: '' + register: result + +- assert: + that: + - result is not changed + - result.load_balancer.load_balancer_attributes.access_log.emit_interval == 60 + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_b + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == '' + - result.load_balancer.load_balancer_attributes.access_log.enabled == True diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_proxy_policy.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_proxy_policy.yml new file mode 100644 index 000000000..50c5ce519 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_proxy_policy.yml @@ -0,0 +1,141 @@ +--- +# =========================================================== +- name: Enable proxy protocol on a listener (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ proxied_listener }}" + purge_listeners: false + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Enable proxy protocol on a listener + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ proxied_listener }}" + purge_listeners: false + register: result + +- assert: + that: + - result is changed + - result.elb.proxy_policy == "ProxyProtocol-policy" + - result.load_balancer.backend_server_descriptions | length == 1 + - result.load_balancer.backend_server_descriptions[0].policy_names == ["ProxyProtocol-policy"] + +- name: Enable proxy protocol on a listener - idempotency (check_mode) +
elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ proxied_listener }}" + purge_listeners: false + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Enable proxy protocol on a listener - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ proxied_listener }}" + purge_listeners: false + register: result + +- assert: + that: + - result is not changed + - result.elb.proxy_policy == "ProxyProtocol-policy" + - result.load_balancer.backend_server_descriptions | length == 1 + - result.load_balancer.backend_server_descriptions[0].policy_names == ["ProxyProtocol-policy"] + +# =========================================================== + +- name: Disable proxy protocol on a listener (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ unproxied_listener }}" + purge_listeners: false + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Disable proxy protocol on a listener + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ unproxied_listener }}" + purge_listeners: false + register: result + +- assert: + that: + - result is changed + - result.load_balancer.backend_server_descriptions | length == 0 + +- name: Disable proxy protocol on a listener - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ unproxied_listener }}" + purge_listeners: false + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Disable proxy protocol on a listener - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ unproxied_listener }}" + purge_listeners: false + register: result + +- assert: + that: + - result is not changed + - result.load_balancer.backend_server_descriptions | length == 0 + +# =========================================================== + +- name: Re-enable proxy protocol on a listener (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ proxied_listener }}" + purge_listeners: false + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Re-enable proxy protocol on a listener + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ proxied_listener }}" + purge_listeners: false + register: result + +- assert: + that: + - result is changed + - result.elb.proxy_policy == "ProxyProtocol-policy" + - result.load_balancer.backend_server_descriptions | length == 1 + - result.load_balancer.backend_server_descriptions[0].policy_names == ["ProxyProtocol-policy"] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_securitygroups.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_securitygroups.yml new file mode 100644 index 000000000..21a56d792 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_securitygroups.yml @@ -0,0 +1,106 @@ +--- +- name: Assign Security Groups to ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + security_group_ids: ['{{ sg_b }}'] + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Assign Security Groups to ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + security_group_ids: ['{{ sg_b }}'] + register: result + +- assert: + that: + - result is changed + - sg_a 
not in result.elb.security_group_ids + - sg_b in result.elb.security_group_ids + - sg_c not in result.elb.security_group_ids + +- name: Assign Security Groups to ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + security_group_ids: ['{{ sg_b }}'] + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Assign Security Groups to ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + security_group_ids: ['{{ sg_b }}'] + register: result + +- assert: + that: + - result is not changed + - sg_a not in result.elb.security_group_ids + - sg_b in result.elb.security_group_ids + - sg_c not in result.elb.security_group_ids + +#===================================================================== + +- name: Assign Security Groups to ELB by name (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + security_group_names: ['{{ resource_prefix }}-a', '{{ resource_prefix }}-c'] + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Assign Security Groups to ELB by name + elb_classic_lb: + name: "{{ elb_name }}" + state: present + security_group_names: ['{{ resource_prefix }}-a', '{{ resource_prefix }}-c'] + register: result + +- assert: + that: + - result is changed + - sg_a in result.elb.security_group_ids + - sg_b not in result.elb.security_group_ids + - sg_c in result.elb.security_group_ids + +- name: Assign Security Groups to ELB by name - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + security_group_names: ['{{ resource_prefix }}-a', '{{ resource_prefix }}-c'] + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Assign Security Groups to ELB by name - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + security_group_names: ['{{ resource_prefix }}-a', '{{ resource_prefix }}-c'] + register: result + +- assert: + that: + - result is not changed + - sg_a in result.elb.security_group_ids + - sg_b not in result.elb.security_group_ids + - sg_c in result.elb.security_group_ids diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_stickiness.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_stickiness.yml new file mode 100644 index 000000000..9c0f925ec --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_stickiness.yml @@ -0,0 +1,390 @@ +--- +# ============================================================== +- name: App Cookie Stickiness (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ app_stickiness }}" + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: App Cookie Stickiness + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ app_stickiness }}" + register: result + +- assert: + that: + - result is changed + +- name: App Cookie Stickiness - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ app_stickiness }}" + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: App Cookie Stickiness - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ app_stickiness }}" + register: result + +- assert: + that: + - result is not changed + +# 
============================================================== +- name: Update App Cookie Stickiness (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ updated_app_stickiness }}" + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Update App Cookie Stickiness + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ updated_app_stickiness }}" + register: result + +- assert: + that: + - result is changed + +- name: Update App Cookie Stickiness - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ updated_app_stickiness }}" + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Update App Cookie Stickiness - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ updated_app_stickiness }}" + register: result + +- assert: + that: + - result is not changed + + +# ============================================================== + +- name: Disable Stickiness (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: + enabled: false + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Disable Stickiness + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: + enabled: false + register: result + +- assert: + that: + - result is changed + +- name: Disable Stickiness - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: + enabled: false + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Disable Stickiness - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: + enabled: false + register: result + +- assert: + that: + - result is not changed + +# ============================================================== + +- name: Re-enable App Stickiness (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ app_stickiness }}" + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Re-enable App Stickiness + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ app_stickiness }}" + register: result + +- assert: + that: + - result is changed + +- name: Re-enable App Stickiness (check_mode) - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ app_stickiness }}" + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Re-enable App Stickiness - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ app_stickiness }}" + register: result + +- assert: + that: + - result is not changed + +# ============================================================== +- name: LB Stickiness (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ lb_stickiness }}" + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: LB Stickiness + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ lb_stickiness }}" + register: result + +- assert: + that: + - result is changed + +- name: LB Stickiness - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ lb_stickiness }}" + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: LB 
Stickiness - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ lb_stickiness }}" + register: result + +- assert: + that: + - result is not changed + +# ============================================================== +- name: Update LB Stickiness (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ updated_lb_stickiness }}" + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Update LB Stickiness + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ updated_lb_stickiness }}" + register: result + +- assert: + that: + - result is changed + +- name: Update LB Stickiness - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ updated_lb_stickiness }}" + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Update LB Stickiness - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ updated_lb_stickiness }}" + register: result + +- assert: + that: + - result is not changed + + +# ============================================================== + +- name: Disable Stickiness (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: + enabled: false + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Disable Stickiness + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: + enabled: false + register: result + +- assert: + that: + - result is changed + +- name: Disable Stickiness - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: + enabled: false + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Disable Stickiness - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: + enabled: false + register: result + +- assert: + that: + - result is not changed + +# ============================================================== + +- name: Re-enable LB Stickiness (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ updated_lb_stickiness }}" + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Re-enable LB Stickiness + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ updated_lb_stickiness }}" + register: result + +- assert: + that: + - result is changed + +- name: Re-enable LB Stickiness - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ updated_lb_stickiness }}" + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Re-enable LB Stickiness - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ updated_lb_stickiness }}" + register: result + +- assert: + that: + - result is not changed diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_tags.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_tags.yml new file mode 100644 index 000000000..b78eb1c58 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_tags.yml @@ -0,0 +1,141 @@ +--- +# =========================================================== +# partial tags (no purge) +# update tags (no purge) +# update tags (with purge) +# 
=========================================================== +- name: Pass partial tags to ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + tags: "{{ partial_tags }}" + purge_tags: false + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Pass partial tags to ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + tags: "{{ partial_tags }}" + purge_tags: false + register: result + +- assert: + that: + - result is not changed + - result.elb.tags == default_tags + +# =========================================================== + +- name: Add tags to ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + tags: "{{ updated_tags }}" + purge_tags: false + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Add tags to ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + tags: "{{ updated_tags }}" + purge_tags: false + register: result + +- assert: + that: + - result is changed + - result.elb.tags == ( default_tags | combine(updated_tags) ) + +- name: Add tags to ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + tags: "{{ updated_tags }}" + purge_tags: false + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Add tags to ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + tags: "{{ updated_tags }}" + purge_tags: false + register: result + +- assert: + that: + - result is not changed + - result.elb.tags == ( default_tags | combine(updated_tags) ) + +# =========================================================== + +- name: Purge tags from ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + tags: "{{ updated_tags }}" + purge_tags: true + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Purge tags from ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + tags: "{{ updated_tags }}" + purge_tags: true + register: result + +- assert: + that: + - result is changed + - result.elb.tags == updated_tags + +- name: Purge tags from ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + tags: "{{ updated_tags }}" + purge_tags: true + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Purge tags from ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + tags: "{{ updated_tags }}" + purge_tags: true + register: result + +- assert: + that: + - result is not changed + - result.elb.tags == updated_tags + +# =========================================================== diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/templates/s3_policy.j2 b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/templates/s3_policy.j2 new file mode 100644 index 000000000..ee69dae33 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/templates/s3_policy.j2 @@ -0,0 +1,15 @@ +{ + "Version": "2012-10-17", + "Id": "ELB-Logging-Policy", + "Statement": [ + { + "Sid": "ELB-Logging", + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam::{{ access_log_account_id }}:root" + }, + "Action": "s3:PutObject", + "Resource": "arn:aws:s3:::{{ s3_logging_bucket }}/*" + } + ] +} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/vars/main.yml 
b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/vars/main.yml new file mode 100644 index 000000000..79194af1e --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for test_ec2_elb_lb diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/aliases b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/aliases new file mode 100644 index 000000000..9b3bde40b --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/aliases @@ -0,0 +1,8 @@ +# reason: missing-policy +# It's not possible to control what permissions are granted to a policy. +# This makes securely testing iam_policy very difficult +unsupported + +cloud/aws + +iam_policy_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/defaults/main.yml new file mode 100644 index 000000000..caf40aebd --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/defaults/main.yml @@ -0,0 +1,5 @@ +iam_name: '{{resource_prefix}}' +iam_policy_name_a: '{{resource_prefix}}-document-a' +iam_policy_name_b: '{{resource_prefix}}-document-b' +iam_policy_name_c: '{{resource_prefix}}-json-a' +iam_policy_name_d: '{{resource_prefix}}-json-b' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/files/no_access.json b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/files/no_access.json new file mode 100644 index 000000000..a2f299757 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/files/no_access.json @@ -0,0 +1,10 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Deny", + "Action": "*", + "Resource": "*" + } + ] +} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/files/no_access_with_id.json b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/files/no_access_with_id.json new file mode 100644 index 000000000..9d40dd54a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/files/no_access_with_id.json @@ -0,0 +1,11 @@ +{ + "Id": "MyId", + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Deny", + "Action": "*", + "Resource": "*" + } + ] +} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/files/no_access_with_second_id.json b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/files/no_access_with_second_id.json new file mode 100644 index 000000000..0efbc31d4 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/files/no_access_with_second_id.json @@ -0,0 +1,11 @@ +{ + "Id": "MyOtherId", + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Deny", + "Action": "*", + "Resource": "*" + } + ] +} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/files/no_trust.json b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/files/no_trust.json new file mode 100644 index 000000000..c36616187 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/files/no_trust.json @@ -0,0 +1,10 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Deny", + "Principal": {"AWS": "*"}, + "Action": "sts:AssumeRole" + } + ] +} diff --git 
a/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/meta/main.yml new file mode 100644 index 000000000..32cf5dda7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/tasks/main.yml new file mode 100644 index 000000000..0894490af --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/tasks/main.yml @@ -0,0 +1,70 @@ +- name: Run integration tests for IAM (inline) Policy management + module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + # ============================================================ + - name: Create user for tests + iam_user: + state: present + name: '{{ iam_name }}' + register: result + - name: Ensure user was created + assert: + that: + - result is changed + + - name: Create role for tests + iam_role: + state: present + name: '{{ iam_name }}' + assume_role_policy_document: "{{ lookup('file','no_trust.json') }}" + register: result + - name: Ensure role was created + assert: + that: + - result is changed + + - name: Create group for tests + iam_group: + state: present + name: '{{ iam_name }}' + register: result + - name: Ensure group was created + assert: + that: + - result is changed + + # ============================================================ + + - name: Run tests for each type of object + include_tasks: object.yml + loop_control: + loop_var: iam_type + with_items: + - user + - group + - role + + # ============================================================ + + always: + # ============================================================ + - name: Remove user + iam_user: + state: absent + name: '{{ iam_name }}' + ignore_errors: yes + - name: Remove role + iam_role: + state: absent + name: '{{ iam_name }}' + ignore_errors: yes + - name: Remove group + iam_group: + state: absent + name: '{{ iam_name }}' + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/tasks/object.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/tasks/object.yml new file mode 100644 index 000000000..75eb5a167 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/tasks/object.yml @@ -0,0 +1,1169 @@ +- name: Run integration tests for IAM (inline) Policy management on {{ iam_type }}s + vars: + iam_object_key: '{{ iam_type }}_name' + block: + # ============================================================ + - name: Fetch policies from {{ iam_type }} before making changes + iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + register: iam_policy_info + - name: Assert empty policy list + assert: + that: + - iam_policy_info is succeeded + - iam_policy_info.policies | length == 0 + - iam_policy_info.all_policy_names | length == 0 + - iam_policy_info.policy_names | length == 0 + + - name: Fetch policies from non-existent {{ iam_type }} + iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}-junk' + register: iam_policy_info + - name: Assert not failed + assert: + that: + - iam_policy_info is succeeded + + # 
============================================================ + - name: Invalid creation of policy for {{ iam_type }} - missing required parameters + iam_policy: + state: present + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_a }}' + skip_duplicates: yes + register: result + ignore_errors: yes + - name: Assert task failed with correct error message + assert: + that: + - result.failed + - "'state is present but any of the following are missing: policy_json' in result.msg" + + - name: Create policy using document for {{ iam_type }} (check mode) + check_mode: yes + iam_policy: + state: present + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_a }}' + policy_json: '{{ lookup("file", "no_access.json") }}' + skip_duplicates: yes + register: result + - name: Assert policy would be added for {{ iam_type }} + assert: + that: + - result is changed + + - name: Create policy using document for {{ iam_type }} + iam_policy: + state: present + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_a }}' + policy_json: '{{ lookup("file", "no_access.json") }}' + skip_duplicates: yes + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + register: iam_policy_info + - name: Assert policy was added for {{ iam_type }} + assert: + that: + - result is changed + - result.policies | length == 1 + - iam_policy_name_a in result.policies + - result[iam_object_key] == iam_name + - iam_policy_name_a in iam_policy_info.policy_names + - iam_policy_info.policy_names | length == 1 + - iam_policy_info.policies | length == 1 + - iam_policy_name_a in iam_policy_info.all_policy_names + - iam_policy_info.all_policy_names | length == 1 + - iam_policy_info.policies[0].policy_name == iam_policy_name_a + - '"Id" not in iam_policy_info.policies[0].policy_document' + + - name: Create policy using document for {{ iam_type }} (idempotency - check mode) + iam_policy: + state: present + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_a }}' + policy_json: '{{ lookup("file", "no_access.json") }}' + skip_duplicates: yes + register: result + check_mode: yes + - name: Assert no change would occur + assert: + that: + - result is not changed + + - name: Create policy using document for {{ iam_type }} (idempotency) + iam_policy: + state: present + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_a }}' + policy_json: '{{ lookup("file", "no_access.json") }}' + skip_duplicates: yes + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + register: iam_policy_info + - name: Assert no change + assert: + that: + - result is not changed + - result.policies | length == 1 + - iam_policy_name_a in result.policies + - result[iam_object_key] == iam_name + - iam_policy_info.policies | length == 1 + - iam_policy_info.all_policy_names | length == 1 + - iam_policy_name_a in iam_policy_info.all_policy_names + - iam_policy_info.policies[0].policy_name == iam_policy_name_a + - '"Id" not in iam_policy_info.policies[0].policy_document' + + # ============================================================ + - name: Create policy using document for {{ iam_type }} (check mode) (skip_duplicates) + check_mode: yes + iam_policy: + state: present + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_b }}' + policy_json: '{{ lookup("file", 
"no_access.json") }}' + skip_duplicates: yes + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_b }}' + register: iam_policy_info + - name: Assert policy would be added for {{ iam_type }} + assert: + that: + - result is not changed + - iam_policy_info.all_policy_names | length == 1 + - '"policies" not in iam_policy_info' + - iam_policy_name_b not in iam_policy_info.all_policy_names + + - name: Create policy using document for {{ iam_type }} (skip_duplicates) + iam_policy: + state: present + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_b }}' + policy_json: '{{ lookup("file", "no_access.json") }}' + skip_duplicates: yes + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_b }}' + register: iam_policy_info + - name: Assert policy was not added for {{ iam_type }} (skip_duplicates) + assert: + that: + - result is not changed + - result.policies | length == 1 + - iam_policy_name_b not in result.policies + - result[iam_object_key] == iam_name + - '"policies" not in iam_policy_info' + - '"policy_names" not in iam_policy_info' + - iam_policy_info.all_policy_names | length == 1 + - iam_policy_name_b not in iam_policy_info.all_policy_names + + - name: Create policy using document for {{ iam_type }} (check mode) (skip_duplicates + = no) + check_mode: yes + iam_policy: + state: present + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_b }}' + policy_json: '{{ lookup("file", "no_access.json") }}' + skip_duplicates: no + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_b }}' + register: iam_policy_info + - name: Assert policy would be added for {{ iam_type }} + assert: + that: + - result.changed == True + - '"policies" not in iam_policy_info' + - iam_policy_info.all_policy_names | length == 1 + - iam_policy_name_a in iam_policy_info.all_policy_names + - iam_policy_name_b not in iam_policy_info.all_policy_names + + - name: Create policy using document for {{ iam_type }} (skip_duplicates = no) + iam_policy: + state: present + skip_duplicates: no + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_b }}' + policy_json: '{{ lookup("file", "no_access.json") }}' + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_b }}' + register: iam_policy_info + - name: Assert policy was added for {{ iam_type }} + assert: + that: + - result is changed + - result.policies | length == 2 + - iam_policy_name_b in result.policies + - result[iam_object_key] == iam_name + - iam_policy_info.policies | length == 1 + - iam_policy_info.all_policy_names | length == 2 + - iam_policy_name_a in iam_policy_info.all_policy_names + - iam_policy_name_b in iam_policy_info.all_policy_names + - iam_policy_info.policies[0].policy_name == iam_policy_name_b + - '"Id" not in iam_policy_info.policies[0].policy_document' + + - name: Create policy using document for {{ iam_type }} (idempotency - check mode) + (skip_duplicates = no) + iam_policy: + state: present + skip_duplicates: no + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_b }}' + policy_json: '{{ lookup("file", "no_access.json") }}' + register: result + check_mode: yes + - name: Assert no change would 
occur + assert: + that: + - result is not changed + + - name: Create policy using document for {{ iam_type }} (idempotency) (skip_duplicates + = no) + iam_policy: + state: present + skip_duplicates: no + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_b }}' + policy_json: '{{ lookup("file", "no_access.json") }}' + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_b }}' + register: iam_policy_info + - name: Assert no change + assert: + that: + - result is not changed + - result.policies | length == 2 + - iam_policy_name_b in result.policies + - result[iam_object_key] == iam_name + - iam_policy_info.policies | length == 1 + - iam_policy_name_a in iam_policy_info.all_policy_names + - iam_policy_name_b in iam_policy_info.all_policy_names + - iam_policy_info.all_policy_names | length == 2 + - iam_policy_info.policies[0].policy_name == iam_policy_name_b + - '"Id" not in iam_policy_info.policies[0].policy_document' + + # ============================================================ + - name: Create policy using json for {{ iam_type }} (check mode) + check_mode: yes + iam_policy: + state: present + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_c }}' + policy_json: '{{ lookup("file", "no_access_with_id.json") }}' + skip_duplicates: yes + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_c }}' + register: iam_policy_info + - name: Assert policy would be added for {{ iam_type }} + assert: + that: + - result is changed + - '"policies" not in iam_policy_info' + - iam_policy_info.all_policy_names | length == 2 + - iam_policy_name_c not in iam_policy_info.all_policy_names + - iam_policy_name_a in iam_policy_info.all_policy_names + - iam_policy_name_b in iam_policy_info.all_policy_names + + - name: Create policy using json for {{ iam_type }} + iam_policy: + state: present + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_c }}' + policy_json: '{{ lookup("file", "no_access_with_id.json") }}' + skip_duplicates: yes + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_c }}' + register: iam_policy_info + - name: Assert policy was added for {{ iam_type }} + assert: + that: + - result is changed + - result.policies | length == 3 + - iam_policy_name_c in result.policies + - result[iam_object_key] == iam_name + - iam_policy_info.policies | length == 1 + - iam_policy_name_a in iam_policy_info.all_policy_names + - iam_policy_name_b in iam_policy_info.all_policy_names + - iam_policy_name_c in iam_policy_info.all_policy_names + - iam_policy_info.all_policy_names | length == 3 + - iam_policy_info.policies[0].policy_name == iam_policy_name_c + - iam_policy_info.policies[0].policy_document.Id == 'MyId' + + - name: Create policy using json for {{ iam_type }} (idempotency - check mode) + iam_policy: + state: present + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_c }}' + policy_json: '{{ lookup("file", "no_access_with_id.json") }}' + skip_duplicates: yes + register: result + check_mode: yes + - name: Assert no change would occur + assert: + that: + - result is not changed + + - name: Create policy using json for {{ iam_type }} (idempotency) + iam_policy: + state: present + iam_type: '{{ iam_type }}' + iam_name: 
'{{ iam_name }}' + policy_name: '{{ iam_policy_name_c }}' + policy_json: '{{ lookup("file", "no_access_with_id.json") }}' + skip_duplicates: yes + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_c }}' + register: iam_policy_info + - name: Assert no change + assert: + that: + - result is not changed + - result.policies | length == 3 + - iam_policy_name_c in result.policies + - result[iam_object_key] == iam_name + - iam_policy_name_a in iam_policy_info.all_policy_names + - iam_policy_name_b in iam_policy_info.all_policy_names + - iam_policy_name_c in iam_policy_info.all_policy_names + - iam_policy_info.all_policy_names | length == 3 + - iam_policy_info.policies[0].policy_name == iam_policy_name_c + - iam_policy_info.policies[0].policy_document.Id == 'MyId' + + # ============================================================ + - name: Create policy using json for {{ iam_type }} (check mode) (skip_duplicates) + check_mode: yes + iam_policy: + state: present + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_d }}' + policy_json: '{{ lookup("file", "no_access_with_id.json") }}' + skip_duplicates: yes + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_d }}' + register: iam_policy_info + - name: Assert policy would not be added for {{ iam_type }} + assert: + that: + - result is not changed + - iam_policy_name_a in iam_policy_info.all_policy_names + - iam_policy_name_b in iam_policy_info.all_policy_names + - iam_policy_name_c in iam_policy_info.all_policy_names + - iam_policy_name_d not in iam_policy_info.all_policy_names + - iam_policy_info.all_policy_names | length == 3 + - '"policies" not in iam_policy_info' + + - name: Create policy using json for {{ iam_type }} (skip_duplicates) + iam_policy: + state: present + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_d }}' + policy_json: '{{ lookup("file", "no_access_with_id.json") }}' + skip_duplicates: yes + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_d }}' + register: iam_policy_info + - name: Assert policy was not added for {{ iam_type }} (skip_duplicates) + assert: + that: + - result is not changed + - result.policies | length == 3 + - iam_policy_name_d not in result.policies + - result[iam_object_key] == iam_name + - iam_policy_name_a in iam_policy_info.all_policy_names + - iam_policy_name_b in iam_policy_info.all_policy_names + - iam_policy_name_c in iam_policy_info.all_policy_names + - iam_policy_name_d not in iam_policy_info.all_policy_names + - iam_policy_info.all_policy_names | length == 3 + - '"policies" not in iam_policy_info' + + - name: Create policy using json for {{ iam_type }} (check mode) (skip_duplicates + = no) + check_mode: yes + iam_policy: + state: present + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_d }}' + policy_json: '{{ lookup("file", "no_access_with_id.json") }}' + skip_duplicates: no + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_d }}' + register: iam_policy_info + - name: Assert policy would be added for {{ iam_type }} + assert: + that: + - result.changed == True + + - name: Create policy using json for {{ iam_type }} (skip_duplicates = no) + iam_policy: + 
state: present + skip_duplicates: no + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_d }}' + policy_json: '{{ lookup("file", "no_access_with_id.json") }}' + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_d }}' + register: iam_policy_info + - name: Assert policy was added for {{ iam_type }} + assert: + that: + - result is changed + - result.policies | length == 4 + - iam_policy_name_d in result.policies + - result[iam_object_key] == iam_name + - iam_policy_name_a in iam_policy_info.all_policy_names + - iam_policy_name_b in iam_policy_info.all_policy_names + - iam_policy_name_c in iam_policy_info.all_policy_names + - iam_policy_name_d in iam_policy_info.all_policy_names + - iam_policy_name_a not in iam_policy_info.policy_names + - iam_policy_name_b not in iam_policy_info.policy_names + - iam_policy_name_c not in iam_policy_info.policy_names + - iam_policy_name_d in iam_policy_info.policy_names + - iam_policy_info.policy_names | length == 1 + - iam_policy_info.all_policy_names | length == 4 + - iam_policy_info.policies[0].policy_name == iam_policy_name_d + - iam_policy_info.policies[0].policy_document.Id == 'MyId' + + - name: Create policy using json for {{ iam_type }} (idempotency - check mode) (skip_duplicates + = no) + iam_policy: + state: present + skip_duplicates: no + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_d }}' + policy_json: '{{ lookup("file", "no_access_with_id.json") }}' + register: result + check_mode: yes + - name: Assert no change would occur + assert: + that: + - result is not changed + + - name: Create policy using json for {{ iam_type }} (idempotency) (skip_duplicates + = no) + iam_policy: + state: present + skip_duplicates: no + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_d }}' + policy_json: '{{ lookup("file", "no_access_with_id.json") }}' + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_d }}' + register: iam_policy_info + - name: Assert no change + assert: + that: + - result is not changed + - result.policies | length == 4 + - iam_policy_name_d in result.policies + - result[iam_object_key] == iam_name + - iam_policy_name_a in iam_policy_info.all_policy_names + - iam_policy_name_b in iam_policy_info.all_policy_names + - iam_policy_name_c in iam_policy_info.all_policy_names + - iam_policy_name_d in iam_policy_info.all_policy_names + - iam_policy_info.all_policy_names | length == 4 + - iam_policy_info.policies[0].policy_name == iam_policy_name_d + - iam_policy_info.policies[0].policy_document.Id == 'MyId' + + # ============================================================ + - name: Test fetching multiple policies from {{ iam_type }} + iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + register: iam_policy_info + - name: Assert all policies returned + assert: + that: + - iam_policy_info is succeeded + - iam_policy_info.policies | length == 4 + - iam_policy_info.all_policy_names | length == 4 + - iam_policy_name_a in iam_policy_info.all_policy_names + - iam_policy_name_b in iam_policy_info.all_policy_names + - iam_policy_name_c in iam_policy_info.all_policy_names + - iam_policy_name_d in iam_policy_info.all_policy_names + # Quick test that the policies are the ones we expect + - iam_policy_info.policies | 
community.general.json_query('[*].policy_name') + | length == 4 + - iam_policy_info.policies | community.general.json_query('[?policy_document.Id + == `MyId`].policy_name') | length == 2 + - iam_policy_name_c in (iam_policy_info.policies | community.general.json_query('[?policy_document.Id + == `MyId`].policy_name') | list) + - iam_policy_name_d in (iam_policy_info.policies | community.general.json_query('[?policy_document.Id + == `MyId`].policy_name') | list) + + # ============================================================ + - name: Update policy using document for {{ iam_type }} (check mode) (skip_duplicates) + check_mode: yes + iam_policy: + state: present + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_a }}' + policy_json: '{{ lookup("file", "no_access_with_id.json") }}' + skip_duplicates: yes + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_a }}' + register: iam_policy_info + - name: Assert policy would not be added for {{ iam_type }} + assert: + that: + - result is not changed + - iam_policy_info.policies[0].policy_name == iam_policy_name_a + - '"Id" not in iam_policy_info.policies[0].policy_document' + + - name: Update policy using document for {{ iam_type }} (skip_duplicates) + iam_policy: + state: present + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_a }}' + policy_json: '{{ lookup("file", "no_access_with_id.json") }}' + skip_duplicates: yes + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_a }}' + register: iam_policy_info + - name: Assert policy was not updated for {{ iam_type }} (skip_duplicates) + assert: + that: + - result is not changed + - result.policies | length == 4 + - iam_policy_name_a in result.policies + - result[iam_object_key] == iam_name + - iam_policy_info.all_policy_names | length == 4 + - iam_policy_info.policies[0].policy_name == iam_policy_name_a + - '"Id" not in iam_policy_info.policies[0].policy_document' + + - name: Update policy using document for {{ iam_type }} (check mode) (skip_duplicates + = no) + check_mode: yes + iam_policy: + state: present + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_a }}' + policy_json: '{{ lookup("file", "no_access_with_id.json") }}' + skip_duplicates: no + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_a }}' + register: iam_policy_info + - name: Assert policy would be updated for {{ iam_type }} + assert: + that: + - result.changed == True + - iam_policy_info.all_policy_names | length == 4 + - iam_policy_info.policies[0].policy_name == iam_policy_name_a + - '"Id" not in iam_policy_info.policies[0].policy_document' + + - name: Update policy using document for {{ iam_type }} (skip_duplicates = no) + iam_policy: + state: present + skip_duplicates: no + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_a }}' + policy_json: '{{ lookup("file", "no_access_with_id.json") }}' + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_a }}' + register: iam_policy_info + - name: Assert policy was updated for {{ iam_type }} + assert: + that: + - result is changed + - result.policies | length == 4 + - iam_policy_name_a in result.policies + - 
result[iam_object_key] == iam_name + - iam_policy_info.policies[0].policy_document.Id == 'MyId' + + - name: Update policy using document for {{ iam_type }} (idempotency - check mode) + (skip_duplicates = no) + iam_policy: + state: present + skip_duplicates: no + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_a }}' + policy_json: '{{ lookup("file", "no_access_with_id.json") }}' + register: result + check_mode: yes + - name: Assert no change would occur + assert: + that: + - result is not changed + + - name: Update policy using document for {{ iam_type }} (idempotency) (skip_duplicates + = no) + iam_policy: + state: present + skip_duplicates: no + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_a }}' + policy_json: '{{ lookup("file", "no_access_with_id.json") }}' + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_a }}' + register: iam_policy_info + - name: Assert no change + assert: + that: + - result is not changed + - result.policies | length == 4 + - iam_policy_name_a in result.policies + - result[iam_object_key] == iam_name + - iam_policy_info.policies[0].policy_document.Id == 'MyId' + + - name: Delete policy A + iam_policy: + state: absent + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_a }}' + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_a }}' + register: iam_policy_info + - name: Assert deleted + assert: + that: + - result is changed + - result.policies | length == 3 + - iam_policy_name_a not in result.policies + - result[iam_object_key] == iam_name + - '"policies" not in iam_policy_info' + - iam_policy_info.all_policy_names | length == 3 + - iam_policy_name_a not in iam_policy_info.all_policy_names + + # ============================================================ + # Update C with no_access.json + # Delete C + + - name: Update policy using json for {{ iam_type }} (check mode) (skip_duplicates) + check_mode: yes + iam_policy: + state: present + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_c }}' + policy_json: '{{ lookup("file", "no_access.json") }}' + skip_duplicates: yes + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_c }}' + register: iam_policy_info + - name: Assert policy would not be added for {{ iam_type }} + assert: + that: + - result is not changed + - iam_policy_info.policies[0].policy_document.Id == 'MyId' + + - name: Update policy using json for {{ iam_type }} (skip_duplicates) + iam_policy: + state: present + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_c }}' + policy_json: '{{ lookup("file", "no_access.json") }}' + skip_duplicates: yes + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_c }}' + register: iam_policy_info + - name: Assert policy was not updated for {{ iam_type }} (skip_duplicates) + assert: + that: + - result is not changed + - result.policies | length == 3 + - iam_policy_name_c in result.policies + - result[iam_object_key] == iam_name + - iam_policy_info.policies[0].policy_document.Id == 'MyId' + + - name: Update policy using json for {{ iam_type }} (check mode) (skip_duplicates + = no) + check_mode: yes + 
iam_policy: + state: present + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_c }}' + policy_json: '{{ lookup("file", "no_access.json") }}' + skip_duplicates: no + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_c }}' + register: iam_policy_info + - name: Assert policy would be updated for {{ iam_type }} + assert: + that: + - result.changed == True + - iam_policy_info.policies[0].policy_document.Id == 'MyId' + + - name: Update policy using json for {{ iam_type }} (skip_duplicates = no) + iam_policy: + state: present + skip_duplicates: no + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_c }}' + policy_json: '{{ lookup("file", "no_access.json") }}' + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_c }}' + register: iam_policy_info + - name: Assert policy was updated for {{ iam_type }} + assert: + that: + - result is changed + - result.policies | length == 3 + - iam_policy_name_c in result.policies + - result[iam_object_key] == iam_name + - '"Id" not in iam_policy_info.policies[0].policy_document' + + - name: Update policy using json for {{ iam_type }} (idempotency - check mode) (skip_duplicates + = no) + iam_policy: + state: present + skip_duplicates: no + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_c }}' + policy_json: '{{ lookup("file", "no_access.json") }}' + register: result + check_mode: yes + - name: Assert no change would occur + assert: + that: + - result is not changed + + - name: Update policy using json for {{ iam_type }} (idempotency) (skip_duplicates + = no) + iam_policy: + state: present + skip_duplicates: no + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_c }}' + policy_json: '{{ lookup("file", "no_access.json") }}' + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_c }}' + register: iam_policy_info + - name: Assert no change + assert: + that: + - result is not changed + - result.policies | length == 3 + - iam_policy_name_c in result.policies + - result[iam_object_key] == iam_name + - '"Id" not in iam_policy_info.policies[0].policy_document' + + - name: Delete policy C + iam_policy: + state: absent + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_c }}' + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_c }}' + register: iam_policy_info + - name: Assert deleted + assert: + that: + - result is changed + - result.policies | length == 2 + - iam_policy_name_c not in result.policies + - result[iam_object_key] == iam_name + - '"policies" not in iam_policy_info' + - iam_policy_info.all_policy_names | length == 2 + - iam_policy_name_c not in iam_policy_info.all_policy_names + + # ============================================================ + - name: Update policy using document for {{ iam_type }} (check mode) + check_mode: yes + iam_policy: + state: present + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_b }}' + policy_json: '{{ lookup("file", "no_access_with_second_id.json") }}' + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ 
iam_policy_name_b }}' + register: iam_policy_info + - name: Assert policy would be updated for {{ iam_type }} + assert: + that: + - result.changed == True + - '"Id" not in iam_policy_info.policies[0].policy_document' + + - name: Update policy using document for {{ iam_type }} + iam_policy: + state: present + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_b }}' + policy_json: '{{ lookup("file", "no_access_with_second_id.json") }}' + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_b }}' + register: iam_policy_info + - name: Assert policy was updated for {{ iam_type }} + assert: + that: + - result is changed + - result.policies | length == 2 + - iam_policy_name_b in result.policies + - result[iam_object_key] == iam_name + - iam_policy_info.policies[0].policy_document.Id == 'MyOtherId' + + - name: Update policy using document for {{ iam_type }} (idempotency - check mode) + iam_policy: + state: present + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_b }}' + policy_json: '{{ lookup("file", "no_access_with_second_id.json") }}' + register: result + check_mode: yes + - name: Assert no change would occur + assert: + that: + - result is not changed + + - name: Update policy using document for {{ iam_type }} (idempotency) + iam_policy: + state: present + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_b }}' + policy_json: '{{ lookup("file", "no_access_with_second_id.json") }}' + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_b }}' + register: iam_policy_info + - name: Assert no change + assert: + that: + - result is not changed + - result.policies | length == 2 + - iam_policy_name_b in result.policies + - result[iam_object_key] == iam_name + - iam_policy_info.policies[0].policy_document.Id == 'MyOtherId' + + - name: Delete policy B + iam_policy: + state: absent + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_b }}' + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_b }}' + register: iam_policy_info + - name: Assert deleted + assert: + that: + - result is changed + - result.policies | length == 1 + - iam_policy_name_b not in result.policies + - result[iam_object_key] == iam_name + - '"policies" not in iam_policy_info' + - iam_policy_info.all_policy_names | length == 1 + - iam_policy_name_b not in iam_policy_info.all_policy_names + + # ============================================================ + - name: Update policy using json for {{ iam_type }} (check mode) + check_mode: yes + iam_policy: + state: present + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_d }}' + policy_json: '{{ lookup("file", "no_access_with_second_id.json") }}' + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_d }}' + register: iam_policy_info + - name: Assert policy would be updated for {{ iam_type }} + assert: + that: + - result.changed == True + - iam_policy_info.policies[0].policy_document.Id == 'MyId' + + - name: Update policy using json for {{ iam_type }} + iam_policy: + state: present + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_d }}' + 
policy_json: '{{ lookup("file", "no_access_with_second_id.json") }}' + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_d }}' + register: iam_policy_info + - name: Assert policy was updated for {{ iam_type }} + assert: + that: + - result is changed + - result.policies | length == 1 + - iam_policy_name_d in result.policies + - result[iam_object_key] == iam_name + - iam_policy_info.policies[0].policy_document.Id == 'MyOtherId' + + - name: Update policy using json for {{ iam_type }} (idempotency - check mode) + iam_policy: + state: present + skip_duplicates: no + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_d }}' + policy_json: '{{ lookup("file", "no_access_with_second_id.json") }}' + register: result + check_mode: yes + - name: Assert no change would occur + assert: + that: + - result is not changed + + - name: Update policy using json for {{ iam_type }} (idempotency) + iam_policy: + state: present + skip_duplicates: no + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_d }}' + policy_json: '{{ lookup("file", "no_access_with_second_id.json") }}' + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_d }}' + register: iam_policy_info + - name: Assert no change + assert: + that: + - result is not changed + - result.policies | length == 1 + - iam_policy_name_d in result.policies + - result[iam_object_key] == iam_name + - iam_policy_info.policies[0].policy_document.Id == 'MyOtherId' + + # ============================================================ + - name: Delete policy D (check_mode) + check_mode: yes + iam_policy: + state: absent + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_d }}' + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_d }}' + register: iam_policy_info + - name: Assert not deleted + assert: + that: + - result is changed + - result.policies | length == 1 + - iam_policy_name_d in result.policies + - result[iam_object_key] == iam_name + - iam_policy_info.all_policy_names | length == 1 + - iam_policy_name_d in iam_policy_info.all_policy_names + - iam_policy_info.policies[0].policy_document.Id == 'MyOtherId' + + - name: Delete policy D + iam_policy: + state: absent + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_d }}' + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_d }}' + register: iam_policy_info + - name: Assert deleted + assert: + that: + - result is changed + - '"policies" not in iam_policy_info' + - iam_policy_name_d not in result.policies + - result[iam_object_key] == iam_name + - iam_policy_info.all_policy_names | length == 0 + + - name: Delete policy D (test idempotency) + iam_policy: + state: absent + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_d }}' + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_d }}' + register: iam_policy_info + - name: Assert deleted + assert: + that: + - result is not changed + - '"policies" not in iam_policy_info' + - iam_policy_info.all_policy_names | length == 0 + + - name: Delete 
policy D (check_mode) (test idempotency) + check_mode: yes + iam_policy: + state: absent + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_d }}' + register: result + - iam_policy_info: + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_d }}' + register: iam_policy_info + - name: Assert deleted + assert: + that: + - result is not changed + - '"policies" not in iam_policy_info' + - iam_policy_info.all_policy_names | length == 0 + + always: + # ============================================================ + - name: Delete policy A for {{ iam_type }} + iam_policy: + state: absent + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_a }}' + ignore_errors: yes + - name: Delete policy B for {{ iam_type }} + iam_policy: + state: absent + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_b }}' + ignore_errors: yes + - name: Delete policy C for {{ iam_type }} + iam_policy: + state: absent + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_c }}' + ignore_errors: yes + - name: Delete policy D for {{ iam_type }} + iam_policy: + state: absent + iam_type: '{{ iam_type }}' + iam_name: '{{ iam_name }}' + policy_name: '{{ iam_policy_name_d }}' + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_user/aliases b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/aliases new file mode 100644 index 000000000..cf11724d7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/aliases @@ -0,0 +1,9 @@ +# reason: missing-policy +# It should be possible to test iam_user by limiting which policies can be +# attached to the users. +# Careful review is needed prior to adding this to the main CI. +unsupported + +cloud/aws + +iam_user_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_user/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/defaults/main.yml new file mode 100644 index 000000000..d5726a48b --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/defaults/main.yml @@ -0,0 +1,10 @@ +test_group: '{{ resource_prefix }}-group' +test_path: / +test_user: '{{ test_users[0] }}' +test_user3: '{{ test_users[2] }}' +test_password: ATotallySecureUncrackablePassword1! +test_new_password: ATotallyNewSecureUncrackablePassword1! 
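+# The list below drives the multi-user group tests; its first and third entries are also exposed individually as test_user and test_user3 above.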
+test_users: +- '{{ resource_prefix }}-user-a' +- '{{ resource_prefix }}-user-b' +- '{{ resource_prefix }}-user-c' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_user/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/meta/main.yml new file mode 100644 index 000000000..32cf5dda7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/main.yml new file mode 100644 index 000000000..06279024f --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/main.yml @@ -0,0 +1,798 @@ +- name: set up aws connection info + module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + - name: ensure improper usage of parameters fails gracefully + iam_user_info: + path: '{{ test_path }}' + group: '{{ test_group }}' + ignore_errors: yes + register: iam_user_info_path_group + - assert: + that: + - iam_user_info_path_group is failed + - 'iam_user_info_path_group.msg == "parameters are mutually exclusive: group|path"' + + - name: create test user (check mode) + iam_user: + name: '{{ test_user }}' + state: present + check_mode: yes + register: iam_user + - name: assert that the user would be created + assert: + that: + - iam_user is changed + + - name: create test user + iam_user: + name: '{{ test_user }}' + state: present + register: iam_user + - name: assert that the user is created + assert: + that: + - iam_user is changed + + - name: ensure test user exists (no change - check mode) + iam_user: + name: '{{ test_user }}' + state: present + register: iam_user + check_mode: yes + - name: assert that user would not change + assert: + that: + - iam_user is not changed + + - name: ensure test user exists (no change) + iam_user: + name: '{{ test_user }}' + state: present + register: iam_user + - name: assert that the user wasn't changed + assert: + that: + - iam_user is not changed + + - name: ensure the info used to validate other tests is valid + set_fact: + test_iam_user: '{{ iam_user.iam_user.user }}' + - assert: + that: + - test_iam_user.arn.startswith("arn:aws:iam") + - test_iam_user.arn.endswith("user/" + test_user) + - test_iam_user.create_date is not none + - test_iam_user.path == test_path + - test_iam_user.user_id is not none + - test_iam_user.user_name == test_user + - test_iam_user.tags | length == 0 + + - name: get info on IAM user(s) + iam_user_info: + register: iam_user_info + - assert: + that: + - iam_user_info.iam_users | length != 0 + + - name: get info on IAM user(s) with name + iam_user_info: + name: '{{ test_user }}' + register: iam_user_info + - assert: + that: + - iam_user_info.iam_users | length == 1 + - iam_user_info.iam_users[0].arn == test_iam_user.arn + - iam_user_info.iam_users[0].create_date == test_iam_user.create_date + - iam_user_info.iam_users[0].path == test_iam_user.path + - iam_user_info.iam_users[0].user_id == test_iam_user.user_id + - iam_user_info.iam_users[0].user_name == test_iam_user.user_name + - iam_user_info.iam_users[0].tags | length == 0 + + # ------------------------------------------------------------------------------------------ + + - name: create test user with password 
(check mode) + iam_user: + name: '{{ test_user3 }}' + password: '{{ test_password }}' + state: present + check_mode: yes + register: iam_user + - name: assert that the second user would be created + assert: + that: + - iam_user is changed + + - name: create second test user with password + iam_user: + name: '{{ test_user3 }}' + password: '{{ test_password }}' + password_reset_required: yes + state: present + wait: false + register: iam_user + - name: assert that the second user is created + assert: + that: + - iam_user is changed + - iam_user.iam_user.user.password_reset_required + + - name: get info on IAM user(s) on path + iam_user_info: + path: '{{ test_path }}' + name: '{{ test_user }}' + register: iam_user_info + - assert: + that: + - iam_user_info.iam_users | length == 1 + - iam_user_info.iam_users[0].arn == test_iam_user.arn + - iam_user_info.iam_users[0].create_date == test_iam_user.create_date + - iam_user_info.iam_users[0].path == test_iam_user.path + - iam_user_info.iam_users[0].user_id == test_iam_user.user_id + - iam_user_info.iam_users[0].user_name == test_iam_user.user_name + - iam_user_info.iam_users[0].tags | length == 0 + + # ------------------------------------------------------------------------------------------ + ## Test tags creation / updates + - name: Add Tag (check mode) + iam_user: + name: '{{ test_user }}' + state: present + tags: + TagA: ValueA + register: iam_user + check_mode: yes + - assert: + that: + - iam_user is changed + + - name: Add Tag + iam_user: + name: '{{ test_user }}' + state: present + tags: + TagA: ValueA + register: iam_user + - assert: + that: + - iam_user is changed + - iam_user.iam_user.user.user_name == test_user + - iam_user.iam_user.user.tags | length == 1 + - '"TagA" in iam_user.iam_user.user.tags' + - iam_user.iam_user.user.tags.TagA == "ValueA" + + - name: Add Tag (no change - check mode) + iam_user: + name: '{{ test_user }}' + state: present + tags: + TagA: ValueA + register: iam_user + check_mode: yes + - assert: + that: + - iam_user is not changed + + - name: Add Tag (no change) + iam_user: + name: '{{ test_user }}' + state: present + tags: + TagA: ValueA + register: iam_user + - assert: + that: + - iam_user is not changed + - iam_user.iam_user.user.user_name == test_user + - iam_user.iam_user.user.tags | length == 1 + - '"TagA" in iam_user.iam_user.user.tags' + - iam_user.iam_user.user.tags.TagA == "ValueA" + + - name: Extend Tags + iam_user: + name: '{{ test_user }}' + state: present + purge_tags: no + tags: + tag_b: value_b + Tag C: Value C + tag d: value d + register: iam_user + - assert: + that: + - iam_user is changed + - iam_user.iam_user.user.user_name == test_user + - iam_user.iam_user.user.tags | length == 4 + - '"TagA" in iam_user.iam_user.user.tags' + - '"tag_b" in iam_user.iam_user.user.tags' + - '"Tag C" in iam_user.iam_user.user.tags' + - '"tag d" in iam_user.iam_user.user.tags' + - iam_user.iam_user.user.tags.TagA == "ValueA" + - iam_user.iam_user.user.tags.tag_b == "value_b" + - iam_user.iam_user.user.tags["Tag C"] == "Value C" + - iam_user.iam_user.user.tags["tag d"] == "value d" + + - name: Create user without Tag (no change) + iam_user: + name: '{{ test_user }}' + state: present + register: iam_user + - assert: + that: + - iam_user is not changed + - iam_user.iam_user.user.user_name == test_user + - iam_user.iam_user.user.tags | length == 4 + + - name: Remove all Tags (check mode) + iam_user: + name: '{{ test_user }}' + state: present + tags: {} + check_mode: yes + register: iam_user + - assert: + that: + - 
iam_user is changed + + - name: Remove 3 Tags + iam_user: + name: '{{ test_user }}' + state: present + tags: + TagA: ValueA + register: iam_user + - assert: + that: + - iam_user is changed + - iam_user.iam_user.user.user_name == test_user + - iam_user.iam_user.user.tags | length == 1 + - '"TagA" in iam_user.iam_user.user.tags' + - iam_user.iam_user.user.tags.TagA == "ValueA" + + - name: Change Tag (check mode) + iam_user: + name: '{{ test_user }}' + state: present + tags: + TagA: AnotherValueA + register: iam_user + check_mode: yes + - assert: + that: + - iam_user is changed + + - name: Change Tag + iam_user: + name: '{{ test_user }}' + state: present + tags: + TagA: AnotherValueA + register: iam_user + - assert: + that: + - iam_user is changed + - iam_user.iam_user.user.user_name == test_user + - iam_user.iam_user.user.tags | length == 1 + - '"TagA" in iam_user.iam_user.user.tags' + - iam_user.iam_user.user.tags.TagA == "AnotherValueA" + + - name: Remove All Tags + iam_user: + name: '{{ test_user }}' + state: present + tags: {} + register: iam_user + - assert: + that: + - iam_user is changed + - iam_user.iam_user.user.user_name == test_user + - iam_user.iam_user.user.tags | length == 0 + + - name: Remove All Tags (no change) + iam_user: + name: '{{ test_user }}' + state: present + tags: {} + register: iam_user + - assert: + that: + - iam_user is not changed + - iam_user.iam_user.user.user_name == test_user + - iam_user.iam_user.user.tags | length == 0 + + # ------------------------------------------------------------------------------------------ + ## Test user password update + - name: test update IAM password with on_create only (check mode) + iam_user: + name: '{{ test_user3 }}' + password: '{{ test_new_password }}' + update_password: on_create + state: present + register: iam_user_update + check_mode: yes + - assert: + that: + - iam_user_update is not changed + + - name: test update IAM password with on_create only + iam_user: + name: '{{ test_user3 }}' + password: '{{ test_new_password }}' + update_password: on_create + state: present + register: iam_user_update + - assert: + that: + - iam_user_update is not changed + + - name: update IAM password (check mode) + iam_user: + name: '{{ test_user3 }}' + password: '{{ test_new_password }}' + state: present + register: iam_user_update + check_mode: yes + - assert: + that: + - iam_user_update is changed + + # flakey, there is no waiter for login profiles + # Login Profile for User ansible-user-c cannot be modified while login profile is being created. 
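+    # The until/retries loop below works around this: the update is re-run (up to 5 attempts, 3 seconds apart) until it stops failing.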
+ - name: update IAM password + iam_user: + name: '{{ test_user3 }}' + password: '{{ test_new_password }}' + state: present + register: iam_user_update + until: iam_user_update.failed == false + delay: 3 + retries: 5 + - assert: + that: + - iam_user_update is changed + - iam_user_update.iam_user.user.user_name == test_user3 + + # =========================================== + # Test Managed Policy management + # + # Use a couple of benign policies for testing: + # - AWSDenyAll + # - ServiceQuotasReadOnlyAccess + # + - name: attach managed policy to user (check mode) + iam_user: + name: '{{ test_user }}' + state: present + managed_policy: + - arn:aws:iam::aws:policy/AWSDenyAll + register: iam_user + check_mode: yes + - name: assert that the user is changed + assert: + that: + - iam_user is changed + + - name: attach managed policy to user + iam_user: + name: '{{ test_user }}' + state: present + managed_policy: + - arn:aws:iam::aws:policy/AWSDenyAll + register: iam_user + - name: assert that the user is changed + assert: + that: + - iam_user is changed + + - name: ensure managed policy is attached to user (no change - check mode) + iam_user: + name: '{{ test_user }}' + state: present + managed_policy: + - arn:aws:iam::aws:policy/AWSDenyAll + register: iam_user + check_mode: yes + - name: assert that the user hasn't changed + assert: + that: + - iam_user is not changed + + - name: ensure managed policy is attached to user (no change) + iam_user: + name: '{{ test_user }}' + state: present + managed_policy: + - arn:aws:iam::aws:policy/AWSDenyAll + register: iam_user + - name: assert that the user hasn't changed + assert: + that: + - iam_user is not changed + + # ------------------------------------------------------------------------------------------ + + - name: attach different managed policy to user (check mode) + check_mode: yes + iam_user: + name: '{{ test_user }}' + state: present + managed_policy: + - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess + purge_policy: no + register: iam_user + - name: assert that the user changed + assert: + that: + - iam_user is changed + + - name: attach different managed policy to user + iam_user: + name: '{{ test_user }}' + state: present + managed_policy: + - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess + purge_policy: no + register: iam_user + - name: assert that the user changed + assert: + that: + - iam_user is changed + + - name: attach different managed policy to user (no change - check mode) + iam_user: + name: '{{ test_user }}' + state: present + managed_policy: + - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess + purge_policy: no + register: iam_user + check_mode: yes + - name: assert that the user hasn't changed + assert: + that: + - iam_user is not changed + + - name: Check first policy wasn't purged + iam_user: + name: '{{ test_user }}' + state: present + managed_policy: + - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess + - arn:aws:iam::aws:policy/AWSDenyAll + purge_policy: no + register: iam_user + - name: assert that the user hasn't changed + assert: + that: + - iam_user is not changed + + - name: Check that managed policy order doesn't matter + iam_user: + name: '{{ test_user }}' + state: present + managed_policy: + - arn:aws:iam::aws:policy/AWSDenyAll + - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess + purge_policy: no + register: iam_user + - name: assert that the user hasn't changed + assert: + that: + - iam_user is not changed + + - name: Check that policy doesn't require full ARN path + iam_user: + name: '{{ 
test_user }}' + state: present + managed_policy: + - AWSDenyAll + - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess + purge_policy: no + register: iam_user + - name: assert that the user hasn't changed + assert: + that: + - iam_user is not changed + + # ------------------------------------------------------------------------------------------ + + - name: Remove one of the managed policies - with purge (check mode) + check_mode: yes + iam_user: + name: '{{ test_user }}' + state: present + managed_policy: + - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess + purge_policy: yes + register: iam_user + - name: assert that the user changed + assert: + that: + - iam_user is changed + + - name: Remove one of the managed policies - with purge + iam_user: + name: '{{ test_user }}' + state: present + managed_policy: + - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess + purge_policy: yes + register: iam_user + - name: assert that the user changed + assert: + that: + - iam_user is changed + + - name: Remove one of the managed policies - with purge (no change - check mode) + iam_user: + name: '{{ test_user }}' + state: present + managed_policy: + - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess + purge_policy: yes + register: iam_user + check_mode: yes + - name: assert that the user hasn't changed + assert: + that: + - iam_user is not changed + + - name: Remove one of the managed policies - with purge (no change) + iam_user: + name: '{{ test_user }}' + state: present + managed_policy: + - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess + purge_policy: yes + register: iam_user + - name: assert that the user hasn't changed + assert: + that: + - iam_user is not changed + + # ------------------------------------------------------------------------------------------ + + - name: ensure group exists + iam_group: + name: '{{ test_group }}' + users: + - '{{ test_user }}' + state: present + register: iam_group + - assert: + that: + - iam_group.changed + - iam_group.iam_group.users + + - name: get info on IAM user(s) in group + iam_user_info: + group: '{{ test_group }}' + name: '{{ test_user }}' + register: iam_user_info + - assert: + that: + - iam_user_info.iam_users | length == 1 + - iam_user_info.iam_users[0].arn == test_iam_user.arn + - iam_user_info.iam_users[0].create_date == test_iam_user.create_date + - iam_user_info.iam_users[0].path == test_iam_user.path + - iam_user_info.iam_users[0].user_id == test_iam_user.user_id + - iam_user_info.iam_users[0].user_name == test_iam_user.user_name + - iam_user_info.iam_users[0].tags | length == 0 + + - name: remove user from group + iam_group: + name: '{{ test_group }}' + purge_users: true + users: [] + state: present + register: iam_group + - name: get info on IAM user(s) after removing from group + iam_user_info: + group: '{{ test_group }}' + name: '{{ test_user }}' + register: iam_user_info + - name: assert empty list of users for group are returned + assert: + that: + - iam_user_info.iam_users | length == 0 + + - name: ensure ansible users exist + iam_user: + name: '{{ item }}' + state: present + with_items: '{{ test_users }}' + - name: get info on multiple IAM user(s) + iam_user_info: + register: iam_user_info + - assert: + that: + - iam_user_info.iam_users | length != 0 + + - name: ensure multiple user group exists with single user + iam_group: + name: '{{ test_group }}' + users: + - '{{ test_user }}' + state: present + register: iam_group + - name: get info on IAM user(s) in group + iam_user_info: + group: '{{ test_group }}' + register: 
iam_user_info + - assert: + that: + - iam_user_info.iam_users | length == 1 + + - name: add all users to group + iam_group: + name: '{{ test_group }}' + users: '{{ test_users }}' + state: present + register: iam_group + - name: get info on multiple IAM user(s) in group + iam_user_info: + group: '{{ test_group }}' + register: iam_user_info + - assert: + that: + - iam_user_info.iam_users | length == test_users | length + + - name: purge users from group + iam_group: + name: '{{ test_group }}' + purge_users: true + users: [] + state: present + register: iam_group + - name: ensure info is empty for empty group + iam_user_info: + group: '{{ test_group }}' + register: iam_user_info + - assert: + that: + - iam_user_info.iam_users | length == 0 + + - name: get info on IAM user(s) after removing from group + iam_user_info: + group: '{{ test_group }}' + register: iam_user_info + - name: assert empty list of users for group are returned + assert: + that: + - iam_user_info.iam_users | length == 0 + + - name: remove group + iam_group: + name: '{{ test_group }}' + state: absent + register: iam_group + - name: assert that group was removed + assert: + that: + - iam_group.changed + - iam_group + + - name: Test remove group again (idempotency) + iam_group: + name: '{{ test_group }}' + state: absent + register: iam_group + - name: assert that group remove is not changed + assert: + that: + - not iam_group.changed + + # ------------------------------------------------------------------------------------------ + + - name: Remove user with attached policy (check mode) + iam_user: + name: '{{ test_user }}' + state: absent + register: iam_user + check_mode: yes + - name: get info on IAM user(s) after deleting in check mode + iam_user_info: + name: '{{ test_user }}' + register: iam_user_info + - name: Assert user was not removed in check mode + assert: + that: + - iam_user.changed + - iam_user_info.iam_users | length == 1 + + - name: Remove user with attached policy + iam_user: + name: '{{ test_user }}' + state: absent + register: iam_user + - name: get info on IAM user(s) after deleting + iam_user_info: + name: '{{ test_user }}' + register: iam_user_info + - name: Assert user was removed + assert: + that: + - iam_user.changed + - iam_user_info.iam_users | length == 0 + + - name: Remove user with attached policy (idempotent - check mode) + iam_user: + name: '{{ test_user }}' + state: absent + register: iam_user + check_mode: yes + - name: Assert no change + assert: + that: + - not iam_user.changed + + - name: Remove user with attached policy (idempotent) + iam_user: + name: '{{ test_user }}' + state: absent + register: iam_user + - name: Assert no change + assert: + that: + - not iam_user.changed + + # ------------------------------------------------------------------------------------------ + ## Test user password removal + - name: Delete IAM password (check mode) + iam_user: + name: '{{ test_user3 }}' + remove_password: yes + state: present + register: iam_user_password_removal + check_mode: yes + - assert: + that: + - iam_user_password_removal is changed + + - name: Delete IAM password + iam_user: + name: '{{ test_user3 }}' + remove_password: yes + state: present + register: iam_user_password_removal + - assert: + that: + - iam_user_password_removal is changed + + - name: Delete IAM password again (check mode) + iam_user: + name: '{{ test_user3 }}' + remove_password: yes + state: present + register: iam_user_password_removal + check_mode: yes + - assert: + that: + - iam_user_password_removal is not changed + + 
- name: Delete IAM password again + iam_user: + name: '{{ test_user3 }}' + remove_password: yes + state: present + register: iam_user_password_removal + - assert: + that: + - iam_user_password_removal is not changed + + always: + - name: remove group + iam_group: + name: '{{ test_group }}' + state: absent + ignore_errors: yes + - name: remove ansible users + iam_user: + name: '{{ item }}' + state: absent + with_items: '{{ test_users }}' + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/aliases b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/aliases new file mode 100644 index 000000000..66c3ccc82 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/aliases @@ -0,0 +1,3 @@ +time=45m + +cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/meta/main.yml new file mode 100644 index 000000000..32cf5dda7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/create_environment_script.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/create_environment_script.yml new file mode 100644 index 000000000..bfa5f4bb4 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/create_environment_script.yml @@ -0,0 +1,9 @@ +--- +- hosts: localhost + connection: local + gather_facts: no + tasks: + - name: 'Write access key to file we can source' + copy: + dest: '../access_key.sh' + content: 'export MY_ACCESS_KEY="{{ aws_access_key }}"' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/create_inventory_config.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/create_inventory_config.yml new file mode 100644 index 000000000..7e4c31068 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/create_inventory_config.yml @@ -0,0 +1,11 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + vars: + template_name: "../templates/{{ template | default('inventory.yml.j2') }}" + tasks: + - name: write inventory config file + copy: + dest: ../test.aws_ec2.yml + content: "{{ lookup('template', template_name) }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/empty_inventory_config.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/empty_inventory_config.yml new file mode 100644 index 000000000..f67fff1a9 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/empty_inventory_config.yml @@ -0,0 +1,9 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + tasks: + - name: write inventory config file + copy: + dest: ../test.aws_ec2.yml + content: "" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/populate_cache.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/populate_cache.yml new file mode 100644 index 000000000..929608c72 --- /dev/null +++ 
b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/populate_cache.yml @@ -0,0 +1,55 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + environment: "{{ ansible_test.environment }}" + tasks: + + - module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + + # Create VPC, subnet, security group, and find image_id to create instance + + - include_tasks: setup.yml +# - pause: +# seconds: 240 + + - name: assert group was populated with inventory but is empty + assert: + that: + - "'aws_ec2' in groups" + - "not groups.aws_ec2" + + # Create new host, add it to inventory and then terminate it without updating the cache + + - name: create a new host + ec2_instance: + image_id: '{{ image_id }}' + name: '{{ resource_prefix }}' + instance_type: t2.micro + wait: no + security_groups: '{{ sg_id }}' + vpc_subnet_id: '{{ subnet_id }}' + register: setup_instance + + - meta: refresh_inventory + + always: + + - name: remove setup ec2 instance + ec2_instance: + instance_type: t2.micro + instance_ids: '{{ setup_instance.instance_ids }}' + state: absent + name: '{{ resource_prefix }}' + security_groups: '{{ sg_id }}' + vpc_subnet_id: '{{ subnet_id }}' + ignore_errors: yes + when: setup_instance is defined + + - include_tasks: tear_down.yml diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/setup.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/setup.yml new file mode 100644 index 000000000..abbb61997 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/setup.yml @@ -0,0 +1,52 @@ +- name: get image ID to create an instance + ec2_ami_info: + filters: + architecture: x86_64 + # CentOS Community Platform Engineering (CPE) + owner-id: '125523088429' + virtualization-type: hvm + root-device-type: ebs + name: 'Fedora-Cloud-Base-34-1.2.x86_64*' + register: fedora_images + +- set_fact: + image_id: '{{ fedora_images.images.0.image_id }}' + vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/16' + subnet_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/24' + +- name: create a VPC to work in + ec2_vpc_net: + cidr_block: '{{ vpc_cidr }}' + state: present + name: '{{ resource_prefix }}_setup' + resource_tags: + Name: '{{ resource_prefix }}_setup' + register: setup_vpc + +- set_fact: + vpc_id: '{{ setup_vpc.vpc.id }}' + +- name: create a subnet to use for creating an ec2 instance + ec2_vpc_subnet: + az: '{{ aws_region }}a' + tags: '{{ resource_prefix }}_setup' + vpc_id: '{{ setup_vpc.vpc.id }}' + cidr: '{{ subnet_cidr }}' + state: present + resource_tags: + Name: '{{ resource_prefix }}_setup' + register: setup_subnet + +- set_fact: + subnet_id: '{{ setup_subnet.subnet.id }}' + +- name: create a security group to use for creating an ec2 instance + ec2_group: + name: '{{ resource_prefix }}_setup' + description: 'created by Ansible integration tests' + state: present + vpc_id: '{{ setup_vpc.vpc.id }}' + register: setup_sg + +- set_fact: + sg_id: '{{ setup_sg.group_id }}' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/tear_down.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/tear_down.yml new file mode 100644 index 000000000..c782421d4 --- /dev/null +++ 
b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/tear_down.yml @@ -0,0 +1,31 @@ +- set_fact: + vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/16' + subnet_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/24' + +- name: remove setup security group + ec2_group: + name: '{{ resource_prefix }}_setup' + description: 'created by Ansible integration tests' + state: absent + vpc_id: '{{ vpc_id }}' + ignore_errors: yes + +- name: remove setup subnet + ec2_vpc_subnet: + az: '{{ aws_region }}a' + tags: '{{ resource_prefix }}_setup' + vpc_id: '{{ vpc_id }}' + cidr: '{{ subnet_cidr }}' + state: absent + resource_tags: + Name: '{{ resource_prefix }}_setup' + ignore_errors: yes + +- name: remove setup VPC + ec2_vpc_net: + cidr_block: '{{ vpc_cidr }}' + state: absent + name: '{{ resource_prefix }}_setup' + resource_tags: + Name: '{{ resource_prefix }}_setup' + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml new file mode 100644 index 000000000..cc1b9a5a5 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml @@ -0,0 +1,9 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + tasks: + - name: assert inventory was not populated by aws_ec2 inventory plugin + assert: + that: + - "'aws_ec2' not in groups" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_inventory_cache.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_inventory_cache.yml new file mode 100644 index 000000000..d83cb0bfe --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_inventory_cache.yml @@ -0,0 +1,18 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + tasks: + - name: assert cache was used to populate inventory + assert: + that: + - "'aws_ec2' in groups" + - "groups.aws_ec2 | length == 1" + + - meta: refresh_inventory + + - name: assert refresh_inventory updated the cache + assert: + that: + - "'aws_ec2' in groups" + - "not groups.aws_ec2" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory.yml new file mode 100644 index 000000000..01627659b --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory.yml @@ -0,0 +1,78 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + environment: "{{ ansible_test.environment }}" + tasks: + + - module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + + block: + + # Create VPC, subnet, security group, and find image_id to create instance + + - include_tasks: setup.yml + + - name: assert group was populated with inventory but is empty + assert: + that: + - "'aws_ec2' in groups" + - "not groups.aws_ec2" + + # Create new host, refresh inventory, remove host, refresh inventory + + - name: create a new host + ec2_instance: + image_id: 
'{{ image_id }}' + name: '{{ resource_prefix }}' + instance_type: t2.micro + security_groups: '{{ sg_id }}' + vpc_subnet_id: '{{ subnet_id }}' + wait: no + register: setup_instance + + - meta: refresh_inventory + + - name: assert group was populated with inventory and is no longer empty + assert: + that: + - "'aws_ec2' in groups" + - "groups.aws_ec2 | length == 1" + - "groups.aws_ec2.0 == '{{ resource_prefix }}'" + + - name: remove setup ec2 instance + ec2_instance: + instance_type: t2.micro + instance_ids: '{{ setup_instance.instance_ids }}' + state: absent + name: '{{ resource_prefix }}' + security_groups: '{{ sg_id }}' + vpc_subnet_id: '{{ subnet_id }}' + + - meta: refresh_inventory + + - name: assert group was populated with inventory but is empty + assert: + that: + - "'aws_ec2' in groups" + - "not groups.aws_ec2" + + always: + + - name: remove setup ec2 instance + ec2_instance: + instance_type: t2.micro + instance_ids: '{{ setup_instance.instance_ids }}' + state: absent + name: '{{ resource_prefix }}' + security_groups: '{{ sg_id }}' + vpc_subnet_id: '{{ subnet_id }}' + ignore_errors: yes + when: setup_instance is defined + + - include_tasks: tear_down.yml diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_concatenation.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_concatenation.yml new file mode 100644 index 000000000..b155b7ab3 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_concatenation.yml @@ -0,0 +1,56 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + environment: "{{ ansible_test.environment }}" + tasks: + + - module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + + # Create VPC, subnet, security group, and find image_id to create instance + - include_tasks: setup.yml + + # Create new host, refresh inventory + - name: create a new host + ec2_instance: + image_id: '{{ image_id }}' + name: '{{ resource_prefix }}' + tags: + OtherTag: value + instance_type: t2.micro + security_groups: '{{ sg_id }}' + vpc_subnet_id: '{{ subnet_id }}' + wait: no + register: setup_instance + + - meta: refresh_inventory + + - name: register the current hostname + set_fact: + expected_hostname: "value_{{ resource_prefix }}" + + - name: "Ensure we've got a hostvars entry for the new host" + assert: + that: + - expected_hostname in hostvars + + always: + + - name: remove setup ec2 instance + ec2_instance: + instance_type: t2.micro + instance_ids: '{{ setup_instance.instance_ids }}' + state: absent + name: '{{ resource_prefix }}' + security_groups: "{{ sg_id }}" + vpc_subnet_id: "{{ subnet_id }}" + ignore_errors: yes + when: setup_instance is defined + + - include_tasks: tear_down.yml diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml new file mode 100644 index 000000000..f75dafac8 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml @@ -0,0 +1,69 @@ +--- +- hosts: 127.0.0.1 + 
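+  # with inventory_with_constructed.yml.j2 the refresh below should produce +  # one keyed group per security group id, per tag and per architecture +  # (e.g. security_groups_sg_..., tag_tag1_value1, arch_x86_64), plus the +  # conditional tag_with_name_key group and the composed test_compose_var_sum +  # hostvar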
connection: local + gather_facts: no + environment: "{{ ansible_test.environment }}" + tasks: + + - module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + + # Create VPC, subnet, security group, and find image_id to create instance + + - include_tasks: setup.yml + + # Create new host, refresh inventory + + - name: create a new host + ec2_instance: + image_id: '{{ image_id }}' + name: '{{ resource_prefix }}' + tags: + tag1: value1 + tag2: value2 + instance_type: t2.micro + security_groups: '{{ sg_id }}' + vpc_subnet_id: '{{ subnet_id }}' + wait: no + register: setup_instance + + - meta: refresh_inventory + + - name: register the keyed sg group name + set_fact: + sg_group_name: "security_groups_{{ sg_id | replace('-', '_') }}" + + - name: register one of the keyed tag group names + set_fact: + tag_group_name: "tag_Name_{{ resource_prefix | replace('-', '_') }}" + + - name: assert the keyed groups and groups from constructed config were added to inventory and composite var added to hostvars + assert: + that: + # There are 9 groups: all, ungrouped, aws_ec2, sg keyed group, 3 tag keyed groups (one per tag), arch keyed group, constructed group + - "groups | length == 9" + - "groups[tag_group_name] | length == 1" + - "groups[sg_group_name] | length == 1" + - "groups.arch_x86_64 | length == 1" + - "groups.tag_with_name_key | length == 1" + - vars.hostvars[groups.aws_ec2.0]['test_compose_var_sum'] == 'value1value2' + + always: + + - name: remove setup ec2 instance + ec2_instance: + instance_type: t2.micro + instance_ids: '{{ setup_instance.instance_ids }}' + state: absent + name: '{{ resource_prefix }}' + security_groups: "{{ sg_id }}" + vpc_subnet_id: "{{ subnet_id }}" + ignore_errors: yes + when: setup_instance is defined + + - include_tasks: tear_down.yml diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostnames_using_tags.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostnames_using_tags.yml new file mode 100644 index 000000000..dfae16f05 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostnames_using_tags.yml @@ -0,0 +1,62 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: false + environment: "{{ ansible_test.environment }}" + tasks: + + - module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + + # Create VPC, subnet, security group, and find image_id to create instance + - include_tasks: setup.yml + + # Create new host + - name: create a new host + ec2_instance: + image_id: '{{ image_id }}' + name: '{{ resource_prefix }}' + tags: + Tag1: Test1 + Tag2: Test2 + instance_type: t2.micro + security_groups: '{{ sg_id }}' + vpc_subnet_id: '{{ subnet_id }}' + wait: false + register: setup_instance + + # refresh inventory + - meta: refresh_inventory + + - debug: + var: groups + + - name: assert groups and hostvars were populated with inventory + assert: + that: + - "'aws_ec2' in groups" + - groups['aws_ec2'] | length == 1 + - "'Tag1_Test1' in groups['aws_ec2']" + - "'Tag2_Test2' not in groups['aws_ec2']" + - "'Tag1_Test1' in hostvars" + -
"'Tag2_Test2' not in hostvars" + + always: + + - name: remove ec2 instance + ec2_instance: + instance_type: t2.micro + instance_ids: '{{ setup_instance.instance_ids }}' + state: absent + name: '{{ resource_prefix }}' + security_groups: "{{ sg_id }}" + vpc_subnet_id: "{{ subnet_id }}" + ignore_errors: true + when: setup_instance is defined + + - include_tasks: tear_down.yml diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostnames_using_tags_classic.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostnames_using_tags_classic.yml new file mode 100644 index 000000000..576b53ab5 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostnames_using_tags_classic.yml @@ -0,0 +1,62 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: false + environment: "{{ ansible_test.environment }}" + tasks: + + - module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + + # Create VPC, subnet, security group, and find image_id to create instance + - include_tasks: setup.yml + + # Create new host + - name: create a new host + ec2_instance: + image_id: '{{ image_id }}' + name: '{{ resource_prefix }}' + tags: + Tag1: Test1 + Tag2: Test2 + instance_type: t2.micro + security_groups: '{{ sg_id }}' + vpc_subnet_id: '{{ subnet_id }}' + wait: false + register: setup_instance + + # refresh inventory + - meta: refresh_inventory + + - debug: + var: groups + + - name: assert groups and hostvars were populated with inventory + assert: + that: + - "'aws_ec2' in groups" + - groups['aws_ec2'] | length == 1 + - "'Test1' in groups['aws_ec2']" + - "'Test2' not in groups['aws_ec2']" + - "'Test1' in hostvars" + - "'Test2' not in hostvars" + + always: + + - name: remove ec2 instance + ec2_instance: + instance_type: t2.micro + instance_ids: '{{ setup_instance.instance_ids }}' + state: absent + name: '{{ resource_prefix }}' + security_groups: "{{ sg_id }}" + vpc_subnet_id: "{{ subnet_id }}" + ignore_errors: true + when: setup_instance is defined + + - include_tasks: tear_down.yml diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostvars_prefix_suffix.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostvars_prefix_suffix.yml new file mode 100644 index 000000000..7d6e8c5d9 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostvars_prefix_suffix.yml @@ -0,0 +1,65 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + environment: "{{ ansible_test.environment }}" + tasks: + + - module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + + # Create VPC, subnet, security group, and find image_id to create instance + + - include_tasks: setup.yml + + # Create new host, refresh inventory + - name: create a new host + ec2_instance: + image_id: '{{ image_id }}' + name: '{{ resource_prefix }}_1' + tags: + tag_instance1: foo + instance_type: t2.micro + 
security_groups: '{{ sg_id }}' + vpc_subnet_id: '{{ subnet_id }}' + wait: no + register: setup_instance_1 + + - meta: refresh_inventory + + - name: assert the hostvars are defined with prefix and/or suffix + assert: + that: + - "hostvars['{{ resource_prefix }}_1'].{{ vars_prefix }}instance_type{{ vars_suffix }} == 't2.micro'" + - "'{{ vars_prefix }}instance_type{{ vars_suffix }}' in hostvars['{{ resource_prefix }}_1']" + - "'{{ vars_prefix }}image_id{{ vars_suffix }}' in hostvars['{{ resource_prefix }}_1']" + - "'{{ vars_prefix }}instance_id{{ vars_suffix }}' in hostvars['{{ resource_prefix }}_1']" + - "'instance_type' not in hostvars['{{ resource_prefix }}_1']" + - "'image_id' not in hostvars['{{ resource_prefix }}_1']" + - "'instance_id' not in hostvars['{{ resource_prefix }}_1']" + - "'ansible_diff_mode' in hostvars['{{ resource_prefix }}_1']" + - "'ansible_forks' in hostvars['{{ resource_prefix }}_1']" + - "'ansible_version' in hostvars['{{ resource_prefix }}_1']" + vars: + vars_prefix: "{{ hostvars_prefix | default('') }}" + vars_suffix: "{{ hostvars_suffix | default('') }}" + + always: + + - name: remove setup ec2 instance + ec2_instance: + instance_type: t2.micro + instance_ids: '{{ setup_instance_1.instance_ids }}' + state: absent + name: '{{ resource_prefix }}_1' + security_groups: "{{ sg_id }}" + vpc_subnet_id: "{{ subnet_id }}" + ignore_errors: yes + when: setup_instance_1 is defined + + - include_tasks: tear_down.yml diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_include_or_exclude_filters.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_include_or_exclude_filters.yml new file mode 100644 index 000000000..b456565ae --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_include_or_exclude_filters.yml @@ -0,0 +1,103 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + environment: "{{ ansible_test.environment }}" + tasks: + + - module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + + # Create VPC, subnet, security group, and find image_id to create instance + + - include_tasks: setup.yml + + # Create new host, refresh inventory + - name: create a new host (1/3) + ec2_instance: + image_id: '{{ image_id }}' + name: '{{ resource_prefix }}_1' + tags: + tag_instance1: foo + instance_type: t2.micro + security_groups: '{{ sg_id }}' + vpc_subnet_id: '{{ subnet_id }}' + wait: no + register: setup_instance_1 + + - name: create a new host (2/3) + ec2_instance: + image_id: '{{ image_id }}' + name: '{{ resource_prefix }}_2' + tags: + tag_instance2: bar + instance_type: t2.micro + security_groups: '{{ sg_id }}' + vpc_subnet_id: '{{ subnet_id }}' + wait: no + register: setup_instance_2 + + - name: create a new host (3/3) + ec2_instance: + image_id: '{{ image_id }}' + name: '{{ resource_prefix }}_3' + tags: + tag_instance2: bar + instance_type: t2.micro + security_groups: '{{ sg_id }}' + vpc_subnet_id: '{{ subnet_id }}' + wait: no + register: setup_instance_3 + + - meta: refresh_inventory + + - name: assert that only the hosts matching the include/exclude filters were added to inventory + assert: + that: + # host _1 matches the name filter, _2 is added by include_filters, and _3 is dropped again by exclude_filters
- "groups['all'] | length == 2" + - "'{{ resource_prefix }}_1' in groups['all']" + - "'{{ resource_prefix }}_2' in groups['all']" + - "not ('{{ resource_prefix }}_3' in groups['all'])" + + always: + + - name: remove setup ec2 instance (1/3) + ec2_instance: + instance_type: t2.micro + instance_ids: '{{ setup_instance_1.instance_ids }}' + state: absent + name: '{{ resource_prefix }}_1' + security_groups: "{{ sg_id }}" + vpc_subnet_id: "{{ subnet_id }}" + ignore_errors: yes + when: setup_instance_1 is defined + + - name: remove setup ec2 instance (2/3) + ec2_instance: + instance_type: t2.micro + instance_ids: '{{ setup_instance_2.instance_ids }}' + state: absent + name: '{{ resource_prefix }}_2' + security_groups: "{{ sg_id }}" + vpc_subnet_id: "{{ subnet_id }}" + ignore_errors: yes + when: setup_instance_2 is defined + + - name: remove setup ec2 instance (3/3) + ec2_instance: + instance_type: t2.micro + instance_ids: '{{ setup_instance_3.instance_ids }}' + state: absent + name: '{{ resource_prefix }}_3' + security_groups: "{{ sg_id }}" + vpc_subnet_id: "{{ subnet_id }}" + ignore_errors: yes + when: setup_instance_3 is defined + + - include_tasks: tear_down.yml diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_literal_string.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_literal_string.yml new file mode 100644 index 000000000..8ba065eaf --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_literal_string.yml @@ -0,0 +1,56 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + environment: "{{ ansible_test.environment }}" + tasks: + + - module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + + # Create VPC, subnet, security group, and find image_id to create instance + - include_tasks: setup.yml + + # Create new host, refresh inventory + - name: create a new host + ec2_instance: + image_id: '{{ image_id }}' + name: '{{ resource_prefix }}' + tags: + OtherTag: value + instance_type: t2.micro + security_groups: '{{ sg_id }}' + vpc_subnet_id: '{{ subnet_id }}' + wait: no + register: setup_instance + + - meta: refresh_inventory + + - name: register the current hostname + set_fact: + expected_hostname: "aws-{{ resource_prefix }}" + + - name: "Ensure we've got a hostvars entry for the new host" + assert: + that: + - expected_hostname in hostvars + + always: + + - name: remove setup ec2 instance + ec2_instance: + instance_type: t2.micro + instance_ids: '{{ setup_instance.instance_ids }}' + state: absent + name: '{{ resource_prefix }}' + security_groups: "{{ sg_id }}" + vpc_subnet_id: "{{ subnet_id }}" + ignore_errors: yes + when: setup_instance is defined + + - include_tasks: tear_down.yml diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_use_contrib_script_keys.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_use_contrib_script_keys.yml new file mode 100644 index 000000000..6a4ef5b2a --- /dev/null +++
b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_use_contrib_script_keys.yml @@ -0,0 +1,57 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + environment: "{{ ansible_test.environment }}" + tasks: + + - module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + + # Create VPC, subnet, security group, and find image_id to create instance + - include_tasks: setup.yml + + # Create new host, refresh inventory + - name: create a new host + ec2_instance: + image_id: '{{ image_id }}' + name: '{{ resource_prefix }}:/aa' + tags: + OtherTag: value + instance_type: t2.micro + security_groups: '{{ sg_id }}' + vpc_subnet_id: '{{ subnet_id }}' + wait: no + register: setup_instance + + - meta: refresh_inventory + + - name: "register the current hostname, the : and / are replaced with _" + set_fact: + expected_hostname: "{{ resource_prefix }}__aa" + + - name: "Ensure we've got a hostvars entry for the new host" + assert: + that: + - expected_hostname in hostvars + - hostvars[expected_hostname].ec2_tag_OtherTag == "value" + + always: + + - name: remove setup ec2 instance + ec2_instance: + instance_type: t2.micro + instance_ids: '{{ setup_instance.instance_ids }}' + state: absent + name: '{{ resource_prefix }}' + security_groups: "{{ sg_id }}" + vpc_subnet_id: "{{ subnet_id }}" + ignore_errors: yes + when: setup_instance is defined + + - include_tasks: tear_down.yml diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_refresh_inventory.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_refresh_inventory.yml new file mode 100644 index 000000000..46a0c3e3b --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_refresh_inventory.yml @@ -0,0 +1,61 @@ +- name: Test updating inventory + module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + - name: assert group was populated with inventory but is empty + assert: + that: + - "'aws_ec2' in groups" + - "not groups.aws_ec2" + + - name: create a new host + ec2_instance: + image_id: "{{ images[aws_region] }}" + exact_count: 1 + name: '{{ resource_prefix }}' + instance_type: t2.micro + security_groups: '{{ setup_sg.security_groups }}' + vpc_subnet_id: '{{ setup_subnet.subnet.id }}' + wait: no + register: setup_instance + + - meta: refresh_inventory + + - name: assert group was populated with inventory and is no longer empty + assert: + that: + - "'aws_ec2' in groups" + - "groups.aws_ec2 | length == 1" + - "groups.aws_ec2.0 == '{{ resource_prefix }}'" + + - name: remove setup ec2 instance + ec2_instance: + instance_type: t2.micro + instance_ids: '{{ setup_instance.instance_ids }}' + state: absent + name: '{{ resource_prefix }}' + security_groups: '{{ setup_sg.security_groups }}' + vpc_subnet_id: '{{ setup_subnet.subnet.id }}' + + - meta: refresh_inventory + + - name: assert group was populated with inventory but is empty + assert: + that: + - "'aws_ec2' in groups" + - "not groups.aws_ec2" + + always: + - name: remove setup ec2 instance + ec2_instance: + instance_type: t2.micro + instance_ids: '{{ setup_instance.instance_ids }}' + state:
absent + name: '{{ resource_prefix }}' + security_groups: '{{ setup_sg.security_groups }}' + vpc_subnet_id: '{{ setup_subnet.subnet.id }}' + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/runme.sh b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/runme.sh new file mode 100755 index 000000000..d2940cd2a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/runme.sh @@ -0,0 +1,67 @@ +#!/usr/bin/env bash + +set -eux + +# ensure test config is empty +ansible-playbook playbooks/empty_inventory_config.yml "$@" + +export ANSIBLE_INVENTORY_ENABLED="amazon.aws.aws_ec2" + +# test with default inventory file +ansible-playbook playbooks/test_invalid_aws_ec2_inventory_config.yml "$@" + +export ANSIBLE_INVENTORY=test.aws_ec2.yml + +# test empty inventory config +ansible-playbook playbooks/test_invalid_aws_ec2_inventory_config.yml "$@" + +# generate inventory config and test using it +ansible-playbook playbooks/create_inventory_config.yml "$@" +ansible-playbook playbooks/test_populating_inventory.yml "$@" + +# generate inventory with access_key provided through a templated variable +ansible-playbook playbooks/create_environment_script.yml "$@" +source access_key.sh +ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_template.yml.j2'" "$@" +ansible-playbook playbooks/test_populating_inventory.yml "$@" + +# generate inventory config with caching and test using it +ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_cache.yml.j2'" "$@" +ansible-playbook playbooks/populate_cache.yml "$@" +ansible-playbook playbooks/test_inventory_cache.yml "$@" + +# remove inventory cache +rm -r aws_ec2_cache_dir/ + +# generate inventory config with constructed features and test using it +ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_constructed.yml.j2'" "$@" +ansible-playbook playbooks/test_populating_inventory_with_constructed.yml "$@" +ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_concatenation.yml.j2'" "$@" +ansible-playbook playbooks/test_populating_inventory_with_concatenation.yml "$@" +ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_literal_string.yml.j2'" "$@" +ansible-playbook playbooks/test_populating_inventory_with_literal_string.yml "$@" +ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_hostnames_using_tags_classic.yml.j2'" "$@" +ansible-playbook playbooks/test_populating_inventory_with_hostnames_using_tags_classic.yml "$@" +ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_hostnames_using_tags.yml.j2'" "$@" +ansible-playbook playbooks/test_populating_inventory_with_hostnames_using_tags.yml "$@" + +# generate inventory config with include_filters/exclude_filters and test using it +ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_include_or_exclude_filters.yml.j2'" "$@" +ansible-playbook playbooks/test_populating_inventory_with_include_or_exclude_filters.yml "$@" + +# generate inventory config with hostvars_prefix +ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_hostvars_prefix_suffix.yml.j2'" -e "hostvars_prefix='aws_ec2_'" "$@" +ansible-playbook playbooks/test_populating_inventory_with_hostvars_prefix_suffix.yml -e "hostvars_prefix='aws_ec2_'" "$@" +# generate inventory config
with hostvars_suffix +ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_hostvars_prefix_suffix.yml.j2'" -e "hostvars_suffix='_aws_ec2'" "$@" +ansible-playbook playbooks/test_populating_inventory_with_hostvars_prefix_suffix.yml -e "hostvars_suffix='_aws_ec2'" "$@" +# generate inventory config with hostvars_prefix and hostvars_suffix +ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_hostvars_prefix_suffix.yml.j2'" -e "hostvars_prefix='aws_'" -e "hostvars_suffix='_ec2'" "$@" +ansible-playbook playbooks/test_populating_inventory_with_hostvars_prefix_suffix.yml -e "hostvars_prefix='aws_'" -e "hostvars_suffix='_ec2'" "$@" + +# generate inventory config with contrib script compatible sanitization and test using it +ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_use_contrib_script_keys.yml.j2'" "$@" +ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS=never ansible-playbook playbooks/test_populating_inventory_with_use_contrib_script_keys.yml "$@" + +# cleanup inventory config +ansible-playbook playbooks/empty_inventory_config.yml "$@" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory.yml.j2 new file mode 100644 index 000000000..baac15be0 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory.yml.j2 @@ -0,0 +1,14 @@ +plugin: amazon.aws.aws_ec2 +aws_access_key_id: '{{ aws_access_key }}' +aws_secret_access_key: '{{ aws_secret_key }}' +{% if security_token | default(false) %} +aws_security_token: '{{ security_token }}' +{% endif %} +regions: +- '{{ aws_region }}' +filters: + tag:Name: + - '{{ resource_prefix }}' +hostnames: +- tag:Name +- dns-name diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_cache.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_cache.yml.j2 new file mode 100644 index 000000000..8fe4e33f4 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_cache.yml.j2 @@ -0,0 +1,14 @@ +plugin: amazon.aws.aws_ec2 +cache: True +cache_plugin: jsonfile +cache_connection: aws_ec2_cache_dir +aws_access_key_id: '{{ aws_access_key }}' +aws_secret_access_key: '{{ aws_secret_key }}' +{% if security_token | default(false) %} +aws_security_token: '{{ security_token }}' +{% endif %} +regions: +- '{{ aws_region }}' +filters: + tag:Name: + - '{{ resource_prefix }}' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_concatenation.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_concatenation.yml.j2 new file mode 100644 index 000000000..035b1d7ca --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_concatenation.yml.j2 @@ -0,0 +1,15 @@ +plugin: amazon.aws.aws_ec2 +aws_access_key_id: '{{ aws_access_key }}' +aws_secret_access_key: '{{ aws_secret_key }}' +{% if security_token | default(false) %} +aws_security_token: '{{ security_token }}' +{% endif %} +regions: +- '{{ aws_region }}' +filters: + tag:Name: + - '{{ resource_prefix }}' +hostnames: + - name: 'tag:Name' + separator: '_' + prefix: 'tag:OtherTag' diff --git
a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_constructed.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_constructed.yml.j2 new file mode 100644 index 000000000..a33f03e21 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_constructed.yml.j2 @@ -0,0 +1,22 @@ +plugin: amazon.aws.aws_ec2 +aws_access_key_id: '{{ aws_access_key }}' +aws_secret_access_key: '{{ aws_secret_key }}' +{% if security_token | default(false) %} +aws_security_token: '{{ security_token }}' +{% endif %} +regions: +- '{{ aws_region }}' +filters: + tag:Name: + - '{{ resource_prefix }}' +keyed_groups: +- key: 'security_groups|map(attribute="group_id")' + prefix: security_groups +- key: tags + prefix: tag +- prefix: arch + key: architecture +compose: + test_compose_var_sum: tags.tag1 + tags.tag2 +groups: + tag_with_name_key: '''Name'' in (tags | list)' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostnames_using_tags.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostnames_using_tags.yml.j2 new file mode 100644 index 000000000..2f7882a22 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostnames_using_tags.yml.j2 @@ -0,0 +1,21 @@ +plugin: amazon.aws.aws_ec2 +aws_access_key_id: '{{ aws_access_key }}' +aws_secret_access_key: '{{ aws_secret_key }}' +{% if security_token | default(false) %} +aws_security_token: '{{ security_token }}' +{% endif %} +regions: +- '{{ aws_region }}' +keyed_groups: +- prefix: tag + key: tags +hostnames: +# can also be specified using +# - tag:Tag1,Tag2 +# or +# - tag:Tag1 +# - tag:Tag2 +# or +- tag:Tag1=Test1,Tag2=Test2 +compose: + ansible_host: private_ip_address diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostnames_using_tags_classic.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostnames_using_tags_classic.yml.j2 new file mode 100644 index 000000000..3138a4a2a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostnames_using_tags_classic.yml.j2 @@ -0,0 +1,21 @@ +plugin: amazon.aws.aws_ec2 +aws_access_key_id: '{{ aws_access_key }}' +aws_secret_access_key: '{{ aws_secret_key }}' +{% if security_token | default(false) %} +aws_security_token: '{{ security_token }}' +{% endif %} +regions: +- '{{ aws_region }}' +keyed_groups: +- prefix: tag + key: tags +hostnames: +# can also be specified using +# - tag:Tag1,Tag2 +# or +# - tag:Tag1=Test1,Tag2=Test2 +# or +- tag:Tag1 +- tag:Tag2 +compose: + ansible_host: private_ip_address diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostvars_prefix_suffix.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostvars_prefix_suffix.yml.j2 new file mode 100644 index 000000000..f4f12c632 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostvars_prefix_suffix.yml.j2 @@ -0,0 +1,19 @@ +plugin: amazon.aws.aws_ec2 +aws_access_key_id: '{{ aws_access_key }}' +aws_secret_access_key: '{{ aws_secret_key }}' +{% if 
security_token | default(false) %} +aws_security_token: '{{ security_token }}' +{% endif %} +regions: +- '{{ aws_region }}' +filters: + tag:Name: + - '{{ resource_prefix }}_*' +{% if hostvars_prefix | default(false) %} +hostvars_prefix: '{{ hostvars_prefix }}' +{% endif %} +{% if hostvars_suffix | default(false) %} +hostvars_suffix: '{{ hostvars_suffix }}' +{% endif %} +hostnames: +- tag:Name diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_include_or_exclude_filters.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_include_or_exclude_filters.yml.j2 new file mode 100644 index 000000000..a6d48ce8c --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_include_or_exclude_filters.yml.j2 @@ -0,0 +1,23 @@ +plugin: amazon.aws.aws_ec2 +aws_access_key_id: '{{ aws_access_key }}' +aws_secret_access_key: '{{ aws_secret_key }}' +{% if security_token | default(false) %} +aws_security_token: '{{ security_token }}' +{% endif %} +regions: +- '{{ aws_region }}' +filters: + tag:Name: + - '{{ resource_prefix }}_1' + - '{{ resource_prefix }}_3' +include_filters: +- tag:Name: + - '{{ resource_prefix }}_2' + - '{{ resource_prefix }}_4' +exclude_filters: +- tag:Name: + - '{{ resource_prefix }}_3' + - '{{ resource_prefix }}_4' +hostnames: +- tag:Name +- dns-name diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_literal_string.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_literal_string.yml.j2 new file mode 100644 index 000000000..0dbddcb82 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_literal_string.yml.j2 @@ -0,0 +1,15 @@ +plugin: amazon.aws.aws_ec2 +aws_access_key_id: '{{ aws_access_key }}' +aws_secret_access_key: '{{ aws_secret_key }}' +{% if security_token | default(false) %} +aws_security_token: '{{ security_token }}' +{% endif %} +regions: +- '{{ aws_region }}' +filters: + tag:Name: + - '{{ resource_prefix }}' +hostnames: + - name: 'tag:Name' + separator: '-' + prefix: 'aws' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_template.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_template.yml.j2 new file mode 100644 index 000000000..6b27544f9 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_template.yml.j2 @@ -0,0 +1,14 @@ +plugin: amazon.aws.aws_ec2 +aws_access_key_id: '{{ '{{ lookup("env", "MY_ACCESS_KEY") }}' }}' +aws_secret_access_key: '{{ aws_secret_key }}' +{% if security_token | default(false) %} +aws_security_token: '{{ security_token }}' +{% endif %} +regions: +- '{{ aws_region }}' +filters: + tag:Name: + - '{{ resource_prefix }}' +hostnames: +- tag:Name +- dns-name diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_use_contrib_script_keys.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_use_contrib_script_keys.yml.j2 new file mode 100644 index 000000000..e6b4068fa --- /dev/null +++ 
b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_use_contrib_script_keys.yml.j2 @@ -0,0 +1,15 @@ +plugin: amazon.aws.aws_ec2 +aws_access_key_id: '{{ aws_access_key }}' +aws_secret_access_key: '{{ aws_secret_key }}' +{% if security_token | default(false) %} +aws_security_token: '{{ security_token }}' +{% endif %} +regions: +- '{{ aws_region }}' +filters: + tag:Name: + - '{{ resource_prefix }}:/aa' +hostnames: +- tag:Name +use_contrib_script_compatible_sanitization: True +use_contrib_script_compatible_ec2_tag_keys: True diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/test.aws_ec2.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/test.aws_ec2.yml new file mode 100644 index 000000000..e69de29bb diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/aliases b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/aliases new file mode 100644 index 000000000..569271951 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/aliases @@ -0,0 +1,2 @@ +cloud/aws +unsupported diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/meta/main.yml new file mode 100644 index 000000000..32cf5dda7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/create_inventory_config.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/create_inventory_config.yml new file mode 100644 index 000000000..f0a9030a0 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/create_inventory_config.yml @@ -0,0 +1,11 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + vars: + template_name: "../templates/{{ template | default('inventory.j2') }}" + tasks: + - name: write inventory config file + copy: + dest: ../test.aws_rds.yml + content: "{{ lookup('template', template_name) }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/empty_inventory_config.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/empty_inventory_config.yml new file mode 100644 index 000000000..d7e2cda3a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/empty_inventory_config.yml @@ -0,0 +1,9 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + tasks: + - name: write inventory config file + copy: + dest: ../test.aws_rds.yml + content: "" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/populate_cache.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/populate_cache.yml new file mode 100644 index 000000000..3c75a7cf5 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/populate_cache.yml @@ -0,0 +1,57 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + environment: "{{ ansible_test.environment }}" + collections: + - amazon.aws + - community.aws + tasks: + + - module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + 
aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + - set_fact: + instance_id: '{{ resource_prefix }}-mariadb' + + - name: assert group was populated with inventory but is empty + assert: + that: + - "'aws_rds' in groups" + - "not groups.aws_rds" + + # Create new host, add it to inventory and then terminate it without updating the cache + + - name: create minimal mariadb instance in default VPC and default subnet group + rds_instance: + state: present + engine: mariadb + db_instance_class: db.t2.micro + allocated_storage: 20 + instance_id: '{{ instance_id }}' + master_username: 'ansibletestuser' + master_user_password: 'password-{{ resource_prefix | regex_findall(".{8}$") | first }}' + tags: + workload_type: other + register: setup_instance + + - meta: refresh_inventory + + - assert: + that: + - groups.aws_rds + + always: + + - name: remove mariadb instance + rds_instance: + state: absent + engine: mariadb + skip_final_snapshot: yes + instance_id: '{{ instance_id }}' + ignore_errors: yes + when: setup_instance is defined diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_invalid_aws_rds_inventory_config.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_invalid_aws_rds_inventory_config.yml new file mode 100644 index 000000000..499513570 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_invalid_aws_rds_inventory_config.yml @@ -0,0 +1,9 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + tasks: + - name: assert inventory was not populated by aws_rds inventory plugin + assert: + that: + - "'aws_rds' not in groups" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_cache.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_cache.yml new file mode 100644 index 000000000..7eadbad85 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_cache.yml @@ -0,0 +1,18 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + tasks: + - name: assert cache was used to populate inventory + assert: + that: + - "'aws_rds' in groups" + - "groups.aws_rds | length == 1" + + - meta: refresh_inventory + + - name: assert refresh_inventory updated the cache + assert: + that: + - "'aws_rds' in groups" + - "not groups.aws_rds" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_with_hostvars_prefix_suffix.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_with_hostvars_prefix_suffix.yml new file mode 100644 index 000000000..2bdcea0eb --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_with_hostvars_prefix_suffix.yml @@ -0,0 +1,63 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + environment: "{{ ansible_test.environment }}" + collections: + - amazon.aws + - community.aws + tasks: + + - module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + + - set_fact: + instance_id: "{{ resource_prefix }}-mariadb" + + - name: create minimal 
mariadb instance in default VPC and default subnet group + rds_instance: + state: present + engine: mariadb + db_instance_class: db.t2.micro + allocated_storage: 20 + instance_id: '{{ resource_prefix }}-mariadb' + master_username: 'ansibletestuser' + master_user_password: 'password-{{ resource_prefix | regex_findall(".{8}$") | first }}' + tags: + workload_type: other + register: setup_instance + + - meta: refresh_inventory + + - name: assert the hostvars are defined with prefix and/or suffix + assert: + that: + - "hostvars[host_instance_name].{{ vars_prefix }}db_instance_class{{ vars_suffix }} == 'db.t2.micro'" + - "hostvars[host_instance_name].{{ vars_prefix }}engine{{ vars_suffix }} == 'mariadb'" + - "hostvars[host_instance_name].{{ vars_prefix }}db_instance_status{{ vars_suffix }} == 'available'" + - "'db_instance_class' not in hostvars[host_instance_name]" + - "'engine' not in hostvars[host_instance_name]" + - "'db_instance_status' not in hostvars[host_instance_name]" + - "'ansible_diff_mode' in hostvars[host_instance_name]" + - "'ansible_forks' in hostvars[host_instance_name]" + - "'ansible_version' in hostvars[host_instance_name]" + vars: + host_instance_name: "{{ resource_prefix }}-mariadb" + vars_prefix: "{{ inventory_prefix | default('') }}" + vars_suffix: "{{ inventory_suffix | default('') }}" + + always: + + - name: remove mariadb instance + rds_instance: + state: absent + engine: mariadb + skip_final_snapshot: yes + instance_id: '{{ instance_id }}' + ignore_errors: yes + when: setup_instance is defined diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory.yml new file mode 100644 index 000000000..678f65b7a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory.yml @@ -0,0 +1,77 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + environment: "{{ ansible_test.environment }}" + collections: + - amazon.aws + - community.aws + tasks: + + - module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + + - set_fact: + instance_id: "{{ resource_prefix }}-mariadb" + + - debug: var=groups + - name: assert group was populated with inventory but is empty + assert: + that: + - "'aws_rds' in groups" + - "not groups.aws_rds" + + # Create new host, refresh inventory, remove host, refresh inventory + + - name: create minimal mariadb instance in default VPC and default subnet group + rds_instance: + state: present + engine: mariadb + db_instance_class: db.t2.micro + allocated_storage: 20 + instance_id: '{{ instance_id }}' + master_username: 'ansibletestuser' + master_user_password: 'password-{{ resource_prefix | regex_findall(".{8}$") | first }}' + tags: + workload_type: other + register: setup_instance + + - meta: refresh_inventory + + - name: assert group was populated with inventory and is no longer empty + assert: + that: + - "'aws_rds' in groups" + - "groups.aws_rds | length == 1" + - "groups.aws_rds.0 == '{{ instance_id }}'" + + - name: remove mariadb instance + rds_instance: + state: absent + engine: mariadb + skip_final_snapshot: yes + instance_id: '{{ instance_id }}' + + - meta: refresh_inventory + + - name: assert group was populated with inventory but is empty + 
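+      # the rendered config (templates/inventory.j2) filters on db-instance-id +      # "{{ resource_prefix }}-mariadb", so the aws_rds group exists but stays +      # empty until that instance is created below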
assert: + that: + - "'aws_rds' in groups" + - "not groups.aws_rds" + + always: + + - name: remove mariadb instance + rds_instance: + state: absent + engine: mariadb + skip_final_snapshot: yes + instance_id: '{{ instance_id }}' + ignore_errors: yes + when: setup_instance is defined diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory_with_constructed.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory_with_constructed.yml new file mode 100644 index 000000000..1f59e683b --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory_with_constructed.yml @@ -0,0 +1,65 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: no + environment: "{{ ansible_test.environment }}" + collections: + - amazon.aws + - community.aws + tasks: + + - module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + + - set_fact: + instance_id: "{{ resource_prefix }}-mariadb" + + - name: create minimal mariadb instance in default VPC and default subnet group + rds_instance: + state: present + engine: mariadb + db_instance_class: db.t2.micro + allocated_storage: 20 + instance_id: '{{ resource_prefix }}-mariadb' + master_username: 'ansibletestuser' + master_user_password: 'password-{{ resource_prefix | regex_findall(".{8}$") | first }}' + tags: + workload_type: other + register: setup_instance + + - meta: refresh_inventory + - debug: var=groups + + - name: 'generate expected group name based off the db parameter groups' + vars: + parameter_group_name: '{{ setup_instance.db_parameter_groups[0].db_parameter_group_name }}' + set_fact: + parameter_group_key: 'rds_parameter_group_{{ parameter_group_name | replace(".", "_") }}' + + - name: assert the keyed groups from constructed config were added to inventory + assert: + that: + # There are 6 groups: all, ungrouped, aws_rds, tag keyed group, engine keyed group, parameter group keyed group + - "groups | length == 6" + - '"all" in groups' + - '"ungrouped" in groups' + - '"aws_rds" in groups' + - '"tag_workload_type_other" in groups' + - '"rds_mariadb" in groups' + - 'parameter_group_key in groups' + + always: + + - name: remove mariadb instance + rds_instance: + state: absent + engine: mariadb + skip_final_snapshot: yes + instance_id: '{{ instance_id }}' + ignore_errors: yes + when: setup_instance is defined diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_refresh_inventory.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_refresh_inventory.yml new file mode 100644 index 000000000..519aa5b28 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_refresh_inventory.yml @@ -0,0 +1,67 @@ +- name: test updating inventory + module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + collections: + - amazon.aws + - community.aws + block: + - set_fact: + instance_id: "{{ resource_prefix }}update" + + - name: assert group was populated with inventory but is empty + assert: + that: + - "'aws_rds' in groups" + - "not groups.aws_rds" + + - 
name: create minimal mariadb instance in default VPC and default subnet group + rds_instance: + state: present + engine: mariadb + db_instance_class: db.t2.micro + allocated_storage: 20 + instance_id: '{{ instance_id }}' + master_username: 'ansibletestuser' + master_user_password: 'password-{{ resource_prefix | regex_findall(".{8}$") | first }}' + tags: + workload_type: other + register: setup_instance + + - meta: refresh_inventory + + - name: assert group was populated with inventory and is no longer empty + assert: + that: + - "'aws_rds' in groups" + - "groups.aws_rds | length == 1" + - "groups.aws_rds.0 == '{{ instance_id }}'" + + - name: remove mariadb instance + rds_instance: + state: absent + engine: mariadb + skip_final_snapshot: yes + instance_id: '{{ instance_id }}' + + - meta: refresh_inventory + + - name: assert group was populated with inventory but is empty + assert: + that: + - "'aws_rds' in groups" + - "not groups.aws_rds" + + always: + + - name: remove mariadb instance + rds_instance: + state: absent + engine: mariadb + skip_final_snapshot: yes + instance_id: '{{ instance_id }}' + ignore_errors: yes + when: setup_instance is defined diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/runme.sh b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/runme.sh new file mode 100755 index 000000000..c16c083ee --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/runme.sh @@ -0,0 +1,47 @@ +#!/usr/bin/env bash + +set -eux + +# ensure test config is empty +ansible-playbook playbooks/empty_inventory_config.yml "$@" + +export ANSIBLE_INVENTORY_ENABLED="amazon.aws.aws_rds" + +# test with default inventory file +ansible-playbook playbooks/test_invalid_aws_rds_inventory_config.yml "$@" + +export ANSIBLE_INVENTORY=test.aws_rds.yml + +# test empty inventory config +ansible-playbook playbooks/test_invalid_aws_rds_inventory_config.yml "$@" + +# generate inventory config and test using it +ansible-playbook playbooks/create_inventory_config.yml "$@" +ansible-playbook playbooks/test_populating_inventory.yml "$@" + +# generate inventory config with caching and test using it +ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_cache.j2'" "$@" +ansible-playbook playbooks/populate_cache.yml "$@" +ansible-playbook playbooks/test_inventory_cache.yml "$@" + +# remove inventory cache +rm -r aws_rds_cache_dir/ + +# generate inventory config with constructed features and test using it +ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_constructed.j2'" "$@" +ansible-playbook playbooks/test_populating_inventory_with_constructed.yml "$@" + +# generate inventory config with hostvars_prefix features and test using it +ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_hostvars_prefix_suffix.j2'" -e "inventory_prefix='aws_rds_'" "$@" +ansible-playbook playbooks/test_inventory_with_hostvars_prefix_suffix.yml -e "inventory_prefix='aws_rds_'" "$@" + +# generate inventory config with hostvars_suffix features and test using it +ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_hostvars_prefix_suffix.j2'" -e "inventory_suffix='_aws_rds'" "$@" +ansible-playbook playbooks/test_inventory_with_hostvars_prefix_suffix.yml -e "inventory_suffix='_aws_rds'" "$@" + +# generate inventory config with hostvars_prefix and hostvars_suffix features and test using it
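+# with prefix 'aws_' and suffix '_rds' the plugin exposes e.g. the source +# variable 'engine' as the hostvar 'aws_engine_rds'; both values are rendered +# into the inventory file by inventory_with_hostvars_prefix_suffix.j2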
+ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_hostvars_prefix_suffix.j2'" -e "inventory_prefix='aws_'" -e "inventory_suffix='_rds'" "$@" +ansible-playbook playbooks/test_inventory_with_hostvars_prefix_suffix.yml -e "inventory_prefix='aws_'" -e "inventory_suffix='_rds'" "$@" + +# cleanup inventory config +ansible-playbook playbooks/empty_inventory_config.yml "$@" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory.j2 new file mode 100644 index 000000000..61a659eaa --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory.j2 @@ -0,0 +1,10 @@ +plugin: amazon.aws.aws_rds +aws_access_key_id: '{{ aws_access_key }}' +aws_secret_access_key: '{{ aws_secret_key }}' +{% if security_token | default(false) %} +aws_security_token: '{{ security_token }}' +{% endif %} +regions: + - '{{ aws_region }}' +filters: + db-instance-id: "{{ resource_prefix }}-mariadb" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_cache.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_cache.j2 new file mode 100644 index 000000000..6e9c40e90 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_cache.j2 @@ -0,0 +1,13 @@ +plugin: amazon.aws.aws_rds +cache: True +cache_plugin: jsonfile +cache_connection: aws_rds_cache_dir +aws_access_key_id: '{{ aws_access_key }}' +aws_secret_access_key: '{{ aws_secret_key }}' +{% if security_token | default(false) %} +aws_security_token: '{{ security_token }}' +{% endif %} +regions: + - '{{ aws_region }}' +filters: + db-instance-id: "{{ resource_prefix }}-mariadb" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_constructed.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_constructed.j2 new file mode 100644 index 000000000..c5603ef87 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_constructed.j2 @@ -0,0 +1,17 @@ +plugin: amazon.aws.aws_rds +aws_access_key_id: '{{ aws_access_key }}' +aws_secret_access_key: '{{ aws_secret_key }}' +{% if security_token | default(false) %} +aws_security_token: '{{ security_token }}' +{% endif %} +regions: + - '{{ aws_region }}' +keyed_groups: + - key: 'db_parameter_groups|map(attribute="db_parameter_group_name")' + prefix: rds_parameter_group + - key: tags + prefix: tag + - key: engine + prefix: rds +filters: + db-instance-id: "{{ resource_prefix }}-mariadb" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_hostvars_prefix_suffix.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_hostvars_prefix_suffix.j2 new file mode 100644 index 000000000..1e2ac7af6 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_hostvars_prefix_suffix.j2 @@ -0,0 +1,16 @@ +plugin: amazon.aws.aws_rds +aws_access_key_id: '{{ aws_access_key }}' +aws_secret_access_key: '{{ aws_secret_key }}' +{% if security_token | default(false) %} +aws_security_token: '{{ security_token }}' +{% endif %} +regions: 
+  - '{{ aws_region }}'
+{% if inventory_prefix | default(false) %}
+hostvars_prefix: '{{ inventory_prefix }}'
+{% endif %}
+{% if inventory_suffix | default(false) %}
+hostvars_suffix: '{{ inventory_suffix }}'
+{% endif %}
+filters:
+  db-instance-id: "{{ resource_prefix }}-mariadb"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/test.aws_rds.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/test.aws_rds.yml
new file mode 100644
index 000000000..e69de29bb
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/aliases b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/aliases
new file mode 100644
index 000000000..36c332ab4
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/aliases
@@ -0,0 +1,11 @@
+# Various race conditions - likely needs waiters
+# https://github.com/ansible-collections/community.aws/issues/433
+# No KMS-supported waiters, and manual waiting for updates didn't fix the issue either.
+# Issue likely from AWS side - added waits on updates in integration tests to work around this.
+
+# Some KMS operations are just slow
+time=10m
+
+cloud/aws
+
+kms_key_info
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/inventory b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/inventory
new file mode 100644
index 000000000..a9081eae9
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/inventory
@@ -0,0 +1,12 @@
+# inventory names are kept short to fit resource name length limits
+[tests]
+states
+grants
+modify
+tagging
+# CI's AWS account does not support multi-region
+# multi_region
+
+[all:vars]
+ansible_connection=local
+ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/main.yml
new file mode 100644
index 000000000..0f248fc01
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/main.yml
@@ -0,0 +1,9 @@
+# Beware: most of our tests here are run in parallel.
+# To add new tests you'll need to add a new host to the inventory and a matching
+# 'test_{{ inventory_hostname }}.yml' file in roles/aws_kms/tasks/
+
+- hosts: all
+  gather_facts: no
+  strategy: free
+  roles:
+    - aws_kms
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/defaults/main.yml
new file mode 100644
index 000000000..af2b9609a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/defaults/main.yml
@@ -0,0 +1,2 @@
+kms_key_alias: ansible-test-{{ inventory_hostname | replace('_','-') }}{{ tiny_prefix
+  }}
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/main.yml
new file mode 100644
index 000000000..2dcdcc757
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/main.yml
@@ -0,0 +1,11 @@
+- name: aws_kms integration tests
+  collections:
+    - community.aws
+  module_defaults:
+    group/aws:
+      aws_access_key: '{{ aws_access_key }}'
+      aws_secret_key: '{{ aws_secret_key }}'
+      security_token: '{{ security_token | default(omit) }}'
+      region: '{{ aws_region }}'
+  block:
+    - include: ./test_{{ inventory_hostname }}.yml
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_grants.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_grants.yml
new file mode 100644
index 000000000..071b36417
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_grants.yml
@@ -0,0 +1,350 @@
+- block:
+  # ============================================================
+  # PREPARATION
+  #
+  # Get some information about who we are before starting our tests;
+  # we'll need this as soon as we start working on the policies
+  - name: get ARN of calling user
+    aws_caller_info:
+    register: aws_caller_info
+  - name: create an IAM role that can do nothing
+    iam_role:
+      name: '{{ kms_key_alias }}'
+      state: present
+      assume_role_policy_document: '{"Version": "2012-10-17", "Statement": {"Action":
+        "sts:AssumeRole", "Principal": {"Service": "ec2.amazonaws.com"}, "Effect":
+        "Deny"} }'
+    register: iam_role_result
+  - name: create a key
+    aws_kms:
+      alias: '{{ kms_key_alias }}'
+      tags:
+        Hello: World
+      state: present
+      enabled: yes
+      enable_key_rotation: no
+    register: key
+  - name: assert that state is enabled
+    assert:
+      that:
+        - key is changed
+        - '"key_id" in key'
+        - key.key_id | length >= 36
+        - not key.key_id.startswith("arn:aws")
+        - '"key_arn" in key'
+        - key.key_arn.endswith(key.key_id)
+        - key.key_arn.startswith("arn:aws")
+        - key.key_state == "Enabled"
+        - key.enabled == True
+        - key.tags | length == 1
+        - key.tags['Hello'] == 'World'
+        - key.enable_key_rotation == false
+        - key.key_usage == 'ENCRYPT_DECRYPT'
+        - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT'
+        - key.grants | length == 0
+        - key.key_policies | length == 1
+        - key.key_policies[0].Id == 'key-default-1'
+        - key.description == ''
+
+  # ------------------------------------------------------------------------------------------
+
+  - name: Add grant - check mode
+    aws_kms:
+      alias: '{{ kms_key_alias }}'
+      state: present
+      purge_grants: yes
+      grants:
+        - name: test_grant
+          grantee_principal: '{{ iam_role_result.iam_role.arn }}'
+          retiring_principal: '{{ aws_caller_info.arn }}'
+          constraints:
+            encryption_context_equals:
+              environment: test
+              application: testapp
+          operations:
+            - Decrypt
+            - RetireGrant
+    register: key
+    check_mode: yes
+  - name: assert grant would have been added
+    assert:
+      that:
+        - key.changed
+
+  # Roles can take a little while to get ready, pause briefly to give it a chance
+  - wait_for:
+      timeout: 20
+  - name: Add grant
+    aws_kms:
+      alias: '{{ kms_key_alias }}'
+      state: present
+      purge_grants: yes
+      grants:
+        - name: test_grant
+          grantee_principal: '{{ iam_role_result.iam_role.arn }}'
+          retiring_principal: '{{ aws_caller_info.arn }}'
+          constraints:
+            encryption_context_equals:
+              environment: test
+              application: testapp
+          operations:
+            - Decrypt
+            - RetireGrant
+    register: key
+  - name: assert grant added
+    assert:
+      that:
+        - key.changed
+        - '"key_id" in key'
+        - key.key_id | length >= 36
+        - not key.key_id.startswith("arn:aws")
+        - '"key_arn" in key'
+        - key.key_arn.endswith(key.key_id)
+        - key.key_arn.startswith("arn:aws")
+        - key.key_state == "Enabled"
+        - key.enabled == True
+        - key.tags | length == 1
+        - key.tags['Hello'] == 'World'
+        - key.enable_key_rotation == false
+        - key.key_usage == 'ENCRYPT_DECRYPT'
+        - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT'
+        - key.grants | length == 1
+        - key.key_policies | length == 1
+        - key.key_policies[0].Id == 'key-default-1'
+        - key.description == ''
+
+  - name: Sleep to wait for updates to propagate
+    wait_for:
+      timeout: 45
+  - name: Add grant (idempotence) - check mode
+    aws_kms:
+      alias: '{{ kms_key_alias }}'
+      state: present
+      purge_grants: yes
+      grants:
+        - name: test_grant
+          grantee_principal: '{{ iam_role_result.iam_role.arn }}'
+          retiring_principal: '{{ aws_caller_info.arn }}'
+          constraints:
+            encryption_context_equals:
+              environment: test
+              application: testapp
+          operations:
+            - Decrypt
+            - RetireGrant
+    register: key
+    check_mode: yes
+  - assert:
+      that:
+        - not key.changed
+
+  - name: Add grant (idempotence)
+    aws_kms:
+      alias: '{{ kms_key_alias }}'
+      state: present
+      purge_grants: yes
+      grants:
+        - name: test_grant
+          grantee_principal: '{{ iam_role_result.iam_role.arn }}'
+          retiring_principal: '{{ aws_caller_info.arn }}'
+          constraints:
+            encryption_context_equals:
+              environment: test
+              application: testapp
+          operations:
+            - Decrypt
+            - RetireGrant
+    register: key
+  - assert:
+      that:
+        - not key.changed
+        - '"key_id" in key'
+        - key.key_id | length >= 36
+        - not key.key_id.startswith("arn:aws")
+        - '"key_arn" in key'
+        - key.key_arn.endswith(key.key_id)
+        - key.key_arn.startswith("arn:aws")
+        - key.key_state == "Enabled"
+        - key.enabled == True
+        - key.tags | length == 1
+        - key.tags['Hello'] == 'World'
+        - key.enable_key_rotation == false
+        - key.key_usage == 'ENCRYPT_DECRYPT'
+        - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT'
+        - key.grants | length == 1
+        - key.key_policies | length == 1
+        - key.key_policies[0].Id == 'key-default-1'
+        - key.description == ''
+
+  - name: Add a second grant
+    aws_kms:
+      alias: '{{ kms_key_alias }}'
+      state: present
+      grants:
+        - name: another_grant
+          grantee_principal: '{{ iam_role_result.iam_role.arn }}'
+          retiring_principal: '{{ aws_caller_info.arn }}'
+ constraints: + encryption_context_equals: + Environment: second + Application: anotherapp + operations: + - Decrypt + - RetireGrant + register: key + - name: Assert grant added + assert: + that: + - key.changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == false + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 2 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + + - name: Sleep to wait for updates to propagate + wait_for: + timeout: 45 + - name: Add a second grant again + aws_kms: + alias: '{{ kms_key_alias }}' + state: present + grants: + - name: another_grant + grantee_principal: '{{ iam_role_result.iam_role.arn }}' + retiring_principal: '{{ aws_caller_info.arn }}' + constraints: + encryption_context_equals: + Environment: second + Application: anotherapp + operations: + - Decrypt + - RetireGrant + register: key + - name: Assert grant added + assert: + that: + - not key.changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == false + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 2 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + + - name: Update the grants with purge_grants set + aws_kms: + alias: '{{ kms_key_alias }}' + state: present + purge_grants: yes + grants: + - name: third_grant + grantee_principal: '{{ iam_role_result.iam_role.arn }}' + retiring_principal: '{{ aws_caller_info.arn }}' + constraints: + encryption_context_equals: + environment: third + application: onemoreapp + operations: + - Decrypt + - RetireGrant + register: key + - name: Assert grants replaced + assert: + that: + - key.changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == false + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 1 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + + - name: Update third grant to change encryption context equals to subset + aws_kms: + alias: '{{ kms_key_alias }}' + state: present + grants: + - name: third_grant + grantee_principal: '{{ iam_role_result.iam_role.arn }}' + retiring_principal: '{{ aws_caller_info.arn }}' + constraints: + encryption_context_subset: + environment: third + application: onemoreapp + operations: + - Decrypt + - RetireGrant + register: key + - name: Assert grants replaced + assert: + that: + - key.changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in 
key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == false + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 1 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + - "'encryption_context_equals' not in key.grants[0].constraints" + - "'encryption_context_subset' in key.grants[0].constraints" + + always: + # ============================================================ + # CLEAN-UP + - name: finish off by deleting keys + aws_kms: + state: absent + alias: '{{ kms_key_alias }}' + pending_window: 7 + ignore_errors: true + - name: remove the IAM role + iam_role: + name: '{{ kms_key_alias }}' + state: absent + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_modify.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_modify.yml new file mode 100644 index 000000000..223074a3e --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_modify.yml @@ -0,0 +1,279 @@ +- block: + # ============================================================ + # PREPARATION + # + # Get some information about who we are before starting our tests + # we'll need this as soon as we start working on the policies + - name: get ARN of calling user + aws_caller_info: + register: aws_caller_info + - name: create an IAM role that can do nothing + iam_role: + name: '{{ kms_key_alias }}' + state: present + assume_role_policy_document: '{"Version": "2012-10-17", "Statement": {"Action": + "sts:AssumeRole", "Principal": {"Service": "ec2.amazonaws.com"}, "Effect": + "Deny"} }' + register: iam_role_result + - name: create a key + aws_kms: + alias: '{{ kms_key_alias }}' + tags: + Hello: World + state: present + enabled: yes + enable_key_rotation: no + register: key + - name: assert that state is enabled + assert: + that: + - key is changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == false + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + + # ------------------------------------------------------------------------------------------ + + - name: Save IDs for later + set_fact: + kms_key_id: '{{ key.key_id }}' + kms_key_arn: '{{ key.key_arn }}' + - name: find facts about the key (by ID) + aws_kms_info: + key_id: '{{ kms_key_id }}' + register: new_key + - name: check that a key was found + assert: + that: + - '"key_id" in new_key.kms_keys[0]' + - new_key.kms_keys[0].key_id | length >= 36 + - not new_key.kms_keys[0].key_id.startswith("arn:aws") + - '"key_arn" in new_key.kms_keys[0]' + - new_key.kms_keys[0].key_arn.endswith(new_key.kms_keys[0].key_id) + - new_key.kms_keys[0].key_arn.startswith("arn:aws") + - new_key.kms_keys[0].key_state == "Enabled" + - new_key.kms_keys[0].enabled == True + - new_key.kms_keys[0].tags | 
length == 1 + - new_key.kms_keys[0].tags['Hello'] == 'World' + - new_key.kms_keys[0].enable_key_rotation == False + - new_key.kms_keys[0].key_usage == 'ENCRYPT_DECRYPT' + - new_key.kms_keys[0].customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - new_key.kms_keys[0].grants | length == 0 + - new_key.kms_keys[0].key_policies | length == 1 + - new_key.kms_keys[0].key_policies[0].Id == 'key-default-1' + - new_key.kms_keys[0].description == '' + + - name: Update policy - check mode + aws_kms: + key_id: '{{ kms_key_id }}' + policy: "{{ lookup('template', 'console-policy.j2') }}" + register: key + check_mode: yes + - assert: + that: + - key is changed + + - name: Update policy + aws_kms: + key_id: '{{ kms_key_id }}' + policy: "{{ lookup('template', 'console-policy.j2') }}" + register: key + - name: Policy should have been changed + assert: + that: + - key is changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == false + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-consolepolicy-3' + - key.description == '' + + - name: Sleep to wait for updates to propagate + wait_for: + timeout: 45 + - name: Update policy (idempotence) - check mode + aws_kms: + alias: alias/{{ kms_key_alias }} + policy: "{{ lookup('template', 'console-policy.j2') }}" + register: key + check_mode: yes + - assert: + that: + - not key.changed + + - name: Update policy (idempotence) + aws_kms: + alias: alias/{{ kms_key_alias }} + policy: "{{ lookup('template', 'console-policy.j2') }}" + register: key + - assert: + that: + - not key.changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == false + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-consolepolicy-3' + - key.description == '' + + # ------------------------------------------------------------------------------------------ + + - name: Update description - check mode + aws_kms: + alias: '{{ kms_key_alias }}' + state: present + description: test key for testing + register: key + check_mode: yes + - assert: + that: + - key.changed + + - name: Update description + aws_kms: + alias: '{{ kms_key_alias }}' + state: present + description: test key for testing + register: key + - assert: + that: + - key.changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == false + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 
'key-consolepolicy-3' + - key.description == 'test key for testing' + + - name: Sleep to wait for updates to propagate + wait_for: + timeout: 45 + - name: Update description (idempotence) - check mode + aws_kms: + alias: '{{ kms_key_alias }}' + state: present + description: test key for testing + register: key + check_mode: yes + - assert: + that: + - not key.changed + + - name: Update description (idempotence) + aws_kms: + alias: '{{ kms_key_alias }}' + state: present + description: test key for testing + register: key + - assert: + that: + - not key.changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == false + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-consolepolicy-3' + - key.description == 'test key for testing' + + # ------------------------------------------------------------------------------------------ + + - name: update policy to remove access to key rotation status + aws_kms: + alias: alias/{{ kms_key_alias }} + policy: "{{ lookup('template', 'console-policy-no-key-rotation.j2') }}" + register: key + - assert: + that: + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation is none + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-consolepolicy-3' + - key.description == 'test key for testing' + - "'Disable access to key rotation status' in {{ key.key_policies[0].Statement\ + \ | map(attribute='Sid') }}" + + always: + # ============================================================ + # CLEAN-UP + - name: finish off by deleting keys + aws_kms: + state: absent + alias: '{{ kms_key_alias }}' + pending_window: 7 + ignore_errors: true + - name: remove the IAM role + iam_role: + name: '{{ kms_key_alias }}' + state: absent + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_multi_region.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_multi_region.yml new file mode 100644 index 000000000..c112b4571 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_multi_region.yml @@ -0,0 +1,100 @@ +- block: + # ============================================================ + # PREPARATION + # + # Get some information about who we are before starting our tests + # we'll need this as soon as we start working on the policies + - name: get ARN of calling user + aws_caller_info: + register: aws_caller_info + - name: See whether key exists and its current state + kms_key_info: + alias: '{{ kms_key_alias }}' + - name: create a multi region key - check mode + kms_key: + alias: '{{ kms_key_alias }}-check' + tags: + Hello: World + state: present + multi_region: True + enabled: yes + register: 
key_check + check_mode: yes + - name: find facts about the check mode key + kms_key_info: + alias: '{{ kms_key_alias }}-check' + register: check_key + - name: ensure that check mode worked as expected + assert: + that: + - check_key.kms_keys | length == 0 + - key_check is changed + + - name: create a multi region key + kms_key: + alias: '{{ kms_key_alias }}' + tags: + Hello: World + state: present + enabled: yes + multi_region: True + enable_key_rotation: no + register: key + - name: assert that state is enabled + assert: + that: + - key is changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == false + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + - key.multi_region == True + + - name: Sleep to wait for updates to propagate + wait_for: + timeout: 45 + + - name: create a key (expect failure) + kms_key: + alias: '{{ kms_key_alias }}' + tags: + Hello: World + state: present + enabled: yes + multi_region: True + register: result + ignore_errors: True + + - assert: + that: + - result is failed + - result.msg != "MODULE FAILURE" + - result.changed == False + - '"You cannot change the multi-region property on an existing key." in result.msg' + + always: + # ============================================================ + # CLEAN-UP + - name: finish off by deleting keys + kms_key: + state: absent + alias: '{{ item }}' + pending_window: 7 + ignore_errors: true + loop: + - '{{ kms_key_alias }}' + - '{{ kms_key_alias }}-diff-spec-usage' + - '{{ kms_key_alias }}-check' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_states.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_states.yml new file mode 100644 index 000000000..917410c50 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_states.yml @@ -0,0 +1,522 @@ +- block: + # ============================================================ + # PREPARATION + # + # Get some information about who we are before starting our tests + # we'll need this as soon as we start working on the policies + - name: get ARN of calling user + aws_caller_info: + register: aws_caller_info + - name: See whether key exists and its current state + aws_kms_info: + alias: '{{ kms_key_alias }}' + - name: create a key - check mode + aws_kms: + alias: '{{ kms_key_alias }}-check' + tags: + Hello: World + state: present + enabled: yes + register: key_check + check_mode: yes + - name: find facts about the check mode key + aws_kms_info: + alias: '{{ kms_key_alias }}-check' + register: check_key + - name: ensure that check mode worked as expected + assert: + that: + - check_key.kms_keys | length == 0 + - key_check is changed + + - name: create a key + aws_kms: + alias: '{{ kms_key_alias }}' + tags: + Hello: World + state: present + enabled: yes + enable_key_rotation: no + register: key + - name: assert that state is enabled + assert: + that: + - key is changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in 
key'
+        - key.key_arn.endswith(key.key_id)
+        - key.key_arn.startswith("arn:aws")
+        - key.key_state == "Enabled"
+        - key.enabled == True
+        - key.tags | length == 1
+        - key.tags['Hello'] == 'World'
+        - key.enable_key_rotation == false
+        - key.key_usage == 'ENCRYPT_DECRYPT'
+        - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT'
+        - key.grants | length == 0
+        - key.key_policies | length == 1
+        - key.key_policies[0].Id == 'key-default-1'
+        - key.description == ''
+        - key.multi_region == False
+
+  - name: Sleep to wait for updates to propagate
+    wait_for:
+      timeout: 45
+  - name: create a key (idempotence) - check mode
+    aws_kms:
+      alias: '{{ kms_key_alias }}'
+      tags:
+        Hello: World
+      state: present
+      enabled: yes
+    register: key
+    check_mode: yes
+  - assert:
+      that:
+        - key is not changed
+
+  - name: create a key (idempotence)
+    aws_kms:
+      alias: '{{ kms_key_alias }}'
+      tags:
+        Hello: World
+      state: present
+      enabled: yes
+    register: key
+  - assert:
+      that:
+        - key is not changed
+        - '"key_id" in key'
+        - key.key_id | length >= 36
+        - not key.key_id.startswith("arn:aws")
+        - '"key_arn" in key'
+        - key.key_arn.endswith(key.key_id)
+        - key.key_arn.startswith("arn:aws")
+        - key.key_state == "Enabled"
+        - key.enabled == True
+        - key.tags | length == 1
+        - key.tags['Hello'] == 'World'
+        - key.enable_key_rotation == false
+        - key.key_usage == 'ENCRYPT_DECRYPT'
+        - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT'
+        - key.grants | length == 0
+        - key.key_policies | length == 1
+        - key.key_policies[0].Id == 'key-default-1'
+        - key.description == ''
+        - key.multi_region == False
+
+  # ------------------------------------------------------------------------------------------
+
+  - name: Save IDs for later
+    set_fact:
+      kms_key_id: '{{ key.key_id }}'
+      kms_key_arn: '{{ key.key_arn }}'
+  - name: Enable key rotation - check mode
+    aws_kms:
+      alias: '{{ kms_key_alias }}'
+      tags:
+        Hello: World
+      state: present
+      enabled: yes
+      enable_key_rotation: yes
+    register: key
+    check_mode: yes
+  - assert:
+      that:
+        - key.changed
+
+  - name: Enable key rotation
+    aws_kms:
+      alias: '{{ kms_key_alias }}'
+      tags:
+        Hello: World
+      state: present
+      enabled: yes
+      enable_key_rotation: yes
+    register: key
+  - name: assert that key rotation is enabled
+    assert:
+      that:
+        - key is changed
+        - '"key_id" in key'
+        - key.key_id | length >= 36
+        - not key.key_id.startswith("arn:aws")
+        - '"key_arn" in key'
+        - key.key_arn.endswith(key.key_id)
+        - key.key_arn.startswith("arn:aws")
+        - key.key_state == "Enabled"
+        - key.enabled == True
+        - key.tags | length == 1
+        - key.tags['Hello'] == 'World'
+        - key.enable_key_rotation == True
+        - key.key_usage == 'ENCRYPT_DECRYPT'
+        - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT'
+        - key.grants | length == 0
+        - key.key_policies | length == 1
+        - key.key_policies[0].Id == 'key-default-1'
+        - key.description == ''
+
+  - name: Sleep to wait for updates to propagate
+    wait_for:
+      timeout: 45
+  - name: Enable key rotation (idempotence) - check mode
+    aws_kms:
+      alias: '{{ kms_key_alias }}'
+      tags:
+        Hello: World
+      state: present
+      enabled: yes
+      enable_key_rotation: yes
+    register: key
+    check_mode: yes
+  - assert:
+      that:
+        - not key.changed
+
+  - name: Enable key rotation (idempotence)
+    aws_kms:
+      alias: '{{ kms_key_alias }}'
+      tags:
+        Hello: World
+      state: present
+      enabled: yes
+      enable_key_rotation: yes
+    register: key
+  - assert:
+      that:
+        - not key is changed
+        - '"key_id" in key'
+        - key.key_id | length >= 36
+        - not key.key_id.startswith("arn:aws")
+        - '"key_arn"
in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == True + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + + # ------------------------------------------------------------------------------------------ + + - name: Disable key - check mode + aws_kms: + alias: '{{ kms_key_alias }}' + state: present + enabled: no + register: key + check_mode: yes + - assert: + that: + - key.changed + + - name: Disable key + aws_kms: + alias: '{{ kms_key_alias }}' + state: present + enabled: no + register: key + - name: assert that state is disabled + assert: + that: + - key is changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Disabled" + - key.enabled == False + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == True + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + + - name: Sleep to wait for updates to propagate + wait_for: + timeout: 45 + - name: Disable key (idempotence) - check mode + aws_kms: + alias: '{{ kms_key_alias }}' + state: present + enabled: no + register: key + check_mode: yes + - assert: + that: + - not key.changed + + - name: Disable key (idempotence) + aws_kms: + alias: '{{ kms_key_alias }}' + state: present + enabled: no + register: key + - assert: + that: + - not key.changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Disabled" + - key.enabled == False + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == True + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + + # ------------------------------------------------------------------------------------------ + + - name: Delete key - check mode + aws_kms: + alias: '{{ kms_key_alias }}' + state: absent + register: key + check_mode: yes + - assert: + that: + - key is changed + + - name: Delete key + aws_kms: + alias: '{{ kms_key_alias }}' + state: absent + register: key + - name: Sleep to wait for updates to propagate + wait_for: + timeout: 45 + - name: Assert that state is pending deletion + vars: + now_time: '{{ lookup("pipe", "date -u +%Y-%m-%d\ %H:%M:%S") }}' + deletion_time: '{{ key.deletion_date[:19] | to_datetime("%Y-%m-%dT%H:%M:%S") + }}' + assert: + that: + - key.changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "PendingDeletion" + - key.enabled == False + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == False + - 
key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + # Times won't be perfect, allow a 24 hour window + - (( deletion_time | to_datetime ) - ( now_time | to_datetime )).days <= 30 + - (( deletion_time | to_datetime ) - ( now_time | to_datetime )).days >= 29 + + - name: Delete key (idempotence) - check mode + aws_kms: + alias: '{{ kms_key_alias }}' + state: absent + register: key + check_mode: yes + - assert: + that: + - not key.changed + + - name: Delete key (idempotence) + aws_kms: + alias: '{{ kms_key_alias }}' + state: absent + register: key + - vars: + now_time: '{{ lookup("pipe", "date -u +%Y-%m-%d\ %H:%M:%S") }}' + deletion_time: '{{ key.deletion_date[:19] | to_datetime("%Y-%m-%dT%H:%M:%S") + }}' + assert: + that: + - not key.changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "PendingDeletion" + - key.enabled == False + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == False + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + # Times won't be perfect, allow a 24 hour window + - (( deletion_time | to_datetime ) - ( now_time | to_datetime )).days <= 30 + - (( deletion_time | to_datetime ) - ( now_time | to_datetime )).days >= 29 + + # ------------------------------------------------------------------------------------------ + + - name: Cancel key deletion - check mode + aws_kms: + alias: '{{ kms_key_alias }}' + state: present + register: key + check_mode: yes + - assert: + that: + - key.changed + + - name: Cancel key deletion + aws_kms: + alias: '{{ kms_key_alias }}' + state: present + register: key + - assert: + that: + - key.changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == True + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + - "'deletion_date' not in key" + + - name: Sleep to wait for updates to propagate + wait_for: + timeout: 45 + - name: Cancel key deletion (idempotence) - check mode + aws_kms: + alias: '{{ kms_key_alias }}' + state: present + register: key + check_mode: yes + - assert: + that: + - not key.changed + + - name: Cancel key deletion (idempotence) + aws_kms: + alias: '{{ kms_key_alias }}' + state: present + register: key + - assert: + that: + - not key.changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == True + - key.key_usage == 'ENCRYPT_DECRYPT' + - 
key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + - "'deletion_date' not in key" + + # ------------------------------------------------------------------------------------------ + + - name: delete the key with a specific deletion window + aws_kms: + alias: '{{ kms_key_alias }}' + state: absent + pending_window: 7 + register: delete_kms + - name: Sleep to wait for updates to propagate + wait_for: + timeout: 45 + - name: assert that state is pending deletion + vars: + now_time: '{{ lookup("pipe", "date -u +%Y-%m-%d\ %H:%M:%S") }}' + deletion_time: '{{ delete_kms.deletion_date[:19] | to_datetime("%Y-%m-%dT%H:%M:%S") + }}' + assert: + that: + - delete_kms.key_state == "PendingDeletion" + - delete_kms.changed + # Times won't be perfect, allow a 24 hour window + - (( deletion_time | to_datetime ) - ( now_time | to_datetime )).days <= 7 + - (( deletion_time | to_datetime ) - ( now_time | to_datetime )).days >= 6 + + # ============================================================ + # test different key usage and specs + - name: create kms key with different specs + aws_kms: + alias: '{{ kms_key_alias }}-diff-spec-usage' + purge_grants: yes + key_spec: ECC_NIST_P256 + key_usage: SIGN_VERIFY + register: create_diff_kms + - name: Sleep to wait for updates to propagate + wait_for: + timeout: 45 + - name: verify different specs on kms key + assert: + that: + - '"key_id" in create_diff_kms' + - create_diff_kms.key_id | length >= 36 + - not create_diff_kms.key_id.startswith("arn:aws") + - '"key_arn" in create_diff_kms' + - create_diff_kms.key_arn.endswith(create_diff_kms.key_id) + - create_diff_kms.key_arn.startswith("arn:aws") + - create_diff_kms.key_usage == 'SIGN_VERIFY' + - create_diff_kms.customer_master_key_spec == 'ECC_NIST_P256' + + always: + # ============================================================ + # CLEAN-UP + - name: finish off by deleting keys + aws_kms: + state: absent + alias: '{{ item }}' + pending_window: 7 + ignore_errors: true + loop: + - '{{ kms_key_alias }}' + - '{{ kms_key_alias }}-diff-spec-usage' + - '{{ kms_key_alias }}-check' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_tagging.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_tagging.yml new file mode 100644 index 000000000..7d53b1dad --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_tagging.yml @@ -0,0 +1,187 @@ +- block: + # ============================================================ + # PREPARATION + # + # Get some information about who we are before starting our tests + # we'll need this as soon as we start working on the policies + - name: get ARN of calling user + aws_caller_info: + register: aws_caller_info + - name: create a key + aws_kms: + alias: '{{ kms_key_alias }}' + tags: + Hello: World + state: present + enabled: yes + enable_key_rotation: no + register: key + - name: assert that state is enabled + assert: + that: + - key is changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == false + - key.key_usage == 'ENCRYPT_DECRYPT' + 
- key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + + # ------------------------------------------------------------------------------------------ + + - name: Tag encryption key + aws_kms: + alias: '{{ kms_key_alias }}' + state: present + tags: + tag_one: tag_one + tag_two: tag_two + purge_tags: no + register: key + - name: Assert tags added + assert: + that: + - key.changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 3 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == false + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + - "'tag_one' in key.tags" + - "'tag_two' in key.tags" + + - name: Sleep to wait for updates to propagate + wait_for: + timeout: 45 + - name: Modify tags - check mode + aws_kms: + alias: '{{ kms_key_alias }}' + state: present + purge_tags: yes + tags: + tag_two: tag_two_updated + Tag Three: '{{ resource_prefix }}' + register: key + check_mode: yes + - assert: + that: + - key.changed + + - name: Modify tags + aws_kms: + alias: '{{ kms_key_alias }}' + state: present + purge_tags: yes + tags: + tag_two: tag_two_updated + Tag Three: '{{ resource_prefix }}' + register: key + - name: Assert tags correctly changed + assert: + that: + - key.changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 2 + - key.enable_key_rotation == false + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + - "'tag_one' not in key.tags" + - "'tag_two' in key.tags" + - key.tags.tag_two == 'tag_two_updated' + - "'Tag Three' in key.tags" + - key.tags['Tag Three'] == resource_prefix + + - name: Sleep to wait for updates to propagate + wait_for: + timeout: 45 + - name: Modify tags (idempotence) - check mode + aws_kms: + alias: '{{ kms_key_alias }}' + state: present + purge_tags: yes + tags: + tag_two: tag_two_updated + Tag Three: '{{ resource_prefix }}' + register: key + check_mode: yes + - assert: + that: + - not key.changed + + - name: Modify tags (idempotence) + aws_kms: + alias: '{{ kms_key_alias }}' + state: present + purge_tags: yes + tags: + tag_two: tag_two_updated + Tag Three: '{{ resource_prefix }}' + register: key + - assert: + that: + - not key.changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 2 + - key.enable_key_rotation == false + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 
'key-default-1' + - key.description == '' + - "'tag_one' not in key.tags" + - "'tag_two' in key.tags" + - key.tags.tag_two == 'tag_two_updated' + - "'Tag Three' in key.tags" + - key.tags['Tag Three'] == resource_prefix + + always: + # ============================================================ + # CLEAN-UP + - name: finish off by deleting keys + aws_kms: + state: absent + alias: '{{ kms_key_alias }}' + pending_window: 7 + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/templates/console-policy-no-key-rotation.j2 b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/templates/console-policy-no-key-rotation.j2 new file mode 100644 index 000000000..0e019d202 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/templates/console-policy-no-key-rotation.j2 @@ -0,0 +1,81 @@ +{ + "Id": "key-consolepolicy-3", + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "Enable IAM User Permissions", + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam::{{ aws_caller_info.account }}:root" + }, + "Action": "kms:*", + "Resource": "*" + }, + { + "Sid": "Allow access for Key Administrators", + "Effect": "Allow", + "Principal": { + "AWS": "{{ aws_caller_info.arn }}" + }, + "Action": [ + "kms:Create*", + "kms:Describe*", + "kms:Enable*", + "kms:List*", + "kms:Put*", + "kms:Update*", + "kms:Revoke*", + "kms:Disable*", + "kms:Get*", + "kms:Delete*", + "kms:TagResource", + "kms:UntagResource", + "kms:ScheduleKeyDeletion", + "kms:CancelKeyDeletion" + ], + "Resource": "*" + }, + { + "Sid": "Allow use of the key", + "Effect": "Allow", + "Principal": { + "AWS": "{{ aws_caller_info.arn }}" + }, + "Action": [ + "kms:Encrypt", + "kms:Decrypt", + "kms:ReEncrypt*", + "kms:GenerateDataKey*", + "kms:DescribeKey" + ], + "Resource": "*" + }, + { + "Sid": "Allow attachment of persistent resources", + "Effect": "Allow", + "Principal": { + "AWS": "{{ aws_caller_info.arn }}" + }, + "Action": [ + "kms:CreateGrant", + "kms:ListGrants", + "kms:RevokeGrant" + ], + "Resource": "*", + "Condition": { + "Bool": { + "kms:GrantIsForAWSResource": "true" + } + } + }, + { + "Sid": "Disable access to key rotation status", + "Effect": "Deny", + "Principal": { + "AWS": "{{ aws_caller_info.arn }}" + }, + "Action": "kms:GetKeyRotationStatus", + "Resource": "*" + } + ] +} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/templates/console-policy.j2 b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/templates/console-policy.j2 new file mode 100644 index 000000000..4b60ba588 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/templates/console-policy.j2 @@ -0,0 +1,72 @@ +{ + "Id": "key-consolepolicy-3", + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "Enable IAM User Permissions", + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam::{{ aws_caller_info.account }}:root" + }, + "Action": "kms:*", + "Resource": "*" + }, + { + "Sid": "Allow access for Key Administrators", + "Effect": "Allow", + "Principal": { + "AWS": "{{ aws_caller_info.arn }}" + }, + "Action": [ + "kms:Create*", + "kms:Describe*", + "kms:Enable*", + "kms:List*", + "kms:Put*", + "kms:Update*", + "kms:Revoke*", + "kms:Disable*", + "kms:Get*", + "kms:Delete*", + "kms:TagResource", + "kms:UntagResource", + "kms:ScheduleKeyDeletion", + "kms:CancelKeyDeletion" + ], + "Resource": "*" + }, + { + "Sid": "Allow use of 
the key", + "Effect": "Allow", + "Principal": { + "AWS": "{{ aws_caller_info.arn }}" + }, + "Action": [ + "kms:Encrypt", + "kms:Decrypt", + "kms:ReEncrypt*", + "kms:GenerateDataKey*", + "kms:DescribeKey" + ], + "Resource": "*" + }, + { + "Sid": "Allow attachment of persistent resources", + "Effect": "Allow", + "Principal": { + "AWS": "{{ aws_caller_info.arn }}" + }, + "Action": [ + "kms:CreateGrant", + "kms:ListGrants", + "kms:RevokeGrant" + ], + "Resource": "*", + "Condition": { + "Bool": { + "kms:GrantIsForAWSResource": "true" + } + } + } + ] +} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/runme.sh b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/runme.sh new file mode 100755 index 000000000..5b5b69fbd --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/runme.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash +# +# Beware: most of our tests here are run in parallel. +# To add new tests you'll need to add a new host to the inventory and a matching +# '{{ inventory_hostname }}'.yml file in roles/aws_kms/tasks/ + + +set -eux + +export ANSIBLE_ROLES_PATH=../ + +ansible-playbook main.yml -i inventory "$@" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda/aliases b/ansible_collections/amazon/aws/tests/integration/targets/lambda/aliases new file mode 100644 index 000000000..f6bf003fe --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda/aliases @@ -0,0 +1,4 @@ +cloud/aws + +lambda_execute +lambda_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda/defaults/main.yml new file mode 100644 index 000000000..63414fbfd --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda/defaults/main.yml @@ -0,0 +1,13 @@ +# defaults file for lambda integration test +# IAM role names have to be less than 64 characters +# we hash the resource_prefix to get a shorter, unique string +lambda_function_name: '{{ tiny_prefix }}' +lambda_role_name: ansible-test-{{ tiny_prefix }}-lambda + +lambda_python_runtime: python3.9 +lambda_python_handler: mini_lambda.handler +lambda_python_layers_names: + - "{{ tiny_prefix }}-layer-01" + - "{{ tiny_prefix }}-layer-02" +lambda_function_name_with_layer: '{{ tiny_prefix }}-func-with-layer' +lambda_function_name_with_multiple_layer: '{{ tiny_prefix }}-func-with-mutiplelayer' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda/files/mini_lambda.py b/ansible_collections/amazon/aws/tests/integration/targets/lambda/files/mini_lambda.py new file mode 100644 index 000000000..901f6b55a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda/files/mini_lambda.py @@ -0,0 +1,48 @@ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import os + + +def handler(event, context): + """ + The handler function is the function which gets called each time + the lambda is run. + """ + # printing goes to the cloudwatch log allowing us to simply debug the lambda if we can find + # the log entry. + print("got event:\n" + json.dumps(event)) + + # if the name parameter isn't present this can throw an exception + # which will result in an amazon chosen failure from the lambda + # which can be completely fine. 
+ + name = event["name"] + + # we can use environment variables as part of the configuration of the lambda + # which can change the behaviour of the lambda without needing a new upload + + extra = os.environ.get("EXTRA_MESSAGE") + if extra is not None and len(extra) > 0: + greeting = "hello {0}. {1}".format(name, extra) + else: + greeting = "hello " + name + + return {"message": greeting} + + +def main(): + """ + This main function will normally never be called during normal + lambda use. It is here for testing the lambda program only. + """ + event = {"name": "james"} + context = None + print(handler(event, context)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda/files/minimal_trust_policy.json b/ansible_collections/amazon/aws/tests/integration/targets/lambda/files/minimal_trust_policy.json new file mode 100644 index 000000000..fb84ae9de --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda/files/minimal_trust_policy.json @@ -0,0 +1,12 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": "lambda.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] +} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda/meta/main.yml new file mode 100644 index 000000000..409583a2c --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda/meta/main.yml @@ -0,0 +1,5 @@ +dependencies: +- role: setup_botocore_pip + vars: + botocore_version: 1.21.51 +- role: setup_remote_tmp_dir diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda/tasks/main.yml new file mode 100644 index 000000000..443a8327f --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda/tasks/main.yml @@ -0,0 +1,788 @@ +- name: set connection information for AWS modules and run tests + module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + collections: + - community.general + block: + # Preparation + - name: create minimal lambda role + iam_role: + name: '{{ lambda_role_name }}' + assume_role_policy_document: '{{ lookup("file", "minimal_trust_policy.json") + }}' + create_instance_profile: false + managed_policies: + - arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess + register: iam_role + - name: wait 10 seconds for role to become available + pause: + seconds: 10 + when: iam_role.changed + - name: move lambda into place for archive module + copy: + src: mini_lambda.py + dest: '{{ output_dir }}/mini_lambda.py' + mode: preserve + - name: bundle lambda into a zip + register: zip_res + archive: + format: zip + path: '{{ output_dir }}/mini_lambda.py' + dest: '{{ output_dir }}/mini_lambda.zip' + + # Parameter tests + - name: test with no parameters + lambda: + register: result + ignore_errors: true + - name: assert failure when called with no parameters + assert: + that: + - result.failed + - 'result.msg.startswith("missing required arguments: ")' + - '"name" in result.msg' + + - name: test with no parameters except state absent + lambda: + state: absent + register: result + ignore_errors: true + - name: assert failure when called with no parameters + assert: + that: + - result.failed + - 
'result.msg.startswith("missing required arguments: name")' + + - name: test with no role or handler + lambda: + name: ansible-testing-fake-should-not-be-created + runtime: '{{ lambda_python_runtime }}' + register: result + ignore_errors: true + - name: assert failure when called with no parameters + assert: + that: + - result.failed + - 'result.msg.startswith("state is present but all of the following are missing: + ")' + - '"handler" in result.msg' + - '"role" in result.msg' + + - name: test execute lambda with no function arn or name + execute_lambda: + register: result + ignore_errors: true + - name: assert failure when called with no parameters + assert: + that: + - result.failed + - "result.msg == 'one of the following is required: name, function_arn'" + + - name: test state=present with security group but no vpc + lambda: + name: '{{ lambda_function_name }}' + runtime: '{{ lambda_python_runtime }}' + role: '{{ lambda_role_name }}' + zip_file: '{{ zip_res.dest }}' + handler: '{{ omit }}' + description: '{{ omit }}' + vpc_subnet_ids: '{{ omit }}' + vpc_security_group_ids: sg-FA6E + environment_variables: '{{ omit }}' + dead_letter_arn: '{{ omit }}' + register: result + ignore_errors: true + - name: assert lambda fails with proper message + assert: + that: + - result is failed + - result.msg != "MODULE FAILURE" + - result.changed == False + - '"parameters are required together" in result.msg' + + - name: test state=present with incomplete layers + lambda: + name: '{{ lambda_function_name }}' + runtime: '{{ lambda_python_runtime }}' + role: '{{ lambda_role_name }}' + handler: mini_lambda.handler + zip_file: '{{ zip_res.dest }}' + layers: + - layer_name: test-layer + check_mode: true + register: result + ignore_errors: true + - name: assert lambda fails with proper message + assert: + that: + - result is failed + - result is not changed + - '"parameters are required together: layer_name, version found in layers" in result.msg' + + - name: test state=present with incomplete layers + lambda: + name: '{{ lambda_function_name }}' + runtime: '{{ lambda_python_runtime }}' + role: '{{ lambda_role_name }}' + handler: mini_lambda.handler + zip_file: '{{ zip_res.dest }}' + layers: + - layer_version_arn: 'arn:aws:lambda:us-east-2:123456789012:layer:blank-java-lib:7' + version: 9 + check_mode: true + register: result + ignore_errors: true + - name: assert lambda fails with proper message + assert: + that: + - result is failed + - result is not changed + - '"parameters are mutually exclusive: version|layer_version_arn found in layers" in result.msg' + + # Prepare minimal Lambda + - name: test state=present - upload the lambda (check mode) + lambda: + name: '{{ lambda_function_name }}' + runtime: '{{ lambda_python_runtime }}' + handler: '{{ lambda_python_handler }}' + role: '{{ lambda_role_name }}' + zip_file: '{{ zip_res.dest }}' + architecture: arm64 + vars: + ansible_python_interpreter: '{{ botocore_virtualenv_interpreter }}' + register: result + check_mode: yes + - name: assert lambda upload succeeded + assert: + that: + - result.changed + + - name: test state=present - upload the lambda + lambda: + name: '{{ lambda_function_name }}' + runtime: '{{ lambda_python_runtime }}' + handler: '{{ lambda_python_handler }}' + role: '{{ lambda_role_name }}' + zip_file: '{{ zip_res.dest }}' + architecture: arm64 + vars: + ansible_python_interpreter: '{{ botocore_virtualenv_interpreter }}' + register: result + - name: assert lambda upload succeeded + assert: + that: + - result.changed + - 
result.configuration.tracing_config.mode == "PassThrough" + - result.configuration.architectures == ['arm64'] + + - include_tasks: tagging.yml + + # Test basic operation of Uploaded lambda + - name: test lambda works (check mode) + execute_lambda: + name: '{{lambda_function_name}}' + payload: + name: Mr Ansible Tests + register: result + check_mode: yes + - name: assert check mode works correctly + assert: + that: + - result.changed + - "'result' not in result" + + - name: test lambda works + execute_lambda: + name: '{{lambda_function_name}}' + payload: + name: Mr Ansible Tests + register: result + - name: assert lambda manages to respond as expected + assert: + that: + - result is not failed + - result.result.output.message == "hello Mr Ansible Tests" + + # Test updating Lambda + - name: test lambda config updates (check mode) + lambda: + name: '{{lambda_function_name}}' + runtime: nodejs14.x + tracing_mode: Active + handler: '{{ lambda_python_handler }}' + role: '{{ lambda_role_name }}' + tags: + CamelCase: ACamelCaseValue + snake_case: a_snake_case_value + Spaced key: A value with spaces + register: update_result + check_mode: yes + - name: assert that update succeeded + assert: + that: + - update_result is not failed + - update_result.changed == True + + - name: test lambda config updates + lambda: + name: '{{lambda_function_name}}' + runtime: nodejs14.x + tracing_mode: Active + handler: '{{ lambda_python_handler }}' + role: '{{ lambda_role_name }}' + tags: + CamelCase: ACamelCaseValue + snake_case: a_snake_case_value + Spaced key: A value with spaces + register: update_result + - name: assert that update succeeded + assert: + that: + - update_result is not failed + - update_result.changed == True + - update_result.configuration.runtime == 'nodejs14.x' + - update_result.configuration.tracing_config.mode == 'Active' + + - name: test no changes are made with the same parameters repeated (check mode) + lambda: + name: '{{lambda_function_name}}' + runtime: nodejs14.x + tracing_mode: Active + handler: '{{ lambda_python_handler }}' + role: '{{ lambda_role_name }}' + tags: + CamelCase: ACamelCaseValue + snake_case: a_snake_case_value + Spaced key: A value with spaces + register: update_result + check_mode: yes + - name: assert that update succeeded + assert: + that: + - update_result is not failed + - update_result.changed == False + + - name: test no changes are made with the same parameters repeated + lambda: + name: '{{lambda_function_name}}' + runtime: nodejs14.x + tracing_mode: Active + handler: '{{ lambda_python_handler }}' + role: '{{ lambda_role_name }}' + tags: + CamelCase: ACamelCaseValue + snake_case: a_snake_case_value + Spaced key: A value with spaces + register: update_result + - name: assert that update succeeded + assert: + that: + - update_result is not failed + - update_result.changed == False + - update_result.configuration.runtime == 'nodejs14.x' + - update_result.configuration.tracing_config.mode == 'Active' + + - name: reset config updates for the following tests + lambda: + name: '{{lambda_function_name}}' + runtime: '{{ lambda_python_runtime }}' + tracing_mode: PassThrough + handler: '{{ lambda_python_handler }}' + role: '{{ lambda_role_name }}' + register: result + - name: assert that reset succeeded + assert: + that: + - result is not failed + - result.changed == True + - result.configuration.runtime == lambda_python_runtime + - result.configuration.tracing_config.mode == 'PassThrough' + + # Test lambda_info + - name: lambda_info | Gather all infos for all lambda 
functions + lambda_info: + query: all + register: lambda_infos_all + check_mode: yes + vars: + ansible_python_interpreter: '{{ botocore_virtualenv_interpreter }}' + - name: lambda_info | Assert successful retrieval of all information 1 + vars: + lambda_info: "{{ lambda_infos_all.functions | selectattr('function_name', 'eq', lambda_function_name) | first }}" + assert: + that: + - lambda_infos_all is not failed + - lambda_infos_all.functions | length > 0 + - lambda_infos_all.functions | selectattr('function_name', 'eq', lambda_function_name) | length == 1 + - lambda_info.runtime == lambda_python_runtime + - lambda_info.description == "" + - lambda_info.function_arn is defined + - lambda_info.handler == lambda_python_handler + - lambda_info.versions is defined + - lambda_info.aliases is defined + - lambda_info.policy is defined + - lambda_info.mappings is defined + - lambda_info.tags is defined + - lambda_info.architectures == ['arm64'] + + - name: lambda_info | Ensure default query value is 'config' when function name + omitted + lambda_info: + register: lambda_infos_query_config + check_mode: yes + - name: lambda_info | Assert successful retrieval of all information 2 + vars: + lambda_info: "{{ lambda_infos_query_config.functions | selectattr('function_name', 'eq', lambda_function_name) | first }}" + assert: + that: + - lambda_infos_query_config is not failed + - lambda_infos_query_config.functions | length > 0 + - lambda_infos_query_config.functions | selectattr('function_name', 'eq', lambda_function_name) | length == 1 + - lambda_info.runtime == lambda_python_runtime + - lambda_info.description == "" + - lambda_info.function_arn is defined + - lambda_info.handler == lambda_python_handler + - lambda_info.versions is not defined + - lambda_info.aliases is not defined + - lambda_info.policy is not defined + - lambda_info.mappings is not defined + - lambda_info.tags is not defined + + - name: lambda_info | Ensure default query value is 'all' when function name specified + lambda_info: + name: '{{ lambda_function_name }}' + register: lambda_infos_query_all + - name: lambda_info | Assert successful retrieval of all information 3 + assert: + that: + - lambda_infos_query_all is not failed + - lambda_infos_query_all.functions | length == 1 + - lambda_infos_query_all.functions[0].versions|length > 0 + - lambda_infos_query_all.functions[0].function_name is defined + - lambda_infos_query_all.functions[0].policy is defined + - lambda_infos_query_all.functions[0].aliases is defined + - lambda_infos_query_all.functions[0].mappings is defined + - lambda_infos_query_all.functions[0].tags is defined + + - name: lambda_info | Gather version infos for given lambda function + lambda_info: + name: '{{ lambda_function_name }}' + query: versions + register: lambda_infos_versions + - name: lambda_info | Assert successful retrieval of versions information + assert: + that: + - lambda_infos_versions is not failed + - lambda_infos_versions.functions | length == 1 + - lambda_infos_versions.functions[0].versions|length > 0 + - lambda_infos_versions.functions[0].function_name == lambda_function_name + - lambda_infos_versions.functions[0].policy is undefined + - lambda_infos_versions.functions[0].aliases is undefined + - lambda_infos_versions.functions[0].mappings is undefined + - lambda_infos_versions.functions[0].tags is undefined
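The `query` values exercised here (`all`, `config`, `versions`, `policy`, `aliases`, `mappings`) are exactly what gates the optional keys asserted above. As an illustrative sketch only (the function name and registered variable below are hypothetical, not part of this test suite), a playbook that needs just one slice of data can ask for it directly and skip the extra API calls that `query: all` makes:

```yaml
- name: Fetch only the configuration of a single function
  amazon.aws.lambda_info:
    name: my-function        # hypothetical function name
    query: config
  register: fn_info

- name: Show the runtime reported for that function
  ansible.builtin.debug:
    msg: "{{ fn_info.functions[0].runtime }}"
```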
- name: lambda_info | Gather config infos for given lambda function + lambda_info: + name: '{{ lambda_function_name }}' + query: config + register: lambda_infos_config + - name: lambda_info | Assert successful retrieval of config information + assert: + that: + - lambda_infos_config is not failed + - lambda_infos_config.functions | length == 1 + - lambda_infos_config.functions[0].function_name == lambda_function_name + - lambda_infos_config.functions[0].description is defined + - lambda_infos_config.functions[0].versions is undefined + - lambda_infos_config.functions[0].policy is undefined + - lambda_infos_config.functions[0].aliases is undefined + - lambda_infos_config.functions[0].mappings is undefined + - lambda_infos_config.functions[0].tags is undefined + + - name: lambda_info | Gather policy infos for given lambda function + lambda_info: + name: '{{ lambda_function_name }}' + query: policy + register: lambda_infos_policy + - name: lambda_info | Assert successful retrieval of policy information + assert: + that: + - lambda_infos_policy is not failed + - lambda_infos_policy.functions | length == 1 + - lambda_infos_policy.functions[0].policy is defined + - lambda_infos_policy.functions[0].versions is undefined + - lambda_infos_policy.functions[0].function_name == lambda_function_name + - lambda_infos_policy.functions[0].aliases is undefined + - lambda_infos_policy.functions[0].mappings is undefined + - lambda_infos_policy.functions[0].tags is undefined + + - name: lambda_info | Gather aliases infos for given lambda function + lambda_info: + name: '{{ lambda_function_name }}' + query: aliases + register: lambda_infos_aliases + - name: lambda_info | Assert successful retrieval of aliases information + assert: + that: + - lambda_infos_aliases is not failed + - lambda_infos_aliases.functions | length == 1 + - lambda_infos_aliases.functions[0].aliases is defined + - lambda_infos_aliases.functions[0].versions is undefined + - lambda_infos_aliases.functions[0].function_name == lambda_function_name + - lambda_infos_aliases.functions[0].policy is undefined + - lambda_infos_aliases.functions[0].mappings is undefined + - lambda_infos_aliases.functions[0].tags is undefined + + - name: lambda_info | Gather mappings infos for given lambda function + lambda_info: + name: '{{ lambda_function_name }}' + query: mappings + register: lambda_infos_mappings + - name: lambda_info | Assert successful retrieval of mappings information + assert: + that: + - lambda_infos_mappings is not failed + - lambda_infos_mappings.functions | length == 1 + - lambda_infos_mappings.functions[0].mappings is defined + - lambda_infos_mappings.functions[0].versions is undefined + - lambda_infos_mappings.functions[0].function_name == lambda_function_name + - lambda_infos_mappings.functions[0].aliases is undefined + - lambda_infos_mappings.functions[0].policy is undefined + - lambda_infos_mappings.functions[0].tags is undefined + + # More Lambda update tests + - name: test state=present with all nullable variables explicitly set to null + lambda: + name: '{{lambda_function_name}}' + runtime: '{{ lambda_python_runtime }}' + role: '{{ lambda_role_name }}' + zip_file: '{{zip_res.dest}}' + handler: '{{ lambda_python_handler }}' + description: + vpc_subnet_ids: + vpc_security_group_ids: + environment_variables: + dead_letter_arn: + register: result + - name: assert lambda remains as before + assert: + that: + - result is not failed + - result.changed == False
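The nullable-variables task above is subtly different from the earlier parameter tests that used `omit`: an explicit null is still sent to the module (which must treat it as "leave this setting alone"), while `omit` removes the key from the call entirely. A minimal sketch of the two spellings, using hypothetical function and role names rather than anything from this suite:

```yaml
- name: Explicit null - the key is passed with no value, expecting no change
  amazon.aws.lambda:
    name: my-function             # hypothetical
    runtime: python3.9
    handler: mini_lambda.handler
    role: my-lambda-role          # hypothetical
    description:

- name: Omit - the key is not passed to the module at all
  amazon.aws.lambda:
    name: my-function
    runtime: python3.9
    handler: mini_lambda.handler
    role: my-lambda-role
    description: '{{ omit }}'
```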
- name: test putting an environment variable changes lambda (check mode) + lambda: + name: '{{lambda_function_name}}' + runtime: '{{ lambda_python_runtime }}' + handler: '{{ lambda_python_handler }}' + role: '{{ lambda_role_name }}' + zip_file: '{{zip_res.dest}}' + environment_variables: + EXTRA_MESSAGE: I think you are great!! + register: result + check_mode: yes + - name: assert lambda upload succeeded + assert: + that: + - result is not failed + - result.changed == True + + - name: test putting an environment variable changes lambda + lambda: + name: '{{lambda_function_name}}' + runtime: '{{ lambda_python_runtime }}' + handler: '{{ lambda_python_handler }}' + role: '{{ lambda_role_name }}' + zip_file: '{{zip_res.dest}}' + environment_variables: + EXTRA_MESSAGE: I think you are great!! + register: result + - name: assert lambda upload succeeded + assert: + that: + - result is not failed + - result.changed == True + - result.configuration.environment.variables.extra_message == "I think you are + great!!" + + - name: test lambda works + execute_lambda: + name: '{{lambda_function_name}}' + payload: + name: Mr Ansible Tests + register: result + - name: assert lambda manages to respond as expected + assert: + that: + - result is not failed + - result.result.output.message == "hello Mr Ansible Tests. I think you are great!!" + + # Deletion behaviour + - name: test state=absent (expect changed=True) (check mode) + lambda: + name: '{{lambda_function_name}}' + state: absent + register: result + check_mode: yes + + - name: assert state=absent + assert: + that: + - result is not failed + - result is changed + + - name: test state=absent (expect changed=True) + lambda: + name: '{{lambda_function_name}}' + state: absent + register: result + + - name: assert state=absent + assert: + that: + - result is not failed + - result is changed + + - name: test state=absent (expect changed=False) when already deleted (check mode) + lambda: + name: '{{lambda_function_name}}' + state: absent + register: result + check_mode: yes + + - name: assert state=absent + assert: + that: + - result is not failed + - result is not changed + + - name: test state=absent (expect changed=False) when already deleted + lambda: + name: '{{lambda_function_name}}' + state: absent + register: result + + - name: assert state=absent + assert: + that: + - result is not failed + - result is not changed
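The "parallel" tasks that follow start four creations and four deletions with `async: 1000`. Note that without `poll: 0` each task still waits in line for its module to finish (Ansible's default poll interval applies), so the `async` here mainly extends the allowed runtime. A fully detached fire-and-forget variant, sketched with hypothetical names and reaped via `async_status`, would look roughly like this:

```yaml
- name: start a lambda creation in the background
  amazon.aws.lambda:
    name: my-function-1           # hypothetical
    runtime: python3.9
    handler: mini_lambda.handler
    role: my-lambda-role          # hypothetical
    zip_file: mini_lambda.zip
  async: 1000
  poll: 0
  register: create_job

- name: wait for the background creation to finish
  ansible.builtin.async_status:
    jid: '{{ create_job.ansible_job_id }}'
  register: job_result
  until: job_result.finished
  retries: 100
  delay: 10
```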
# Parallel creations and deletions + - name: parallel lambda creation 1/4 + lambda: + name: '{{lambda_function_name}}_1' + runtime: '{{ lambda_python_runtime }}' + handler: '{{ lambda_python_handler }}' + role: '{{ lambda_role_name }}' + zip_file: '{{zip_res.dest}}' + async: 1000 + register: async_1 + - name: parallel lambda creation 2/4 + lambda: + name: '{{lambda_function_name}}_2' + runtime: '{{ lambda_python_runtime }}' + handler: '{{ lambda_python_handler }}' + role: '{{ lambda_role_name }}' + zip_file: '{{zip_res.dest}}' + async: 1000 + register: async_2 + - name: parallel lambda creation 3/4 + lambda: + name: '{{lambda_function_name}}_3' + runtime: '{{ lambda_python_runtime }}' + handler: '{{ lambda_python_handler }}' + role: '{{ lambda_role_name }}' + zip_file: '{{zip_res.dest}}' + async: 1000 + register: async_3 + - name: parallel lambda creation 4/4 + lambda: + name: '{{lambda_function_name}}_4' + runtime: '{{ lambda_python_runtime }}' + handler: '{{ lambda_python_handler }}' + role: '{{ lambda_role_name }}' + zip_file: '{{zip_res.dest}}' + register: result + - name: assert parallel lambda creation succeeded + assert: + that: + - result is not failed + - name: parallel lambda deletion 1/4 + lambda: + name: '{{lambda_function_name}}_1' + state: absent + zip_file: '{{zip_res.dest}}' + async: 1000 + register: async_1 + - name: parallel lambda deletion 2/4 + lambda: + name: '{{lambda_function_name}}_2' + state: absent + zip_file: '{{zip_res.dest}}' + async: 1000 + register: async_2 + - name: parallel lambda deletion 3/4 + lambda: + name: '{{lambda_function_name}}_3' + state: absent + zip_file: '{{zip_res.dest}}' + async: 1000 + register: async_3 + - name: parallel lambda deletion 4/4 + lambda: + name: '{{lambda_function_name}}_4' + state: absent + zip_file: '{{zip_res.dest}}' + register: result + - name: assert parallel lambda deletion succeeded + assert: + that: + - result is not failed + + # Test creation with layers + - name: Create temporary directory for testing + tempfile: + suffix: lambda + state: directory + register: test_dir + + - name: Create python directory for lambda layer + file: + path: "{{ remote_tmp_dir }}/python" + state: directory + + - name: Create lambda layer library + copy: + content: | + def hello(): + print("Hello from the ansible amazon.aws lambda layer") + return 1 + dest: "{{ remote_tmp_dir }}/python/lambda_layer.py" + + - name: Create lambda layer archive + archive: + format: zip + path: "{{ remote_tmp_dir }}" + dest: "{{ remote_tmp_dir }}/lambda_layer.zip" + + - name: Create lambda layer + lambda_layer: + name: "{{ lambda_python_layers_names[0] }}" + description: '{{ lambda_python_layers_names[0] }} lambda layer' + content: + zip_file: "{{ remote_tmp_dir }}/lambda_layer.zip" + register: first_layer + + - name: Create another lambda layer + lambda_layer: + name: "{{ lambda_python_layers_names[1] }}" + description: '{{ lambda_python_layers_names[1] }} lambda layer' + content: + zip_file: "{{ remote_tmp_dir }}/lambda_layer.zip" + register: second_layer + + - name: Create lambda function with layers + lambda: + name: '{{ lambda_function_name_with_layer }}' + runtime: '{{ lambda_python_runtime }}' + handler: '{{ lambda_python_handler }}' + role: '{{ lambda_role_name }}' + zip_file: '{{ zip_res.dest }}' + layers: + - layer_version_arn: "{{ first_layer.layer_versions.0.layer_version_arn }}" + register: result + - name: Validate that lambda function was created with expected property + assert: + that: + - result is changed + - '"layers" in result.configuration' + - result.configuration.layers | length == 1 + - result.configuration.layers.0.arn == first_layer.layer_versions.0.layer_version_arn + + - name: Create lambda function with layers once again (validate idempotency) + lambda: + name: '{{ lambda_function_name_with_layer }}' + runtime: '{{ lambda_python_runtime }}' + handler: '{{ lambda_python_handler }}' + role: '{{ lambda_role_name }}' + zip_file: '{{ zip_res.dest }}' + layers: + - layer_version_arn: "{{ first_layer.layer_versions.0.layer_version_arn }}" + register: result + - name: Validate that no changes were made + assert: + that: + - result is not changed
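The multiple-layer test below deliberately mixes the two accepted spellings for pinning a layer version, which the module resolves to the same thing: a complete `layer_version_arn`, or a `layer_name` (a plain name or an unversioned layer ARN) together with an explicit `version`. Sketched side by side with placeholder values, not ones from this suite:

```yaml
layers:
  # Spelling 1: the full version ARN carries the version itself
  - layer_version_arn: arn:aws:lambda:us-east-1:123456789012:layer:example:1
  # Spelling 2: name (or unversioned ARN) plus a separate version number
  - layer_name: example
    version: 1
```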
- name: Create lambda function with multiple layers + lambda: + name: '{{ lambda_function_name_with_multiple_layer }}' + runtime: '{{ lambda_python_runtime }}' + handler: '{{ lambda_python_handler }}' + role: '{{ lambda_role_name }}' + zip_file: '{{ zip_res.dest }}' + layers: + - layer_version_arn: "{{ first_layer.layer_versions.0.layer_version_arn }}" + - layer_name: "{{ second_layer.layer_versions.0.layer_arn }}" + version: "{{ second_layer.layer_versions.0.version }}" + register: result + - name: Validate that lambda function was created with expected property + assert: + that: + - result is changed + - '"layers" in result.configuration' + - result.configuration.layers | length == 2 + - first_layer.layer_versions.0.layer_version_arn in lambda_layer_versions + - second_layer.layer_versions.0.layer_version_arn in lambda_layer_versions + vars: + lambda_layer_versions: "{{ result.configuration.layers | map(attribute='arn') | list }}" + + - name: Create lambda function with multiple layers and changing layers order (idempotency) + lambda: + name: '{{ lambda_function_name_with_multiple_layer }}' + runtime: '{{ lambda_python_runtime }}' + handler: '{{ lambda_python_handler }}' + role: '{{ lambda_role_name }}' + zip_file: '{{ zip_res.dest }}' + layers: + - layer_version_arn: "{{ second_layer.layer_versions.0.layer_version_arn }}" + - layer_name: "{{ first_layer.layer_versions.0.layer_arn }}" + version: "{{ first_layer.layer_versions.0.version }}" + register: result + - name: Validate that no changes were made + assert: + that: + - result is not changed + + always: + + - name: Delete lambda layers + lambda_layer: + name: "{{ item }}" + version: -1 + state: absent + ignore_errors: true + with_items: "{{ lambda_python_layers_names }}" + + - name: ensure functions are absent at end of test + lambda: + name: '{{ item }}' + state: absent + ignore_errors: true + with_items: + - '{{ lambda_function_name }}' + - '{{ lambda_function_name }}_1' + - '{{ lambda_function_name }}_2' + - '{{ lambda_function_name }}_3' + - '{{ lambda_function_name }}_4' + + - name: ensure role has been removed at end of test + iam_role: + name: '{{ lambda_role_name }}' + state: absent + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda/tasks/tagging.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda/tasks/tagging.yml new file mode 100644 index 000000000..135e83ff9 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda/tasks/tagging.yml @@ -0,0 +1,246 @@ +- name: Tests relating to tagging lambda + vars: + first_tags: + Key with Spaces: Value with spaces + CamelCaseKey: CamelCaseValue + pascalCaseKey: pascalCaseValue + snake_case_key: snake_case_value + second_tags: + New Key with Spaces: Value with spaces + NewCamelCaseKey: CamelCaseValue + newPascalCaseKey: pascalCaseValue + new_snake_case_key: snake_case_value + third_tags: + Key with Spaces: Value with spaces + CamelCaseKey: CamelCaseValue + pascalCaseKey: pascalCaseValue + snake_case_key: snake_case_value + New Key with Spaces: Updated Value with spaces + final_tags: + Key with Spaces: Value with spaces + CamelCaseKey: CamelCaseValue + pascalCaseKey: pascalCaseValue + snake_case_key: snake_case_value + New Key with Spaces: Updated Value with spaces + NewCamelCaseKey: CamelCaseValue + newPascalCaseKey: pascalCaseValue + new_snake_case_key: snake_case_value + # Mandatory settings + module_defaults: + amazon.aws.lambda: + runtime: '{{ lambda_python_runtime }}' + handler: '{{ lambda_python_handler }}' + role: '{{ lambda_role_name }}' + block: + + ### + + - name: test adding tags to lambda (check mode) + lambda: + name: '{{ lambda_function_name }}' + tags: '{{ first_tags }}' + register: update_result + check_mode: yes + - name: assert that update succeeded + assert: + that: + - update_result is changed + + - name: test adding tags to lambda + lambda: + name: '{{ lambda_function_name }}' + tags: '{{ first_tags }}' + register: update_result + - name: assert that update succeeded + assert: + that: + - update_result is changed + - update_result.tags == first_tags + + - name: test adding tags to lambda - idempotency (check mode) + lambda: + name: '{{ lambda_function_name }}' + tags: '{{ first_tags }}' + register:
update_result + check_mode: yes + - name: assert that update succeeded + assert: + that: + - update_result is not changed + + - name: test adding tags to lambda - idempotency + lambda: + name: '{{ lambda_function_name }}' + tags: '{{ first_tags }}' + register: update_result + - name: assert that update succeeded + assert: + that: + - update_result is not changed + - update_result.tags == first_tags + + ### + + - name: test updating tags with purge on lambda (check mode) + lambda: + name: '{{ lambda_function_name }}' + tags: '{{ second_tags }}' + register: update_result + check_mode: yes + - name: assert that update succeeded + assert: + that: + - update_result is changed + + - name: test updating tags with purge on lambda + lambda: + name: '{{ lambda_function_name }}' + tags: '{{ second_tags }}' + register: update_result + - name: assert that update succeeded + assert: + that: + - update_result is changed + - update_result.tags == second_tags + + - name: test updating tags with purge on lambda - idempotency (check mode) + lambda: + name: '{{ lambda_function_name }}' + tags: '{{ second_tags }}' + register: update_result + check_mode: yes + - name: assert that update succeeded + assert: + that: + - update_result is not changed + + - name: test updating tags with purge on lambda - idempotency + lambda: + name: '{{ lambda_function_name }}' + tags: '{{ second_tags }}' + register: update_result + - name: assert that update succeeded + assert: + that: + - update_result is not changed + - update_result.tags == second_tags + + ### + + - name: test updating tags without purge on lambda (check mode) + lambda: + name: '{{ lambda_function_name }}' + tags: '{{ third_tags }}' + purge_tags: false + register: update_result + check_mode: yes + - name: assert that update succeeded + assert: + that: + - update_result is changed + + - name: test updating tags without purge on lambda + lambda: + name: '{{ lambda_function_name }}' + tags: '{{ third_tags }}' + purge_tags: false + register: update_result + - name: assert that update succeeded + assert: + that: + - update_result is changed + - update_result.tags == final_tags + + - name: test updating tags without purge on lambda - idempotency (check mode) + lambda: + name: '{{ lambda_function_name }}' + tags: '{{ third_tags }}' + purge_tags: false + register: update_result + check_mode: yes + - name: assert that update succeeded + assert: + that: + - update_result is not changed + + - name: test updating tags without purge on lambda - idempotency + lambda: + name: '{{ lambda_function_name }}' + tags: '{{ third_tags }}' + purge_tags: false + register: update_result + - name: assert that update succeeded + assert: + that: + - update_result is not changed + - update_result.tags == final_tags + + ### + + - name: test no tags param lambda (check mode) + lambda: + name: '{{ lambda_function_name }}' + register: update_result + check_mode: yes + - name: assert no change + assert: + that: + - update_result is not changed + - update_result.tags == final_tags + + + - name: test no tags param lambda + lambda: + name: '{{ lambda_function_name }}' + register: update_result + - name: assert no change + assert: + that: + - update_result is not changed + - update_result.tags == final_tags + + ### + + - name: test removing tags from lambda (check mode) + lambda: + name: '{{ lambda_function_name }}' + tags: {} + register: update_result + check_mode: yes + - name: assert that update succeeded + assert: + that: + - update_result is changed + + - name: test removing tags from lambda + 
lambda: + name: '{{ lambda_function_name }}' + tags: {} + register: update_result + - name: assert that update succeeded + assert: + that: + - update_result is changed + - update_result.tags == {} + + - name: test removing tags from lambda - idempotency (check mode) + lambda: + name: '{{ lambda_function_name }}' + tags: {} + register: update_result + check_mode: yes + - name: assert that update succeeded + assert: + that: + - update_result is not changed + + - name: test removing tags from lambda - idempotency + lambda: + name: '{{ lambda_function_name }}' + tags: {} + register: update_result + - name: assert that update succeeded + assert: + that: + - update_result is not changed + - update_result.tags == {} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/aliases b/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/aliases new file mode 100644 index 000000000..4ef4b2067 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/aliases @@ -0,0 +1 @@ +cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/defaults/main.yml new file mode 100644 index 000000000..692a4f015 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/defaults/main.yml @@ -0,0 +1,6 @@ +--- +# defaults file for lambda integration test +# IAM role names have to be less than 64 characters +# we hash the resource_prefix to get a shorter, unique string +lambda_function_name: 'ansible-test-{{ tiny_prefix }}' +lambda_role_name: 'ansible-test-{{ tiny_prefix }}-lambda' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/files/mini_lambda.py b/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/files/mini_lambda.py new file mode 100644 index 000000000..901f6b55a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/files/mini_lambda.py @@ -0,0 +1,48 @@ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import os + + +def handler(event, context): + """ + The handler function is the function which gets called each time + the lambda is run. + """ + # printing goes to the cloudwatch log allowing us to simply debug the lambda if we can find + # the log entry. + print("got event:\n" + json.dumps(event)) + + # if the name parameter isn't present this can throw an exception + # which will result in an amazon chosen failure from the lambda + # which can be completely fine. + + name = event["name"] + + # we can use environment variables as part of the configuration of the lambda + # which can change the behaviour of the lambda without needing a new upload + + extra = os.environ.get("EXTRA_MESSAGE") + if extra is not None and len(extra) > 0: + greeting = "hello {0}. {1}".format(name, extra) + else: + greeting = "hello " + name + + return {"message": greeting} + + +def main(): + """ + This main function will normally never be called during normal + lambda use. It is here for testing the lambda program only. 
+ """ + event = {"name": "james"} + context = None + print(handler(event, context)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/files/minimal_trust_policy.json b/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/files/minimal_trust_policy.json new file mode 100644 index 000000000..fb84ae9de --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/files/minimal_trust_policy.json @@ -0,0 +1,12 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": "lambda.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] +} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/meta/main.yml new file mode 100644 index 000000000..32cf5dda7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/tasks/main.yml new file mode 100644 index 000000000..9b264f50c --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/tasks/main.yml @@ -0,0 +1,622 @@ +- name: set connection information for AWS modules and run tests + module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + collections: + - community.general + block: + # ============================================================== + # Preparation + - name: create minimal lambda role + iam_role: + name: '{{ lambda_role_name }}' + assume_role_policy_document: '{{ lookup("file", "minimal_trust_policy.json") }}' + create_instance_profile: false + managed_policies: + - 'arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess' + register: iam_role + - name: wait 10 seconds for role to become available + pause: + seconds: 10 + when: iam_role.changed + - name: move lambda into place for archive module + copy: + src: mini_lambda.py + dest: '{{ output_dir }}/mini_lambda.py' + mode: preserve + - name: bundle lambda into a zip + register: zip_res + archive: + format: zip + path: '{{ output_dir }}/mini_lambda.py' + dest: '{{ output_dir }}/mini_lambda.zip' + + - name: Upload test lambda (version 1) + lambda: + name: '{{ lambda_function_name }}' + runtime: 'python3.7' + handler: 'mini_lambda.handler' + role: '{{ lambda_role_name }}' + zip_file: '{{ zip_res.dest }}' + register: lambda_a + - name: assert lambda upload succeeded + assert: + that: + - lambda_a is changed + + - name: Update lambda (version 2) + lambda: + name: '{{ lambda_function_name }}' + runtime: 'python3.8' + handler: 'mini_lambda.handler' + role: '{{ lambda_role_name }}' + register: lambda_b + - name: assert that update succeeded + assert: + that: + - lambda_b is changed + + - name: Update lambda (version 3 / LATEST) + lambda: + name: '{{ lambda_function_name }}' + runtime: 'python3.9' + handler: 'mini_lambda.handler' + role: '{{ lambda_role_name }}' + register: lambda_c + - name: assert that update succeeded + assert: + that: + - lambda_c is changed + + - name: Store Lambda info + vars: + _full_arn: '{{ lambda_a.configuration.function_arn }}' + set_fact: + lambda_arn: '{{ 
":".join(_full_arn.split(":")[:-1]) }}' + + # ============================================================== + # Creation of an alias + - name: Create an alias (check mode) + lambda_alias: + state: present + function_name: '{{ lambda_function_name }}' + name: Testing + check_mode: True + register: create_alias + - name: Check changed + assert: + that: + - create_alias is changed + + - name: Create an alias + lambda_alias: + state: present + function_name: '{{ lambda_function_name }}' + name: Testing + register: create_alias + - name: Check changed and returned values + assert: + that: + - create_alias is changed + - '"alias_arn" in create_alias' + - create_alias.alias_arn.startswith(lambda_arn) + - create_alias.alias_arn.endswith("Testing") + - '"description" in create_alias' + - create_alias.description == "" + - '"function_version" in create_alias' + - create_alias.function_version == "$LATEST" + - '"name" in create_alias' + - create_alias.name == "Testing" + - '"revision_id" in create_alias' + # The revision_id doesn't line up with the revision IDs of the versions + # It will change any time the alias is updated + + - name: Create an alias - idempotency (check mode) + lambda_alias: + state: present + function_name: '{{ lambda_function_name }}' + name: Testing + check_mode: True + register: create_alias + - name: Check not changed + assert: + that: + - create_alias is not changed + + - name: Create an alias - idempotecy + lambda_alias: + state: present + function_name: '{{ lambda_function_name }}' + name: Testing + register: create_alias + - name: Check not changed + assert: + that: + - create_alias is not changed + - '"alias_arn" in create_alias' + - create_alias.alias_arn.startswith(lambda_arn) + - create_alias.alias_arn.endswith("Testing") + - '"description" in create_alias' + - create_alias.description == "" + - '"function_version" in create_alias' + - create_alias.function_version == "$LATEST" + - '"name" in create_alias' + - create_alias.name == "Testing" + - '"revision_id" in create_alias' + # The revision_id doesn't line up with the revision IDs of the versions + # It will change any time the alias is updated + + # ============================================================== + # Update description of an alias when none set to start + - name: Update an alias description (check mode) + lambda_alias: + state: present + function_name: '{{ lambda_function_name }}' + name: Testing + description: 'Description 1' + check_mode: True + register: update_alias_description + - name: Check changed + assert: + that: + - update_alias_description is changed + + - name: Update an alias description + lambda_alias: + state: present + function_name: '{{ lambda_function_name }}' + name: Testing + description: 'Description 1' + register: update_alias_description + - name: Check changed and returned values + assert: + that: + - update_alias_description is changed + - '"alias_arn" in update_alias_description' + - update_alias_description.alias_arn.startswith(lambda_arn) + - update_alias_description.alias_arn.endswith("Testing") + - '"description" in update_alias_description' + - update_alias_description.description == "Description 1" + - '"function_version" in update_alias_description' + - update_alias_description.function_version == "$LATEST" + - '"name" in update_alias_description' + - update_alias_description.name == "Testing" + - '"revision_id" in update_alias_description' + # The revision_id doesn't line up with the revision IDs of the versions + # It will change any time the alias is updated + + 
# ============================================================== + # Update description of an alias when none set to start + - name: Update an alias description (check mode) + lambda_alias: + state: present + function_name: '{{ lambda_function_name }}' + name: Testing + description: 'Description 1' + check_mode: True + register: update_alias_description + - name: Check changed + assert: + that: + - update_alias_description is changed + + - name: Update an alias description + lambda_alias: + state: present + function_name: '{{ lambda_function_name }}' + name: Testing + description: 'Description 1' + register: update_alias_description + - name: Check changed and returned values + assert: + that: + - update_alias_description is changed + - '"alias_arn" in update_alias_description' + - update_alias_description.alias_arn.startswith(lambda_arn) + - update_alias_description.alias_arn.endswith("Testing") + - '"description" in update_alias_description' + - update_alias_description.description == "Description 1" + - '"function_version" in update_alias_description' + - update_alias_description.function_version == "$LATEST" + - '"name" in update_alias_description' + - update_alias_description.name == "Testing" + - '"revision_id" in update_alias_description' + # The revision_id doesn't line up with the revision IDs of the versions + # It will change any time the alias is updated + + - name: Update an alias description - idempotency (check mode) + lambda_alias: + state: present + function_name: '{{ lambda_function_name }}' + name: Testing + description: 'Description 1' + check_mode: True + register: update_alias_description + - name: Check not changed + assert: + that: + - update_alias_description is not changed + + - name: Update an alias description - idempotency + lambda_alias: + state: present + function_name: '{{ lambda_function_name }}' + name: Testing + description: 'Description 1' + register: update_alias_description + - name: Check not changed + assert: + that: + - update_alias_description is not changed + - '"alias_arn" in update_alias_description' + - update_alias_description.alias_arn.startswith(lambda_arn) + - update_alias_description.alias_arn.endswith("Testing") + - '"description" in update_alias_description' + - update_alias_description.description == "Description 1" + - '"function_version" in update_alias_description' + - update_alias_description.function_version == "$LATEST" + - '"name" in update_alias_description' + - update_alias_description.name == "Testing" + - '"revision_id" in update_alias_description' + # The revision_id doesn't line up with the revision IDs of the versions + # It will change any time the alias is updated + + # ============================================================== + # Update description of an alias when one set to start + - name: Update an alias description again (check mode) + lambda_alias: + state: present + function_name: '{{ lambda_function_name }}' + name: Testing + description: 'description 2' + check_mode: True + register: update_alias_description + - name: Check changed + assert: + that: + - update_alias_description is changed + + - name: Update an alias description again + lambda_alias: + state: present + function_name: '{{ lambda_function_name }}' + name: Testing + description: 'description 2' + register: update_alias_description + - name: Check changed and returned values + assert: + that: + - update_alias_description is changed + - '"alias_arn" in update_alias_description' + - update_alias_description.alias_arn.startswith(lambda_arn) + - update_alias_description.alias_arn.endswith("Testing") + - '"description" in update_alias_description' + - update_alias_description.description == "description 2" + - '"function_version" in update_alias_description' + - update_alias_description.function_version == "$LATEST" + - '"name" in update_alias_description' + - update_alias_description.name == "Testing" + - '"revision_id" in update_alias_description' + # The revision_id doesn't line up with the revision IDs of the versions + # It will change any time the alias is updated + + # ============================================================== + # Update version of an alias + - name: Update an alias version (check mode) + lambda_alias: + state: present + function_name: '{{ lambda_function_name }}' + name: Testing + function_version: 1 + check_mode: True + register: update_alias_version + - name: Check changed + assert: + that: + - update_alias_version is changed + + - name: Update an alias version + lambda_alias: + state: present + function_name: '{{ lambda_function_name }}' + name: Testing + function_version: 1 + register: update_alias_version + - name: Check changed and returned values + assert: + that: + - update_alias_version is changed + - '"alias_arn" in update_alias_version' + - update_alias_version.alias_arn.startswith(lambda_arn) + - update_alias_version.alias_arn.endswith("Testing") + - '"description" in
update_alias_version' + - update_alias_version.description == "description 2" + - '"function_version" in update_alias_version' + - update_alias_version.function_version == "1" + - '"name" in update_alias_version' + - update_alias_version.name == "Testing" + - '"revision_id" in update_alias_version' + # The revision_id doesn't line up with the revision IDs of the versions + # It will change any time the alias is updated + + - name: Update an alias version - idempotency (check mode) + lambda_alias: + state: present + function_name: '{{ lambda_function_name }}' + name: Testing + function_version: 1 + check_mode: True + register: update_alias_version + - name: Check not changed + assert: + that: + - update_alias_version is not changed + + - name: Update an alias version - idempotency + lambda_alias: + state: present + function_name: '{{ lambda_function_name }}' + name: Testing + function_version: 1 + register: update_alias_version + - name: Check not changed + assert: + that: + - update_alias_version is not changed + - '"alias_arn" in update_alias_version' + - update_alias_version.alias_arn.startswith(lambda_arn) + - update_alias_version.alias_arn.endswith("Testing") + - '"description" in update_alias_version' + - update_alias_version.description == "description 2" + - '"function_version" in update_alias_version' + - update_alias_version.function_version == "1" + - '"name" in update_alias_version' + - update_alias_version.name == "Testing" + - '"revision_id" in update_alias_version' + # The revision_id doesn't line up with the revision IDs of the versions + # It will change any time the alias is updated + + - name: Update an alias version to implied LATEST (check mode) + lambda_alias: + state: present + function_name: '{{ lambda_function_name }}' + name: Testing + # docs state that when not defined defaults to LATEST + #function_version: 1 + check_mode: True + register: update_alias_version + - name: Check changed + assert: + that: + - update_alias_version is changed + + - name: Update an alias version to implied LATEST + lambda_alias: + state: present + function_name: '{{ lambda_function_name }}' + name: Testing + # docs state that when not defined defaults to LATEST + #function_version: 1 + register: update_alias_version + - name: Check changed and returned values + assert: + that: + - update_alias_version is changed + - '"alias_arn" in update_alias_version' + - update_alias_version.alias_arn.startswith(lambda_arn) + - update_alias_version.alias_arn.endswith("Testing") + - '"description" in update_alias_version' + - update_alias_version.description == "description 2" + - '"function_version" in update_alias_version' + - update_alias_version.function_version == "$LATEST" + - '"name" in update_alias_version' + - update_alias_version.name == "Testing" + - '"revision_id" in update_alias_version' + # The revision_id doesn't line up with the revision IDs of the versions + # It will change any time the alias is updated + + # Reset the alias to version 1 so that 0 (explicit LATEST) causes a change below + - name: Update an alias version + lambda_alias: + state: present + function_name: '{{ lambda_function_name }}' + name: Testing + function_version: 1 + register: update_alias_version + - name: Check changed + assert: + that: + - update_alias_version is changed + - '"alias_arn" in update_alias_version' + - update_alias_version.alias_arn.startswith(lambda_arn) + - update_alias_version.alias_arn.endswith("Testing") + - '"description" in update_alias_version' + - update_alias_version.description == "description 2" + - '"function_version" in
update_alias_version' + - update_alias_version.function_version == "1" + - '"name" in update_alias_version' + - update_alias_version.name == "Testing" + - '"revision_id" in update_alias_version' + # The revision_id doesn't line up with the revision IDs of the versions + # It will change any time the alias is updated + + - name: Update an alias version to explicit LATEST with 0 (check mode) + lambda_alias: + state: present + function_name: '{{ lambda_function_name }}' + name: Testing + function_version: 0 + check_mode: True + register: update_alias_version + - name: Check changed + assert: + that: + - update_alias_version is changed + + - name: Update an alias version to explicit LATEST with 0 + lambda_alias: + state: present + function_name: '{{ lambda_function_name }}' + name: Testing + function_version: 0 + register: update_alias_version + - name: Check changed and returned values + assert: + that: + - update_alias_version is changed + - '"alias_arn" in update_alias_version' + - update_alias_version.alias_arn.startswith(lambda_arn) + - update_alias_version.alias_arn.endswith("Testing") + - '"description" in update_alias_version' + - update_alias_version.description == "description 2" + - '"function_version" in update_alias_version' + - update_alias_version.function_version == "$LATEST" + - '"name" in update_alias_version' + - update_alias_version.name == "Testing" + - '"revision_id" in update_alias_version' + # The revision_id doesn't line up with the revision IDs of the versions + # It will change any time the alias is updated + + - name: Update an alias version to explicit LATEST with 0 - idempotency (check mode) + lambda_alias: + state: present + function_name: '{{ lambda_function_name }}' + name: Testing + function_version: 0 + check_mode: True + register: update_alias_version + - name: Check not changed + assert: + that: + - update_alias_version is not changed + + - name: Update an alias version to explicit LATEST with 0 - idempotency + lambda_alias: + state: present + function_name: '{{ lambda_function_name }}' + name: Testing + function_version: 0 + register: update_alias_version + - name: Check not changed and returned values + assert: + that: + - update_alias_version is not changed + - '"alias_arn" in update_alias_version' + - update_alias_version.alias_arn.startswith(lambda_arn) + - update_alias_version.alias_arn.endswith("Testing") + - '"description" in update_alias_version' + - update_alias_version.description == "description 2" + - '"function_version" in update_alias_version' + - update_alias_version.function_version == "$LATEST" + - '"name" in update_alias_version' + - update_alias_version.name == "Testing" + - '"revision_id" in update_alias_version' + # The revision_id doesn't line up with the revision IDs of the versions + # It will change any time the alias is updated + + # ============================================================== + # Creation of an alias with all options + - name: Create an alias with all options (check mode) + lambda_alias: + state: present + function_name: '{{ lambda_function_name }}' + description: 'Hello world' + name: stable + function_version: 1 + check_mode: True + register: create_alias + - name: Check changed + assert: + that: + - create_alias is changed + + - name: Create an alias with all options + lambda_alias: + state: present + function_name: '{{ lambda_function_name }}' + description: 'Hello world' + name: stable + function_version: 1 + register: create_alias + - name: Check changed and returned values + assert: + that: + - create_alias is
changed + - '"alias_arn" in create_alias' + - create_alias.alias_arn.startswith(lambda_arn) + - create_alias.alias_arn.endswith("stable") + - '"description" in create_alias' + - create_alias.description == "Hello world" + - '"function_version" in create_alias' + - create_alias.function_version == "1" + - '"name" in create_alias' + - create_alias.name == "stable" + - '"revision_id" in create_alias' + # The revision_id doesn't line up with the revision IDs of the versions + # It will change any time the alias is updated + + - name: Create an alias with all options - idempotency (check mode) + lambda_alias: + state: present + function_name: '{{ lambda_function_name }}' + description: 'Hello world' + name: stable + function_version: 1 + check_mode: True + register: create_alias + - name: Check not changed + assert: + that: + - create_alias is not changed + + - name: Create an alias with all options - idempotency + lambda_alias: + state: present + function_name: '{{ lambda_function_name }}' + description: 'Hello world' + name: stable + function_version: 1 + register: create_alias + - name: Check not changed + assert: + that: + - create_alias is not changed + - '"alias_arn" in create_alias' + - create_alias.alias_arn.startswith(lambda_arn) + - create_alias.alias_arn.endswith("stable") + - '"description" in create_alias' + - create_alias.description == "Hello world" + - '"function_version" in create_alias' + - create_alias.function_version == "1" + - '"name" in create_alias' + - create_alias.name == "stable" + - '"revision_id" in create_alias' + # The revision_id doesn't line up with the revision IDs of the versions + # It will change any time the alias is updated + + # ============================================================== + # Deletion of an alias + - name: Delete an alias (check mode) + lambda_alias: + state: absent + function_name: '{{ lambda_function_name }}' + name: Testing + check_mode: True + register: delete_alias + - name: Check changed + assert: + that: + - delete_alias is changed + + - name: Delete an alias + lambda_alias: + state: absent + function_name: '{{ lambda_function_name }}' + name: Testing + register: delete_alias + - name: Check changed + assert: + that: + - delete_alias is changed + + - name: Delete an alias - idempotency (check mode) + lambda_alias: + state: absent + function_name: '{{ lambda_function_name }}' + name: Testing + check_mode: True + register: delete_alias + - name: Check not changed + assert: + that: + - delete_alias is not changed + + - name: Delete an alias - idempotency + lambda_alias: + state: absent + function_name: '{{ lambda_function_name }}' + name: Testing + register: delete_alias + - name: Check not changed + assert: + that: + - delete_alias is not changed + + # ============================================================== + # Cleanup + always: + - name: ensure function is absent at end of test + lambda: + name: '{{lambda_function_name}}' + state: absent + ignore_errors: true + - name: ensure role has been removed at end of test + iam_role: + name: '{{ lambda_role_name }}' + state: absent + delete_instance_profile: True + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/aliases b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/aliases new file mode 100644 index 000000000..4ef4b2067 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/aliases @@ -0,0 +1 @@ +cloud/aws diff --git
a/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/defaults/main.yml new file mode 100644 index 000000000..200b6b4ba --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/defaults/main.yml @@ -0,0 +1,10 @@ +# defaults file for lambda integration test +# IAM role names have to be less than 64 characters +# we hash the resource_prefix to get a shorter, unique string +lambda_function_name: 'test-lambda-{{ tiny_prefix }}' +lambda_role_name: ansible-test-{{ tiny_prefix }}-lambda + +dynamodb_table_name: ansible-test-{{ tiny_prefix }} + +lambda_python_runtime: python3.9 +lambda_python_handler: mini_lambda.handler diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/files/mini_lambda.py b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/files/mini_lambda.py new file mode 100644 index 000000000..901f6b55a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/files/mini_lambda.py @@ -0,0 +1,48 @@ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import os + + +def handler(event, context): + """ + The handler function is the function which gets called each time + the lambda is run. + """ + # printing goes to the cloudwatch log allowing us to simply debug the lambda if we can find + # the log entry. + print("got event:\n" + json.dumps(event)) + + # if the name parameter isn't present this can throw an exception + # which will result in an amazon chosen failure from the lambda + # which can be completely fine. + + name = event["name"] + + # we can use environment variables as part of the configuration of the lambda + # which can change the behaviour of the lambda without needing a new upload + + extra = os.environ.get("EXTRA_MESSAGE") + if extra is not None and len(extra) > 0: + greeting = "hello {0}. {1}".format(name, extra) + else: + greeting = "hello " + name + + return {"message": greeting} + + +def main(): + """ + This main function will normally never be called during normal + lambda use. It is here for testing the lambda program only. 
+ """ + event = {"name": "james"} + context = None + print(handler(event, context)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/files/minimal_trust_policy.json b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/files/minimal_trust_policy.json new file mode 100644 index 000000000..fb84ae9de --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/files/minimal_trust_policy.json @@ -0,0 +1,12 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": "lambda.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] +} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/meta/main.yml new file mode 100644 index 000000000..463f90ed0 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/meta/main.yml @@ -0,0 +1,5 @@ +dependencies: +- role: setup_remote_tmp_dir +- role: setup_botocore_pip + vars: + botocore_version: 1.21.51 \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/main.yml new file mode 100644 index 000000000..349ee41ac --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/main.yml @@ -0,0 +1,117 @@ +- name: set connection information for AWS modules and run tests + module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + collections: + - community.general + block: + + - name: Create test resources setup + import_tasks: setup.yml + +# TEST CREATE LAMBDA EVENT ======================================================================================== + - name: Create DynamoDB stream event mapping (trigger) - check_mode + amazon.aws.lambda_event: + state: present + event_source: stream + function_arn: '{{ lambda_function_arn }}' + source_params: + source_arn: '{{ dynamo_stream_arn }}' + enabled: True + batch_size: 500 + starting_position: LATEST + function_response_types: + - ReportBatchItemFailures + check_mode: true + register: create_lambda_event_result + + - assert: + that: + - create_lambda_event_result is changed + - create_lambda_event_result is not failed + - '"lambda:CreateEventSourceMapping" not in create_lambda_event_result.resource_actions' + + - name: Create DynamoDB stream event mapping (trigger) + amazon.aws.lambda_event: + state: present + event_source: stream + function_arn: '{{ lambda_function_arn }}' + source_params: + source_arn: '{{ dynamo_stream_arn }}' + enabled: True + batch_size: 500 + starting_position: LATEST + function_response_types: + - ReportBatchItemFailures + register: create_lambda_event_result + + - name: Get info on above trigger + command: 'aws lambda get-event-source-mapping --uuid {{ create_lambda_event_result.events.uuid }}' + environment: + AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" + AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" + AWS_SESSION_TOKEN: "{{ security_token | default('') }}" + AWS_DEFAULT_REGION: "{{ aws_region }}" + register: lambda_function_details + + - name: convert it to an object + set_fact: + lambda_function_details_obj: "{{ lambda_function_details.stdout | from_json }}" + + - 
assert: + that: + - lambda_function_details_obj.FunctionResponseTypes is defined + - lambda_function_details_obj.FunctionResponseTypes | length > 0 + - lambda_function_details_obj.FunctionResponseTypes[0] == "ReportBatchItemFailures" + - '"lambda:CreateEventSourceMapping" in create_lambda_event_result.resource_actions' + + - name: Create DynamoDB stream event mapping (trigger) - check_mode - idempotency + amazon.aws.lambda_event: + state: present + event_source: stream + function_arn: '{{ lambda_function_arn }}' + source_params: + source_arn: '{{ dynamo_stream_arn }}' + enabled: True + batch_size: 500 + starting_position: LATEST + function_response_types: + - ReportBatchItemFailures + check_mode: true + register: create_lambda_event_result + + - assert: + that: + - create_lambda_event_result is not changed + - create_lambda_event_result is not failed + - '"lambda:CreateEventSourceMapping" not in create_lambda_event_result.resource_actions' + + - name: Create DynamoDB stream event mapping (trigger) - idempotency + amazon.aws.lambda_event: + state: present + event_source: stream + function_arn: '{{ lambda_function_arn }}' + source_params: + source_arn: '{{ dynamo_stream_arn }}' + enabled: True + batch_size: 500 + starting_position: LATEST + function_response_types: + - ReportBatchItemFailures + register: create_lambda_event_result + + - assert: + that: + - create_lambda_event_result is not changed + - create_lambda_event_result is not failed + - '"lambda:CreateEventSourceMapping" not in create_lambda_event_result.resource_actions' + + +# ======================================================================================== + + always: + - name: Clean up test resources setup + import_tasks: teardown.yml diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/setup.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/setup.yml new file mode 100644 index 000000000..df9b4ce1d --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/setup.yml @@ -0,0 +1,83 @@ +--- +- debug: msg="Starting test setup......" 
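+# NOTE: the setup tasks below rely on variables expected to come from this target's defaults/inventory rather than from this file (an assumption; the defaults are not part of this hunk): dynamodb_table_name, lambda_role_name, lambda_function_name, lambda_python_runtime, lambda_python_handler, output_dir, tiny_prefix and botocore_virtualenv_interpreter.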
+ +# CREATE DYNAMO DB TABLE +- name: Create minimal dynamo table + community.aws.dynamodb_table: + name: "{{ dynamodb_table_name }}" + hash_key_name: id + hash_key_type: STRING + tags: + Usage: Created By Integration Test + register: create_table_result + +# ENABLE DYNAMODB STREAM AND GET STREAM ARN +- name: Enable DynamoDB stream (currently not supported by community.aws.dynamodb_table) + command: aws dynamodb update-table --table-name "{{ dynamodb_table_name }}" --stream-specification StreamEnabled=True,StreamViewType=KEYS_ONLY + environment: + AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" + AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" + AWS_SESSION_TOKEN: "{{ security_token | default('') }}" + AWS_DEFAULT_REGION: "{{ aws_region }}" + register: enable_stream_result +- name: convert it to an object + set_fact: + enable_stream_result: "{{ enable_stream_result.stdout | from_json }}" +- name: Get DynamoDB stream ARN + set_fact: + dynamo_stream_arn: "{{ enable_stream_result.TableDescription.LatestStreamArn }}" + +# CREATE MINIMAL LAMBDA FUNCTION +# https://github.com/ansible/ansible/issues/77257 +- name: Set async_dir for HOME env + ansible.builtin.set_fact: + ansible_async_dir: "{{ lookup('env', 'HOME') }}/.ansible_async_{{ tiny_prefix }}/" + when: (lookup('env', 'HOME')) + +- name: create minimal lambda role + iam_role: + name: '{{ lambda_role_name }}' + assume_role_policy_document: '{{ lookup("file", "minimal_trust_policy.json")}}' + create_instance_profile: false + managed_policies: + - arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess + - arn:aws:iam::aws:policy/AWSLambdaInvocation-DynamoDB + - arn:aws:iam::aws:policy/service-role/AWSLambdaDynamoDBExecutionRole + register: iam_role +- name: wait 10 seconds for role to become available + pause: + seconds: 10 + when: iam_role.changed + +- name: move lambda into place for archive module + copy: + src: mini_lambda.py + dest: '{{ output_dir }}/mini_lambda.py' + mode: preserve +- name: bundle lambda into a zip + register: zip_res + archive: + format: zip + path: '{{ output_dir }}/mini_lambda.py' + dest: '{{ output_dir }}/mini_lambda.zip' + +- name: test state=present - upload the lambda + lambda: + name: '{{ lambda_function_name }}' + runtime: '{{ lambda_python_runtime }}' + handler: '{{ lambda_python_handler }}' + role: '{{ lambda_role_name }}' + zip_file: '{{ zip_res.dest }}' + architecture: x86_64 + register: result + vars: + ansible_python_interpreter: '{{ botocore_virtualenv_interpreter }}' + +- name: assert lambda upload succeeded + assert: + that: + - result.changed + +- name: Get lambda function ARN + ansible.builtin.set_fact: + lambda_function_arn: "{{ result.configuration.function_arn }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/teardown.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/teardown.yml new file mode 100644 index 000000000..8b566aa7f --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/teardown.yml @@ -0,0 +1,33 @@ +--- +- debug: msg="Starting test Teardown......" 
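+# Teardown runs in reverse dependency order: the event source mapping is removed first (with ignore_errors, so cleanup continues even after a partially failed setup), then the Lambda function, the DynamoDB table and finally the IAM role.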
+ +- name: Delete DynamoDB stream event mapping (trigger) + amazon.aws.lambda_event: + state: absent + event_source: stream + function_arn: '{{ lambda_function_arn }}' + source_params: + source_arn: "{{ dynamo_stream_arn }}" + enabled: True + batch_size: 500 + starting_position: LATEST + function_response_types: + - ReportBatchItemFailures + register: create_lambda_event_result + ignore_errors: true + +- name: Delete lambda function + lambda: + name: '{{ lambda_function_name }}' + state: absent + +- name: Delete dynamo table + community.aws.dynamodb_table: + name: "{{ dynamodb_table_name }}" + state: absent + +- name: Delete the role + community.aws.iam_role: + name: '{{ lambda_role_name }}' + assume_role_policy_document: '{{ lookup("file", "minimal_trust_policy.json")}}' + state: absent diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_layer/aliases b/ansible_collections/amazon/aws/tests/integration/targets/lambda_layer/aliases new file mode 100644 index 000000000..d026dde5e --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_layer/aliases @@ -0,0 +1,2 @@ +cloud/aws +lambda_layer_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_layer/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_layer/defaults/main.yml new file mode 100644 index 000000000..4f6cbf245 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_layer/defaults/main.yml @@ -0,0 +1,10 @@ +--- +lambda_handler_content: | + # Copyright: Ansible Project + # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + import logging + from datetime import datetime + logger = logging.getLogger() + logger.setLevel(logging.INFO) + def lambda_handler(event, context): + logger.info('Ansible amazon.aws collection lambda handler executed at {0}'.format(datetime.now().strftime("%y%m%d-%H%M%S"))) diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_layer/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_layer/tasks/main.yml new file mode 100644 index 000000000..8d511f00a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_layer/tasks/main.yml @@ -0,0 +1,248 @@ +--- +- module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key | default(omit) }}' + aws_secret_key: '{{ aws_secret_key | default(omit) }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region | default(omit) }}' + + collections: + - amazon.aws + + vars: + s3_bucket_name: "{{ resource_prefix }}-bucket" + s3_bucket_object: "{{ resource_prefix }}-obj-1" + layer_name: "{{ resource_prefix }}-layer" + + block: + - name: Create temporary directory + tempfile: + state: directory + suffix: .lambda_handler + register: _dir + + - copy: + content: "{{ lambda_handler_content }}" + dest: "{{ _dir.path }}/lambda_handler.py" + remote_src: true + + - set_fact: + zip_file_path: "{{ _dir.path }}/lambda_handler.zip" + + - name: Create lambda handler archive + archive: + path: "{{ _dir.path }}/lambda_handler.py" + dest: "{{ zip_file_path }}" + format: zip + + - name: Create S3 bucket for testing + s3_bucket: + name: "{{ s3_bucket_name }}" + state: present + + - name: add object into bucket + s3_object: + bucket: "{{ s3_bucket_name }}" + mode: put + object: "{{ s3_bucket_object }}" + # permission: public-read # Commented out because botocore.exceptions.ClientError: An error occurred 
(AccessControlListNotSupported) when calling the PutObject operation: The bucket does not allow ACLs + src: "{{ zip_file_path }}" + + - name: Create lambda layer (check_mode=true) + lambda_layer: + name: "{{ layer_name }}" + description: '{{ resource_prefix }} lambda layer first version' + content: + zip_file: "{{ zip_file_path }}" + compatible_runtimes: + - python3.7 + license_info: GPL-3.0-only + register: create_check_mode + check_mode: true + + - name: Retrieve all layers versions + lambda_layer_info: + name: "{{ layer_name }}" + register: layers + + - name: Ensure lambda layer was not created + assert: + that: + - create_check_mode is changed + - create_check_mode.msg == "Create operation skipped - running in check mode" + - layers.layers_versions | length == 0 + + - name: Create lambda layer (first version) + lambda_layer: + name: "{{ layer_name }}" + description: '{{ resource_prefix }} lambda layer first version' + content: + zip_file: "{{ zip_file_path }}" + compatible_runtimes: + - python3.7 + license_info: GPL-3.0-only + register: first_version + + - name: Create another lambda layer version + lambda_layer: + name: "{{ layer_name }}" + description: '{{ resource_prefix }} lambda layer second version' + content: + s3_bucket: "{{ s3_bucket_name }}" + s3_key: "{{ s3_bucket_object }}" + compatible_runtimes: + - python3.7 + license_info: GPL-3.0-only + register: last_version + + - name: Retrieve all layers with latest version + lambda_layer_info: + register: layers + + - name: Ensure layer created above was found + assert: + that: + - '"layers_versions" in layers' + - first_version.layer_versions | length == 1 + - last_version.layer_versions | length == 1 + - last_version.layer_versions.0.layer_arn in layers_arns + - last_version.layer_versions.0.layer_version_arn in layers_version_arns + - first_version.layer_versions.0.layer_version_arn not in layers_version_arns + vars: + layers_arns: '{{ layers.layers_versions | map(attribute="layer_arn") | list }}' + layers_version_arns: '{{ layers.layers_versions | map(attribute="layer_version_arn") | list }}' + + - name: Retrieve all layers versions + lambda_layer_info: + name: "{{ layer_name }}" + register: layers + + - name: Ensure layer created above was found + assert: + that: + - '"layers_versions" in layers' + - layers.layers_versions | length == 2 + - first_version.layer_versions | length == 1 + - last_version.layer_versions | length == 1 + - last_version.layer_versions.0.layer_version_arn in layers_version_arns + - first_version.layer_versions.0.layer_version_arn in layers_version_arns + vars: + layers_version_arns: '{{ layers.layers_versions | map(attribute="layer_version_arn") | list }}' + + - name: Delete latest layer version + lambda_layer: + name: "{{ layer_name }}" + version: "{{ last_version.layer_versions.0.version }}" + state: absent + check_mode: true + register: delete_check_mode + + - name: Retrieve all layers versions + lambda_layer_info: + name: "{{ layer_name }}" + register: layers + + - name: Ensure no layer version was deleted + assert: + that: + - delete_check_mode is changed + - delete_check_mode.layer_versions | length == 1 + - layers.layers_versions | length == 2 + - last_version.layer_versions.0.layer_version_arn in layers_version_arns + - first_version.layer_versions.0.layer_version_arn in layers_version_arns + vars: + layers_version_arns: '{{ layers.layers_versions | map(attribute="layer_version_arn") | list }}' + + - name: Delete latest layer version + lambda_layer: + name: "{{ layer_name }}" + version: "{{ 
last_version.layer_versions.0.version }}" + state: absent + register: delete_layer + + - name: Retrieve all layers versions + lambda_layer_info: + name: "{{ layer_name }}" + register: layers + + - name: Ensure latest layer version was deleted + assert: + that: + - delete_layer is changed + - delete_layer.layer_versions | length == 1 + - layers.layers_versions | length == 1 + - last_version.layer_versions.0.layer_version_arn not in layers_version_arns + - first_version.layer_versions.0.layer_version_arn in layers_version_arns + vars: + layers_version_arns: '{{ layers.layers_versions | map(attribute="layer_version_arn") | list }}' + + - name: Delete again the latest layer version (idempotency) + lambda_layer: + name: "{{ layer_name }}" + version: "{{ last_version.layer_versions.0.version }}" + state: absent + register: delete_idempotent + + - name: Ensure nothing changed + assert: + that: + - delete_idempotent is not changed + + - name: Create multiple lambda layer versions + lambda_layer: + name: "{{ layer_name }}" + description: '{{ resource_prefix }} lambda layer version compatible with python3.{{ item }}' + content: + s3_bucket: "{{ s3_bucket_name }}" + s3_key: "{{ s3_bucket_object }}" + compatible_runtimes: + - "python3.{{ item }}" + license_info: GPL-3.0-only + with_items: ["9", "10"] + + - name: Delete all layer versions + lambda_layer: + name: "{{ layer_name }}" + version: -1 + state: absent + register: delete_layer + + - name: Retrieve all layers versions + lambda_layer_info: + name: "{{ layer_name }}" + register: layers + + - name: Ensure layer does not exist anymore + assert: + that: + - delete_layer is changed + - delete_layer.layer_versions | length > 1 + - layers.layers_versions | length == 0 + + always: + - name: Delete lambda layer if not deleted during testing + lambda_layer: + name: "{{ layer_name }}" + version: -1 + state: absent + ignore_errors: true + + - name: Delete temporary directory + file: + state: absent + path: "{{ _dir.path }}" + ignore_errors: true + + - name: Remove object from bucket + s3_object: + bucket: "{{ s3_bucket_name }}" + mode: delobj + object: "{{ s3_bucket_object }}" + ignore_errors: true + + - name: Delete S3 bucket + s3_bucket: + name: "{{ s3_bucket_name }}" + force: true + state: absent + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/aliases b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/aliases new file mode 100644 index 000000000..4ef4b2067 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/aliases @@ -0,0 +1 @@ +cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/defaults/main.yml new file mode 100644 index 000000000..4f4252fa0 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/defaults/main.yml @@ -0,0 +1,6 @@ +--- +# defaults file for lambda_policy integration test +# IAM role names have to be less than 64 characters +# we hash the resource_prefix to get a shorter, unique string +lambda_function_name: '{{ tiny_prefix }}-api-endpoint' +lambda_role_name: 'ansible-test-{{ tiny_prefix }}-lambda-policy' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/files/mini_http_lambda.py b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/files/mini_http_lambda.py new file mode 100644 index 
000000000..caccac908 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/files/mini_http_lambda.py @@ -0,0 +1,40 @@ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + + +def handler(event, context): + """ + The handler function is the function which gets called each time + the lambda is run. + """ + # printing goes to the cloudwatch log allowing us to simply debug the lambda if we can find + # the log entry. + print("got event:\n" + json.dumps(event)) + + # if the name parameter isn't present this can throw an exception + # which will result in an amazon chosen failure from the lambda + # which can be completely fine. + + name = event["pathParameters"]["greet_name"] + + return {"statusCode": 200, + "body": 'hello: "' + name + '"', + "headers": {}} + + +def main(): + """ + This main function will normally never be called during normal + lambda use. It is here for testing the lambda program only. + """ + event = {"name": "james"} + context = None + print(handler(event, context)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/files/minimal_trust_policy.json b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/files/minimal_trust_policy.json new file mode 100644 index 000000000..fb84ae9de --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/files/minimal_trust_policy.json @@ -0,0 +1,12 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": "lambda.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] +} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/meta/main.yml new file mode 100644 index 000000000..32cf5dda7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/tasks/main.yml new file mode 100644 index 000000000..e0b514bde --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/tasks/main.yml @@ -0,0 +1,144 @@ +- name: Integration testing for lambda_policy + module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + collections: + - community.general + - amazon.aws + + block: + - name: create minimal lambda role + iam_role: + name: '{{ lambda_role_name }}' + assume_role_policy_document: '{{ lookup("file", "minimal_trust_policy.json") }}' + create_instance_profile: false + managed_policies: + - 'arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess' + register: iam_role + - name: wait 10 seconds for role to become available + pause: + seconds: 10 + when: iam_role.changed + + - name: test with no parameters + lambda_policy: null + register: result + ignore_errors: true + - name: assert failure when called with no parameters + assert: + that: + - result.failed + - 'result.msg.startswith("missing required arguments: ")' + - '"action" in result.msg' + - '"function_name" in result.msg' + 
- '"principal" in result.msg' + - '"statement_id" in result.msg' + + - name: move lambda into place for archive module + copy: + src: mini_http_lambda.py + dest: '{{ output_dir }}/mini_http_lambda.py' + mode: preserve + - name: bundle lambda into a zip + register: zip_res + archive: + format: zip + path: '{{ output_dir }}/mini_http_lambda.py' + dest: '{{ output_dir }}/mini_http_lambda.zip' + - name: create minimal lambda role + iam_role: + name: ansible_lambda_role + assume_role_policy_document: '{{ lookup(''file'', ''minimal_trust_policy.json'', convert_data=False) }}' + create_instance_profile: false + register: iam_role + - name: wait 10 seconds for role to become available + pause: + seconds: 10 + when: iam_role.changed + - name: test state=present - upload the lambda + lambda: + name: '{{lambda_function_name}}' + runtime: python3.9 + handler: mini_http_lambda.handler + role: '{{ lambda_role_name }}' + zip_file: '{{zip_res.dest}}' + register: lambda_result + - name: get the aws account ID for use in future commands + aws_caller_info: {} + register: aws_caller_info + - name: register lambda uri for use in template + set_fact: + mini_lambda_uri: arn:aws:apigateway:{{ aws_region }}:lambda:path/2015-03-31/functions/arn:aws:lambda:{{ aws_region }}:{{ aws_caller_info.account }}:function:{{ lambda_result.configuration.function_name }}/invocations + - name: build API file + template: + src: endpoint-test-swagger-api.yml.j2 + dest: '{{output_dir}}/endpoint-test-swagger-api.yml.j2' + - name: deploy new API + aws_api_gateway: + api_file: '{{output_dir}}/endpoint-test-swagger-api.yml.j2' + stage: lambdabased + register: create_result + - name: register api id for later + set_fact: + api_id: '{{ create_result.api_id }}' + - name: check API fails with permissions failure + uri: + url: https://{{create_result.api_id}}.execute-api.{{aws_region}}.amazonaws.com/lambdabased/mini/Mr_Ansible_Tester + register: unauth_uri_result + ignore_errors: true + - name: assert internal server error due to permissions + assert: + that: + - unauth_uri_result is failed + - unauth_uri_result.status == 500 + - name: give api gateway execute permissions on lambda + lambda_policy: + function_name: '{{ lambda_function_name }}' + state: present + statement_id: api-gateway-invoke-lambdas + action: lambda:InvokeFunction + principal: apigateway.amazonaws.com + source_arn: arn:aws:execute-api:{{ aws_region }}:{{ aws_caller_info.account }}:*/* + - name: try again but with ARN + lambda_policy: + function_name: '{{ lambda_result.configuration.function_arn }}' + state: present + statement_id: api-gateway-invoke-lambdas + action: lambda:InvokeFunction + principal: apigateway.amazonaws.com + source_arn: arn:aws:execute-api:{{ aws_region }}:{{ aws_caller_info.account }}:*/* + - name: check API works with execute permissions + uri: + url: https://{{create_result.api_id}}.execute-api.{{aws_region}}.amazonaws.com/lambdabased/mini/Mr_Ansible_Tester + register: uri_result + - name: assert API works success + assert: + that: + - uri_result + - name: deploy new API + aws_api_gateway: + api_file: '{{output_dir}}/endpoint-test-swagger-api.yml.j2' + stage: lambdabased + register: create_result + ignore_errors: true + always: + - name: destroy lambda for test cleanup if created + lambda: + name: '{{lambda_function_name}}' + state: absent + register: result + ignore_errors: true + - name: destroy API for test cleanup if created + aws_api_gateway: + state: absent + api_id: '{{api_id}}' + register: destroy_result + ignore_errors: true + - name: Clean 
up test role + iam_role: + name: '{{ lambda_role_name }}' + state: absent + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/templates/endpoint-test-swagger-api.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/templates/endpoint-test-swagger-api.yml.j2 new file mode 100644 index 000000000..d62188477 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/templates/endpoint-test-swagger-api.yml.j2 @@ -0,0 +1,39 @@ +--- +swagger: "2.0" +info: + version: "2017-05-11T12:14:59Z" + title: "{{resource_prefix}}LambdaBased_API" +host: "fakeexample.execute-api.us-east-1.amazonaws.com" +basePath: "/lambdabased" +schemes: +- "https" +paths: + /mini/{greet_name}: + get: + produces: + - "application/json" + parameters: + - name: "greet_name" + in: "path" + required: true + type: "string" + responses: + 200: + description: "200 response" + schema: + $ref: "#/definitions/Empty" + x-amazon-apigateway-integration: + responses: + default: + statusCode: "200" + uri: "{{mini_lambda_uri}}" + requestTemplates: + application/json: "{\"statusCode\": 200}" + passthroughBehavior: "when_no_match" + httpMethod: "POST" + contentHandling: "CONVERT_TO_TEXT" + type: "aws_proxy" +definitions: + Empty: + type: "object" + title: "Empty Schema" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/legacy_missing_tests/README.md b/ansible_collections/amazon/aws/tests/integration/targets/legacy_missing_tests/README.md new file mode 100644 index 000000000..03b5bdc0d --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/legacy_missing_tests/README.md @@ -0,0 +1,5 @@ +## Fake integration suite + +This is a fake integration suite whose aliases file lists every module that lacks integration tests (some of them are covered by unit tests). + +This fake suite exists to support the new CI ansible-test-splitter behaviour. When a module without a test suite is modified, the splitter finds no matching target, and the CI would otherwise have to run for the entire collection. Listing those modules here gives the splitter a target to match, so the CI no longer needs to run for the whole collection; and since the listed modules are marked as disabled, their tests are automatically skipped. 
\ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/integration/targets/legacy_missing_tests/aliases b/ansible_collections/amazon/aws/tests/integration/targets/legacy_missing_tests/aliases new file mode 100644 index 000000000..3d3a12fd6 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/legacy_missing_tests/aliases @@ -0,0 +1,5 @@ +disabled + +lambda_event +rds_instance_snapshot +rds_snapshot_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/legacy_missing_tests/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/legacy_missing_tests/meta/main.yml new file mode 100644 index 000000000..32cf5dda7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/legacy_missing_tests/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_account_attribute/aliases b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_account_attribute/aliases new file mode 100644 index 000000000..4ef4b2067 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_account_attribute/aliases @@ -0,0 +1 @@ +cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_account_attribute/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_account_attribute/meta/main.yml new file mode 100644 index 000000000..32cf5dda7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_account_attribute/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_account_attribute/tasks/main.yaml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_account_attribute/tasks/main.yaml new file mode 100644 index 000000000..0dcc162b8 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_account_attribute/tasks/main.yaml @@ -0,0 +1,130 @@ +- set_fact: + # As a lookup plugin we don't have access to module_defaults + connection_args: + region: "{{ aws_region }}" + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + aws_security_token: "{{ security_token | default(omit) }}" + no_log: True + +- module_defaults: + group/aws: + region: "{{ aws_region }}" + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + block: + - name: 'Check for EC2 Classic support (has-ec2-classic)' + set_fact: + has_ec2_classic: "{{ lookup('amazon.aws.aws_account_attribute', + attribute='has-ec2-classic', + wantlist=True, + **connection_args) }}" + - assert: + that: + - ( has_ec2_classic is sameas true ) or ( has_ec2_classic is sameas false ) + + - name: 'Fetch all account attributes (wantlist=True)' + set_fact: + account_attrs: "{{ lookup('amazon.aws.aws_account_attribute', + wantlist=True, + **connection_args) }}" + - assert: + that: + # Not guaranteed that there will be a default-vpc + - '"default-vpc" in account_attrs' + - '"max-elastic-ips" in account_attrs' + - account_attrs['max-elastic-ips'][0] | int + - '"max-instances" in account_attrs' + - account_attrs['max-instances'][0] | int + # EC2 and VPC are both valid values, but we can't guarantee which are available + - '"supported-platforms" in account_attrs' + - account_attrs['supported-platforms'] | difference(['VPC', 'EC2']) | length == 0 + - '"vpc-max-elastic-ips" 
in account_attrs' + - account_attrs['vpc-max-elastic-ips'][0] | int + - '"vpc-max-security-groups-per-interface" in account_attrs' + - account_attrs['vpc-max-security-groups-per-interface'][0] | int + + # Not especially useful, but let's be thorough and leave hints about what folks can + # expect + - name: 'Fetch all account attributes (wantlist=False)' + set_fact: + account_attrs: "{{ lookup('amazon.aws.aws_account_attribute', + wantlist=False, + **connection_args) }}" + - assert: + that: + - '"default-vpc" in split_attrs' + - '"max-elastic-ips" in split_attrs' + - '"max-instances" in split_attrs' + - '"supported-platforms" in split_attrs' + - '"vpc-max-elastic-ips" in split_attrs' + - '"vpc-max-security-groups-per-interface" in split_attrs' + vars: + split_attrs: '{{ account_attrs.split(",") }}' + + - name: 'Check for Default VPC (default-vpc)' + set_fact: + default_vpc: "{{ lookup('amazon.aws.aws_account_attribute', + attribute='default-vpc', + **connection_args) }}" + - assert: + that: + - (default_vpc == "none") + or + default_vpc.startswith("vpc-") + + - name: 'Check for maximum number of EIPs (max-elastic-ips)' + set_fact: + max_eips: "{{ lookup('amazon.aws.aws_account_attribute', + attribute='max-elastic-ips', + **connection_args) }}" + - assert: + that: + - max_eips | int + + - name: 'Check for maximum number of Instances (max-instances)' + set_fact: + max_instances: "{{ lookup('amazon.aws.aws_account_attribute', + attribute='max-instances', + **connection_args) }}" + - assert: + that: + - max_instances | int + + - name: 'Check for maximum number of EIPs in a VPC (vpc-max-elastic-ips)' + set_fact: + vpc_max_eips: "{{ lookup('amazon.aws.aws_account_attribute', + attribute='vpc-max-elastic-ips', + **connection_args) }}" + - assert: + that: + - vpc_max_eips | int + + - name: 'Check for maximum number of Security Groups per Interface (vpc-max-security-groups-per-interface)' + set_fact: + max_sg_per_int: "{{ lookup('amazon.aws.aws_account_attribute', + attribute='vpc-max-security-groups-per-interface', + **connection_args) }}" + - assert: + that: + - max_sg_per_int | int + + - name: 'Check for support of Classic EC2 vs VPC (supported-platforms)' + set_fact: + supported_plat: "{{ lookup('amazon.aws.aws_account_attribute', + attribute='supported-platforms', + **connection_args) }}" + - assert: + that: + - supported_plat.split(',') | difference(['VPC', 'EC2']) | length == 0 + + - name: 'Check for support of Classic EC2 vs VPC (supported-platforms) (wantlist)' + set_fact: + supported_plat: "{{ lookup('amazon.aws.aws_account_attribute', + attribute='supported-platforms', + wantlist=True, + **connection_args) }}" + - assert: + that: + - supported_plat | difference(['VPC', 'EC2']) | length == 0 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/aliases b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/aliases new file mode 100644 index 000000000..4ef4b2067 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/aliases @@ -0,0 +1 @@ +cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/meta/main.yml new file mode 100644 index 000000000..32cf5dda7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git 
a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/tasks/main.yaml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/tasks/main.yaml new file mode 100644 index 000000000..a22580e3b --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/tasks/main.yaml @@ -0,0 +1,120 @@ +- set_fact: + # As a lookup plugin we don't have access to module_defaults + connection_args: + region: "{{ aws_region }}" + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + aws_security_token: "{{ security_token | default(omit) }}" + no_log: True + +- module_defaults: + group/aws: + region: "{{ aws_region }}" + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + collections: + - amazon.aws + - community.aws + block: + - name: define secret name + set_fact: + secret_name: "ansible-test-{{ tiny_prefix }}-secret" + secret_value: "{{ lookup('password', '/dev/null chars=ascii_lowercase,digits,punctuation length=16') }}" + skip: "skip" + warn: "warn" + + - name: lookup missing secret (skip) + set_fact: + missing_secret: "{{ lookup('amazon.aws.aws_secret', secret_name, on_missing=skip, **connection_args) }}" + + - name: assert that missing_secret is defined + assert: + that: + - missing_secret is defined + - missing_secret | list | length == 0 + + - name: lookup missing secret (warn) + set_fact: + missing_secret: "{{ lookup('amazon.aws.aws_secret', secret_name, on_missing=warn, **connection_args) }}" + + - name: assert that missing_secret is defined + assert: + that: + - missing_secret is defined + - missing_secret | list | length == 0 + + - name: lookup missing secret (error) + set_fact: + missing_secret: "{{ lookup('amazon.aws.aws_secret', secret_name, **connection_args) }}" + ignore_errors: True + register: get_missing_secret + + - name: assert that setting the missing_secret failed + assert: + that: + - get_missing_secret is failed + + - name: create secret "{{ secret_name }}" + aws_secret: + name: "{{ secret_name }}" + secret: "{{ secret_value }}" + tags: + ansible-test: "aws-tests-integration" + state: present + + - name: read secret value + set_fact: + look_secret: "{{ lookup('amazon.aws.aws_secret', secret_name, **connection_args) }}" + + - name: assert that secret was successfully retrieved + assert: + that: + - look_secret == secret_value + + - name: delete secret + aws_secret: + name: "{{ secret_name }}" + state: absent + recovery_window: 7 + + - name: lookup deleted secret (skip) + set_fact: + deleted_secret: "{{ lookup('amazon.aws.aws_secret', secret_name, on_deleted=skip, **connection_args) }}" + + - name: assert that deleted_secret is defined + assert: + that: + - deleted_secret is defined + - deleted_secret | list | length == 0 + + - name: lookup deleted secret (warn) + set_fact: + deleted_secret: "{{ lookup('amazon.aws.aws_secret', secret_name, on_deleted=warn, **connection_args) }}" + + - name: assert that deleted_secret is defined + assert: + that: + - deleted_secret is defined + - deleted_secret | list | length == 0 + + - name: lookup deleted secret (error) + set_fact: + missing_secret: "{{ lookup('amazon.aws.aws_secret', secret_name, **connection_args) }}" + ignore_errors: True + register: get_deleted_secret + + - name: assert that setting the deleted_secret failed + assert: + that: + - get_deleted_secret is failed + + always: + + # delete secret created + - name: delete secret + aws_secret: + name: 
"{{ secret_name }}" + state: absent + recovery_window: 0 + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_service_ip_ranges/aliases b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_service_ip_ranges/aliases new file mode 100644 index 000000000..4ef4b2067 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_service_ip_ranges/aliases @@ -0,0 +1 @@ +cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_service_ip_ranges/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_service_ip_ranges/meta/main.yml new file mode 100644 index 000000000..32cf5dda7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_service_ip_ranges/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_service_ip_ranges/tasks/main.yaml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_service_ip_ranges/tasks/main.yaml new file mode 100644 index 000000000..4599ba19a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_service_ip_ranges/tasks/main.yaml @@ -0,0 +1,148 @@ +- name: lookup range with no arguments + set_fact: + no_params: "{{ lookup('amazon.aws.aws_service_ip_ranges') }}" + +- name: assert that we're returned a single string + assert: + that: + - no_params is defined + - no_params is string + +- name: lookup range with wantlist + set_fact: + want_list: "{{ lookup('amazon.aws.aws_service_ip_ranges', wantlist=True) }}" + want_ipv6_list: "{{ lookup('amazon.aws.aws_service_ip_ranges', wantlist=True, ipv6_prefixes=True) }}" + +- name: assert that we're returned a list + assert: + that: + - want_list is defined + - want_list is iterable + - want_list is not string + - want_list | length > 1 + - want_list[0] | ansible.utils.ipv4 + - want_ipv6_list is defined + - want_ipv6_list is iterable + - want_ipv6_list is not string + - want_ipv6_list | length > 1 + - want_ipv6_list[0] | ansible.utils.ipv6 + + +- name: lookup range with service + set_fact: + s3_ips: "{{ lookup('amazon.aws.aws_service_ip_ranges', service='S3', wantlist=True) }}" + s3_ipv6s: "{{ lookup('amazon.aws.aws_service_ip_ranges', service='S3', wantlist=True, ipv6_prefixes=True) }}" + +- name: assert that we're returned a list + assert: + that: + - s3_ips is defined + - s3_ips is iterable + - s3_ips is not string + - s3_ips | length > 1 + - s3_ips[0] | ansible.utils.ipv4 + - s3_ipv6s is defined + - s3_ipv6s is iterable + - s3_ipv6s is not string + - s3_ipv6s | length > 1 + - s3_ipv6s[0] | ansible.utils.ipv6 + +- name: lookup range with a different service + set_fact: + route53_ips: "{{ lookup('amazon.aws.aws_service_ip_ranges', service='ROUTE53_HEALTHCHECKS', wantlist=True) }}" + route53_ipv6s: "{{ lookup('amazon.aws.aws_service_ip_ranges', service='ROUTE53_HEALTHCHECKS', wantlist=True, ipv6_prefixes=True) }}" + +- name: assert that we're returned a list + assert: + that: + - route53_ips is defined + - route53_ips is iterable + - route53_ips is not string + - route53_ips | length > 1 + - route53_ips[0] | ansible.utils.ipv4 + - route53_ipv6s is defined + - route53_ipv6s is iterable + - route53_ipv6s is not string + - route53_ipv6s | length > 1 + - route53_ipv6s[0] | ansible.utils.ipv6 + + +- name: assert that service IPV4s and IPV6s do not overlap + assert: + that: + - route53_ips | intersect(s3_ips) | length == 0 + - 
route53_ipv6s | intersect(s3_ipv6s) | length == 0 + +- name: lookup range with region + set_fact: + us_east_1_ips: "{{ lookup('amazon.aws.aws_service_ip_ranges', region='us-east-1', wantlist=True) }}" + +- name: lookup IPV6 range with region + set_fact: + us_east_1_ipv6s: "{{ lookup('amazon.aws.aws_service_ip_ranges', region='us-east-1', wantlist=True, ipv6_prefixes=True) }}" + +- name: assert that we're returned a list + assert: + that: + - us_east_1_ips is defined + - us_east_1_ips is iterable + - us_east_1_ips is not string + - us_east_1_ips | length > 1 + - us_east_1_ips[0] | ansible.utils.ipv4 + - us_east_1_ipv6s is defined + - us_east_1_ipv6s is iterable + - us_east_1_ipv6s is not string + - us_east_1_ipv6s | length > 1 + - us_east_1_ipv6s[0] | ansible.utils.ipv6 + +- name: lookup range with a different region + set_fact: + eu_central_1_ips: "{{ lookup('amazon.aws.aws_service_ip_ranges', region='eu-central-1', wantlist=True) }}" + eu_central_1_ipv6s: "{{ lookup('amazon.aws.aws_service_ip_ranges', region='eu-central-1', wantlist=True, ipv6_prefixes=True) }}" + +- name: assert that we're returned a list + assert: + that: + - eu_central_1_ips is defined + - eu_central_1_ips is iterable + - eu_central_1_ips is not string + - eu_central_1_ips | length > 1 + - eu_central_1_ips[0] | ansible.utils.ipv4 + - eu_central_1_ipv6s is defined + - eu_central_1_ipv6s is iterable + - eu_central_1_ipv6s is not string + - eu_central_1_ipv6s | length > 1 + - eu_central_1_ipv6s[0] | ansible.utils.ipv6 + +- name: assert that regional IPs don't overlap + assert: + that: + - eu_central_1_ips | intersect(us_east_1_ips) | length == 0 + - eu_central_1_ipv6s | intersect(us_east_1_ipv6s) | length == 0 + +- name: lookup range with service and region + set_fact: + s3_us_ips: "{{ lookup('amazon.aws.aws_service_ip_ranges', region='us-east-1', service='S3', wantlist=True) }}" + s3_us_ipv6s: "{{ lookup('amazon.aws.aws_service_ip_ranges', region='us-east-1', service='S3', wantlist=True, ipv6_prefixes=True) }}" + +- name: assert that we're returned a list + assert: + that: + - s3_us_ips is defined + - s3_us_ips is iterable + - s3_us_ips is not string + - s3_us_ips | length > 1 + - s3_us_ips[0] | ansible.utils.ipv4 + - s3_us_ipv6s is defined + - s3_us_ipv6s is iterable + - s3_us_ipv6s is not string + - s3_us_ipv6s | length > 1 + - s3_us_ipv6s[0] | ansible.utils.ipv6 + +- name: assert that the regional service IPs are a subset of the regional IPs and service IPs. 
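+  # Set containment via intersection: A is a subset of B exactly when
+  # (A | intersect(B)) has the same length as A; the checks below apply
+  # this to both the IPv4 and IPv6 prefix lists.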
+ assert: + that: + - ( s3_us_ips | intersect(us_east_1_ips) | length ) == ( s3_us_ips | length ) + - ( s3_us_ips | intersect(s3_ips) | length ) == ( s3_us_ips | length ) + - ( s3_us_ipv6s | intersect(us_east_1_ipv6s) | length ) == ( s3_us_ipv6s | length ) + - ( s3_us_ipv6s | intersect(s3_ipv6s) | length ) == ( s3_us_ipv6s | length ) diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/aliases b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/aliases new file mode 100644 index 000000000..4ef4b2067 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/aliases @@ -0,0 +1 @@ +cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/defaults/main.yml new file mode 100644 index 000000000..218afac1c --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/defaults/main.yml @@ -0,0 +1,2 @@ +--- +ssm_key_prefix: '{{ resource_prefix }}' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/meta/main.yml new file mode 100644 index 000000000..32cf5dda7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/tasks/main.yml new file mode 100644 index 000000000..d46c7b20b --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/tasks/main.yml @@ -0,0 +1,276 @@ +--- +- set_fact: + # As a lookup plugin we don't have access to module_defaults + connection_args: + region: "{{ aws_region }}" + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + aws_security_token: "{{ security_token | default(omit) }}" + no_log: True + +- name: 'aws_ssm lookup plugin integration tests' + collections: + - amazon.aws + module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + vars: + skip: 'skip' + warn: 'warn' + simple_name: '/{{ ssm_key_prefix }}/Simple' + simple_description: 'This is a simple example' + simple_value: 'A simple VALue' + updated_value: 'A simple (updated) VALue' + path_name: '/{{ ssm_key_prefix }}/path' + path_name_a: '{{ path_name }}/key_one' + path_shortname_a: 'key_one' + path_name_b: '{{ path_name }}/keyTwo' + path_shortname_b: 'keyTwo' + path_name_c: '{{ path_name }}/Nested/Key' + path_shortname_c: 'Key' + path_description: 'This is somewhere to store a set of keys' + path_value_a: 'value_one' + path_value_b: 'valueTwo' + path_value_c: 'Value Three' + missing_name: '{{ path_name }}/IDoNotExist' + block: + + # ============================================================ + # Simple key/value + - name: lookup a missing key (error) + set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, **connection_args) }}" + ignore_errors: true + register: lookup_missing + - assert: + that: + - lookup_missing is failed + + - name: lookup a missing key (warn) + set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, on_missing=warn, **connection_args) }}" + 
register: lookup_missing + - assert: + that: + - lookup_value | list | length == 0 + + - name: lookup a single missing key (skip) + set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, on_missing=skip, **connection_args) }}" + register: lookup_missing + - assert: + that: + - lookup_value | list | length == 0 + + - name: Create key/value pair in aws parameter store + aws_ssm_parameter_store: + name: '{{ simple_name }}' + description: '{{ simple_description }}' + value: '{{ simple_value }}' + + - name: Lookup a single key + set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, **connection_args) }}" + - assert: + that: + - lookup_value == simple_value + + - name: Create key/value pair in aws parameter store + aws_ssm_parameter_store: + name: '{{ simple_name }}' + description: '{{ simple_description }}' + value: '{{ simple_value }}' + + - name: Lookup a single key + set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, **connection_args) }}" + - assert: + that: + - lookup_value == simple_value + + - name: Update key/value pair in aws parameter store + aws_ssm_parameter_store: + name: '{{ simple_name }}' + description: '{{ simple_description }}' + value: '{{ updated_value }}' + + - name: Lookup updated single key + set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, **connection_args) }}" + - assert: + that: + - lookup_value == updated_value + + - name: Lookup original value from single key + set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name + ':1', **connection_args) }}" + - assert: + that: + - lookup_value == simple_value + + # ============================================================ + + - name: Create nested key/value pair in aws parameter store (1) + aws_ssm_parameter_store: + name: '{{ path_name_a }}' + description: '{{ path_description }}' + value: '{{ path_value_a }}' + + - name: Create nested key/value pair in aws parameter store (2) + aws_ssm_parameter_store: + name: '{{ path_name_b }}' + description: '{{ path_description }}' + value: '{{ path_value_b }}' + + - name: Create nested key/value pair in aws parameter store (3) + aws_ssm_parameter_store: + name: '{{ path_name_c }}' + description: '{{ path_description }}' + value: '{{ path_value_c }}' + + # ============================================================ + - name: Lookup keys using bypath + set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', path_name, bypath=True, wantlist=True, **connection_args ) | first }}" + - assert: + that: + - path_name_a in lookup_value + - lookup_value[path_name_a] == path_value_a + - path_name_b in lookup_value + - lookup_value[path_name_b] == path_value_b + - lookup_value | length == 2 + + - name: Lookup keys using bypath and recursive + set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', path_name, bypath=True, recursive=True, wantlist=True, **connection_args ) | first }}" + - assert: + that: + - path_name_a in lookup_value + - lookup_value[path_name_a] == path_value_a + - path_name_b in lookup_value + - lookup_value[path_name_b] == path_value_b + - path_name_c in lookup_value + - lookup_value[path_name_c] == path_value_c + - lookup_value | length == 3 + + - name: Lookup keys using bypath and shortnames + set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', path_name, bypath=True, shortnames=True, wantlist=True, **connection_args ) | first }}" + - assert: + that: + - path_shortname_a in lookup_value + - lookup_value[path_shortname_a] == path_value_a + - path_shortname_b in 
lookup_value + - lookup_value[path_shortname_b] == path_value_b + - lookup_value | length == 2 + + - name: Lookup keys using bypath, recursive and shortnames + set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', path_name, bypath=True, recursive=True, shortnames=True, wantlist=True, **connection_args ) | first }}" + - assert: + that: + - path_shortname_a in lookup_value + - lookup_value[path_shortname_a] == path_value_a + - path_shortname_b in lookup_value + - lookup_value[path_shortname_b] == path_value_b + - path_shortname_c in lookup_value + - lookup_value[path_shortname_c] == path_value_c + - lookup_value | length == 3 + + # ============================================================ + + - name: Explicitly lookup two keys + set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, path_name_a, wantlist=True, **connection_args) }}" + - assert: + that: + - lookup_value | list | length == 2 + - lookup_value[0] == updated_value + - lookup_value[1] == path_value_a + + ### + + - name: Explicitly lookup two keys - one missing + set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, missing_name, wantlist=True, **connection_args) }}" + ignore_errors: True + register: lookup_missing + - assert: + that: + - lookup_missing is failed + + - name: Explicitly lookup two keys - one missing (skip) + set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, missing_name, on_missing=skip, wantlist=True, **connection_args) }}" + - assert: + that: + - lookup_value | list | length == 2 + - lookup_value[0] == updated_value + - lookup_value | bool == False + + ### + + - name: Explicitly lookup two paths - one missing + set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', path_name, missing_name, bypath=True, wantlist=True, **connection_args) }}" + ignore_errors: True + register: lookup_missing + - assert: + that: + - lookup_missing is failed + + - name: Explicitly lookup two paths - one missing (skip) + set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', path_name, missing_name, on_missing=skip, bypath=True, wantlist=True, **connection_args) }}" + - assert: + that: + - lookup_value | list | length == 2 + - lookup_value[1] | bool == False + - path_name_a in lookup_value[0] + - lookup_value[0][path_name_a] == path_value_a + - path_name_b in lookup_value[0] + - lookup_value[0][path_name_b] == path_value_b + - lookup_value[0] | length == 2 + + ### + + - name: Explicitly lookup two paths with recurse - one missing + set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', path_name, missing_name, bypath=True, recursive=True, wantlist=True, **connection_args) }}" + ignore_errors: True + register: lookup_missing + - assert: + that: + - lookup_missing is failed + + - name: Explicitly lookup two paths with recurse - one missing (skip) + set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', path_name, missing_name, on_missing=skip, bypath=True, recursive=True, wantlist=True, **connection_args) }}" + - assert: + that: + - lookup_value | list | length == 2 + - lookup_value[1] | bool == False + - path_name_a in lookup_value[0] + - lookup_value[0][path_name_a] == path_value_a + - path_name_b in lookup_value[0] + - lookup_value[0][path_name_b] == path_value_b + - path_name_c in lookup_value[0] + - lookup_value[0][path_name_c] == path_value_c + - lookup_value[0] | length == 3 + + always: + # ============================================================ + - name: Delete remaining key/value pairs in aws parameter store + aws_ssm_parameter_store: + 
name: "{{item}}" + state: absent + ignore_errors: True + with_items: + - '{{ path_name_c }}' + - '{{ path_name_b }}' + - '{{ path_name_c }}' + - '{{ path_name }}' + - '{{ simple_name }}' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/aliases b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/aliases new file mode 100644 index 000000000..4fd9dff76 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/aliases @@ -0,0 +1 @@ +module_utils_botocore diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/main.yml new file mode 100644 index 000000000..a8dedcf47 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/main.yml @@ -0,0 +1,12 @@ +--- +- hosts: localhost + tasks: + - name: Call aws_az_info + amazon.aws.aws_az_info: + register: result + - name: Get called information + amazon.aws.aws_caller_info: + register: result + - assert: + that: + - lookup('ansible.builtin.env', '_ANSIBLE_PLACEBO_RECORD') or (lookup('ansible.builtin.env', '_ANSIBLE_PLACEBO_REPLAY') and result.user_id == "AWZBREIZHEOMABRONIFVGFS6GH") diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/record.sh b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/record.sh new file mode 100755 index 000000000..180e58d05 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/record.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash +# +set -eux + +if [ -d recording ]; then + echo "Please check and remove the 'recording' directory." 
+ exit 1 +fi +if [ -v ANSIBLE_TEST_PYTHON_VERSION ]; then + echo "Please call ./runme.sh directly without ansible-test" + exit 1 +fi +export _ANSIBLE_PLACEBO_RECORD=recording + +mkdir recording +ansible-playbook main.yml -vvv +account_id=$(aws sts get-caller-identity --query "Account" --output text) +user_id=$(aws sts get-caller-identity --query "UserId" --output text) +find recording -type f -exec sed -i "s,$account_id,123456789012,g" "{}" \; +find recording -type f -exec sed -i "s,$user_id,AIDA12345EXAMPLE54321,g" "{}" \; +find recording -type f -exec sed -i "s,$USER,george,g" "{}" \; +tar cfzv recording.tar.gz recording +rm -r recording diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/recording.tar.gz b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/recording.tar.gz new file mode 100644 index 000000000..29c8dd90a Binary files /dev/null and b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/recording.tar.gz differ diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/runme.sh b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/runme.sh new file mode 100755 index 000000000..2f0d591a4 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/runme.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash +# + +set -eux + +export ANSIBLE_ROLES_PATH=../ + + +tar xfzv recording.tar.gz +export _ANSIBLE_PLACEBO_REPLAY=${PWD}/recording +export AWS_ACCESS_KEY_ID=disabled +export AWS_SECRET_ACCESS_KEY=disabled +export AWS_SESSION_TOKEN=disabled +export AWS_DEFAULT_REGION=us-east-2 +ansible-playbook main.yml -vvv diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/aliases b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/aliases new file mode 100644 index 000000000..d13ca0492 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/aliases @@ -0,0 +1,4 @@ +cloud/aws + +module_utils_botocore +module_utils_modules diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/inventory b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/inventory new file mode 100644 index 000000000..5093e8582 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/inventory @@ -0,0 +1,6 @@ +[tests] +localhost + +[all:vars] +ansible_connection=local +ansible_python_interpreter="{{ ansible_playbook_python }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/main.yml new file mode 100644 index 000000000..29604c495 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/main.yml @@ -0,0 +1,8 @@ +- hosts: all + gather_facts: no + collections: + - amazon.aws + - community.aws + roles: + # Test the behaviour of module_utils.core.AnsibleAWSModule.client (boto3) + - 'ansibleawsmodule.client' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/meta/main.yml new file mode 100644 index 000000000..32cf5dda7 --- /dev/null +++ 
b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/files/amazonroot.pem b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/files/amazonroot.pem new file mode 100644 index 000000000..a6f3e92af --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/files/amazonroot.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsF +ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6 +b24gUm9vdCBDQSAxMB4XDTE1MDUyNjAwMDAwMFoXDTM4MDExNzAwMDAwMFowOTEL +MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv +b3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALJ4gHHKeNXj +ca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgHFzZM +9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qw +IFAGbHrQgLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6 +VOujw5H5SNz/0egwLX0tdHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L +93FcXmn/6pUCyziKrlA4b9v7LWIbxcceVOF34GfID5yHI9Y/QCB/IIDEgEw+OyQm +jgSubJrIqg0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AYYwHQYDVR0OBBYEFIQYzIU07LwMlJQuCFmcx7IQTgoIMA0GCSqGSIb3DQEBCwUA +A4IBAQCY8jdaQZChGsV2USggNiMOruYou6r4lK5IpDB/G/wkjUu0yKGX9rbxenDI +U5PMCCjjmCXPI6T53iHTfIUJrU6adTrCC2qJeHZERxhlbI1Bjjt/msv0tadQ1wUs +N+gDS63pYaACbvXy8MWy7Vu33PqUXHeeE6V/Uq2V8viTO96LXFvKWlJbYK8U90vv +o/ufQJVtMVT8QtPHRh8jrdkPSHCa2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU +5MsI+yMRQ+hDKXJioaldXgjUkK642M4UwtBV8ob2xJNDd2ZhwLnoQdeXeGADbkpy +rqXRfboQnoZsG4q5WTP468SQvvG5 +-----END CERTIFICATE----- diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/files/isrg-x1.pem b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/files/isrg-x1.pem new file mode 100644 index 000000000..b85c8037f --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/files/isrg-x1.pem @@ -0,0 +1,31 @@ +-----BEGIN CERTIFICATE----- +MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw +TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh +cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4 +WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu +ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY +MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc +h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+ +0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U +A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW +T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH +B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC +B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv +KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn +OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn +jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw +qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI +rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq 
+hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL +ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ +3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK +NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5 +ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur +TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC +jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc +oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq +4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA +mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d +emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc= +-----END CERTIFICATE----- diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/library/example_module.py b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/library/example_module.py new file mode 100644 index 000000000..5e2c8e3e8 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/library/example_module.py @@ -0,0 +1,46 @@ +#!/usr/bin/python +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# A bare-minimum Ansible Module based on AnsibleAWSModule used for testing some +# of the core behaviour around AWS/Boto3 connection details + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +try: + from botocore.exceptions import BotoCoreError, ClientError +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict + + +def main(): + module = AnsibleAWSModule( + argument_spec={}, + supports_check_mode=True, + ) + + decorator = AWSRetry.jittered_backoff() + client = module.client('ec2', retry_decorator=decorator) + + filters = ansible_dict_to_boto3_filter_list({'name': 'amzn2-ami-hvm-2.0.202006*-x86_64-gp2'}) + + try: + images = client.describe_images(aws_retry=True, ImageIds=[], Filters=filters, Owners=['amazon'], ExecutableUsers=[]) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg='Fail JSON AWS') + + # Return something, just because we can. 
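+    # Illustrative only (hypothetical values, not from a real API response):
+    # camel_dict_to_snake_dict({'Images': [{'ImageId': 'ami-123'}]}) returns
+    # {'images': [{'image_id': 'ami-123'}]}, the snake_case shape that
+    # Ansible results conventionally use.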
+    module.exit_json(
+        changed=False,
+        **camel_dict_to_snake_dict(images))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/meta/main.yml
new file mode 100644
index 000000000..d8b08ab22
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies: []
+collections:
+  - amazon.aws
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/ca_bundle.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/ca_bundle.yml
new file mode 100644
index 000000000..7ad4e7a34
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/ca_bundle.yml
@@ -0,0 +1,202 @@
+---
+- name: 'Create temporary location for CA files'
+  tempfile:
+    state: directory
+    suffix: 'test-CAs'
+  register: ca_tmp
+
+- name: 'Ensure we have Amazon''s root CA available to us'
+  copy:
+    src: 'amazonroot.pem'
+    dest: '{{ ca_tmp.path }}/amazonroot.pem'
+    mode: 0644
+
+- name: 'Ensure we have another CA (ISRG-X1) bundle available to us'
+  copy:
+    src: 'isrg-x1.pem'
+    dest: '{{ ca_tmp.path }}/isrg-x1.pem'
+    mode: 0644
+
+##################################################################################
+# Test disabling cert validation (make sure we don't error)
+
+- name: 'Test basic operation using default CA bundle (no validation) - parameter'
+  example_module:
+    region: '{{ aws_region }}'
+    access_key: '{{ aws_access_key }}'
+    secret_key: '{{ aws_secret_key }}'
+    security_token: '{{ security_token }}'
+    validate_certs: False
+  register: default_bundle_result
+
+- assert:
+    that:
+      - default_bundle_result is successful
+
+##################################################################################
+# Tests using Amazon's CA (the one the endpoint certs should be signed with)
+
+- name: 'Test basic operation using Amazon''s root CA - parameter'
+  example_module:
+    region: '{{ aws_region }}'
+    access_key: '{{ aws_access_key }}'
+    secret_key: '{{ aws_secret_key }}'
+    security_token: '{{ security_token }}'
+    aws_ca_bundle: '{{ ca_tmp.path }}/amazonroot.pem'
+  register: amazon_ca_result
+
+- assert:
+    that:
+      - amazon_ca_result is successful
+
+- name: 'Test basic operation using Amazon''s root CA - environment'
+  example_module:
+    region: '{{ aws_region }}'
+    access_key: '{{ aws_access_key }}'
+    secret_key: '{{ aws_secret_key }}'
+    security_token: '{{ security_token }}'
+  environment:
+    AWS_CA_BUNDLE: '{{ ca_tmp.path }}/amazonroot.pem'
+  register: amazon_ca_result
+
+- assert:
+    that:
+      - amazon_ca_result is successful
+
+- name: 'Test basic operation using Amazon''s root CA (no validation) - parameter'
+  example_module:
+    region: '{{ aws_region }}'
+    access_key: '{{ aws_access_key }}'
+    secret_key: '{{ aws_secret_key }}'
+    security_token: '{{ security_token }}'
+    aws_ca_bundle: '{{ ca_tmp.path }}/amazonroot.pem'
+    validate_certs: False
+  register: amazon_ca_result
+
+- assert:
+    that:
+      - amazon_ca_result is successful
+
+- name: 'Test basic operation using Amazon''s root CA (no validation) - environment'
+  example_module:
+    region: '{{ aws_region }}'
+    access_key: '{{ aws_access_key }}'
+    secret_key: '{{
aws_secret_key }}' + security_token: '{{ security_token }}' + validate_certs: False + environment: + AWS_CA_BUNDLE: '{{ ca_tmp.path }}/amazonroot.pem' + register: amazon_ca_result + +- assert: + that: + - amazon_ca_result is successful + +################################################################################## +# Tests using ISRG's CA (one that the endpoint certs *aren't* signed with) + +- name: 'Test basic operation using a different CA - parameter' + example_module: + region: '{{ aws_region }}' + access_key: '{{ aws_access_key }}' + secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token }}' + aws_ca_bundle: '{{ ca_tmp.path }}/isrg-x1.pem' + register: isrg_ca_result + ignore_errors: yes + +- assert: + that: + - isrg_ca_result is failed + # Caught when we try to do something, and passed to fail_json_aws + - '"CERTIFICATE_VERIFY_FAILED" in isrg_ca_result.msg' + - '"Fail JSON AWS" in isrg_ca_result.msg' + +- name: 'Test basic operation using a different CA - environment' + example_module: + region: '{{ aws_region }}' + access_key: '{{ aws_access_key }}' + secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token }}' + environment: + AWS_CA_BUNDLE: '{{ ca_tmp.path }}/isrg-x1.pem' + register: isrg_ca_result + ignore_errors: yes + +- assert: + that: + - isrg_ca_result is failed + # Caught when we try to do something, and passed to fail_json_aws + - '"CERTIFICATE_VERIFY_FAILED" in isrg_ca_result.msg' + - '"Fail JSON AWS" in isrg_ca_result.msg' + +- name: 'Test basic operation using a different CA (no validation) - parameter' + example_module: + region: '{{ aws_region }}' + access_key: '{{ aws_access_key }}' + secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token }}' + aws_ca_bundle: '{{ ca_tmp.path }}/isrg-x1.pem' + validate_certs: False + register: isrg_ca_result + +- assert: + that: + - isrg_ca_result is successful + +- name: 'Test basic operation using a different CA (no validation) - environment' + example_module: + region: '{{ aws_region }}' + access_key: '{{ aws_access_key }}' + secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token }}' + validate_certs: False + environment: + AWS_CA_BUNDLE: '{{ ca_tmp.path }}/isrg-x1.pem' + register: isrg_ca_result + +- assert: + that: + - isrg_ca_result is successful + +################################################################################## +# https://github.com/ansible-collections/amazon.aws/issues/129 +- name: 'Test CA bundle is used when authenticating with a profile - implied validation' + example_module: + profile: 'test_profile' + aws_ca_bundle: '{{ ca_tmp.path }}/isrg-x1.pem' + register: isrg_ca_result + ignore_errors: yes + +- assert: + that: + - isrg_ca_result is failed + # Caught when we try to do something, and passed to fail_json_aws + - '"CERTIFICATE_VERIFY_FAILED" in isrg_ca_result.msg' + - '"Fail JSON AWS" in isrg_ca_result.msg' + +- name: 'Test CA bundle is used when authenticating with a profile - explicit validation' + example_module: + profile: 'test_profile' + aws_ca_bundle: '{{ ca_tmp.path }}/isrg-x1.pem' + validate_certs: True + register: isrg_ca_result + ignore_errors: yes + +- assert: + that: + - isrg_ca_result is failed + # Caught when we try to do something, and passed to fail_json_aws + - '"CERTIFICATE_VERIFY_FAILED" in isrg_ca_result.msg' + - '"Fail JSON AWS" in isrg_ca_result.msg' + +- name: 'Test CA bundle is used when authenticating with a profile - explicitly disable validation' + example_module: + profile: 'test_profile' + 
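+    # 'test_profile' is not a pre-existing profile: it is rendered by this
+    # target's setup.yml into the boto3_config file that runme.sh exports
+    # via AWS_CONFIG_FILE, pointing back at the session credentials.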
aws_ca_bundle: '{{ ca_tmp.path }}/isrg-x1.pem' + validate_certs: False + register: isrg_ca_result + +- assert: + that: + - isrg_ca_result is success diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/credentials.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/credentials.yml new file mode 100644 index 000000000..94925829b --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/credentials.yml @@ -0,0 +1,281 @@ +--- +################################################################################## +# Tests using standard credential parameters + +- name: 'Test basic operation using simple credentials (simple-parameters)' + example_module: + region: '{{ aws_region }}' + access_key: '{{ aws_access_key }}' + secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token }}' + register: credential_result + +- assert: + that: + - credential_result is successful + +- name: 'Test basic operation using simple credentials (aws-parameters)' + example_module: + aws_region: '{{ aws_region }}' + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + aws_security_token: '{{ security_token }}' + register: credential_result + +- assert: + that: + - credential_result is successful + +- name: 'Test basic operation using simple credentials (ec2-parameters)' + example_module: + ec2_region: '{{ aws_region }}' + ec2_access_key: '{{ aws_access_key }}' + ec2_secret_key: '{{ aws_secret_key }}' + access_token: '{{ security_token }}' + register: credential_result + +- assert: + that: + - credential_result is successful + +################################################################################## +# Tests using standard credentials from environment variables + +- name: 'Test basic operation using simple credentials (aws-environment)' + example_module: + environment: + AWS_REGION: '{{ aws_region }}' + AWS_ACCESS_KEY_ID: '{{ aws_access_key }}' + AWS_SECRET_ACCESS_KEY: '{{ aws_secret_key }}' + AWS_SECURITY_TOKEN: '{{ security_token }}' + register: credential_result + +- assert: + that: + - credential_result is successful + +- name: 'Test basic operation using simple credentials (aws2-environment)' + example_module: + environment: + AWS_DEFAULT_REGION: '{{ aws_region }}' + AWS_ACCESS_KEY: '{{ aws_access_key }}' + AWS_SECRET_KEY: '{{ aws_secret_key }}' + AWS_SESSION_TOKEN: '{{ security_token }}' + register: credential_result + +- assert: + that: + - credential_result is successful + +- name: 'Test basic operation using simple credentials (ec2-environment)' + example_module: + environment: + EC2_REGION: '{{ aws_region }}' + EC2_ACCESS_KEY: '{{ aws_access_key }}' + EC2_SECRET_KEY: '{{ aws_secret_key }}' + EC2_SECURITY_TOKEN: '{{ security_token }}' + register: credential_result + +- assert: + that: + - credential_result is successful + +################################################################################## +# Tests for missing parameters + +- name: 'Test with missing region' + example_module: + region: '{{ omit }}' + access_key: '{{ aws_access_key }}' + secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token }}' + register: missing_region + ignore_errors: True + +- assert: + that: + - missing_region is failed + - '"requires a region" in missing_region.msg' + +- name: 'Test with missing access key' + example_module: + region: '{{ aws_region 
}}' + access_key: '{{ omit }}' + secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token }}' + register: missing_access + ignore_errors: True + +- assert: + that: + - missing_access is failed + - '"Partial credentials found" in missing_access.msg' + - '"aws_access_key_id" in missing_access.msg' + +- name: 'Test with missing secret key' + example_module: + region: '{{ aws_region }}' + access_key: '{{ aws_access_key }}' + secret_key: '{{ omit }}' + security_token: '{{ security_token }}' + register: missing_secret + ignore_errors: True + +- assert: + that: + - missing_secret is failed + - '"Partial credentials found" in missing_secret.msg' + - '"aws_secret_access_key" in missing_secret.msg' + +- name: 'Test with missing security token' + example_module: + region: '{{ aws_region }}' + access_key: '{{ aws_access_key }}' + secret_key: '{{ aws_secret_key }}' + security_token: '{{ omit }}' + register: missing_token + ignore_errors: True + +- assert: + that: + - missing_token is failed + # Caught when we try to do something, and passed to fail_json_aws + - '"AuthFailure" in missing_token.msg' + - '"Fail JSON AWS" in missing_token.msg' + - '"error" in missing_token' + - '"code" in missing_token.error' + - missing_token.error.code == 'AuthFailure' + - '"message" in missing_token.error' + +################################################################################## +# Run an additional authentication request to ensure that we're out of any +# deny-lists caused by bad requests +- name: 'Perform valid authentication to avoid deny-listing' + example_module: + aws_region: '{{ aws_region }}' + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + aws_security_token: '{{ security_token }}' + register: anti_denylist + until: anti_denylist is success + retries: 5 + delay: 5 + +################################################################################## +# Tests for bad parameters + +- name: 'Test with bad region' + example_module: + region: 'junk-example' + access_key: '{{ aws_access_key }}' + secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token }}' + register: bad_region + ignore_errors: True + +- assert: + that: + - bad_region is failed + - '"msg" in bad_region' + - '"Could not connect to the endpoint URL" in bad_region.msg' + - '"Fail JSON AWS" in bad_region.msg' + - '"ec2.junk-example" in bad_region.msg' + +- name: 'Test with bad access key' + example_module: + region: '{{ aws_region }}' + access_key: 'junk-example' + secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token }}' + register: bad_access + ignore_errors: True + +- assert: + that: + - bad_access is failed + # Caught when we try to do something, and passed to fail_json_aws + - '"AuthFailure" in bad_access.msg' + - '"Fail JSON AWS" in bad_access.msg' + - '"error" in bad_access' + - '"code" in bad_access.error' + - bad_access.error.code == 'AuthFailure' + - '"message" in bad_access.error' + +# Run an additional authentication request to ensure that we're out of any +# deny-lists caused by bad requests +- name: 'Perform valid authentication to avoid deny-listing' + example_module: + aws_region: '{{ aws_region }}' + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + aws_security_token: '{{ security_token }}' + register: anti_denylist + until: anti_denylist is success + retries: 5 + delay: 5 + +- name: 'Test with bad secret key' + example_module: + region: '{{ aws_region }}' + access_key: '{{ aws_access_key }}' + secret_key: 
'junk-example'
+    security_token: '{{ security_token }}'
+  register: bad_secret
+  ignore_errors: True
+
+- assert:
+    that:
+      - bad_secret is failed
+      # Caught when we try to do something, and passed to fail_json_aws
+      - '"AuthFailure" in bad_secret.msg'
+      - '"Fail JSON AWS" in bad_secret.msg'
+      - '"error" in bad_secret'
+      - '"code" in bad_secret.error'
+      - bad_secret.error.code == 'AuthFailure'
+      - '"message" in bad_secret.error'
+
+# Run an additional authentication request to ensure that we're out of any
+# deny-lists caused by bad requests
+- name: 'Perform valid authentication to avoid deny-listing'
+  example_module:
+    aws_region: '{{ aws_region }}'
+    aws_access_key: '{{ aws_access_key }}'
+    aws_secret_key: '{{ aws_secret_key }}'
+    aws_security_token: '{{ security_token }}'
+  register: anti_denylist
+  until: anti_denylist is success
+  retries: 5
+  delay: 5
+
+- name: 'Test with bad security token'
+  example_module:
+    region: '{{ aws_region }}'
+    access_key: '{{ aws_access_key }}'
+    secret_key: '{{ aws_secret_key }}'
+    security_token: 'junk-example'
+  register: bad_token
+  ignore_errors: True
+
+- assert:
+    that:
+      - bad_token is failed
+      # Caught when we try to do something, and passed to fail_json_aws
+      - '"AuthFailure" in bad_token.msg'
+      - '"Fail JSON AWS" in bad_token.msg'
+      - '"error" in bad_token'
+      - '"code" in bad_token.error'
+      - bad_token.error.code == 'AuthFailure'
+      - '"message" in bad_token.error'
+
+# Run an additional authentication request to ensure that we're out of any
+# deny-lists caused by bad requests
+- name: 'Perform valid authentication to avoid deny-listing'
+  example_module:
+    aws_region: '{{ aws_region }}'
+    aws_access_key: '{{ aws_access_key }}'
+    aws_secret_key: '{{ aws_secret_key }}'
+    aws_security_token: '{{ security_token }}'
+  register: anti_denylist
+  until: anti_denylist is success
+  retries: 5
+  delay: 5
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/endpoints.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/endpoints.yml
new file mode 100644
index 000000000..590af9134
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/endpoints.yml
@@ -0,0 +1,123 @@
+---
+##################################################################################
+# Tests using Endpoints
+
+- name: 'Test basic operation using standard endpoint (aws-parameters)'
+  example_module:
+    region: '{{ aws_region }}'
+    aws_endpoint_url: 'https://ec2.{{ aws_region }}.amazonaws.com'
+    aws_access_key: '{{ aws_access_key }}'
+    aws_secret_key: '{{ aws_secret_key }}'
+    aws_security_token: '{{ security_token }}'
+  register: standard_endpoint_result
+
+- name: 'Check that we connected to the standard endpoint'
+  assert:
+    that:
+      - standard_endpoint_result is successful
+      - '"ec2:DescribeImages" in standard_endpoint_result.resource_actions'
+
+# The FIPS endpoints aren't available in every region; this will trigger errors
+# outside of: [ us-east-1, us-east-2, us-west-1, us-west-2 ]
+
+- name: 'Test basic operation using FIPS endpoint (aws-parameters)'
+  example_module:
+    region: '{{ aws_region }}'
+    aws_endpoint_url: 'https://ec2-fips.us-east-1.amazonaws.com'
+    aws_access_key: '{{ aws_access_key }}'
+    aws_secret_key: '{{ aws_secret_key }}'
+    aws_security_token: '{{ security_token }}'
+  register: fips_endpoint_result
+
+- name: 'Check that we connected to the FIPS endpoint'
+  assert:
+    that:
+      - fips_endpoint_result is successful
+      - '"ec2-fips:DescribeImages" in fips_endpoint_result.resource_actions'
+
+- name: 'Test basic operation using FIPS endpoint (simple-parameters)'
+  example_module:
+    region: '{{ aws_region }}'
+    endpoint_url: 'https://ec2-fips.us-east-1.amazonaws.com'
+    aws_access_key: '{{ aws_access_key }}'
+    aws_secret_key: '{{ aws_secret_key }}'
+    aws_security_token: '{{ security_token }}'
+  register: fips_endpoint_result
+
+- name: 'Check that we connected to the FIPS endpoint'
+  assert:
+    that:
+      - fips_endpoint_result is successful
+      - '"ec2-fips:DescribeImages" in fips_endpoint_result.resource_actions'
+
+- name: 'Test basic operation using FIPS endpoint (ec2-parameters)'
+  example_module:
+    region: '{{ aws_region }}'
+    ec2_url: 'https://ec2-fips.us-east-1.amazonaws.com'
+    aws_access_key: '{{ aws_access_key }}'
+    aws_secret_key: '{{ aws_secret_key }}'
+    aws_security_token: '{{ security_token }}'
+  register: fips_endpoint_result
+
+- name: 'Check that we connected to the FIPS endpoint'
+  assert:
+    that:
+      - fips_endpoint_result is successful
+      - '"ec2-fips:DescribeImages" in fips_endpoint_result.resource_actions'
+
+##################################################################################
+# Tests using environment variables
+
+- name: 'Test basic operation using FIPS endpoint (aws-environment)'
+  example_module:
+    region: '{{ aws_region }}'
+    aws_access_key: '{{ aws_access_key }}'
+    aws_secret_key: '{{ aws_secret_key }}'
+    aws_security_token: '{{ security_token }}'
+  environment:
+    AWS_URL: 'https://ec2-fips.us-east-1.amazonaws.com'
+  register: fips_endpoint_result
+
+- name: 'Check that we connected to the FIPS endpoint'
+  assert:
+    that:
+      - fips_endpoint_result is successful
+      - '"ec2-fips:DescribeImages" in fips_endpoint_result.resource_actions'
+
+- name: 'Test basic operation using FIPS endpoint (ec2-environment)'
+  example_module:
+    region: '{{ aws_region }}'
+    aws_access_key: '{{ aws_access_key }}'
+    aws_secret_key: '{{ aws_secret_key }}'
+    aws_security_token: '{{ security_token }}'
+  environment:
+    EC2_URL: 'https://ec2-fips.us-east-1.amazonaws.com'
+  register: fips_endpoint_result
+
+- name: 'Check that we connected to the FIPS endpoint'
+  assert:
+    that:
+      - fips_endpoint_result is successful
+      - '"ec2-fips:DescribeImages" in fips_endpoint_result.resource_actions'
+
+##################################################################################
+# Tests using a bad endpoint URL
+# - This demonstrates that endpoint_url overrides region
+
+- name: 'Test with bad endpoint URL'
+  example_module:
+    region: '{{ aws_region }}'
+    endpoint_url: 'https://junk.{{ aws_region }}.amazonaws.com'
+    access_key: '{{ aws_access_key }}'
+    secret_key: '{{ aws_secret_key }}'
+    security_token: '{{ security_token }}'
+  register: bad_endpoint
+  ignore_errors: True
+
+- assert:
+    that:
+      - bad_endpoint is failed
+      - '"msg" in bad_endpoint'
+      - '"Could not connect to the endpoint URL" in bad_endpoint.msg'
+      - '"Fail JSON AWS" in bad_endpoint.msg'
+      - '"junk.{{ aws_region }}.amazonaws.com" in bad_endpoint.msg'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/main.yml
new file mode 100644
index 000000000..dc61fad68
--- /dev/null
+++
b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/main.yml @@ -0,0 +1,12 @@ +--- +- name: 'Tests around standard credentials' + include_tasks: 'credentials.yml' + +- name: 'Tests around profiles' + include_tasks: 'profiles.yml' + +- name: 'Tests around endpoints' + include_tasks: 'endpoints.yml' + +- name: 'Tests around CA Bundles' + include_tasks: 'ca_bundle.yml' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/profiles.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/profiles.yml new file mode 100644 index 000000000..1673a5e15 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/profiles.yml @@ -0,0 +1,74 @@ +--- +################################################################################## +# Tests using profiles instead of directly consuming credentials + +- name: 'Test basic operation using profile (simple-parameters)' + example_module: + profile: 'test_profile' + register: profile_result + +- assert: + that: + - profile_result is successful + +- name: 'Test basic operation using profile (aws-parameters)' + example_module: + aws_profile: 'test_profile' + register: profile_result + +- assert: + that: + - profile_result is successful + +- name: 'Test basic operation using profile (aws-environment)' + example_module: + environment: + AWS_PROFILE: 'test_profile' + register: profile_result + +- assert: + that: + - profile_result is successful + +- name: 'Test basic operation using profile (aws2-environment)' + example_module: + environment: + AWS_DEFAULT_PROFILE: 'test_profile' + register: profile_result + +- assert: + that: + - profile_result is successful + +################################################################################## +# Tests with bad profile + +- name: 'Test with bad profile' + example_module: + profile: 'junk-profile' + register: bad_profile + ignore_errors: True + +- assert: + that: + - bad_profile is failed + - '"msg" in bad_profile' + - '"junk-profile" in bad_profile.msg' + - '"could not be found" in bad_profile.msg' + +- name: 'Test with profile and credentials (should error)' + example_module: + profile: 'test_profile' + aws_region: '{{ aws_region }}' + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + aws_security_token: '{{ security_token }}' + register: bad_profile + ignore_errors: True + +- assert: + that: + - bad_profile is failed + - '"msg" in bad_profile' + - '"Passing both" in bad_profile.msg' + - '"not supported" in bad_profile.msg' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/runme.sh b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/runme.sh new file mode 100755 index 000000000..9b0536d26 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/runme.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +set -eux + +ANSIBLE_ROLES_PATH="../" +# Boto3 +AWS_CONFIG_FILE="$( pwd )/boto3_config" +# Boto2 +BOTO_CONFIG="$( pwd )/boto3_config" + +export ANSIBLE_ROLES_PATH +export AWS_CONFIG_FILE +export BOTO_CONFIG + +ansible-playbook setup.yml -i localhost "$@" +ansible-playbook main.yml -i inventory "$@" -e "@session_credentials.yml" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/setup.yml 
b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/setup.yml new file mode 100644 index 000000000..9b219eb20 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/setup.yml @@ -0,0 +1,40 @@ +--- +- hosts: localhost + connection: local + gather_facts: no + tasks: + # =========================================================== + # While CI uses a dedicated session, the easiest way to run + # tests outside of CI is with a simple access/secret key pair. + # + # For consistency, use sts_session_token to grab session + # credentials if we're not already using a session + # Note: this can't be done within a session, hence the slightly + # strange dance + - name: 'Get a session token if we are using a basic key' + when: + - security_token is not defined + block: + - name: 'Get a session token' + sts_session_token: + region: '{{ aws_region }}' + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + register: session_token + no_log: true + - name: 'Override initial tokens' + set_fact: + session_access_key: '{{ session_token.sts_creds.access_key }}' + session_secret_key: '{{ session_token.sts_creds.secret_key }}' + session_security_token: '{{ session_token.sts_creds.session_token }}' + no_log: true + + - name: 'Write out credentials' + template: + dest: './session_credentials.yml' + src: 'session_credentials.yml.j2' + + - name: 'Write out boto config file' + template: + dest: './boto3_config' + src: 'boto_config.j2' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/templates/boto_config.j2 b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/templates/boto_config.j2 new file mode 100644 index 000000000..f8668f057 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/templates/boto_config.j2 @@ -0,0 +1,5 @@ +[profile test_profile] +region = {{ aws_region }} +aws_access_key_id = {{ session_access_key | default(aws_access_key) }} +aws_secret_access_key = {{ session_secret_key | default(aws_secret_key) }} +aws_security_token = {{ session_security_token | default(security_token) }} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/templates/session_credentials.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/templates/session_credentials.yml.j2 new file mode 100644 index 000000000..bb0304393 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/templates/session_credentials.yml.j2 @@ -0,0 +1,3 @@ +aws_access_key: {{ session_access_key | default(aws_access_key) }} +aws_secret_key: {{ session_secret_key | default(aws_secret_key) }} +security_token: {{ session_security_token | default(security_token) }} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/aliases b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/aliases new file mode 100644 index 000000000..4ef4b2067 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/aliases @@ -0,0 +1 @@ +cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/inventory b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/inventory new file mode 100644 index 000000000..5093e8582 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/inventory @@ 
-0,0 +1,6 @@
+[tests]
+localhost
+
+[all:vars]
+ansible_connection=local
+ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/main.yml
new file mode 100644
index 000000000..4edc36377
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/main.yml
@@ -0,0 +1,7 @@
+- hosts: all
+  gather_facts: no
+  collections:
+    - amazon.aws
+  roles:
+    # Test the behaviour of module_utils.waiters.get_waiter
+    - 'get_waiter'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/library/example_module.py b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/library/example_module.py
new file mode 100644
index 000000000..4e16fb1bc
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/library/example_module.py
@@ -0,0 +1,39 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# A bare-minimum Ansible Module based on AnsibleAWSModule used for testing some
+# of the core behaviour around AWS/Boto3 connection details
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+
+
+def main():
+    argument_spec = dict(
+        client=dict(required=True, type='str'),
+        waiter_name=dict(required=True, type='str'),
+        with_decorator=dict(required=False, type='bool', default=False),
+    )
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+
+    decorator = None
+    if module.params.get('with_decorator'):
+        decorator = AWSRetry.jittered_backoff()
+
+    client = module.client(module.params.get('client'), retry_decorator=decorator)
+    waiter = get_waiter(client, module.params.get('waiter_name'))
+
+    module.exit_json(changed=False, waiter_attributes=dir(waiter))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/meta/main.yml
new file mode 100644
index 000000000..d8b08ab22
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies: []
+collections:
+  - amazon.aws
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/tasks/main.yml
new file mode 100644
index 000000000..466d9584e
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/tasks/main.yml
@@ -0,0 +1,36 @@
+---
+- module_defaults:
+    example_module:
+      region: '{{ aws_region }}'
+      access_key: '{{ aws_access_key }}'
+      secret_key: '{{ aws_secret_key }}'
+      security_token: '{{ security_token | default(omit) }}'
+  block:
+    - name: 'Attempt to get a waiter (no retry decorator)'
+      example_module:
+        client: 'ec2'
+        waiter_name: 'internet_gateway_exists'
+      register: test_no_decorator
+
+    - assert:
+        that:
+          - test_no_decorator is succeeded
+          # Standard methods on a boto3 wrapper
+          - '"wait" in test_no_decorator.waiter_attributes'
+          - '"name" in test_no_decorator.waiter_attributes'
+          - '"config" in test_no_decorator.waiter_attributes'
+
+    - name: 'Attempt to get a waiter (with decorator)'
+      example_module:
+        client: 'ec2'
+        waiter_name: 'internet_gateway_exists'
+        with_decorator: True
+      register: test_with_decorator
+
+    - assert:
+        that:
+          - test_with_decorator is succeeded
+          # Standard methods on a boto3 wrapper
+          - '"wait" in test_with_decorator.waiter_attributes'
+          - '"name" in test_with_decorator.waiter_attributes'
+          - '"config" in test_with_decorator.waiter_attributes'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/runme.sh b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/runme.sh
new file mode 100755
index 000000000..78a6f6dbe
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/runme.sh
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ANSIBLE_ROLES_PATH="../"
+export ANSIBLE_ROLES_PATH
+
+ansible-playbook main.yml -i inventory "$@"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/aliases
new file mode 100644
index 000000000..6e9f239e0
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/aliases
@@ -0,0 +1,5 @@
+time=10m
+
+cloud/aws
+
+rds_cluster_info
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/inventory b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/inventory
new file mode 100644
index 000000000..1acd86420
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/inventory
@@ -0,0 +1,23 @@
+[tests]
+# basic rds_cluster creation tests
+create
+
+# restore cluster tests
+restore
+
+# TODO: Cannot be tested in the CI because:
+# An error occurred (InvalidParameterValue) when calling the CreateDBCluster operation: Replication from cluster in same region is not supported
+# promote
+
+# DB security group tests
+create_sgs
+
+# basic modify operations applied on the rds cluster
+modify
+
+# tag rds cluster test
+tag
+
+[all:vars]
+ansible_connection=local
+ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/main.yml
new file mode 100644
index 000000000..2674f4268
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/main.yml
@@ -0,0 +1,10 @@
+# Beware: most of our tests here are run in parallel.
+# To add new tests you'll need to add a new host to the inventory and a matching
+# '{{ inventory_hostname }}'.yml file in roles/rds_cluster/tasks/
+
+- hosts: all
+  gather_facts: no
+  strategy: free
+  serial: 6
+  roles:
+    - rds_cluster
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/defaults/main.yml
new file mode 100644
index 000000000..f1217a95e
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/defaults/main.yml
@@ -0,0 +1,36 @@
+# defaults file for rds_cluster
+
+# Create cluster
+cluster_id: ansible-test-{{ inventory_hostname | replace('_','-') }}{{ tiny_prefix }}
+username: testrdsusername
+password: test-rds_password
+engine: aurora
+port: 3306
+tags_create:
+  Name: ansible-test-cluster-{{ tiny_prefix }}
+  Created_By: Ansible_rds_cluster_integration_test
+
+# Modify cluster
+new_cluster_id: ansible-test-cluster-{{ tiny_prefix }}-new
+new_port: 1155
+new_password: test-rds_password-new
+new_db_parameter_group_name: ansible-test-db-parameter-group-{{ tiny_prefix }}-new
+
+# Tag cluster
+tags_patch:
+  Name: '{{ tiny_prefix }}-new'
+  Created_by: Ansible rds_cluster integration tests
+
+# Create cluster in a VPC
+vpc_name: ansible-test-vpc-{{ tiny_prefix }}
+vpc_cidr: 10.{{ 256 | random(seed=tiny_prefix) }}.0.0/16
+subnets:
+- {cidr: '10.{{ 256 | random(seed=tiny_prefix) }}.1.0/24', zone: '{{ aws_region }}a'}
+- {cidr: '10.{{ 256 | random(seed=tiny_prefix) }}.2.0/24', zone: '{{ aws_region }}b'}
+- {cidr: '10.{{ 256 | random(seed=tiny_prefix) }}.3.0/24', zone: '{{ aws_region }}c'}
+- {cidr: '10.{{ 256 | random(seed=tiny_prefix) }}.4.0/24', zone: '{{ aws_region }}d'}
+
+security_groups:
+- '{{ tiny_prefix }}-sg-1'
+- '{{ tiny_prefix }}-sg-2'
+- '{{ tiny_prefix }}-sg-3'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/meta/main.yml
new file mode 100644
index 000000000..73b314ff7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/meta/main.yml
@@ -0,0 +1 @@
+---
\ No newline at end of file
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/main.yml
new file mode 100644
index 000000000..55f8a551e
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/main.yml
@@ -0,0 +1,10 @@
+- name: rds_cluster integration tests
+  module_defaults:
+    group/aws:
+      region: '{{ aws_region }}'
+      aws_access_key: '{{ aws_access_key }}'
+      aws_secret_key: '{{ aws_secret_key }}'
+      security_token: '{{ security_token | default(omit) }}'
+
+  block:
+    - include: ./test_{{ inventory_hostname }}.yml
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_create.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_create.yml
new file mode 100644
index 000000000..54b3143ff
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_create.yml
@@ -0,0 +1,123 @@
+- block:
+    - name: Ensure the resource doesn't exist
+      rds_cluster:
+        id: '{{ cluster_id }}'
+        state: absent
+        engine: '{{ engine }}'
+        username: '{{ username }}'
+        password: '{{ password }}'
+        skip_final_snapshot: true
+      register: _result_delete_db_cluster
+
+    - assert:
+        that:
+          - not _result_delete_db_cluster.changed
+      ignore_errors: yes
+
+    - name: Get info of all existing clusters
+      rds_cluster_info:
+      register: _result_cluster_info
+
+    - assert:
+        that:
+          - _result_cluster_info is successful
+
+    - name: Create minimal aurora cluster in default VPC and default subnet group (CHECK MODE)
+      rds_cluster:
+        engine: '{{ engine }}'
+        username: '{{ username }}'
+        password: '{{ password }}'
+        cluster_id: '{{ cluster_id }}'
+        tags: '{{ tags_create }}'
+      register: _result_create_db_cluster
+      check_mode: true
+
+    - assert:
+        that:
+          - _result_create_db_cluster.changed
+
+    - name: Create minimal aurora cluster in default VPC and default subnet group
+      rds_cluster:
+        engine: '{{ engine }}'
+        username: '{{ username }}'
+        password: '{{ password }}'
+        cluster_id: '{{ cluster_id }}'
+        tags: '{{ tags_create }}'
+      register: _result_create_db_cluster
+
+    - assert:
+        that:
+          - _result_create_db_cluster.changed
+          - "'allocated_storage' in _result_create_db_cluster"
+          - _result_create_db_cluster.allocated_storage == 1
+          - "'cluster_create_time' in _result_create_db_cluster"
+          - _result_create_db_cluster.copy_tags_to_snapshot == false
+          - "'db_cluster_arn' in _result_create_db_cluster"
+          - "'db_cluster_identifier' in _result_create_db_cluster"
+          - _result_create_db_cluster.db_cluster_identifier == "{{ cluster_id }}"
+          - "'db_cluster_parameter_group' in _result_create_db_cluster"
+          - "'db_cluster_resource_id' in _result_create_db_cluster"
+          - "'endpoint' in _result_create_db_cluster"
+          - "'engine' in _result_create_db_cluster"
+          - _result_create_db_cluster.engine == "{{ engine }}"
+          - "'engine_mode' in _result_create_db_cluster"
+          - _result_create_db_cluster.engine_mode == "serverless"
+          - "'engine_version' in _result_create_db_cluster"
+          - "'master_username' in _result_create_db_cluster"
+          - _result_create_db_cluster.master_username == "{{ username }}"
+          - "'port' in _result_create_db_cluster"
+          - _result_create_db_cluster.port == {{ port }}
+          - "'status' in _result_create_db_cluster"
+          - _result_create_db_cluster.status == 'available'
+          - _result_create_db_cluster.storage_encrypted == true
+          - "'tags' in _result_create_db_cluster"
+          - _result_create_db_cluster.tags | length == 2
+          - _result_create_db_cluster.tags["Created_By"] == "{{ tags_create["Created_By"] }}"
+          - _result_create_db_cluster.tags["Name"] == "{{ tags_create["Name"] }}"
+          - "'vpc_security_groups' in _result_create_db_cluster"
+    - name: Get info of the existing cluster
+      rds_cluster_info:
+        cluster_id: '{{ cluster_id }}'
+      register: result_cluster_info
+
+    - assert:
+        that:
+          - result_cluster_info is successful
+
+    - name: Create minimal aurora cluster in default VPC and default subnet group - idempotence (CHECK MODE)
+      rds_cluster:
+        engine: '{{ engine }}'
+        username: '{{ username }}'
+        password: '{{ password }}'
+        cluster_id: '{{ cluster_id }}'
+        tags: '{{ tags_create }}'
+      register: _result_create_db_cluster
+      check_mode: true
+
+    - assert:
+        that:
+          - not _result_create_db_cluster.changed
+
+    - name: Create minimal aurora cluster in default VPC and default subnet group - idempotence
+      rds_cluster:
+        engine: '{{ engine }}'
+        username: '{{ username }}'
+        password: '{{ password }}'
+        cluster_id: '{{ cluster_id }}'
+        tags: '{{ tags_create }}'
+      register: _result_create_db_cluster
+
+    - assert:
+        that:
+          - not _result_create_db_cluster.changed
+
+  always:
+    - name: Delete DB cluster without creating a final snapshot
+      rds_cluster:
+        state: absent
+        cluster_id: '{{ cluster_id }}'
+        skip_final_snapshot: true
+      ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_create_sgs.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_create_sgs.yml
new file mode 100644
index 000000000..99362ee07
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_create_sgs.yml
@@ -0,0 +1,208 @@
+- block:
+    - name: Ensure the resource doesn't exist
+      rds_cluster:
+        id: '{{ cluster_id }}'
+        state: absent
+        engine: '{{ engine }}'
+        username: '{{ username }}'
+        password: '{{ password }}'
+        skip_final_snapshot: true
+      register: _result_delete_db_cluster
+
+    - assert:
+        that:
+          - not _result_delete_db_cluster.changed
+      ignore_errors: yes
+
+    - name: Create a VPC
+      ec2_vpc_net:
+        name: '{{ vpc_name }}'
+        state: present
+        cidr_block: '{{ vpc_cidr }}'
+        tags:
+          Name: '{{ vpc_name }}'
+          Description: Created by rds_cluster integration tests
+      register: _result_create_vpc
+
+    - name: Create subnets
+      ec2_vpc_subnet:
+        cidr: '{{ item.cidr }}'
+        az: '{{ item.zone }}'
+        vpc_id: '{{ _result_create_vpc.vpc.id }}'
+        tags:
+          Name: '{{ resource_prefix }}-subnet'
+          Description: created by rds_cluster integration tests
+        state: present
+      register: _result_create_subnet
+      loop: '{{ subnets }}'
+
+    - name: Create security groups
+      ec2_group:
+        name: '{{ item }}'
+        description: Created by rds_cluster integration tests
+        state: present
+      register: _result_create_sg
+      loop: '{{ security_groups }}'
+
+    - name: Create an RDS cluster in the VPC with two security groups
+      rds_cluster:
+        id: '{{ cluster_id }}'
+        engine: '{{ engine }}'
+        username: '{{ username }}'
+        password: '{{ password }}'
+        vpc_security_group_ids:
+          - '{{ _result_create_sg.results.0.group_id }}'
+          - '{{ _result_create_sg.results.1.group_id }}'
+      register: _result_create_db_cluster
+
+    - assert:
+        that:
+          - _result_create_db_cluster.changed
+          - "'allocated_storage' in _result_create_db_cluster"
+          - _result_create_db_cluster.allocated_storage == 1
+          - "'cluster_create_time' in _result_create_db_cluster"
+          - _result_create_db_cluster.copy_tags_to_snapshot == false
+          - "'db_cluster_arn' in _result_create_db_cluster"
+          - "'db_cluster_identifier' in _result_create_db_cluster"
+          - _result_create_db_cluster.db_cluster_identifier == "{{ cluster_id }}"
+          - "'db_cluster_parameter_group' in _result_create_db_cluster"
+          - "'db_cluster_resource_id' in _result_create_db_cluster"
+          - "'endpoint' in _result_create_db_cluster"
+          - "'engine' in _result_create_db_cluster"
+          - _result_create_db_cluster.engine == "{{ engine }}"
+          - "'engine_mode' in _result_create_db_cluster"
+          - _result_create_db_cluster.engine_mode == "serverless"
+          - "'engine_version' in _result_create_db_cluster"
+          - "'master_username' in _result_create_db_cluster"
+          - _result_create_db_cluster.master_username == "{{ username }}"
_result_create_db_cluster" + - _result_create_db_cluster.port == {{ port }} + - "'status' in _result_create_db_cluster" + - _result_create_db_cluster.status == 'available' + - _result_create_db_cluster.storage_encrypted == true + - "'tags' in _result_create_db_cluster" + - "'vpc_security_groups' in _result_create_db_cluster" + - _result_create_db_cluster.vpc_security_groups | selectattr('status', 'in', + ['active', 'adding']) | list | length == 2 + + - name: Add a new security group without purge (check_mode) + rds_cluster: + id: '{{ cluster_id }}' + state: present + vpc_security_group_ids: + - '{{ _result_create_sg.results.2.group_id }}' + apply_immediately: true + purge_security_groups: false + check_mode: true + register: _result_create_db_cluster + + - assert: + that: + - _result_create_db_cluster.changed + + - name: Add a new security group without purge + rds_cluster: + id: '{{ cluster_id }}' + state: present + vpc_security_group_ids: + - '{{ _result_create_sg.results.2.group_id }}' + apply_immediately: true + purge_security_groups: false + register: _result_create_db_cluster + + - assert: + that: + - _result_create_db_cluster.changed + - "'allocated_storage' in _result_create_db_cluster" + - _result_create_db_cluster.allocated_storage == 1 + - "'cluster_create_time' in _result_create_db_cluster" + - _result_create_db_cluster.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_create_db_cluster" + - "'db_cluster_identifier' in _result_create_db_cluster" + - _result_create_db_cluster.db_cluster_identifier == "{{ cluster_id }}" + - "'db_cluster_parameter_group' in _result_create_db_cluster" + - "'db_cluster_resource_id' in _result_create_db_cluster" + - "'endpoint' in _result_create_db_cluster" + - "'engine' in _result_create_db_cluster" + - _result_create_db_cluster.engine == "{{ engine }}" + - "'engine_mode' in _result_create_db_cluster" + - _result_create_db_cluster.engine_mode == "serverless" + - "'engine_version' in _result_create_db_cluster" + - "'master_username' in _result_create_db_cluster" + - _result_create_db_cluster.master_username == "{{ username }}" + - "'port' in _result_create_db_cluster" + - _result_create_db_cluster.port == {{ port }} + - "'status' in _result_create_db_cluster" + - _result_create_db_cluster.status == 'available' + - _result_create_db_cluster.storage_encrypted == true + - "'tags' in _result_create_db_cluster" + - "'vpc_security_groups' in _result_create_db_cluster" + - _result_create_db_cluster.vpc_security_groups | selectattr('status', 'in', + ['active', 'adding']) | list | length == 3 + + - name: Add a new security group without purge (test idempotence) + rds_cluster: + id: '{{ cluster_id }}' + state: present + vpc_security_group_ids: + - '{{ _result_create_sg.results.2.group_id }}' + apply_immediately: true + purge_security_groups: false + register: _result_create_db_cluster + + - assert: + that: + - not _result_create_db_cluster.changed + + - name: Add a security group with purge + rds_cluster: + id: '{{ cluster_id }}' + state: present + vpc_security_group_ids: + - '{{ _result_create_sg .results.2.group_id }}' + apply_immediately: true + register: _result_create_db_cluster + + - assert: + that: + - _result_create_db_cluster.changed + - _result_create_db_cluster.db_cluster_identifier == '{{ cluster_id }}' + - _result_create_db_cluster.vpc_security_groups | selectattr('status', 'in', + ['active', 'adding']) | list | length == 1 + + always: + - name: Delete DB cluster without creating a final snapshot + rds_cluster: + state: absent + 
+        cluster_id: '{{ cluster_id }}'
+        skip_final_snapshot: true
+      ignore_errors: true
+
+    - name: Remove security groups
+      ec2_group:
+        name: '{{ item }}'
+        description: created by rds_cluster integration tests
+        state: absent
+      loop: '{{ security_groups }}'
+
+    - name: Remove subnets
+      ec2_vpc_subnet:
+        cidr: '{{ item.cidr }}'
+        az: '{{ item.zone }}'
+        vpc_id: '{{ _result_create_vpc.vpc.id }}'
+        tags:
+          Name: '{{ resource_prefix }}-subnet'
+          Description: Created by rds_cluster integration tests
+        state: absent
+      ignore_errors: yes
+      loop: '{{ subnets }}'
+
+    - name: Delete VPC
+      ec2_vpc_net:
+        name: '{{ vpc_name }}'
+        state: absent
+        cidr_block: '{{ vpc_cidr }}'
+        tags:
+          Name: '{{ vpc_name }}'
+          Description: Created by rds_cluster integration tests
+      ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_modify.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_modify.yml
new file mode 100644
index 000000000..f72357ddc
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_modify.yml
@@ -0,0 +1,270 @@
+- block:
+    - name: Ensure the resource doesn't exist
+      rds_cluster:
+        id: '{{ cluster_id }}'
+        state: absent
+        engine: '{{ engine }}'
+        username: '{{ username }}'
+        password: '{{ password }}'
+        skip_final_snapshot: true
+      register: _result_delete_db_cluster
+
+    - assert:
+        that:
+          - not _result_delete_db_cluster.changed
+      ignore_errors: yes
+
+    # Following the Aurora Serverless V2 release, we use an aurora-mysql cluster
+    # to avoid the following error when we try to adjust the port:
+    # You currently can't modify EndpointPort with Aurora Serverless.
+    - name: Create an Aurora-MySQL DB cluster
+      rds_cluster:
+        id: '{{ cluster_id }}'
+        state: present
+        engine: aurora-mysql
+        engine_mode: provisioned
+        username: '{{ username }}'
+        password: '{{ password }}'
+      register: _result_create_source_db_cluster
+
+    - assert:
+        that:
+          - _result_create_source_db_cluster.changed
+          - "'allocated_storage' in _result_create_source_db_cluster"
+          - _result_create_source_db_cluster.allocated_storage == 1
+          - "'cluster_create_time' in _result_create_source_db_cluster"
+          - _result_create_source_db_cluster.copy_tags_to_snapshot == false
+          - "'db_cluster_arn' in _result_create_source_db_cluster"
+          - _result_create_source_db_cluster.db_cluster_identifier == '{{ cluster_id }}'
+          - "'db_cluster_parameter_group' in _result_create_source_db_cluster"
+          - "'db_cluster_resource_id' in _result_create_source_db_cluster"
+          - "'endpoint' in _result_create_source_db_cluster"
+          - "'engine' in _result_create_source_db_cluster"
+          - _result_create_source_db_cluster.engine == "aurora-mysql"
+          - "'engine_mode' in _result_create_source_db_cluster"
+          - _result_create_source_db_cluster.engine_mode == "provisioned"
+          - "'engine_version' in _result_create_source_db_cluster"
+          - "'master_username' in _result_create_source_db_cluster"
+          - _result_create_source_db_cluster.master_username == "{{ username }}"
+          - "'port' in _result_create_source_db_cluster"
+          - _result_create_source_db_cluster.port == {{ port }}
+          - "'status' in _result_create_source_db_cluster"
+          - _result_create_source_db_cluster.status == "available"
+          - "'tags' in _result_create_source_db_cluster"
+          - "'vpc_security_groups' in _result_create_source_db_cluster"
+
+    - name: Modify DB cluster password
+      rds_cluster:
+        id: '{{ cluster_id }}'
present + password: '{{ new_password }}' + force_update_password: true + apply_immediately: true + register: _result_modify_password + + - assert: + that: + - _result_modify_password.changed + - "'allocated_storage' in _result_modify_password" + - _result_modify_password.allocated_storage == 1 + - "'cluster_create_time' in _result_modify_password" + - _result_modify_password.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_modify_password" + - _result_modify_password.db_cluster_identifier == '{{ cluster_id }}' + - "'db_cluster_parameter_group' in _result_modify_password" + - "'db_cluster_resource_id' in _result_modify_password" + - "'endpoint' in _result_modify_password" + - "'engine' in _result_modify_password" + - _result_modify_password.engine == "aurora-mysql" + - "'engine_mode' in _result_modify_password" + - _result_modify_password.engine_mode == "provisioned" + - "'engine_version' in _result_modify_password" + - "'master_username' in _result_modify_password" + - _result_modify_password.master_username == "{{ username }}" + - "'port' in _result_modify_password" + - _result_modify_password.port == {{ port }} + - "'status' in _result_modify_password" + - _result_modify_password.status == "available" + - "'tags' in _result_modify_password" + - "'vpc_security_groups' in _result_modify_password" + + - name: Modify DB cluster port + rds_cluster: + id: '{{ cluster_id }}' + state: present + port: '{{ new_port }}' + register: _result_modify_port + + - assert: + that: + - _result_modify_port.changed + - "'allocated_storage' in _result_modify_port" + - _result_modify_port.allocated_storage == 1 + - "'cluster_create_time' in _result_modify_port" + - _result_modify_port.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_modify_port" + - _result_modify_port.db_cluster_identifier == '{{ cluster_id }}' + - "'db_cluster_parameter_group' in _result_modify_port" + - "'db_cluster_resource_id' in _result_modify_port" + - "'endpoint' in _result_modify_port" + - "'engine' in _result_modify_port" + - _result_modify_port.engine == "aurora-mysql" + - "'engine_mode' in _result_modify_port" + - _result_modify_port.engine_mode == "provisioned" + - "'engine_version' in _result_modify_port" + - "'master_username' in _result_modify_port" + - _result_modify_port.master_username == "{{ username }}" + - "'port' in _result_modify_port" + - _result_modify_port.port == {{ new_port }} + - "'status' in _result_modify_port" + - _result_modify_port.status == "available" + - "'tags' in _result_modify_port" + - "'vpc_security_groups' in _result_modify_port" + + - name: Modify DB cluster identifier + rds_cluster: + id: '{{ cluster_id }}' + state: present + purge_tags: false + new_cluster_id: '{{ new_cluster_id }}' + apply_immediately: true + register: _result_modify_id + + - assert: + that: + - _result_modify_id.changed + - "'allocated_storage' in _result_modify_id" + - _result_modify_id.allocated_storage == 1 + - "'cluster_create_time' in _result_modify_id" + - _result_modify_id.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_modify_id" + - _result_modify_id.db_cluster_identifier == '{{ new_cluster_id }}' + - "'db_cluster_parameter_group' in _result_modify_id" + - "'db_cluster_resource_id' in _result_modify_id" + - "'endpoint' in _result_modify_id" + - "'engine' in _result_modify_id" + - _result_modify_id.engine == "aurora-mysql" + - "'engine_mode' in _result_modify_id" + - _result_modify_id.engine_mode == "provisioned" + - "'engine_version' in _result_modify_id" + - 
"'master_username' in _result_modify_id" + - _result_modify_id.master_username == "{{ username }}" + - "'port' in _result_modify_id" + - _result_modify_id.port == {{ new_port }} + - "'status' in _result_modify_id" + - _result_modify_id.status == "available" + - "'tags' in _result_modify_id" + - "'vpc_security_groups' in _result_modify_id" + + - name: Check if DB cluster parameter group exists + command: aws rds describe-db-cluster-parameter-groups --db-cluster-parameter-group-name + {{ new_db_parameter_group_name }} + environment: + AWS_ACCESS_KEY_ID: '{{ aws_access_key }}' + AWS_SECRET_ACCESS_KEY: '{{ aws_secret_key }}' + AWS_SESSION_TOKEN: "{{ security_token | default('') }}" + AWS_DEFAULT_REGION: '{{ aws_region }}' + register: _result_check_db_parameter_group + ignore_errors: true + changed_when: _result_check_db_parameter_group.rc == 0 + + - name: Create DB cluster parameter group if not exists + command: aws rds create-db-cluster-parameter-group --db-cluster-parameter-group-name + {{ new_db_parameter_group_name }} --db-parameter-group-family aurora-mysql5.7 --description + "Test DB cluster parameter group" + environment: + AWS_ACCESS_KEY_ID: '{{ aws_access_key }}' + AWS_SECRET_ACCESS_KEY: '{{ aws_secret_key }}' + AWS_SESSION_TOKEN: "{{ security_token | default('') }}" + AWS_DEFAULT_REGION: '{{ aws_region }}' + register: _result_create_db_parameter_group + when: _result_check_db_parameter_group.rc != 0 + + - name: Modify DB cluster parameter group + rds_cluster: + id: '{{ new_cluster_id }}' + state: present + db_cluster_parameter_group_name: '{{ new_db_parameter_group_name }}' + apply_immediately: true + register: _result_modify_db_parameter_group_name + + - assert: + that: + - _result_modify_db_parameter_group_name.changed + - "'allocated_storage' in _result_modify_db_parameter_group_name" + - _result_modify_db_parameter_group_name.allocated_storage == 1 + - "'cluster_create_time' in _result_modify_db_parameter_group_name" + - _result_modify_db_parameter_group_name.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_modify_db_parameter_group_name" + - _result_modify_db_parameter_group_name.db_cluster_identifier == '{{ new_cluster_id + }}' + - "'db_cluster_parameter_group' in _result_modify_db_parameter_group_name" + - "'db_cluster_resource_id' in _result_modify_db_parameter_group_name" + - "'endpoint' in _result_modify_db_parameter_group_name" + - "'engine' in _result_modify_db_parameter_group_name" + - _result_modify_db_parameter_group_name.engine == "aurora-mysql" + - "'engine_mode' in _result_modify_db_parameter_group_name" + - _result_modify_db_parameter_group_name.engine_mode == "provisioned" + - "'engine_version' in _result_modify_db_parameter_group_name" + - "'master_username' in _result_modify_db_parameter_group_name" + - _result_modify_db_parameter_group_name.master_username == "{{ username }}" + - "'port' in _result_modify_db_parameter_group_name" + - _result_modify_db_parameter_group_name.db_cluster_parameter_group == "{{ new_db_parameter_group_name + }}" + - "'status' in _result_modify_db_parameter_group_name" + - _result_modify_db_parameter_group_name.status == "available" + - "'tags' in _result_modify_db_parameter_group_name" + - "'vpc_security_groups' in _result_modify_db_parameter_group_name" + + - name: Delete DB cluster without creating a final snapshot (CHECK MODE) + rds_cluster: + state: absent + cluster_id: '{{ new_cluster_id }}' + skip_final_snapshot: true + register: _result_delete_cluster + check_mode: true + + - assert: + that: + - 
_result_delete_cluster.changed + + - name: Delete DB cluster without creating a final snapshot + rds_cluster: + state: absent + cluster_id: '{{ new_cluster_id }}' + skip_final_snapshot: true + register: _result_delete_cluster + + - assert: + that: + - _result_delete_cluster.changed + + - name: Delete DB cluster without creating a final snapshot (idempotence) + rds_cluster: + state: absent + cluster_id: '{{ new_cluster_id }}' + skip_final_snapshot: true + register: _result_delete_cluster + + - assert: + that: + - not _result_delete_cluster.changed + + always: + - name: Delete DB cluster without creating a final snapshot + rds_cluster: + state: absent + cluster_id: '{{ cluster_id }}' + skip_final_snapshot: true + ignore_errors: true + + - name: Delete cluster parameter group + command: aws rds delete-db-cluster-parameter-group --db-cluster-parameter-group-name + {{ new_db_parameter_group_name }} + environment: + AWS_ACCESS_KEY_ID: '{{ aws_access_key }}' + AWS_SECRET_ACCESS_KEY: '{{ aws_secret_key }}' + AWS_SESSION_TOKEN: "{{ security_token | default('') }}" + AWS_DEFAULT_REGION: '{{ aws_region }}' + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_promote.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_promote.yml new file mode 100644 index 000000000..8443063ad --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_promote.yml @@ -0,0 +1,187 @@ +- block: + - name: Ensure the resource doesn't exist + rds_cluster: + id: '{{ cluster_id }}' + state: absent + engine: '{{ engine}}' + username: '{{ username }}' + password: '{{ password }}' + skip_final_snapshot: true + register: _result_delete_db_cluster + + - assert: + that: + - not _result_delete_db_cluster.changed + ignore_errors: yes + + - name: Set the two regions for the source DB and the read replica + set_fact: + region_src: '{{ aws_region }}' + region_dest: '{{ aws_region }}' + + - name: Create a source DB cluster + rds_cluster: + cluster_id: '{{ cluster_id }}' + state: present + engine: '{{ engine}}' + username: '{{ username }}' + password: '{{ password }}' + region: '{{ region_src }}' + tags: + Name: '{{ cluster_id }}' + Created_by: Ansible rds_cluster tests + register: _result_create_src_db_cluster + + - assert: + that: + - _result_create_src_db_cluster.changed + - "'allocated_storage' in _result_create_src_db_cluster" + - _result_create_src_db_cluster.allocated_storage == 1 + - "'cluster_create_time' in _result_create_src_db_cluster" + - _result_create_src_db_cluster.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_create_src_db_cluster" + - _result_create_src_db_cluster.db_cluster_identifier == '{{ cluster_id }}' + - "'db_cluster_parameter_group' in _result_create_src_db_cluster" + - "'db_cluster_resource_id' in _result_create_src_db_cluster" + - "'endpoint' in _result_create_src_db_cluster" + - "'engine' in _result_create_src_db_cluster" + - _result_create_src_db_cluster.engine == "{{ engine }}" + - "'engine_mode' in _result_create_src_db_cluster" + - _result_create_src_db_cluster.engine_mode == "serverless" + - "'engine_version' in _result_create_src_db_cluster" + - "'master_username' in _result_create_src_db_cluster" + - _result_create_src_db_cluster.master_username == "{{ username }}" + - "'port' in _result_create_src_db_cluster" + - _result_create_src_db_cluster.port == {{ port }} + - "'status' in 
_result_create_src_db_cluster" + - _result_create_src_db_cluster.status == "available" + - "'tags' in _result_create_src_db_cluster" + - _result_create_src_db_cluster.tags | length == 2 + - _result_create_src_db_cluster.tags.Name == '{{ cluster_id }}' + - _result_create_src_db_cluster.tags.Created_by == 'Ansible rds_cluster tests' + - "'vpc_security_groups' in _result_create_src_db_cluster" + + - name: Get info on DB cluster + rds_cluster_info: + db_cluster_identifier: '{{ cluster_id }}' + register: _result_cluster_info + + - assert: + that: + - _result_cluster_info is successful + + - name: Set the ARN of the source DB cluster + set_fact: + src_db_cluster_arn: '{{ _result_cluster_info.clusters[0].db_cluster_arn}}' + + - name: Create a DB cluster read replica in a different region + rds_cluster: + id: '{{ cluster_id }}-replica' + state: present + replication_source_identifier: '{{ src_db_cluster_arn }}' + engine: '{{ engine}}' + region: '{{ region_dest }}' + tags: + Name: '{{ cluster_id }}' + Created_by: Ansible rds_cluster tests + wait: yes + register: _result_create_replica_db_cluster + + - assert: + that: + - _result_create_replica_db_cluster.changed + - "'allocated_storage' in _result_create_replica_db_cluster" + - _result_create_replica_db_cluster.allocated_storage == 1 + - "'cluster_create_time' in _result_create_replica_db_cluster" + - _result_create_replica_db_cluster.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_create_replica_db_cluster" + - _result_create_replica_db_cluster.db_cluster_identifier == '{{ cluster_id + }}' + - "'db_cluster_parameter_group' in _result_create_replica_db_cluster" + - "'db_cluster_resource_id' in _result_create_replica_db_cluster" + - "'endpoint' in _result_create_replica_db_cluster" + - "'engine' in _result_create_replica_db_cluster" + - _result_create_replica_db_cluster.engine == "{{ engine }}" + - "'engine_mode' in _result_create_replica_db_cluster" + - _result_create_replica_db_cluster.engine_mode == "serverless" + - "'engine_version' in _result_create_replica_db_cluster" + - "'master_username' in _result_create_replica_db_cluster" + - _result_create_replica_db_cluster.master_username == "{{ username }}" + - "'port' in _result_create_replica_db_cluster" + - _result_create_replica_db_cluster.port == {{ port }} + - "'status' in _result_create_replica_db_cluster" + - _result_create_replica_db_cluster.status == "available" + - "'tags' in _result_create_replica_db_cluster" + - _result_create_replica_db_cluster.tags | length == 2 + - _result_create_replica_db_cluster.tags.Name == '{{ cluster_id }}' + - _result_create_replica_db_cluster.tags.Created_by == 'Ansible rds_cluster + tests' + - "'vpc_security_groups' in _result_create_replica_db_cluster" + + - name: Test idempotence with a DB cluster read replica + rds_cluster: + id: '{{ cluster_id }}-replica' + state: present + replication_source_identifier: '{{ src_db_cluster_arn }}' + engine: '{{ engine}}' + region: '{{ region_dest }}' + tags: + Name: '{{ cluster_id }}' + Created_by: Ansible rds_cluster tests + register: _result_create_replica_db_cluster + + - assert: + that: + - not _result_create_replica_db_cluster.changed + + - name: Get info of existing DB cluster + rds_cluster_info: + db_cluster_identifier: '{{ cluster_id }}-replica' + region: '{{ region_dest }}' + register: _result_cluster_info + + - assert: + that: + - _result_cluster_info is successful + # - _result_cluster_info.clusters | length == 0 + + - name: Promote the DB cluster read replica + rds_cluster: + cluster_id: '{{ 
cluster_id }}-replica' + state: present + promote: true + region: '{{ region_dest }}' + register: _result_promote_replica_db_cluster + + - assert: + that: + - _result_promote_replica_db_cluster.changed + + - name: Promote the DB cluster read replica (idempotence) + rds_cluster: + cluster_id: '{{ cluster_id }}-replica' + state: present + promote: true + region: '{{ region_dest }}' + register: _result_promote_replica_db_cluster + + - assert: + that: + - not _result_promote_replica_db_cluster.changed + + always: + - name: Remove the DB cluster + rds_cluster: + id: '{{ cluster_id }}' + state: absent + skip_final_snapshot: true + region: '{{ region_src }}' + ignore_errors: yes + + - name: Remove the DB cluster read replica + rds_cluster: + id: '{{ cluster_id }}-replica' + state: absent + skip_final_snapshot: true + region: '{{ region_dest }}' + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_restore.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_restore.yml new file mode 100644 index 000000000..b991a457b --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_restore.yml @@ -0,0 +1,185 @@ +- block: + - name: Ensure the resource doesn't exist + rds_cluster: + id: '{{ cluster_id }}' + state: absent + engine: '{{ engine}}' + username: '{{ username }}' + password: '{{ password }}' + skip_final_snapshot: true + register: _result_delete_db_cluster + + - assert: + that: + - not _result_delete_db_cluster.changed + ignore_errors: true + + - name: Create a source DB cluster + rds_cluster: + id: '{{ cluster_id }}' + state: present + engine: '{{ engine}}' + backup_retention_period: 1 + username: '{{ username }}' + password: '{{ password }}' + wait: true + register: _result_create_source_db_cluster + + - assert: + that: + - _result_create_source_db_cluster.changed + - "'allocated_storage' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.allocated_storage == 1 + - "'cluster_create_time' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_create_source_db_cluster" + - "'db_cluster_identifier' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.db_cluster_identifier == "{{ cluster_id }}" + - "'db_cluster_parameter_group' in _result_create_source_db_cluster" + - "'db_cluster_resource_id' in _result_create_source_db_cluster" + - "'endpoint' in _result_create_source_db_cluster" + - "'engine' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.engine == "{{ engine }}" + - "'engine_mode' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.engine_mode == "serverless" + - "'engine_version' in _result_create_source_db_cluster" + - "'master_username' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.master_username == "{{ username }}" + - "'port' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.port == {{ port }} + - "'status' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.status == 'available' + - _result_create_source_db_cluster.storage_encrypted == true + - "'tags' in _result_create_source_db_cluster" + - "'vpc_security_groups' in _result_create_source_db_cluster" + + - name: Create a point in time DB cluster + rds_cluster: + state: present + id: '{{ cluster_id 
}}-point-in-time' + source_db_cluster_identifier: '{{ cluster_id }}' + creation_source: cluster + engine: '{{ engine}}' + username: '{{ username }}' + password: '{{ password }}' + use_latest_restorable_time: true + tags: + Name: '{{ cluster_id }}' + Created_by: Ansible rds_cluster tests + register: _result_restored_db_cluster + + - assert: + that: + - _result_restored_db_cluster.changed + - "'allocated_storage' in _result_restored_db_cluster" + - _result_restored_db_cluster.allocated_storage == 1 + - "'cluster_create_time' in _result_restored_db_cluster" + - _result_restored_db_cluster.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_restored_db_cluster" + - _result_restored_db_cluster.db_cluster_identifier == '{{ cluster_id }}-point-in-time' + - "'db_cluster_parameter_group' in _result_restored_db_cluster" + - "'db_cluster_resource_id' in _result_restored_db_cluster" + - "'endpoint' in _result_restored_db_cluster" + - "'engine' in _result_restored_db_cluster" + - _result_restored_db_cluster.engine == "{{ engine }}" + - "'engine_mode' in _result_restored_db_cluster" + - _result_restored_db_cluster.engine_mode == "serverless" + - "'engine_version' in _result_restored_db_cluster" + - "'master_username' in _result_restored_db_cluster" + - _result_restored_db_cluster.master_username == "{{ username }}" + - "'port' in _result_restored_db_cluster" + - _result_restored_db_cluster.port == {{ port }} + - "'status' in _result_restored_db_cluster" + - _result_restored_db_cluster.status == "available" + - "'tags' in _result_restored_db_cluster" + - _result_restored_db_cluster.tags | length == 2 + - _result_restored_db_cluster.tags.Name == '{{ cluster_id }}' + - _result_restored_db_cluster.tags.Created_by == 'Ansible rds_cluster tests' + - "'vpc_security_groups' in _result_restored_db_cluster" + + - name: Create a point in time DB cluster (idempotence) + rds_cluster: + state: present + id: '{{ cluster_id }}-point-in-time' + source_db_cluster_identifier: '{{ cluster_id }}' + creation_source: cluster + engine: '{{ engine}}' + username: '{{ username }}' + password: '{{ password }}' + restore_to_time: '{{ _result_restored_db_cluster.latest_restorable_time }}' + tags: + Name: '{{ cluster_id }}' + Created_by: Ansible rds_cluster tests + register: _result_restored_db_cluster + + - assert: + that: + - not _result_restored_db_cluster.changed + + - name: Take a snapshot of the DB cluster + rds_cluster_snapshot: + state: present + db_cluster_identifier: '{{ cluster_id }}' + db_cluster_snapshot_identifier: '{{ cluster_id }}-snapshot' + wait: true + register: _result_cluster_snapshot + + - assert: + that: + - _result_cluster_snapshot.changed + + - name: Restore DB cluster from source (snapshot) + rds_cluster: + creation_source: snapshot + engine: '{{ engine }}' + cluster_id: '{{ cluster_id }}-restored-snapshot' + snapshot_identifier: '{{ cluster_id }}-snapshot' + wait: true + register: _result_restored_db_cluster + + - assert: + that: + - _result_restored_db_cluster.changed + - "'allocated_storage' in _result_restored_db_cluster" + - _result_restored_db_cluster.allocated_storage == 1 + - "'cluster_create_time' in _result_restored_db_cluster" + - _result_restored_db_cluster.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_restored_db_cluster" + - _result_restored_db_cluster.db_cluster_identifier == '{{ cluster_id }}-restored-snapshot' + - "'db_cluster_parameter_group' in _result_restored_db_cluster" + - "'db_cluster_resource_id' in _result_restored_db_cluster" + - "'endpoint' in 
_result_restored_db_cluster" + - "'engine' in _result_restored_db_cluster" + - _result_restored_db_cluster.engine == "{{ engine }}" + - "'engine_mode' in _result_restored_db_cluster" + - _result_restored_db_cluster.engine_mode == "serverless" + - "'engine_version' in _result_restored_db_cluster" + - "'master_username' in _result_restored_db_cluster" + - _result_restored_db_cluster.master_username == "{{ username }}" + - "'port' in _result_restored_db_cluster" + - _result_restored_db_cluster.port == {{ port }} + - "'status' in _result_restored_db_cluster" + - _result_restored_db_cluster.status == "available" + - "'tags' in _result_restored_db_cluster" + - "'vpc_security_groups' in _result_restored_db_cluster" + + # TODO: export a snapshot to an S3 bucket and restore cluster from it + # Requires rds_export_task module + always: + - name: Delete the snapshot + rds_cluster_snapshot: + db_cluster_snapshot_identifier: '{{ cluster_id }}-snapshot' + state: absent + register: _result_delete_snapshot + ignore_errors: true + + - name: Delete DB cluster without creating a final snapshot + rds_cluster: + state: absent + cluster_id: '{{ item }}' + skip_final_snapshot: true + ignore_errors: true + loop: + - '{{ cluster_id }}' + - '{{ cluster_id }}-point-in-time' + - '{{ cluster_id }}-restored-snapshot' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_tag.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_tag.yml new file mode 100644 index 000000000..be0fa3ee3 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_tag.yml @@ -0,0 +1,290 @@ +- block: + - name: Ensure the resource doesn't exist + rds_cluster: + id: '{{ cluster_id }}' + state: absent + engine: '{{ engine}}' + username: '{{ username }}' + password: '{{ password }}' + skip_final_snapshot: true + register: _result_delete_db_cluster + + - assert: + that: + - not _result_delete_db_cluster.changed + ignore_errors: yes + + - name: Create a DB cluster + rds_cluster: + engine: '{{ engine }}' + username: '{{ username }}' + password: '{{ password }}' + cluster_id: '{{ cluster_id }}' + tags: '{{ tags_create }}' + register: _result_create_db_cluster + + - assert: + that: + - _result_create_db_cluster.changed + - "'allocated_storage' in _result_create_db_cluster" + - _result_create_db_cluster.allocated_storage == 1 + - "'cluster_create_time' in _result_create_db_cluster" + - _result_create_db_cluster.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_create_db_cluster" + - "'db_cluster_identifier' in _result_create_db_cluster" + - _result_create_db_cluster.db_cluster_identifier == "{{ cluster_id }}" + - "'db_cluster_parameter_group' in _result_create_db_cluster" + - "'db_cluster_resource_id' in _result_create_db_cluster" + - "'endpoint' in _result_create_db_cluster" + - "'engine' in _result_create_db_cluster" + - _result_create_db_cluster.engine == "{{ engine }}" + - "'engine_mode' in _result_create_db_cluster" + - _result_create_db_cluster.engine_mode == "serverless" + - "'engine_version' in _result_create_db_cluster" + - "'master_username' in _result_create_db_cluster" + - _result_create_db_cluster.master_username == "{{ username }}" + - "'port' in _result_create_db_cluster" + - _result_create_db_cluster.port == {{ port }} + - "'status' in _result_create_db_cluster" + - _result_create_db_cluster.status == 'available' + - _result_create_db_cluster.storage_encrypted 
== true + - "'tags' in _result_create_db_cluster" + - _result_create_db_cluster.tags | length == 2 + - _result_create_db_cluster.tags["Created_By"] == "{{ tags_create["Created_By"] + }}" + - _result_create_db_cluster.tags["Name"] == "{{ tags_create["Name"]}}" + - "'vpc_security_groups' in _result_create_db_cluster" + + - name: Test tags are not purged if purge_tags is False + rds_cluster: + engine: '{{ engine }}' + username: '{{ username }}' + password: '{{ new_password }}' + cluster_id: '{{ cluster_id }}' + tags: {} + purge_tags: false + register: _result_tag_db_cluster + + - assert: + that: + - not _result_tag_db_cluster.changed + - "'allocated_storage' in _result_tag_db_cluster" + - _result_tag_db_cluster.allocated_storage == 1 + - "'cluster_create_time' in _result_tag_db_cluster" + - _result_tag_db_cluster.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_tag_db_cluster" + - "'db_cluster_identifier' in _result_tag_db_cluster" + - _result_tag_db_cluster.db_cluster_identifier == "{{ cluster_id }}" + - "'db_cluster_parameter_group' in _result_tag_db_cluster" + - "'db_cluster_resource_id' in _result_tag_db_cluster" + - "'endpoint' in _result_tag_db_cluster" + - "'engine' in _result_tag_db_cluster" + - _result_tag_db_cluster.engine == "{{ engine }}" + - "'engine_mode' in _result_tag_db_cluster" + - _result_tag_db_cluster.engine_mode == "serverless" + - "'engine_version' in _result_tag_db_cluster" + - "'master_username' in _result_tag_db_cluster" + - _result_tag_db_cluster.master_username == "{{ username }}" + - "'port' in _result_tag_db_cluster" + - _result_tag_db_cluster.port == {{ port }} + - "'status' in _result_tag_db_cluster" + - _result_tag_db_cluster.status == 'available' + - _result_tag_db_cluster.storage_encrypted == true + - "'tags' in _result_tag_db_cluster" + - _result_tag_db_cluster.tags | length == 2 + - _result_tag_db_cluster.tags["Created_By"] == "{{ tags_create["Created_By"] + }}" + - _result_tag_db_cluster.tags["Name"] == "{{ tags_create["Name"] }}" + - "'vpc_security_groups' in _result_tag_db_cluster" + + - name: Add a tag and remove a tag (purge_tags is True) + rds_cluster: + cluster_id: '{{ cluster_id }}' + state: present + tags: '{{ tags_patch }}' + register: _result_tag_db_cluster + + - assert: + that: + - _result_tag_db_cluster.changed + - "'allocated_storage' in _result_tag_db_cluster" + - _result_tag_db_cluster.allocated_storage == 1 + - "'cluster_create_time' in _result_tag_db_cluster" + - _result_tag_db_cluster.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_tag_db_cluster" + - "'db_cluster_identifier' in _result_tag_db_cluster" + - _result_tag_db_cluster.db_cluster_identifier == "{{ cluster_id }}" + - "'db_cluster_parameter_group' in _result_tag_db_cluster" + - "'db_cluster_resource_id' in _result_tag_db_cluster" + - "'endpoint' in _result_tag_db_cluster" + - "'engine' in _result_tag_db_cluster" + - _result_tag_db_cluster.engine == "{{ engine }}" + - "'engine_mode' in _result_tag_db_cluster" + - _result_tag_db_cluster.engine_mode == "serverless" + - "'engine_version' in _result_tag_db_cluster" + - "'master_username' in _result_tag_db_cluster" + - _result_tag_db_cluster.master_username == "{{ username }}" + - "'port' in _result_tag_db_cluster" + - _result_tag_db_cluster.port == {{ port }} + - "'status' in _result_tag_db_cluster" + - _result_tag_db_cluster.status == 'available' + - _result_tag_db_cluster.storage_encrypted == true + - "'tags' in _result_tag_db_cluster" + - _result_tag_db_cluster.tags | length == 2 + - 
_result_tag_db_cluster.tags["Name"] == "{{ tags_patch['Name'] }}" + - "'vpc_security_groups' in _result_tag_db_cluster" + + - name: Purge a tag from the cluster (CHECK MODE) + rds_cluster: + engine: '{{ engine }}' + username: '{{ username }}' + password: '{{ password }}' + cluster_id: '{{ cluster_id }}' + tags: + Created_By: Ansible_rds_cluster_integration_test + register: _result_tag_db_cluster + check_mode: true + + - assert: + that: + - _result_tag_db_cluster.changed + + - name: Purge a tag from the cluster + rds_cluster: + engine: '{{ engine }}' + username: '{{ username }}' + password: '{{ password }}' + cluster_id: '{{ cluster_id }}' + tags: + Created_By: Ansible_rds_cluster_integration_test + register: _result_tag_db_cluster + + - assert: + that: + - _result_tag_db_cluster.changed + - "'allocated_storage' in _result_tag_db_cluster" + - _result_tag_db_cluster.allocated_storage == 1 + - "'cluster_create_time' in _result_tag_db_cluster" + - _result_tag_db_cluster.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_tag_db_cluster" + - "'db_cluster_identifier' in _result_tag_db_cluster" + - _result_tag_db_cluster.db_cluster_identifier == "{{ cluster_id }}" + - "'db_cluster_parameter_group' in _result_tag_db_cluster" + - "'db_cluster_resource_id' in _result_tag_db_cluster" + - "'endpoint' in _result_tag_db_cluster" + - "'engine' in _result_tag_db_cluster" + - _result_tag_db_cluster.engine == "{{ engine }}" + - "'engine_mode' in _result_tag_db_cluster" + - _result_tag_db_cluster.engine_mode == "serverless" + - "'engine_version' in _result_tag_db_cluster" + - "'master_username' in _result_tag_db_cluster" + - _result_tag_db_cluster.master_username == "{{ username }}" + - "'port' in _result_tag_db_cluster" + - _result_tag_db_cluster.port == {{ port }} + - "'status' in _result_tag_db_cluster" + - _result_tag_db_cluster.status == 'available' + - _result_tag_db_cluster.storage_encrypted == true + - "'tags' in _result_tag_db_cluster" + - _result_tag_db_cluster.tags | length == 1 + - _result_tag_db_cluster.tags["Created_By"] == "Ansible_rds_cluster_integration_test" + - "'vpc_security_groups' in _result_tag_db_cluster" + + - name: Add a tag to the cluster (CHECK MODE) + rds_cluster: + engine: '{{ engine }}' + username: '{{ username }}' + password: '{{ password }}' + cluster_id: '{{ cluster_id }}' + tags: + Name: cluster-{{ resource_prefix }} + Created_By: Ansible_rds_cluster_integration_test + register: _result_tag_db_cluster + check_mode: true + + - assert: + that: + - _result_tag_db_cluster.changed + + - name: Add a tag to the cluster + rds_cluster: + engine: '{{ engine }}' + username: '{{ username }}' + password: '{{ password }}' + cluster_id: '{{ cluster_id }}' + tags: '{{ tags_create }}' + register: _result_tag_db_cluster + + - assert: + that: + - _result_tag_db_cluster.changed + - "'allocated_storage' in _result_tag_db_cluster" + - _result_tag_db_cluster.allocated_storage == 1 + - "'cluster_create_time' in _result_tag_db_cluster" + - _result_tag_db_cluster.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_tag_db_cluster" + - "'db_cluster_identifier' in _result_tag_db_cluster" + - _result_tag_db_cluster.db_cluster_identifier == "{{ cluster_id }}" + - "'db_cluster_parameter_group' in _result_tag_db_cluster" + - "'db_cluster_resource_id' in _result_tag_db_cluster" + - "'endpoint' in _result_tag_db_cluster" + - "'engine' in _result_tag_db_cluster" + - _result_tag_db_cluster.engine == "{{ engine }}" + - "'engine_mode' in _result_tag_db_cluster" + - 
_result_tag_db_cluster.engine_mode == "serverless" + - "'engine_version' in _result_tag_db_cluster" + - "'master_username' in _result_tag_db_cluster" + - _result_tag_db_cluster.master_username == "{{ username }}" + - "'port' in _result_tag_db_cluster" + - _result_tag_db_cluster.port == {{ port }} + - "'status' in _result_tag_db_cluster" + - _result_tag_db_cluster.status == 'available' + - _result_tag_db_cluster.storage_encrypted == true + - "'tags' in _result_tag_db_cluster" + - _result_tag_db_cluster.tags | length == 2 + - _result_tag_db_cluster.tags["Created_By"] == "{{ tags_create["Created_By"]}}" + - _result_tag_db_cluster.tags["Name"] == "{{ tags_create["Name"]}}" + - "'vpc_security_groups' in _result_tag_db_cluster" + - name: Remove all tags + rds_cluster: + engine: '{{ engine }}' + username: '{{ username }}' + password: '{{ password }}' + cluster_id: '{{ cluster_id }}' + tags: {} + register: _result_tag_db_cluster + + - assert: + that: + - _result_tag_db_cluster.changed + - "'allocated_storage' in _result_tag_db_cluster" + - _result_tag_db_cluster.allocated_storage == 1 + - "'cluster_create_time' in _result_tag_db_cluster" + - _result_tag_db_cluster.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_tag_db_cluster" + - "'db_cluster_identifier' in _result_tag_db_cluster" + - _result_tag_db_cluster.db_cluster_identifier == "{{ cluster_id }}" + - "'db_cluster_parameter_group' in _result_tag_db_cluster" + - "'db_cluster_resource_id' in _result_tag_db_cluster" + - "'endpoint' in _result_tag_db_cluster" + - "'engine' in _result_tag_db_cluster" + - _result_tag_db_cluster.engine == "{{ engine }}" + - "'engine_mode' in _result_tag_db_cluster" + - _result_tag_db_cluster.engine_mode == "serverless" + - "'engine_version' in _result_tag_db_cluster" + - "'master_username' in _result_tag_db_cluster" + - _result_tag_db_cluster.master_username == "{{ username }}" + - "'port' in _result_tag_db_cluster" + - _result_tag_db_cluster.port == {{ port }} + - "'status' in _result_tag_db_cluster" + - _result_tag_db_cluster.status == 'available' + - _result_tag_db_cluster.storage_encrypted == true + - "'tags' in _result_tag_db_cluster" + - _result_tag_db_cluster.tags | length == 0 + - "'vpc_security_groups' in _result_tag_db_cluster" + always: + - name: Delete DB cluster without creating a final snapshot + rds_cluster: + state: absent + cluster_id: '{{ cluster_id }}' + skip_final_snapshot: true + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/vars/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/vars/main.yml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/vars/main.yml @@ -0,0 +1 @@ +--- diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/runme.sh b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/runme.sh new file mode 100755 index 000000000..21720b263 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/runme.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash +# +# Beware: most of our tests here are run in parallel. 
+# To add new tests you'll need to add a new host to the inventory and a matching +# '{{ inventory_hostname }}'.yml file in roles/rds_cluster/tasks/ + + +set -eux + +export ANSIBLE_ROLES_PATH=../ + +ansible-playbook main.yml -i inventory "$@" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/aliases new file mode 100644 index 000000000..dc15e8ab0 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/aliases @@ -0,0 +1,7 @@ +cloud/aws + +# It takes >20min to spawn the multi-az cluster +disabled + +rds_cluster +rds_cluster_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/defaults/main.yml new file mode 100644 index 000000000..f666a2d77 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/defaults/main.yml @@ -0,0 +1,7 @@ +# Create cluster +cluster_id: ansible-test-{{ tiny_prefix }} +username: testrdsusername +password: "{{ lookup('password', 'dev/null length=12 chars=ascii_letters,digits') }}" +tags_create: + Name: ansible-test-cluster-{{ tiny_prefix }} + Created_By: Ansible_rds_cluster_integration_test diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/meta/main.yml new file mode 100644 index 000000000..39e88928a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/meta/main.yml @@ -0,0 +1,5 @@ +--- +dependencies: + - role: setup_botocore_pip + vars: + botocore_version: "1.23.44" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/tasks/main.yml new file mode 100644 index 000000000..911eb60de --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/tasks/main.yml @@ -0,0 +1,79 @@ +--- +- module_defaults: + group/aws: + region: "{{ aws_region }}" + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + collections: + - amazon.aws + + block: + - name: Ensure the resource doesn't exist + rds_cluster: + id: '{{ cluster_id }}' + state: absent + engine: 'mysql' + username: '{{ username }}' + password: '{{ password }}' + skip_final_snapshot: true + register: _result_delete_db_cluster + + - assert: + that: + - not _result_delete_db_cluster.changed + ignore_errors: true + + - name: Create a source DB cluster (CHECK_MODE) + rds_cluster: + id: '{{ cluster_id }}' + state: present + engine: 'mysql' + engine_version: 8.0.28 + allocated_storage: 100 + iops: 5000 + db_cluster_instance_class: db.r6gd.xlarge + username: '{{ username }}' + password: '{{ password }}' + wait: true + tags: '{{ tags_create }}' + register: _result_create_source_db_cluster + check_mode: true + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + + - assert: + that: + - _result_create_source_db_cluster.changed + + - name: Create a source DB cluster + rds_cluster: + id: '{{ cluster_id }}' + state: present + engine: 'mysql' + engine_version: 8.0.28 + allocated_storage: 100 + iops: 5000 + db_cluster_instance_class: 
db.r6gd.xlarge + username: '{{ username }}' + password: '{{ password }}' + wait: true + tags: '{{ tags_create }}' + register: _result_create_source_db_cluster + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + + - assert: + that: + - _result_create_source_db_cluster.changed + + always: + + - name: Delete DB cluster without creating a final snapshot + rds_cluster: + state: absent + cluster_id: '{{ item }}' + skip_final_snapshot: true + ignore_errors: true + loop: + - '{{ cluster_id }}' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/aliases new file mode 100644 index 000000000..7f2c75f26 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/aliases @@ -0,0 +1,5 @@ +time=10m + +cloud/aws + +rds_snapshot_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/defaults/main.yml new file mode 100644 index 000000000..268ab154f --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/defaults/main.yml @@ -0,0 +1,13 @@ +--- +# defaults file for rds_cluster_snapshot +_resource_prefix: 'ansible-test-{{ tiny_prefix }}' + +# Create RDS cluster +cluster_id: '{{ _resource_prefix }}-rds-cluster' +username: 'testrdsusername' +password: "{{ lookup('password', 'dev/null length=12 chars=ascii_letters,digits') }}" +engine: 'aurora' +port: 3306 + +# Create snapshot +snapshot_id: '{{ _resource_prefix }}-rds-cluster-snapshot' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/tasks/main.yml new file mode 100644 index 000000000..a105044d9 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/tasks/main.yml @@ -0,0 +1,480 @@ +--- +- module_defaults: + group/aws: + region: "{{ aws_region }}" + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + collections: + - amazon.aws + + block: + - name: Create a source DB cluster + rds_cluster: + id: "{{ cluster_id }}" + state: present + engine: "{{ engine}}" + backup_retention_period: 1 + username: "{{ username }}" + password: "{{ password }}" + preferred_backup_window: "01:15-01:45" + register: _result_create_source_db_cluster + + - assert: + that: + - _result_create_source_db_cluster.changed + - "'allocated_storage' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.allocated_storage == 1 + - "'cluster_create_time' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.db_cluster_identifier == "{{ cluster_id }}" + - "'db_cluster_parameter_group' in _result_create_source_db_cluster" + - "'db_cluster_resource_id' in _result_create_source_db_cluster" + - "'endpoint' in _result_create_source_db_cluster" + - "'engine' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.engine == "{{ engine }}" + - "'engine_mode' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.engine_mode == "serverless" + - "'engine_version' in 
_result_create_source_db_cluster" + - "'master_username' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.master_username == "{{ username }}" + - "'port' in _result_create_source_db_cluster" + - "_result_create_source_db_cluster.port == {{ port }}" + - "'status' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.status == "available" + - "'tags' in _result_create_source_db_cluster" + - "'vpc_security_groups' in _result_create_source_db_cluster" + + - name: Get all RDS snapshots for the existing DB cluster + rds_snapshot_info: + db_cluster_identifier: "{{ cluster_id }}" + register: _result_cluster_snapshot_info + + - assert: + that: + - _result_cluster_snapshot_info is successful + - _result_cluster_snapshot_info.cluster_snapshots | length == 0 + + - name: Take a snapshot of the existing DB cluster (CHECK_MODE) + rds_cluster_snapshot: + state: present + db_cluster_identifier: "{{ cluster_id }}" + db_cluster_snapshot_identifier: "{{ snapshot_id }}" + check_mode: true + register: _result_cluster_snapshot + + - assert: + that: + - _result_cluster_snapshot.changed + + - name: Take a snapshot of the existing DB cluster + rds_cluster_snapshot: + state: present + db_cluster_identifier: "{{ cluster_id }}" + db_cluster_snapshot_identifier: "{{ snapshot_id }}" + wait: true + register: _result_cluster_snapshot + + - assert: + that: + - _result_cluster_snapshot.changed + - "'allocated_storage' in _result_cluster_snapshot" + - "'cluster_create_time' in _result_cluster_snapshot" + - "'db_cluster_identifier' in _result_cluster_snapshot" + - _result_cluster_snapshot.db_cluster_identifier == "{{ cluster_id }}" + - "'db_cluster_snapshot_identifier' in _result_cluster_snapshot" + - _result_cluster_snapshot.db_cluster_snapshot_identifier == "{{ snapshot_id }}" + - "'db_cluster_snapshot_arn' in _result_cluster_snapshot" + - "'engine' in _result_cluster_snapshot" + - _result_cluster_snapshot.engine == "{{ engine }}" + # - "'engine_mode' in _result_cluster_snapshot" + # - _result_cluster_snapshot.engine_mode == "provisioned" + - "'engine_version' in _result_cluster_snapshot" + - "'iam_database_authentication_enabled' in _result_cluster_snapshot" + - "'license_model' in _result_cluster_snapshot" + - "'master_username' in _result_cluster_snapshot" + - _result_cluster_snapshot.master_username == "{{ username }}" + - "'snapshot_create_time' in _result_cluster_snapshot" + - "'snapshot_type' in _result_cluster_snapshot" + - "'status' in _result_cluster_snapshot" + - _result_create_source_db_cluster.status == "available" + - "'storage_encrypted' in _result_cluster_snapshot" + - "'tags' in _result_cluster_snapshot" + - "'vpc_id' in _result_cluster_snapshot" + + - name: Get information about the existing DB snapshot + rds_snapshot_info: + db_cluster_snapshot_identifier: "{{ snapshot_id }}" + register: _result_cluster_snapshot_info + + - assert: + that: + - _result_cluster_snapshot_info is successful + - _result_cluster_snapshot_info.cluster_snapshots[0].db_cluster_identifier == "{{ cluster_id }}" + - _result_cluster_snapshot_info.cluster_snapshots[0].db_cluster_snapshot_identifier == "{{ snapshot_id }}" + + - name: Get info of the existing DB cluster + rds_cluster_info: + cluster_id: "{{ cluster_id }}" + register: result_cluster_info + + - assert: + that: + - result_cluster_info is successful + + - name: Create another source DB cluster + rds_cluster: + id: "{{ cluster_id }}-b" + state: present + engine: "{{ engine}}" + backup_retention_period: 1 + username: "{{ username }}" 
+ password: "{{ password }}" + preferred_backup_window: "01:15-01:45" + register: _result_create_source_db_cluster + + - assert: + that: + - _result_create_source_db_cluster.changed + - "'allocated_storage' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.allocated_storage == 1 + - "'cluster_create_time' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.db_cluster_identifier == "{{ cluster_id }}-b" + - "'db_cluster_parameter_group' in _result_create_source_db_cluster" + - "'db_cluster_resource_id' in _result_create_source_db_cluster" + - "'endpoint' in _result_create_source_db_cluster" + - "'engine' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.engine == "{{ engine }}" + - "'engine_mode' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.engine_mode == "serverless" + - "'engine_version' in _result_create_source_db_cluster" + - "'master_username' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.master_username == "{{ username }}" + - "'port' in _result_create_source_db_cluster" + - "_result_create_source_db_cluster.port == {{ port }}" + - "'status' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.status == "available" + - "'tags' in _result_create_source_db_cluster" + - "'vpc_security_groups' in _result_create_source_db_cluster" + + - name: Take another snapshot of the existing DB cluster + rds_cluster_snapshot: + state: present + db_cluster_identifier: "{{ cluster_id }}-b" + db_cluster_snapshot_identifier: "{{ snapshot_id }}-b" + wait: true + register: _result_cluster_snapshot + + - assert: + that: + - _result_cluster_snapshot.changed + - "'allocated_storage' in _result_cluster_snapshot" + - "'cluster_create_time' in _result_cluster_snapshot" + - "'db_cluster_identifier' in _result_cluster_snapshot" + - _result_cluster_snapshot.db_cluster_identifier == "{{ cluster_id }}-b" + - "'db_cluster_snapshot_identifier' in _result_cluster_snapshot" + - _result_cluster_snapshot.db_cluster_snapshot_identifier == "{{ snapshot_id }}-b" + - "'db_cluster_snapshot_arn' in _result_cluster_snapshot" + - "'engine' in _result_cluster_snapshot" + - _result_cluster_snapshot.engine == "{{ engine }}" + # - "'engine_mode' in _result_cluster_snapshot" + # - _result_cluster_snapshot.engine_mode == "provisioned" + - "'engine_version' in _result_cluster_snapshot" + - "'iam_database_authentication_enabled' in _result_cluster_snapshot" + - "'license_model' in _result_cluster_snapshot" + - "'master_username' in _result_cluster_snapshot" + - _result_cluster_snapshot.master_username == "{{ username }}" + - "'snapshot_create_time' in _result_cluster_snapshot" + - "'snapshot_type' in _result_cluster_snapshot" + - "'status' in _result_cluster_snapshot" + - _result_create_source_db_cluster.status == "available" + - "'storage_encrypted' in _result_cluster_snapshot" + - "'tags' in _result_cluster_snapshot" + - "'vpc_id' in _result_cluster_snapshot" + + - name: Get all RDS snapshots for the existing DB cluster + rds_snapshot_info: + db_cluster_identifier: "{{ cluster_id }}-b" + register: _result_cluster_snapshot_info + + - assert: + that: + - _result_cluster_snapshot_info is successful + - _result_cluster_snapshot_info.cluster_snapshots | length == 1 + + - name: Delete existing DB cluster snapshot (CHECK_MODE) + rds_cluster_snapshot: + state: absent + 
db_cluster_snapshot_identifier: "{{ snapshot_id }}-b" + register: _result_delete_snapshot + check_mode: true + + - assert: + that: + - _result_delete_snapshot.changed + + - name: Delete the existing DB cluster snapshot + rds_cluster_snapshot: + state: absent + db_cluster_snapshot_identifier: "{{ snapshot_id }}-b" + register: _result_delete_snapshot + + - assert: + that: + - _result_delete_snapshot.changed + + - name: Get info of the existing DB cluster + rds_cluster_info: + cluster_id: "{{ cluster_id }}" + register: _result_cluster_info + + - assert: + that: + - result_cluster_info is successful + + - name: Take another snapshot of the existing DB cluster and assign tags + rds_cluster_snapshot: + state: present + db_cluster_identifier: "{{ cluster_id }}" + db_cluster_snapshot_identifier: "{{ snapshot_id }}-b" + wait: true + tags: + tag_one: '{{ snapshot_id }}-b One' + "Tag Two": 'two {{ snapshot_id }}-b' + register: _result_cluster_snapshot + + - assert: + that: + - _result_cluster_snapshot.changed + - "'allocated_storage' in _result_cluster_snapshot" + - "'cluster_create_time' in _result_cluster_snapshot" + - "'db_cluster_identifier' in _result_cluster_snapshot" + - _result_cluster_snapshot.db_cluster_identifier == "{{ cluster_id }}" + - "'db_cluster_snapshot_identifier' in _result_cluster_snapshot" + - _result_cluster_snapshot.db_cluster_snapshot_identifier == "{{ snapshot_id }}-b" + - "'db_cluster_snapshot_arn' in _result_cluster_snapshot" + - "'engine' in _result_cluster_snapshot" + - _result_cluster_snapshot.engine == "{{ engine }}" + # - "'engine_mode' in _result_cluster_snapshot" + # - _result_cluster_snapshot.engine_mode == "provisioned" + - "'engine_version' in _result_cluster_snapshot" + - "'iam_database_authentication_enabled' in _result_cluster_snapshot" + - "'license_model' in _result_cluster_snapshot" + - "'master_username' in _result_cluster_snapshot" + - _result_cluster_snapshot.master_username == "{{ username }}" + - "'snapshot_create_time' in _result_cluster_snapshot" + - "'snapshot_type' in _result_cluster_snapshot" + - "'status' in _result_cluster_snapshot" + - _result_create_source_db_cluster.status == "available" + - "'storage_encrypted' in _result_cluster_snapshot" + - "'tags' in _result_cluster_snapshot" + - _result_cluster_snapshot.tags | length == 2 + - _result_cluster_snapshot.tags["tag_one"] == "{{ snapshot_id }}-b One" + - _result_cluster_snapshot.tags["Tag Two"] == "two {{ snapshot_id }}-b" + - "'vpc_id' in _result_cluster_snapshot" + + - name: Attempt to take another snapshot of the existing DB cluster and assign tags (idempotence) + rds_cluster_snapshot: + state: present + db_cluster_identifier: "{{ cluster_id }}" + db_cluster_snapshot_identifier: "{{ snapshot_id }}-b" + wait: true + tags: + tag_one: '{{ snapshot_id }}-b One' + "Tag Two": 'two {{ snapshot_id }}-b' + register: _result_cluster_snapshot + + - assert: + that: + - not _result_cluster_snapshot.changed + + - name: Take another snapshot of the existing DB cluster and update tags + rds_cluster_snapshot: + state: present + db_cluster_identifier: "{{ cluster_id }}" + db_cluster_snapshot_identifier: "{{ snapshot_id }}-b" + tags: + tag_three: '{{ snapshot_id }}-b Three' + "Tag Two": 'two {{ snapshot_id }}-b' + register: _result_cluster_snapshot + + - assert: + that: + - _result_cluster_snapshot.changed + - "'allocated_storage' in _result_cluster_snapshot" + - "'cluster_create_time' in _result_cluster_snapshot" + - "'db_cluster_identifier' in _result_cluster_snapshot" + - 
_result_cluster_snapshot.db_cluster_identifier == "{{ cluster_id }}" + - "'db_cluster_snapshot_identifier' in _result_cluster_snapshot" + - _result_cluster_snapshot.db_cluster_snapshot_identifier == "{{ snapshot_id }}-b" + - "'db_cluster_snapshot_arn' in _result_cluster_snapshot" + - "'engine' in _result_cluster_snapshot" + - _result_cluster_snapshot.engine == "{{ engine }}" + # - "'engine_mode' in _result_cluster_snapshot" + # - _result_cluster_snapshot.engine_mode == "provisioned" + - "'engine_version' in _result_cluster_snapshot" + - "'iam_database_authentication_enabled' in _result_cluster_snapshot" + - "'license_model' in _result_cluster_snapshot" + - "'master_username' in _result_cluster_snapshot" + - _result_cluster_snapshot.master_username == "{{ username }}" + - "'snapshot_create_time' in _result_cluster_snapshot" + - "'snapshot_type' in _result_cluster_snapshot" + - "'status' in _result_cluster_snapshot" + - _result_cluster_snapshot.status == "available" + - "'storage_encrypted' in _result_cluster_snapshot" + - "'tags' in _result_cluster_snapshot" + - _result_cluster_snapshot.tags | length == 2 + - _result_cluster_snapshot.tags["tag_three"] == "{{ snapshot_id }}-b Three" + - _result_cluster_snapshot.tags["Tag Two"] == "two {{ snapshot_id }}-b" + - "'vpc_id' in _result_cluster_snapshot" + + - name: Take another snapshot of the existing DB cluster and update tags without purge + rds_cluster_snapshot: + state: present + db_cluster_identifier: "{{ cluster_id }}" + db_cluster_snapshot_identifier: "{{ snapshot_id }}-b" + purge_tags: false + tags: + tag_one: '{{ snapshot_id }}-b One' + register: _result_cluster_snapshot + + - assert: + that: + - _result_cluster_snapshot.changed + - "'allocated_storage' in _result_cluster_snapshot" + - "'cluster_create_time' in _result_cluster_snapshot" + - "'db_cluster_identifier' in _result_cluster_snapshot" + - _result_cluster_snapshot.db_cluster_identifier == "{{ cluster_id }}" + - "'db_cluster_snapshot_identifier' in _result_cluster_snapshot" + - _result_cluster_snapshot.db_cluster_snapshot_identifier == "{{ snapshot_id }}-b" + - "'db_cluster_snapshot_arn' in _result_cluster_snapshot" + - "'engine' in _result_cluster_snapshot" + - _result_cluster_snapshot.engine == "{{ engine }}" + # - "'engine_mode' in _result_cluster_snapshot" + # - _result_cluster_snapshot.engine_mode == "provisioned" + - "'engine_version' in _result_cluster_snapshot" + - "'iam_database_authentication_enabled' in _result_cluster_snapshot" + - "'license_model' in _result_cluster_snapshot" + - "'master_username' in _result_cluster_snapshot" + - _result_cluster_snapshot.master_username == "{{ username }}" + - "'snapshot_create_time' in _result_cluster_snapshot" + - "'snapshot_type' in _result_cluster_snapshot" + - "'status' in _result_cluster_snapshot" + - _result_cluster_snapshot.status == "available" + - "'storage_encrypted' in _result_cluster_snapshot" + - "'tags' in _result_cluster_snapshot" + - _result_cluster_snapshot.tags | length == 3 + - _result_cluster_snapshot.tags["tag_one"] == "{{ snapshot_id }}-b One" + - _result_cluster_snapshot.tags["Tag Two"] == "two {{ snapshot_id }}-b" + - _result_cluster_snapshot.tags["tag_three"] == "{{ snapshot_id }}-b Three" + - "'vpc_id' in _result_cluster_snapshot" + + - name: Take another snapshot of the existing DB cluster and do not specify any tag to ensure previous tags are not removed + rds_cluster_snapshot: + state: present + db_cluster_identifier: "{{ cluster_id }}" + db_cluster_snapshot_identifier: "{{ snapshot_id 
}}-b" + register: _result_cluster_snapshot + + - assert: + that: + - not _result_cluster_snapshot.changed + + # ------------------------------------------------------------------------------------------ + # Test copying a snapshot + ### Copying a DB cluster snapshot from a different region is supported, but not in CI, + ### because the aws-terminator only terminates resources in one region. + - set_fact: + _snapshot_arn: "{{ _result_cluster_snapshot.db_cluster_snapshot_arn }}" + + - name: Copy a DB cluster snapshot (check mode) + rds_cluster_snapshot: + id: "{{ snapshot_id }}-copy" + source_id: "{{ snapshot_id }}-b" + copy_tags: yes + wait: true + register: _result_cluster_copy_snapshot + check_mode: yes + + - assert: + that: + - _result_cluster_copy_snapshot.changed + + - name: Copy a DB cluster snapshot + rds_cluster_snapshot: + id: "{{ snapshot_id }}-copy" + source_id: "{{ snapshot_id }}-b" + copy_tags: yes + wait: true + register: _result_cluster_copy_snapshot + + - assert: + that: + - _result_cluster_copy_snapshot.changed + - _result_cluster_copy_snapshot.db_cluster_identifier == "{{ cluster_id }}" + - _result_cluster_copy_snapshot.source_db_cluster_snapshot_arn == "{{ _snapshot_arn }}" + - _result_cluster_copy_snapshot.db_cluster_snapshot_identifier == "{{ snapshot_id }}-copy" + - "'tags' in _result_cluster_copy_snapshot" + - _result_cluster_copy_snapshot.tags | length == 3 + - _result_cluster_copy_snapshot.tags["tag_one"] == "{{ snapshot_id }}-b One" + - _result_cluster_copy_snapshot.tags["Tag Two"] == "two {{ snapshot_id }}-b" + - _result_cluster_copy_snapshot.tags["tag_three"] == "{{ snapshot_id }}-b Three" + + - name: Copy a DB cluster snapshot (idempotence - check mode) + rds_cluster_snapshot: + id: "{{ snapshot_id }}-copy" + source_id: "{{ snapshot_id }}-b" + copy_tags: yes + wait: true + register: _result_cluster_copy_snapshot + check_mode: yes + + - assert: + that: + - not _result_cluster_copy_snapshot.changed + + - name: Copy a DB cluster snapshot (idempotence) + rds_cluster_snapshot: + id: "{{ snapshot_id }}-copy" + source_id: "{{ snapshot_id }}-b" + copy_tags: yes + wait: true + register: _result_cluster_copy_snapshot + + - assert: + that: + - not _result_cluster_copy_snapshot.changed + - _result_cluster_copy_snapshot.db_cluster_identifier == "{{ cluster_id }}" + - _result_cluster_copy_snapshot.source_db_cluster_snapshot_arn == "{{ _snapshot_arn }}" + - _result_cluster_copy_snapshot.db_cluster_snapshot_identifier == "{{ snapshot_id }}-copy" + - "'tags' in _result_cluster_copy_snapshot" + - _result_cluster_copy_snapshot.tags | length == 3 + - _result_cluster_copy_snapshot.tags["tag_one"] == "{{ snapshot_id }}-b One" + - _result_cluster_copy_snapshot.tags["Tag Two"] == "two {{ snapshot_id }}-b" + - _result_cluster_copy_snapshot.tags["tag_three"] == "{{ snapshot_id }}-b Three" + + always: + - name: Delete the existing DB cluster snapshots + rds_cluster_snapshot: + state: absent + db_cluster_snapshot_identifier: "{{ item }}" + register: _result_delete_snapshot + ignore_errors: true + loop: + - "{{ snapshot_id }}" + - "{{ snapshot_id }}-b" + - "{{ snapshot_id }}-copy" + + - name: Delete the existing DB cluster without creating a final snapshot + rds_cluster: + state: absent + cluster_id: "{{ item }}" + skip_final_snapshot: true + register: _result_delete_cluster + ignore_errors: true + loop: + - "{{ cluster_id }}" + - "{{ cluster_id }}-b" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/vars/main.yml 
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/vars/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/vars/main.yml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/vars/main.yml @@ -0,0 +1 @@ +--- diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_aurora/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_aurora/aliases new file mode 100644 index 000000000..bff4494c2 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_aurora/aliases @@ -0,0 +1,3 @@ +cloud/aws +time=30m +rds_instance diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_aurora/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_aurora/defaults/main.yml new file mode 100644 index 000000000..3647e4126 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_aurora/defaults/main.yml @@ -0,0 +1,9 @@ +instance_id: ansible-test-{{ tiny_prefix }} +modified_instance_id: '{{ instance_id }}-updated' +username: test +password: test12345678 +db_instance_class: db.t3.micro + +# For aurora tests +cluster_id: '{{ instance_id }}-cluster' +aurora_db_instance_class: db.t3.medium diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_aurora/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_aurora/tasks/main.yml new file mode 100644 index 000000000..522894afc --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_aurora/tasks/main.yml @@ -0,0 +1,122 @@ +- name: rds_instance / aurora integration tests + collections: + - community.aws + module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + + - name: Ensure the resource doesn't exist + rds_instance: + id: '{{ instance_id }}' + state: absent + skip_final_snapshot: true + register: result + + - name: Create minimal aurora cluster in default VPC and default subnet group + rds_cluster: + state: present + engine: aurora-postgresql + engine_mode: provisioned + cluster_id: '{{ cluster_id }}' + username: '{{ username }}' + password: '{{ password }}' + tags: + CreatedBy: rds_instance integration tests + register: my_cluster + + - assert: + that: + - my_cluster.engine_mode == "provisioned" + + - name: Create an Aurora instance + rds_instance: + id: '{{ instance_id }}' + cluster_id: '{{ cluster_id }}' + engine: aurora-postgresql + state: present + db_instance_class: '{{ aurora_db_instance_class }}' + tags: + CreatedBy: rds_instance integration tests + register: result + + - assert: + that: + - result.changed + - result.db_instance_identifier == '{{ instance_id }}' + - result.tags | length == 1 + + - name: Create an Aurora instance with both username/password and id - invalid + rds_instance: + id: '{{ instance_id }}-new' + cluster_id: '{{ cluster_id }}' + engine: aurora-postgresql + state: present + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ aurora_db_instance_class }}' + tags: + CreatedBy: rds_instance integration tests + register: result + ignore_errors: yes + + - assert: + that: + - result.failed + - "'Set master user password for the DB Cluster' in result.msg" + + - name: Attempt to modify password (a cluster-managed attribute) + rds_instance: + 
id: '{{ instance_id }}' + state: present + password: '{{ password }}' + force_update_password: true + apply_immediately: true + register: result + ignore_errors: yes + + - assert: + that: + - result.failed + - "'Modify master user password for the DB Cluster using the ModifyDbCluster\ + \ API' in result.msg" + - "'Please see rds_cluster' in result.msg" + + - name: Modify aurora instance port (a cluster-managed attribute) + rds_instance: + id: '{{ instance_id }}' + state: present + port: 1150 + register: result + ignore_errors: yes + + - assert: + that: + - not result.changed + - "'Modify database endpoint port number for the DB Cluster using the ModifyDbCluster\ + \ API' in result.msg" + - "'Please see rds_cluster' in result.msg" + + always: + + - name: Delete the instance + rds_instance: + id: '{{ item }}' + state: absent + skip_final_snapshot: true + wait: false + loop: + - '{{ instance_id }}' + - '{{ modified_instance_id }}' + ignore_errors: yes + + - name: Delete the cluster + rds_cluster: + cluster_id: '{{ cluster_id }}' + state: absent + skip_final_snapshot: true + wait: false + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/aliases new file mode 100644 index 000000000..777fbe40a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/aliases @@ -0,0 +1,3 @@ +cloud/aws +time=15m +rds_instance diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/defaults/main.yml new file mode 100644 index 000000000..fd3a29a79 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/defaults/main.yml @@ -0,0 +1,16 @@ +instance_id: ansible-test-{{ tiny_prefix }} +modified_instance_id: '{{ instance_id }}-updated' +username: test +password: test12345678 +db_instance_class: db.t3.micro +modified_db_instance_class: db.t3.medium +allocated_storage: 20 +io1_allocated_storage: 100 +io1_modified_allocated_storage: 110 +monitoring_interval: 60 +preferred_maintenance_window: mon:06:20-mon:06:50 +storage_type: io1 +iops: 1000 + +# For mariadb tests +mariadb_engine_version: 10.6.10 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/files/enhanced_monitoring_assume_policy.json b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/files/enhanced_monitoring_assume_policy.json new file mode 100644 index 000000000..29acf369f --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/files/enhanced_monitoring_assume_policy.json @@ -0,0 +1,13 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "", + "Effect": "Allow", + "Principal": { + "Service": "monitoring.rds.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] +} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/files/s3_integration_policy.json b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/files/s3_integration_policy.json new file mode 100644 index 000000000..71f07d07c --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/files/s3_integration_policy.json @@ -0,0 +1,16 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "", + "Effect": "Allow", + "Action": [ 
+ "s3:PutObject", + "s3:GetObject", + "s3:ListBucket", + "rds:*" + ], + "Resource": "*" + } + ] +} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/files/s3_integration_trust_policy.json b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/files/s3_integration_trust_policy.json new file mode 100644 index 000000000..9ea5ec3b4 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/files/s3_integration_trust_policy.json @@ -0,0 +1,13 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "", + "Effect": "Allow", + "Principal": { + "Service": "rds.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] +} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/tasks/main.yml new file mode 100644 index 000000000..024e0978a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/tasks/main.yml @@ -0,0 +1,205 @@ +- name: rds_instance / complex integration tests + collections: + - community.aws + module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + #TODO: test availability_zone and multi_az + - name: Ensure the resource doesn't exist + rds_instance: + id: '{{ instance_id }}' + state: absent + skip_final_snapshot: true + register: result + + - assert: + that: + - not result.changed + ignore_errors: yes + + - name: Create an enhanced monitoring role + iam_role: + assume_role_policy_document: "{{ lookup('file','files/enhanced_monitoring_assume_policy.json')\ + \ }}" + name: '{{ instance_id }}-role' + state: present + managed_policy: arn:aws:iam::aws:policy/service-role/AmazonRDSEnhancedMonitoringRole + register: enhanced_monitoring_role + + - name: Create a mariadb instance + rds_instance: + id: '{{ instance_id }}' + state: present + engine: mariadb + engine_version: '{{ mariadb_engine_version }}' + allow_major_version_upgrade: true + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ io1_allocated_storage }}' + storage_type: '{{ storage_type }}' + iops: '{{ iops }}' + register: result + + - assert: + that: + - result.changed + - result.db_instance_identifier == '{{ instance_id }}' + + - name: Add IAM roles to mariab (should fail - iam roles not supported for mariadb) + rds_instance: + id: '{{ instance_id }}' + state: present + engine: mariadb + engine_version: '{{ mariadb_engine_version }}' + allow_major_version_upgrade: true + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ io1_allocated_storage }}' + storage_type: '{{ storage_type }}' + iops: '{{ iops }}' + iam_roles: + - role_arn: my_role + feature_name: my_feature + register: result + ignore_errors: true + + - assert: + that: + - result.failed + - '"is not valid for adding IAM roles" in result.msg' + + # TODO: test modifying db_subnet_group_name, db_security_groups, db_parameter_group_name, option_group_name, + # monitoring_role_arn, monitoring_interval, domain, domain_iam_role_name, cloudwatch_logs_export_configuration + + # Test multiple modifications including enabling enhanced monitoring + + - name: Modify several attributes - check_mode + 
rds_instance: + id: '{{ instance_id }}' + state: present + allocated_storage: '{{ io1_modified_allocated_storage }}' + storage_type: '{{ storage_type }}' + db_instance_class: '{{ modified_db_instance_class }}' + backup_retention_period: 2 + preferred_backup_window: 05:00-06:00 + preferred_maintenance_window: '{{ preferred_maintenance_window }}' + auto_minor_version_upgrade: false + monitoring_interval: '{{ monitoring_interval }}' + monitoring_role_arn: '{{ enhanced_monitoring_role.arn }}' + iops: '{{ iops }}' + port: 1150 + max_allocated_storage: 150 + apply_immediately: true + register: result + check_mode: yes + + - assert: + that: + - result.changed + + - name: Modify several attributes + rds_instance: + id: '{{ instance_id }}' + state: present + allocated_storage: '{{ io1_modified_allocated_storage }}' + storage_type: '{{ storage_type }}' + db_instance_class: '{{ modified_db_instance_class }}' + backup_retention_period: 2 + preferred_backup_window: 05:00-06:00 + preferred_maintenance_window: '{{ preferred_maintenance_window }}' + auto_minor_version_upgrade: false + monitoring_interval: '{{ monitoring_interval }}' + monitoring_role_arn: '{{ enhanced_monitoring_role.arn }}' + iops: '{{ iops }}' + port: 1150 + max_allocated_storage: 150 + apply_immediately: true + register: result + + - assert: + that: + - result.changed + - '"allocated_storage" in result.pending_modified_values or result.allocated_storage + == io1_modified_allocated_storage' + - '"max_allocated_storage" in result.pending_modified_values or result.max_allocated_storage + == 150' + - '"port" in result.pending_modified_values or result.endpoint.port == 1150' + - '"db_instance_class" in result.pending_modified_values or result.db_instance_class + == modified_db_instance_class' + - '"monitoring_interval" in result.pending_modified_values or result.monitoring_interval + == monitoring_interval' + + - name: Idempotence modifying several pending attributes - check_mode + rds_instance: + id: '{{ instance_id }}' + state: present + allocated_storage: '{{ io1_modified_allocated_storage }}' + storage_type: '{{ storage_type }}' + db_instance_class: '{{ modified_db_instance_class }}' + backup_retention_period: 2 + preferred_backup_window: 05:00-06:00 + preferred_maintenance_window: '{{ preferred_maintenance_window }}' + auto_minor_version_upgrade: false + monitoring_interval: '{{ monitoring_interval }}' + monitoring_role_arn: '{{ enhanced_monitoring_role.arn }}' + iops: '{{ iops }}' + port: 1150 + max_allocated_storage: 150 + register: result + check_mode: yes + + - assert: + that: + - not result.changed + + - name: Idempotence modifying several pending attributes + rds_instance: + id: '{{ instance_id }}' + state: present + allocated_storage: '{{ io1_modified_allocated_storage }}' + storage_type: '{{ storage_type }}' + db_instance_class: '{{ modified_db_instance_class }}' + backup_retention_period: 2 + preferred_backup_window: 05:00-06:00 + preferred_maintenance_window: '{{ preferred_maintenance_window }}' + auto_minor_version_upgrade: false + monitoring_interval: '{{ monitoring_interval }}' + monitoring_role_arn: '{{ enhanced_monitoring_role.arn }}' + iops: '{{ iops }}' + port: 1150 + max_allocated_storage: 150 + register: result + + - assert: + that: + - not result.changed + - '"allocated_storage" in result.pending_modified_values or result.allocated_storage + == io1_modified_allocated_storage' + - '"max_allocated_storage" in result.pending_modified_values or result.max_allocated_storage + == 150' + - '"port" in 
result.pending_modified_values or result.endpoint.port == 1150' + - '"db_instance_class" in result.pending_modified_values or result.db_instance_class + == modified_db_instance_class' + + always: + - name: Delete the instance + rds_instance: + id: '{{ instance_id }}' + state: absent + skip_final_snapshot: true + wait: false + ignore_errors: yes + + - name: Remove enhanced monitoring role + iam_role: + assume_role_policy_document: "{{ lookup('file','files/enhanced_monitoring_assume_policy.json')\ + \ }}" + name: '{{ instance_id }}-role' + state: absent + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/aliases new file mode 100644 index 000000000..777fbe40a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/aliases @@ -0,0 +1,3 @@ +cloud/aws +time=15m +rds_instance diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/defaults/main.yml new file mode 100644 index 000000000..fafb0becc --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/defaults/main.yml @@ -0,0 +1,9 @@ +instance_id: ansible-test-{{ tiny_prefix }} +modified_instance_id: '{{ instance_id }}-updated' +username: test +password: test12345678 +db_instance_class: db.t3.micro +allocated_storage: 20 + +# For mariadb tests +mariadb_engine_version: 10.6.10 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/tasks/main.yml new file mode 100644 index 000000000..e13573416 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/tasks/main.yml @@ -0,0 +1,206 @@ +- name: rds_instance / modify integration tests + collections: + - community.aws + module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + - name: Ensure the resource doesn't exist + rds_instance: + id: '{{ instance_id }}' + state: absent + skip_final_snapshot: true + register: result + + - assert: + that: + - not result.changed + ignore_errors: yes + + - name: Create a mariadb instance + rds_instance: + id: '{{ instance_id }}' + state: present + engine: mariadb + engine_version: '{{ mariadb_engine_version }}' + allow_major_version_upgrade: true + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + tags: + Name: '{{ instance_id }}' + Created_by: Ansible rds_instance tests + register: result + + - assert: + that: + - result.changed + - result.db_instance_identifier == '{{ instance_id }}' + + - name: Create a DB instance with an invalid engine + rds_instance: + id: '{{ instance_id }}' + state: present + engine: thisisnotavalidengine + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + register: result + ignore_errors: true + + - assert: + that: + - result.failed + - '"value of engine must be one of" in result.msg' + + - name: Add IAM roles to mariadb (should fail - iam roles not supported for mariadb) + 
rds_instance: + id: '{{ instance_id }}' + state: present + engine: mariadb + engine_version: '{{ mariadb_engine_version }}' + allow_major_version_upgrade: true + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + iam_roles: + - role_arn: my_role + feature_name: my_feature + register: result + ignore_errors: true + + - assert: + that: + - result.failed + - '"is not valid for adding IAM roles" in result.msg' + + # TODO: test modifying db_subnet_group_name, db_security_groups, db_parameter_group_name, option_group_name, + # monitoring_role_arn, monitoring_interval, domain, domain_iam_role_name, cloudwatch_logs_export_configuration + + # ------------------------------------------------------------------------------------------ + - name: Modify the storage type without immediate application - check_mode + rds_instance: + id: '{{ instance_id }}' + state: present + storage_type: gp3 + apply_immediately: false + register: result + check_mode: yes + + - assert: + that: + - result.changed + - 'result.storage_type == "gp2"' + + - name: Modify the storage type without immediate application + rds_instance: + id: '{{ instance_id }}' + state: present + storage_type: gp3 + apply_immediately: false + register: result + + - assert: + that: + - result.changed + - 'result.pending_modified_values.storage_type == "gp3"' + - 'result.storage_type == "gp2"' + + - name: Modify the storage type without immediate application - idempotent + rds_instance: + id: '{{ instance_id }}' + state: present + storage_type: gp3 + apply_immediately: false + register: result + check_mode: yes + + - assert: + that: + - not result.changed + - 'result.pending_modified_values.storage_type == "gp3"' + - 'result.storage_type == "gp2"' + + - name: Modify the storage type back to gp2 without immediate application + rds_instance: + id: '{{ instance_id }}' + state: present + storage_type: gp2 + apply_immediately: false + register: result + + - assert: + that: + - result.changed + - 'result.pending_modified_values == {}' + - 'result.storage_type == "gp2"' + + - name: Modify the instance name without immediate application - check_mode + rds_instance: + id: '{{ instance_id }}' + state: present + new_id: '{{ modified_instance_id }}' + apply_immediately: false + register: result + check_mode: yes + + - assert: + that: + - result.changed + + - name: Modify the instance name without immediate application + rds_instance: + id: '{{ instance_id }}' + state: present + new_id: '{{ modified_instance_id }}' + apply_immediately: false + register: result + + - assert: + that: + - result.changed + - result.db_instance_identifier == "{{ instance_id }}" + + - name: Immediately apply the pending update - check_mode + rds_instance: + id: '{{ instance_id }}' + state: present + new_id: '{{ modified_instance_id }}' + apply_immediately: true + register: result + check_mode: yes + + - assert: + that: + - result.changed + + - name: Immediately apply the pending update + rds_instance: + id: '{{ instance_id }}' + state: present + new_id: '{{ modified_instance_id }}' + apply_immediately: true + register: result + + - assert: + that: + - result.changed + - result.db_instance_identifier == "{{ modified_instance_id }}" + + always: + - name: Delete the instance + rds_instance: + id: '{{ item }}' + state: absent + skip_final_snapshot: true + wait: false + ignore_errors: yes + loop: + - '{{ instance_id }}' + - '{{ modified_instance_id }}' diff --git 
a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_processor/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_processor/aliases new file mode 100644 index 000000000..777fbe40a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_processor/aliases @@ -0,0 +1,3 @@ +cloud/aws +time=15m +rds_instance diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_processor/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_processor/defaults/main.yml new file mode 100644 index 000000000..41d99538a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_processor/defaults/main.yml @@ -0,0 +1,12 @@ +instance_id: ansible-test-{{ tiny_prefix }} +username: test +password: test12345678 +db_instance_class: db.t3.micro +allocated_storage: 20 + +# For oracle tests +# Smallest instance that permits modification of the coreCount +oracle_ee_db_instance_class: db.r5.2xlarge +modified_processor_features: + coreCount: 4 + threadsPerCore: 2 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_processor/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_processor/tasks/main.yml new file mode 100644 index 000000000..260a37951 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_processor/tasks/main.yml @@ -0,0 +1,141 @@ +- name: rds_instance / processor integration tests + collections: + - community.aws + module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + + - name: Ensure the resource doesn't exist + rds_instance: + id: '{{ instance_id }}' + state: absent + skip_final_snapshot: true + register: result + + - assert: + that: + - not result.changed + ignore_errors: yes + + - name: Create an oracle-ee DB instance + rds_instance: + id: '{{ instance_id }}' + state: present + engine: oracle-ee + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ oracle_ee_db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + storage_encrypted: true + processor_features: {} + register: result + + - assert: + that: + - result.changed + + - name: Modify the processor features - check_mode + rds_instance: + id: '{{ instance_id }}' + state: present + engine: oracle-ee + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ oracle_ee_db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + storage_encrypted: true + processor_features: '{{ modified_processor_features }}' + apply_immediately: true + register: result + check_mode: true + + - assert: + that: + - result.changed + + - name: Modify the processor features + rds_instance: + id: '{{ instance_id }}' + state: present + engine: oracle-ee + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ oracle_ee_db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + storage_encrypted: true + processor_features: '{{ modified_processor_features }}' + apply_immediately: true + register: result + + - assert: + that: + - result.changed + - result.pending_modified_values.processor_features.coreCount == "{{ modified_processor_features.coreCount + }}" + - result.pending_modified_values.processor_features.threadsPerCore == "{{ 
modified_processor_features.threadsPerCore + }}" + + - name: Modify the processor features (idempotence) - check_mode + rds_instance: + id: '{{ instance_id }}' + state: present + engine: oracle-ee + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ oracle_ee_db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + storage_encrypted: true + processor_features: '{{ modified_processor_features }}' + apply_immediately: true + register: result + check_mode: true + + - assert: + that: + - not result.changed + + - name: Modify the processor features (idempotence) + rds_instance: + id: '{{ instance_id }}' + state: present + engine: oracle-ee + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ oracle_ee_db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + storage_encrypted: true + processor_features: '{{ modified_processor_features }}' + apply_immediately: true + register: result + + # Check if processor features either are pending or already changed + - assert: + that: + - not result.changed + - (result.pending_modified_values.processor_features.coreCount is defined and + result.pending_modified_values.processor_features.coreCount == "{{ modified_processor_features.coreCount + }}") or (result.processor_features.coreCount is defined and result.processor_features.coreCount + == "{{ modified_processor_features.coreCount }}") + - (result.pending_modified_values.processor_features.threadsPerCore is defined + and result.pending_modified_values.processor_features.threadsPerCore == "{{ + modified_processor_features.threadsPerCore }}") or (result.processor_features.threadsPerCore + is defined and result.processor_features.threadsPerCore == "{{ modified_processor_features.threadsPerCore + }}") + + always: + + - name: Delete the DB instance + rds_instance: + id: '{{ instance_id }}' + state: absent + skip_final_snapshot: true + wait: false + register: result + + - assert: + that: + - result.changed diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_replica/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_replica/aliases new file mode 100644 index 000000000..4bdc79e47 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_replica/aliases @@ -0,0 +1,3 @@ +cloud/aws +time=25m +rds_instance diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_replica/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_replica/defaults/main.yml new file mode 100644 index 000000000..b559f8c3f --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_replica/defaults/main.yml @@ -0,0 +1,6 @@ +instance_id: ansible-test-{{ tiny_prefix }} +modified_instance_id: '{{ instance_id }}-updated' +username: test +password: test12345678 +db_instance_class: db.t3.micro +allocated_storage: 20 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_replica/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_replica/tasks/main.yml new file mode 100644 index 000000000..c282f1f23 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_replica/tasks/main.yml @@ -0,0 +1,234 @@ +- name: rds_instance / replica integration tests + collections: + - community.aws + module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + 
security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + + ### Cross-region read replicas are supported, but not exercised in CI, + ### because the aws-terminator only terminates resources in one region, + ### so both regions below are set to the same value. + - name: set the two regions for the source DB and the replica + set_fact: + region_src: '{{ aws_region }}' + region_dest: '{{ aws_region }}' + + - name: Ensure the resource doesn't exist + rds_instance: + id: '{{ instance_id }}' + state: absent + skip_final_snapshot: true + region: '{{ region_src }}' + register: result + + - assert: + that: + - not result.changed + ignore_errors: yes + + - name: Create a source DB instance + rds_instance: + id: '{{ instance_id }}' + state: present + engine: mysql + backup_retention_period: 1 + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + region: '{{ region_src }}' + tags: + Name: '{{ instance_id }}' + Created_by: Ansible rds_instance tests + register: source_db + + - assert: + that: + - source_db.changed + - source_db.db_instance_identifier == '{{ instance_id }}' + + # ------------------------------------------------------------------------------------------ + + - name: Create a read replica in a different region - check_mode + rds_instance: + id: '{{ instance_id }}-replica' + state: present + source_db_instance_identifier: '{{ instance_id }}' + engine: mysql + username: '{{ username }}' + password: '{{ password }}' + read_replica: true + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + region: '{{ region_dest }}' + tags: + Name: '{{ instance_id }}' + Created_by: Ansible rds_instance tests + wait: yes + register: result + check_mode: yes + + - assert: + that: + - result.changed + + - name: Create a read replica in a different region + rds_instance: + id: '{{ instance_id }}-replica' + state: present + source_db_instance_identifier: '{{ instance_id }}' + engine: mysql + username: '{{ username }}' + password: '{{ password }}' + read_replica: true + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + region: '{{ region_dest }}' + tags: + Name: '{{ instance_id }}' + Created_by: Ansible rds_instance tests + wait: yes + register: result + + - assert: + that: + - result.changed + - result.db_instance_identifier == '{{ instance_id }}-replica' + - result.tags | length == 2 + - result.tags.Name == '{{ instance_id }}' + - result.tags.Created_by == 'Ansible rds_instance tests' + + - name: Test idempotence with a read replica - check_mode + rds_instance: + id: '{{ instance_id }}-replica' + state: present + source_db_instance_identifier: '{{ instance_id }}' + engine: mysql + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + region: '{{ region_dest }}' + tags: + Name: '{{ instance_id }}' + Created_by: Ansible rds_instance tests + register: result + check_mode: yes + + - assert: + that: + - not result.changed + + - name: Test idempotence with a read replica + rds_instance: + id: '{{ instance_id }}-replica' + state: present + source_db_instance_identifier: '{{ instance_id }}' + engine: mysql + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + region: '{{ region_dest }}' + tags: + Name: '{{ instance_id }}' + Created_by: Ansible rds_instance tests + register: result + + - assert: + that: + - not result.changed + + - name: Test idempotence with read_replica=True + rds_instance: + id: '{{ 
instance_id }}-replica' + state: present + read_replica: true + source_db_instance_identifier: '{{ instance_id }}' + engine: mysql + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + region: '{{ region_dest }}' + tags: + Name: '{{ instance_id }}' + Created_by: Ansible rds_instance tests + register: result + + - assert: + that: + - not result.changed + + # ------------------------------------------------------------------------------------------ + + - name: Promote the read replica - check_mode + rds_instance: + id: '{{ instance_id }}-replica' + state: present + read_replica: false + region: '{{ region_dest }}' + register: result + check_mode: yes + + - assert: + that: + - result.changed + + - name: Promote the read replica + rds_instance: + id: '{{ instance_id }}-replica' + state: present + read_replica: false + region: '{{ region_dest }}' + register: result + + - assert: + that: + - result.changed + + - name: Test idempotence - check_mode + rds_instance: + id: '{{ instance_id }}-replica' + state: present + read_replica: false + region: '{{ region_dest }}' + register: result + check_mode: yes + + - assert: + that: + - not result.changed + + - name: Test idempotence + rds_instance: + id: '{{ instance_id }}-replica' + state: present + read_replica: false + region: '{{ region_dest }}' + register: result + + - assert: + that: + - not result.changed + + always: + + - name: Remove the DB instance + rds_instance: + id: '{{ instance_id }}' + state: absent + skip_final_snapshot: true + region: '{{ region_src }}' + wait: false + ignore_errors: yes + + - name: Remove the DB replica + rds_instance: + id: '{{ instance_id }}-replica' + state: absent + skip_final_snapshot: true + region: '{{ region_dest }}' + wait: false + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_restore/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_restore/aliases new file mode 100644 index 000000000..b9702d285 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_restore/aliases @@ -0,0 +1,3 @@ +cloud/aws +time=20m +rds_instance diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_restore/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_restore/defaults/main.yml new file mode 100644 index 000000000..5540ffb89 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_restore/defaults/main.yml @@ -0,0 +1,5 @@ +instance_id: ansible-test-{{ tiny_prefix }} +username: test +password: test12345678 +db_instance_class: db.t3.micro +allocated_storage: 20 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_restore/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_restore/tasks/main.yml new file mode 100644 index 000000000..c872db880 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_restore/tasks/main.yml @@ -0,0 +1,131 @@ +- name: rds_instance / restore integration tests + collections: + - community.aws + module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + # TODO: snapshot, s3 + + - name: Ensure the resource doesn't exist + rds_instance: + id: '{{ 
instance_id }}' + state: absent + skip_final_snapshot: true + register: result + + - assert: + that: + - not result.changed + ignore_errors: yes + + - name: Create a source DB instance + rds_instance: + id: '{{ instance_id }}-s' + state: present + engine: mysql + backup_retention_period: 1 + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + register: source_db + + - assert: + that: + - source_db.changed + - source_db.db_instance_identifier == '{{ instance_id }}-s' + + - name: Create a point in time DB instance - check_mode + rds_instance: + id: '{{ instance_id }}' + state: present + source_db_instance_identifier: '{{ instance_id }}-s' + creation_source: instance + engine: mysql + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + use_latest_restorable_time: true + register: result + check_mode: yes + + - assert: + that: result.changed + + - name: Create a point in time DB instance + rds_instance: + id: '{{ instance_id }}' + state: present + source_db_instance_identifier: '{{ instance_id }}-s' + creation_source: instance + engine: mysql + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + use_latest_restorable_time: true + register: result + + - assert: + that: result.changed + + - name: Create a point in time DB instance (idempotence) - check_mode + rds_instance: + id: '{{ instance_id }}' + state: present + source_db_instance_identifier: '{{ instance_id }}-s' + creation_source: instance + engine: mysql + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + restore_time: '{{ result.latest_restorable_time }}' + register: result + check_mode: yes + + - assert: + that: + - not result.changed + + - name: Create a point in time DB instance (idempotence) + rds_instance: + id: '{{ instance_id }}' + state: present + source_db_instance_identifier: '{{ instance_id }}-s' + creation_source: instance + engine: mysql + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + restore_time: '{{ result.latest_restorable_time }}' + register: result + + - assert: + that: + - not result.changed + - result.db_instance_identifier == '{{ instance_id }}' + + always: + + - name: Remove the DB instance + rds_instance: + id: '{{ instance_id }}-s' + state: absent + skip_final_snapshot: true + wait: false + ignore_errors: yes + + - name: Remove the point in time restored DB + rds_instance: + id: '{{ instance_id }}' + state: absent + skip_final_snapshot: true + wait: false + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_sgroups/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_sgroups/aliases new file mode 100644 index 000000000..777fbe40a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_sgroups/aliases @@ -0,0 +1,3 @@ +cloud/aws +time=15m +rds_instance diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_sgroups/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_sgroups/defaults/main.yml new file mode 100644 index 000000000..5540ffb89 --- 
/dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_sgroups/defaults/main.yml @@ -0,0 +1,5 @@ +instance_id: ansible-test-{{ tiny_prefix }} +username: test +password: test12345678 +db_instance_class: db.t3.micro +allocated_storage: 20 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_sgroups/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_sgroups/tasks/main.yml new file mode 100644 index 000000000..761f71d2a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_sgroups/tasks/main.yml @@ -0,0 +1,332 @@ +- name: rds_instance / sgroups integration tests + collections: + - community.aws + module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + + - name: create a VPC + ec2_vpc_net: + name: '{{ resource_prefix }}-vpc' + state: present + cidr_block: 10.122.122.128/26 + tags: + Name: '{{ resource_prefix }}-vpc' + Description: created by rds_instance integration tests + register: vpc_result + + - name: create subnets + ec2_vpc_subnet: + cidr: '{{ item.cidr }}' + az: '{{ item.zone }}' + vpc_id: '{{ vpc_result.vpc.id }}' + tags: + Name: '{{ resource_prefix }}-subnet' + Description: created by rds_instance integration tests + state: present + register: subnets_result + loop: + - {cidr: 10.122.122.128/28, zone: '{{ aws_region }}a'} + - {cidr: 10.122.122.144/28, zone: '{{ aws_region }}b'} + - {cidr: 10.122.122.160/28, zone: '{{ aws_region }}c'} + + - name: Create security groups + ec2_group: + name: '{{ item }}' + description: created by rds_instance integration tests + state: present + register: sgs_result + loop: + - '{{ resource_prefix }}-sg-1' + - '{{ resource_prefix }}-sg-2' + - '{{ resource_prefix }}-sg-3' + + - name: Ensure the resource doesn't exist + rds_instance: + id: '{{ instance_id }}' + state: absent + skip_final_snapshot: true + register: result + + - assert: + that: + - not result.changed + ignore_errors: yes + + # ------------------------------------------------------------------------------------------ + + - name: Create a DB instance in the VPC with two security groups - check_mode + rds_instance: + id: '{{ instance_id }}' + state: present + engine: mariadb + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + vpc_security_group_ids: + - '{{ sgs_result.results.0.group_id }}' + - '{{ sgs_result.results.1.group_id }}' + register: result + check_mode: yes + + - assert: + that: + - result.changed + + - name: Create a DB instance in the VPC with two security groups + rds_instance: + id: '{{ instance_id }}' + state: present + engine: mariadb + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + vpc_security_group_ids: + - '{{ sgs_result.results.0.group_id }}' + - '{{ sgs_result.results.1.group_id }}' + register: result + + - assert: + that: + - result.changed + - result.db_instance_identifier == '{{ instance_id }}' + - result.vpc_security_groups | selectattr('status', 'in', ['active', 'adding']) + | list | length == 2 + + - name: Create a DB instance in the VPC with two security groups (idempotence) - + check_mode + rds_instance: + id: '{{ instance_id }}' + state: present + engine: mariadb + username: '{{ 
username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + vpc_security_group_ids: + - '{{ sgs_result.results.0.group_id }}' + - '{{ sgs_result.results.1.group_id }}' + register: result + check_mode: yes + + - assert: + that: + - not result.changed + + - name: Create a DB instance in the VPC with two security groups (idempotence) + rds_instance: + id: '{{ instance_id }}' + state: present + engine: mariadb + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + vpc_security_group_ids: + - '{{ sgs_result.results.0.group_id }}' + - '{{ sgs_result.results.1.group_id }}' + register: result + + - assert: + that: + - not result.changed + - result.db_instance_identifier == '{{ instance_id }}' + - result.vpc_security_groups | selectattr('status', 'in', ['active', 'adding']) + | list | length == 2 + + # ------------------------------------------------------------------------------------------ + + - name: Add a new security group without purge - check_mode + rds_instance: + id: '{{ instance_id }}' + state: present + vpc_security_group_ids: + - '{{ sgs_result.results.2.group_id }}' + apply_immediately: true + purge_security_groups: false + check_mode: true + register: result + + - assert: + that: + - result.changed + - result.db_instance_identifier == '{{ instance_id }}' + + - name: Add a new security group without purge + rds_instance: + id: '{{ instance_id }}' + state: present + vpc_security_group_ids: + - '{{ sgs_result.results.2.group_id }}' + apply_immediately: true + purge_security_groups: false + register: result + + - assert: + that: + - result.changed + - result.db_instance_identifier == '{{ instance_id }}' + - result.vpc_security_groups | selectattr('status', 'in', ['active', 'adding']) + | list | length == 3 + + - name: Add a new security group without purge (idempotence) - check_mode + rds_instance: + id: '{{ instance_id }}' + state: present + vpc_security_group_ids: + - '{{ sgs_result.results.2.group_id }}' + apply_immediately: true + purge_security_groups: false + register: result + check_mode: yes + + - assert: + that: + - not result.changed + - result.db_instance_identifier == '{{ instance_id }}' + + - name: Add a new security group without purge (idempotence) + rds_instance: + id: '{{ instance_id }}' + state: present + vpc_security_group_ids: + - '{{ sgs_result.results.2.group_id }}' + apply_immediately: true + purge_security_groups: false + register: result + + - assert: + that: + - not result.changed + - result.db_instance_identifier == '{{ instance_id }}' + - result.vpc_security_groups | selectattr('status', 'in', ['active', 'adding']) + | list | length == 3 + + # ------------------------------------------------------------------------------------------ + + - name: Add a security group with purge - check_mode + rds_instance: + id: '{{ instance_id }}' + state: present + vpc_security_group_ids: + - '{{ sgs_result.results.2.group_id }}' + apply_immediately: true + register: result + check_mode: yes + + - assert: + that: + - result.changed + + - name: Add a security group with purge + rds_instance: + id: '{{ instance_id }}' + state: present + vpc_security_group_ids: + - '{{ sgs_result.results.2.group_id }}' + apply_immediately: true + register: result + + - assert: + that: + - result.changed + - result.db_instance_identifier == '{{ instance_id }}' + - result.vpc_security_groups | selectattr('status', 'in', 
['active', 'adding']) + | list | length == 1 + - result.vpc_security_groups | selectattr('status', 'equalto', 'removing') | + list | length == 2 + + - name: Add a security group with purge (idempotence) - check_mode + rds_instance: + id: '{{ instance_id }}' + state: present + vpc_security_group_ids: + - '{{ sgs_result.results.2.group_id }}' + apply_immediately: true + register: result + check_mode: yes + + - assert: + that: + - not result.changed + + - name: Add a security group with purge (idempotence) + rds_instance: + id: '{{ instance_id }}' + state: present + vpc_security_group_ids: + - '{{ sgs_result.results.2.group_id }}' + apply_immediately: true + register: result + + - assert: + that: + - not result.changed + - result.db_instance_identifier == '{{ instance_id }}' + - result.vpc_security_groups | selectattr('status', 'in', ['active', 'adding']) + | list | length == 1 + + always: + + - name: Ensure the resource doesn't exist + rds_instance: + id: '{{ instance_id }}' + state: absent + skip_final_snapshot: true + register: result + ignore_errors: yes + + - name: Remove security groups + ec2_group: + name: '{{ item }}' + description: created by rds_instance integration tests + state: absent + register: sgs_result + loop: + - '{{ resource_prefix }}-sg-1' + - '{{ resource_prefix }}-sg-2' + - '{{ resource_prefix }}-sg-3' + ignore_errors: yes + retries: 30 + until: sgs_result is not failed + delay: 10 + + - name: remove subnets + ec2_vpc_subnet: + cidr: '{{ item.cidr }}' + az: '{{ item.zone }}' + vpc_id: '{{ vpc_result.vpc.id }}' + tags: + Name: '{{ resource_prefix }}-subnet' + Description: created by rds_instance integration tests + state: absent + register: subnets + ignore_errors: yes + retries: 30 + until: subnets is not failed + delay: 10 + loop: + - {cidr: 10.122.122.128/28, zone: '{{ aws_region }}a'} + - {cidr: 10.122.122.144/28, zone: '{{ aws_region }}b'} + - {cidr: 10.122.122.160/28, zone: '{{ aws_region }}c'} + - {cidr: 10.122.122.176/28, zone: '{{ aws_region }}d'} + + - name: Delete VPC + ec2_vpc_net: + name: '{{ resource_prefix }}-vpc' + state: absent + cidr_block: 10.122.122.128/26 + tags: + Name: '{{ resource_prefix }}-vpc' + Description: created by rds_instance integration tests + register: vpc_result + ignore_errors: yes + retries: 30 + until: vpc_result is not failed + delay: 10 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/aliases new file mode 100644 index 000000000..df5ff67a2 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/aliases @@ -0,0 +1,5 @@ +time=25m + +cloud/aws + +rds_snapshot_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/defaults/main.yml new file mode 100644 index 000000000..b480137fc --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/defaults/main.yml @@ -0,0 +1,14 @@ +--- +# defaults file for rds_instance_snapshot + +# Create RDS instance +instance_id: '{{ resource_prefix }}-instance' +username: 'testrdsusername' +password: "{{ lookup('password', '/dev/null') }}" +db_instance_class: db.t3.micro +allocated_storage: 10 +engine: 'mariadb' +mariadb_engine_version: 10.6.10 + +# Create snapshot +snapshot_id: '{{ instance_id }}-snapshot' diff --git 
a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/tasks/main.yml new file mode 100644 index 000000000..c639291a5 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/tasks/main.yml @@ -0,0 +1,505 @@ +--- +- module_defaults: + group/aws: + region: "{{ aws_region }}" + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + collections: + - community.aws + - amazon.aws + + block: + - name: Create a source mariadb instance + rds_instance: + id: "{{ instance_id }}" + state: present + engine: "{{ engine }}" + engine_version: "{{ mariadb_engine_version }}" + allow_major_version_upgrade: true + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + register: _result_create_instance + + - assert: + that: + - _result_create_instance.changed + - _result_create_instance.db_instance_identifier == "{{ instance_id }}" + + - name: Get all RDS snapshots for the existing instance + rds_snapshot_info: + db_instance_identifier: "{{ instance_id }}" + register: _result_instance_snapshot_info + + - assert: + that: + - _result_instance_snapshot_info is successful + - _result_instance_snapshot_info.snapshots | length == 1 + + - name: Take a snapshot of the existing RDS instance (CHECK_MODE) + rds_instance_snapshot: + state: present + db_instance_identifier: "{{ instance_id }}" + db_snapshot_identifier: "{{ snapshot_id }}" + check_mode: yes + register: _result_instance_snapshot + + - assert: + that: + - _result_instance_snapshot.changed + + - name: Take a snapshot of the existing RDS instance + rds_instance_snapshot: + state: present + db_instance_identifier: "{{ instance_id }}" + db_snapshot_identifier: "{{ snapshot_id }}" + wait: true + register: _result_instance_snapshot + + - assert: + that: + - _result_instance_snapshot.changed + - "'availability_zone' in _result_instance_snapshot" + - "'instance_create_time' in _result_instance_snapshot" + - "'db_instance_identifier' in _result_instance_snapshot" + - _result_instance_snapshot.db_instance_identifier == "{{ instance_id }}" + - "'db_snapshot_identifier' in _result_instance_snapshot" + - _result_instance_snapshot.db_snapshot_identifier == "{{ snapshot_id }}" + - "'db_snapshot_arn' in _result_instance_snapshot" + - "'dbi_resource_id' in _result_instance_snapshot" + - "'encrypted' in _result_instance_snapshot" + - "'engine' in _result_instance_snapshot" + - _result_instance_snapshot.engine == "{{ engine }}" + - "'engine_version' in _result_instance_snapshot" + - _result_instance_snapshot.engine_version == "{{ mariadb_engine_version }}" + - "'iam_database_authentication_enabled' in _result_instance_snapshot" + - "'license_model' in _result_instance_snapshot" + - "'master_username' in _result_instance_snapshot" + - _result_instance_snapshot.master_username == "{{ username }}" + - "'snapshot_create_time' in _result_instance_snapshot" + - "'snapshot_type' in _result_instance_snapshot" + - "'status' in _result_instance_snapshot" + - _result_instance_snapshot.status == "available" + - "'snapshot_type' in _result_instance_snapshot" + - _result_instance_snapshot.snapshot_type == "manual" + - "'status' in _result_instance_snapshot" + - "'storage_type' in _result_instance_snapshot" + - _result_instance_snapshot.storage_type == 
"gp2" + - "'tags' in _result_instance_snapshot" + - "'vpc_id' in _result_instance_snapshot" + + - name: Take a snapshot of the existing RDS instance (CHECK_MODE - IDEMPOTENCE) + rds_instance_snapshot: + state: present + db_instance_identifier: "{{ instance_id }}" + db_snapshot_identifier: "{{ snapshot_id }}" + check_mode: yes + register: _result_instance_snapshot + + - assert: + that: + - not _result_instance_snapshot.changed + + - name: Take a snapshot of the existing RDS instance (IDEMPOTENCE) + rds_instance_snapshot: + state: present + db_instance_identifier: "{{ instance_id }}" + db_snapshot_identifier: "{{ snapshot_id }}" + wait: true + register: _result_instance_snapshot + + - assert: + that: + - not _result_instance_snapshot.changed + - "'availability_zone' in _result_instance_snapshot" + - "'instance_create_time' in _result_instance_snapshot" + - "'db_instance_identifier' in _result_instance_snapshot" + - _result_instance_snapshot.db_instance_identifier == "{{ instance_id }}" + - "'db_snapshot_identifier' in _result_instance_snapshot" + - _result_instance_snapshot.db_snapshot_identifier == "{{ snapshot_id }}" + - "'db_snapshot_arn' in _result_instance_snapshot" + - "'dbi_resource_id' in _result_instance_snapshot" + - "'encrypted' in _result_instance_snapshot" + - "'engine' in _result_instance_snapshot" + - _result_instance_snapshot.engine == "{{ engine }}" + - "'engine_version' in _result_instance_snapshot" + - _result_instance_snapshot.engine_version == "{{ mariadb_engine_version }}" + - "'iam_database_authentication_enabled' in _result_instance_snapshot" + - "'license_model' in _result_instance_snapshot" + - "'master_username' in _result_instance_snapshot" + - _result_instance_snapshot.master_username == "{{ username }}" + - "'snapshot_create_time' in _result_instance_snapshot" + - "'snapshot_type' in _result_instance_snapshot" + - "'status' in _result_instance_snapshot" + - _result_instance_snapshot.status == "available" + - "'snapshot_type' in _result_instance_snapshot" + - _result_instance_snapshot.snapshot_type == "manual" + - "'status' in _result_instance_snapshot" + - "'storage_type' in _result_instance_snapshot" + - _result_instance_snapshot.storage_type == "gp2" + - "'tags' in _result_instance_snapshot" + - "'vpc_id' in _result_instance_snapshot" + + - name: Get information about the existing DB snapshot + rds_snapshot_info: + db_snapshot_identifier: "{{ snapshot_id }}" + register: _result_instance_snapshot_info + + - assert: + that: + - _result_instance_snapshot_info is successful + - _result_instance_snapshot_info.snapshots[0].db_instance_identifier == "{{ instance_id }}" + - _result_instance_snapshot_info.snapshots[0].db_snapshot_identifier == "{{ snapshot_id }}" + + - name: Take another snapshot of the existing RDS instance + rds_instance_snapshot: + state: present + db_instance_identifier: "{{ instance_id }}" + db_snapshot_identifier: "{{ snapshot_id }}-b" + wait: true + register: _result_instance_snapshot + + - assert: + that: + - _result_instance_snapshot.changed + - "'availability_zone' in _result_instance_snapshot" + - "'instance_create_time' in _result_instance_snapshot" + - "'db_instance_identifier' in _result_instance_snapshot" + - _result_instance_snapshot.db_instance_identifier == "{{ instance_id }}" + - "'db_snapshot_identifier' in _result_instance_snapshot" + - _result_instance_snapshot.db_snapshot_identifier == "{{ snapshot_id }}-b" + - "'db_snapshot_arn' in _result_instance_snapshot" + - "'dbi_resource_id' in _result_instance_snapshot" + - "'encrypted' 
in _result_instance_snapshot" + - "'engine' in _result_instance_snapshot" + - _result_instance_snapshot.engine == "{{ engine }}" + - "'engine_version' in _result_instance_snapshot" + - _result_instance_snapshot.engine_version == "{{ mariadb_engine_version }}" + - "'iam_database_authentication_enabled' in _result_instance_snapshot" + - "'license_model' in _result_instance_snapshot" + - "'master_username' in _result_instance_snapshot" + - _result_instance_snapshot.master_username == "{{ username }}" + - "'snapshot_create_time' in _result_instance_snapshot" + - "'snapshot_type' in _result_instance_snapshot" + - "'status' in _result_instance_snapshot" + - _result_instance_snapshot.status == "available" + - "'snapshot_type' in _result_instance_snapshot" + - _result_instance_snapshot.snapshot_type == "manual" + - "'status' in _result_instance_snapshot" + - "'storage_type' in _result_instance_snapshot" + - _result_instance_snapshot.storage_type == "gp2" + - "'tags' in _result_instance_snapshot" + - "'vpc_id' in _result_instance_snapshot" + + - name: Get all snapshots for the existing RDS instance + rds_snapshot_info: + db_instance_identifier: "{{ instance_id }}" + register: _result_instance_snapshot_info + + - assert: + that: + - _result_instance_snapshot_info is successful + #- _result_instance_snapshot_info.cluster_snapshots | length == 3 + + - name: Delete existing DB instance snapshot (CHECK_MODE) + rds_instance_snapshot: + state: absent + db_snapshot_identifier: "{{ snapshot_id }}-b" + register: _result_delete_snapshot + check_mode: yes + + - assert: + that: + - _result_delete_snapshot.changed + + - name: Delete the existing DB instance snapshot + rds_instance_snapshot: + state: absent + db_snapshot_identifier: "{{ snapshot_id }}-b" + register: _result_delete_snapshot + + - assert: + that: + - _result_delete_snapshot.changed + + - name: Delete existing DB instance snapshot (CHECK_MODE - IDEMPOTENCE) + rds_instance_snapshot: + state: absent + db_snapshot_identifier: "{{ snapshot_id }}-b" + register: _result_delete_snapshot + check_mode: yes + + - assert: + that: + - not _result_delete_snapshot.changed + + - name: Delete the existing DB instance snapshot (IDEMPOTENCE) + rds_instance_snapshot: + state: absent + db_snapshot_identifier: "{{ snapshot_id }}-b" + register: _result_delete_snapshot + + - assert: + that: + - not _result_delete_snapshot.changed + + - name: Take another snapshot of the existing RDS instance and assign tags + rds_instance_snapshot: + state: present + db_instance_identifier: "{{ instance_id }}" + db_snapshot_identifier: "{{ snapshot_id }}-b" + wait: true + tags: + tag_one: '{{ snapshot_id }}-b One' + "Tag Two": 'two {{ snapshot_id }}-b' + register: _result_instance_snapshot + + - assert: + that: + - _result_instance_snapshot.changed + - "'availability_zone' in _result_instance_snapshot" + - "'instance_create_time' in _result_instance_snapshot" + - "'db_instance_identifier' in _result_instance_snapshot" + - _result_instance_snapshot.db_instance_identifier == "{{ instance_id }}" + - "'db_snapshot_identifier' in _result_instance_snapshot" + - _result_instance_snapshot.db_snapshot_identifier == "{{ snapshot_id }}-b" + - "'db_snapshot_arn' in _result_instance_snapshot" + - "'dbi_resource_id' in _result_instance_snapshot" + - "'encrypted' in _result_instance_snapshot" + - "'engine' in _result_instance_snapshot" + - _result_instance_snapshot.engine == "{{ engine }}" + - "'engine_version' in _result_instance_snapshot" + - _result_instance_snapshot.engine_version == "{{ 
mariadb_engine_version }}" + - "'iam_database_authentication_enabled' in _result_instance_snapshot" + - "'license_model' in _result_instance_snapshot" + - "'master_username' in _result_instance_snapshot" + - _result_instance_snapshot.master_username == "{{ username }}" + - "'snapshot_create_time' in _result_instance_snapshot" + - "'snapshot_type' in _result_instance_snapshot" + - _result_instance_snapshot.snapshot_type == "manual" + - "'status' in _result_instance_snapshot" + - _result_instance_snapshot.status == "available" + - "'storage_type' in _result_instance_snapshot" + - _result_instance_snapshot.storage_type == "gp2" + - "'tags' in _result_instance_snapshot" + - _result_instance_snapshot.tags | length == 2 + - _result_instance_snapshot.tags["tag_one"] == "{{ snapshot_id }}-b One" + - _result_instance_snapshot.tags["Tag Two"] == "two {{ snapshot_id }}-b" + - "'vpc_id' in _result_instance_snapshot" + + - name: Attempt to take another snapshot of the existing RDS instance and assign tags (idempotence) + rds_instance_snapshot: + state: present + db_instance_identifier: "{{ instance_id }}" + db_snapshot_identifier: "{{ snapshot_id }}-b" + wait: true + tags: + tag_one: '{{ snapshot_id }}-b One' + "Tag Two": 'two {{ snapshot_id }}-b' + register: _result_instance_snapshot + + - assert: + that: + - not _result_instance_snapshot.changed + + - name: Take another snapshot of the existing RDS instance and update tags + rds_instance_snapshot: + state: present + db_instance_identifier: "{{ instance_id }}" + db_snapshot_identifier: "{{ snapshot_id }}-b" + tags: + tag_three: '{{ snapshot_id }}-b Three' + "Tag Two": 'two {{ snapshot_id }}-b' + register: _result_instance_snapshot + + - assert: + that: + - _result_instance_snapshot.changed + - "'availability_zone' in _result_instance_snapshot" + - "'instance_create_time' in _result_instance_snapshot" + - "'db_instance_identifier' in _result_instance_snapshot" + - _result_instance_snapshot.db_instance_identifier == "{{ instance_id }}" + - "'db_snapshot_identifier' in _result_instance_snapshot" + - _result_instance_snapshot.db_snapshot_identifier == "{{ snapshot_id }}-b" + - "'db_snapshot_arn' in _result_instance_snapshot" + - "'dbi_resource_id' in _result_instance_snapshot" + - "'encrypted' in _result_instance_snapshot" + - "'engine' in _result_instance_snapshot" + - _result_instance_snapshot.engine == "{{ engine }}" + - "'engine_version' in _result_instance_snapshot" + - _result_instance_snapshot.engine_version == "{{ mariadb_engine_version }}" + - "'iam_database_authentication_enabled' in _result_instance_snapshot" + - "'license_model' in _result_instance_snapshot" + - "'master_username' in _result_instance_snapshot" + - _result_instance_snapshot.master_username == "{{ username }}" + - "'snapshot_create_time' in _result_instance_snapshot" + - "'snapshot_type' in _result_instance_snapshot" + - _result_instance_snapshot.snapshot_type == "manual" + - "'status' in _result_instance_snapshot" + - _result_instance_snapshot.status == "available" + - "'storage_type' in _result_instance_snapshot" + - _result_instance_snapshot.storage_type == "gp2" + - "'tags' in _result_instance_snapshot" + - _result_instance_snapshot.tags | length == 2 + - _result_instance_snapshot.tags["tag_three"] == "{{ snapshot_id }}-b Three" + - _result_instance_snapshot.tags["Tag Two"] == "two {{
snapshot_id }}-b" + - "'vpc_id' in _result_instance_snapshot" + + - name: Take another snapshot of the existing RDS instance and update tags without purge + rds_instance_snapshot: + state: present + db_instance_identifier: "{{ instance_id }}" + db_snapshot_identifier: "{{ snapshot_id }}-b" + purge_tags: no + tags: + tag_one: '{{ snapshot_id }}-b One' + register: _result_instance_snapshot + + - assert: + that: + - _result_instance_snapshot.changed + - "'availability_zone' in _result_instance_snapshot" + - "'instance_create_time' in _result_instance_snapshot" + - "'db_instance_identifier' in _result_instance_snapshot" + - _result_instance_snapshot.db_instance_identifier == "{{ instance_id }}" + - "'db_snapshot_identifier' in _result_instance_snapshot" + - _result_instance_snapshot.db_snapshot_identifier == "{{ snapshot_id }}-b" + - "'db_snapshot_arn' in _result_instance_snapshot" + - "'dbi_resource_id' in _result_instance_snapshot" + - "'encrypted' in _result_instance_snapshot" + - "'engine' in _result_instance_snapshot" + - _result_instance_snapshot.engine == "{{ engine }}" + - "'engine_version' in _result_instance_snapshot" + - _result_instance_snapshot.engine_version == "{{ mariadb_engine_version }}" + - "'iam_database_authentication_enabled' in _result_instance_snapshot" + - "'license_model' in _result_instance_snapshot" + - "'master_username' in _result_instance_snapshot" + - _result_instance_snapshot.master_username == "{{ username }}" + - "'snapshot_create_time' in _result_instance_snapshot" + - "'snapshot_type' in _result_instance_snapshot" + - _result_instance_snapshot.snapshot_type == "manual" + - "'status' in _result_instance_snapshot" + - _result_instance_snapshot.status == "available" + - "'storage_type' in _result_instance_snapshot" + - _result_instance_snapshot.storage_type == "gp2" + - "'tags' in _result_instance_snapshot" + - _result_instance_snapshot.tags | length == 3 + - _result_instance_snapshot.tags["tag_one"] == "{{ snapshot_id }}-b One" + - _result_instance_snapshot.tags["Tag Two"] == "two {{ snapshot_id }}-b" + - _result_instance_snapshot.tags["tag_three"] == "{{ snapshot_id }}-b Three" + - "'vpc_id' in _result_instance_snapshot" + + - name: Take another snapshot of the existing RDS instance and do not specify any tag to ensure previous tags are not removed + rds_instance_snapshot: + state: present + db_instance_identifier: "{{ instance_id }}" + db_snapshot_identifier: "{{ snapshot_id }}-b" + register: _result_instance_snapshot + + - assert: + that: + - not _result_instance_snapshot.changed + + # ------------------------------------------------------------------------------------------ + # Test copying a snapshot + ### Note - copying a snapshot from a different region is supported, but not in CI runs, + ### because the aws-terminator only terminates resources in one region.
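+ ### For illustration only: a cross-region copy of this snapshot would look roughly like the ### commented-out task below. It stays disabled here because, as noted above, it cannot run in CI. ### When copying across regions the source snapshot is referenced by its full ARN; the account ID ### and regions below are placeholders, and the 'source_region' option name should be verified ### against the rds_instance_snapshot module documentation before use. + # - name: Copy a snapshot from another region (illustrative sketch, not executed) + # rds_instance_snapshot: + # id: "{{ snapshot_id }}-cross-region-copy" + # source_id: "arn:aws:rds:us-east-1:123456789012:snapshot:{{ snapshot_id }}-b" # placeholder ARN + # source_region: us-east-1 # assumed option name + # copy_tags: yes + # wait: true + # register: _result_cross_region_copy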
+ + - set_fact: + _snapshot_arn: "{{ _result_instance_snapshot.db_snapshot_arn }}" + + - name: Copy a snapshot (check mode) + rds_instance_snapshot: + id: "{{ snapshot_id }}-copy" + source_id: "{{ snapshot_id }}-b" + copy_tags: yes + wait: true + register: _result_instance_snapshot + check_mode: yes + + - assert: + that: + - _result_instance_snapshot.changed + + - name: Copy a snapshot + rds_instance_snapshot: + id: "{{ snapshot_id }}-copy" + source_id: "{{ snapshot_id }}-b" + copy_tags: yes + wait: true + register: _result_instance_snapshot + + - assert: + that: + - _result_instance_snapshot.changed + - _result_instance_snapshot.db_instance_identifier == "{{ instance_id }}" + - _result_instance_snapshot.source_db_snapshot_identifier == "{{ _snapshot_arn }}" + - _result_instance_snapshot.db_snapshot_identifier == "{{ snapshot_id }}-copy" + - "'tags' in _result_instance_snapshot" + - _result_instance_snapshot.tags | length == 3 + - _result_instance_snapshot.tags["tag_one"] == "{{ snapshot_id }}-b One" + - _result_instance_snapshot.tags["Tag Two"] == "two {{ snapshot_id }}-b" + - _result_instance_snapshot.tags["tag_three"] == "{{ snapshot_id }}-b Three" + + - name: Copy a snapshot (idempotence - check mode) + rds_instance_snapshot: + id: "{{ snapshot_id }}-copy" + source_id: "{{ snapshot_id }}-b" + copy_tags: yes + wait: true + register: _result_instance_snapshot + check_mode: yes + + - assert: + that: + - not _result_instance_snapshot.changed + + - name: Copy a snapshot (idempotence) + rds_instance_snapshot: + id: "{{ snapshot_id }}-copy" + source_id: "{{ snapshot_id }}-b" + copy_tags: yes + wait: true + register: _result_instance_snapshot + + - assert: + that: + - not _result_instance_snapshot.changed + - _result_instance_snapshot.db_instance_identifier == "{{ instance_id }}" + - _result_instance_snapshot.source_db_snapshot_identifier == "{{ _snapshot_arn }}" + - _result_instance_snapshot.db_snapshot_identifier == "{{ snapshot_id }}-copy" + - "'tags' in _result_instance_snapshot" + - _result_instance_snapshot.tags | length == 3 + - _result_instance_snapshot.tags["tag_one"] == "{{ snapshot_id }}-b One" + - _result_instance_snapshot.tags["Tag Two"] == "two {{ snapshot_id }}-b" + - _result_instance_snapshot.tags["tag_three"] == "{{ snapshot_id }}-b Three" + + always: + - name: Delete the existing DB instance snapshots + rds_instance_snapshot: + state: absent + db_snapshot_identifier: "{{ item }}" + wait: false + register: _result_delete_snapshot + ignore_errors: true + loop: + - "{{ snapshot_id }}" + - "{{ snapshot_id }}-b" + - "{{ snapshot_id }}-copy" + + - name: Delete the existing RDS instance without creating a final snapshot + rds_instance: + state: absent + instance_id: "{{ instance_id }}" + skip_final_snapshot: True + wait: false + register: _result_delete_instance + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/vars/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/vars/main.yml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/vars/main.yml @@ -0,0 +1 @@ +--- diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot_mgmt/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot_mgmt/aliases new file mode 100644 index 000000000..c4ee3f5a6 --- /dev/null +++ 
b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot_mgmt/aliases @@ -0,0 +1,4 @@ +cloud/aws +rds_instance_info +time=30m +rds_instance diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot_mgmt/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot_mgmt/defaults/main.yml new file mode 100644 index 000000000..d2ebe7f18 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot_mgmt/defaults/main.yml @@ -0,0 +1,9 @@ +instance_id: ansible-test-{{ tiny_prefix }} +modified_instance_id: '{{ instance_id }}-updated' +username: test +password: test12345678 +db_instance_class: db.t3.micro +allocated_storage: 20 + +# For snapshot tests +snapshot_id: '{{ instance_id }}-ss' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot_mgmt/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot_mgmt/tasks/main.yml new file mode 100644 index 000000000..f8ac5d5f9 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot_mgmt/tasks/main.yml @@ -0,0 +1,224 @@ +- name: rds_instance / snapshot_mgmt integration tests + collections: + - community.aws + module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + - name: Ensure the resource doesn't exist + rds_instance: + id: '{{ instance_id }}' + state: absent + skip_final_snapshot: true + register: result + + - assert: + that: + - not result.changed + ignore_errors: yes + + - name: Create a mariadb instance + rds_instance: + id: '{{ instance_id }}' + state: present + engine: mariadb + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + tags: + Name: '{{ instance_id }}' + Created_by: Ansible rds_instance tests + register: result + + - assert: + that: + - result.changed + - result.db_instance_identifier == '{{ instance_id }}' + - result.tags | length == 2 + - result.tags.Name == '{{ instance_id }}' + - result.tags.Created_by == 'Ansible rds_instance tests' + + - name: Create a snapshot + rds_instance_snapshot: + instance_id: '{{ instance_id }}' + snapshot_id: '{{ snapshot_id }}' + state: present + wait: yes + register: result + + - assert: + that: + - result.changed + - result.db_instance_identifier == "{{ instance_id }}" + - result.db_snapshot_identifier == "{{ snapshot_id }}" + + # ------------------------------------------------------------------------------------------ + # Test restoring db from snapshot + + - name: Restore DB from snapshot - check_mode + rds_instance: + id: '{{ snapshot_id }}' + creation_source: snapshot + snapshot_identifier: '{{ snapshot_id }}' + engine: mariadb + state: present + register: result + check_mode: yes + + - assert: + that: + - result.changed + + - name: Restore DB from snapshot + rds_instance: + id: '{{ snapshot_id }}' + creation_source: snapshot + snapshot_identifier: '{{ snapshot_id }}' + engine: mariadb + state: present + register: result + + - assert: + that: + - result.changed + - result.db_instance_identifier == "{{ snapshot_id }}" + - result.tags | length == 2 + - result.tags.Name == "{{ instance_id }}" + - result.tags.Created_by == 'Ansible rds_instance tests' + - result.db_instance_status == 'available' + + - name: 
Restore DB from snapshot (idempotence) - check_mode + rds_instance: + id: '{{ snapshot_id }}' + creation_source: snapshot + snapshot_identifier: '{{ snapshot_id }}' + engine: mariadb + state: present + register: result + check_mode: yes + + - assert: + that: + - not result.changed + + - name: Restore DB from snapshot (idempotence) + rds_instance: + id: '{{ snapshot_id }}' + creation_source: snapshot + snapshot_identifier: '{{ snapshot_id }}' + engine: mariadb + state: present + register: result + + - assert: + that: + - not result.changed + - result.db_instance_identifier == "{{ snapshot_id }}" + - result.tags | length == 2 + - result.tags.Name == "{{ instance_id }}" + - result.tags.Created_by == 'Ansible rds_instance tests' + - result.db_instance_status == 'available' + + # ------------------------------------------------------------------------------------------ + # Test final snapshot on deletion + + - name: Ensure instance exists prior to deleting + rds_instance_info: + db_instance_identifier: '{{ instance_id }}' + register: db_info + + - assert: + that: + - db_info.instances | length == 1 + + - name: Delete the instance keeping snapshot - check_mode + rds_instance: + id: '{{ instance_id }}' + state: absent + final_snapshot_identifier: '{{ instance_id }}' + register: result + check_mode: yes + + - assert: + that: + - result.changed + + - name: Delete the instance keeping snapshot + rds_instance: + id: '{{ instance_id }}' + state: absent + final_snapshot_identifier: '{{ instance_id }}' + register: result + + - assert: + that: + - result.changed + - result.final_snapshot.db_instance_identifier == '{{ instance_id }}' + + - name: Check that snapshot exists + rds_snapshot_info: + db_snapshot_identifier: '{{ instance_id }}' + register: result + + - assert: + that: + - result.snapshots | length == 1 + - result.snapshots.0.engine == 'mariadb' + + - name: Ensure instance was deleted + rds_instance_info: + db_instance_identifier: '{{ instance_id }}' + register: db_info + + - assert: + that: + - db_info.instances | length == 0 + + - name: Delete the instance (idempotence) - check_mode + rds_instance: + id: '{{ instance_id }}' + state: absent + skip_final_snapshot: true + register: result + check_mode: yes + + - assert: + that: + - not result.changed + + - name: Delete the instance (idempotence) + rds_instance: + id: '{{ instance_id }}' + state: absent + skip_final_snapshot: true + register: result + + - assert: + that: + - not result.changed + + always: + - name: Remove snapshots + rds_instance_snapshot: + db_snapshot_identifier: '{{ item }}' + state: absent + wait: false + ignore_errors: yes + with_items: + - '{{ instance_id }}' + - '{{ snapshot_id }}' + + - name: Remove DB instances + rds_instance: + id: '{{ item }}' + state: absent + skip_final_snapshot: true + wait: false + ignore_errors: yes + with_items: + - '{{ instance_id }}' + - '{{ snapshot_id }}' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_states/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_states/aliases new file mode 100644 index 000000000..c4ee3f5a6 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_states/aliases @@ -0,0 +1,4 @@ +cloud/aws +rds_instance_info +time=30m +rds_instance diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_states/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_states/defaults/main.yml new file mode 100644 index 
000000000..5540ffb89 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_states/defaults/main.yml @@ -0,0 +1,5 @@ +instance_id: ansible-test-{{ tiny_prefix }} +username: test +password: test12345678 +db_instance_class: db.t3.micro +allocated_storage: 20 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_states/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_states/tasks/main.yml new file mode 100644 index 000000000..fdcfcbf8a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_states/tasks/main.yml @@ -0,0 +1,320 @@ +- name: rds_instance / states integration tests + collections: + - community.aws + module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + - name: Ensure the resource doesn't exist + rds_instance: + id: '{{ instance_id }}' + state: absent + skip_final_snapshot: true + register: result + + - assert: + that: + - not result.changed + ignore_errors: yes + + - name: Create a mariadb instance - check_mode + rds_instance: + id: '{{ instance_id }}' + state: present + engine: mariadb + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + deletion_protection: true + tags: + Name: '{{ instance_id }}' + Created_by: Ansible rds_instance tests + register: result + check_mode: yes + + - assert: + that: + - result.changed + + - name: Create a mariadb instance + rds_instance: + id: '{{ instance_id }}' + state: present + engine: mariadb + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + deletion_protection: true + tags: + Name: '{{ instance_id }}' + Created_by: Ansible rds_instance tests + register: result + + - assert: + that: + - result.changed + - result.db_instance_identifier == '{{ instance_id }}' + - result.tags | length == 2 + - result.tags.Name == '{{ instance_id }}' + - result.tags.Created_by == 'Ansible rds_instance tests' + - result.deletion_protection == True + + - name: Create a mariadb instance (idempotence) - check_mode + rds_instance: + id: '{{ instance_id }}' + state: present + engine: mariadb + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + deletion_protection: true + tags: + Name: '{{ instance_id }}' + Created_by: Ansible rds_instance tests + register: result + check_mode: yes + + - assert: + that: + - not result.changed + + - name: Create a mariadb instance (idempotence) + rds_instance: + id: '{{ instance_id }}' + state: present + engine: mariadb + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + deletion_protection: true + tags: + Name: '{{ instance_id }}' + Created_by: Ansible rds_instance tests + register: result + + - assert: + that: + - not result.changed + - result.db_instance_identifier == '{{ instance_id }}' + - result.tags | length == 2 + - result.tags.Name == '{{ instance_id }}' + - result.tags.Created_by == 'Ansible rds_instance tests' + - result.deletion_protection == True + + # 
------------------------------------------------------------------------------------------ + # Test stopping / rebooting instances + + - name: Reboot the instance - check_mode + rds_instance: + id: '{{ instance_id }}' + state: rebooted + register: result + check_mode: yes + + - assert: + that: + - result.changed + + - name: Reboot the instance + rds_instance: + id: '{{ instance_id }}' + state: rebooted + register: result + + - assert: + that: + - result.changed + + # ------------------------------------------------------------------------------------------ + + - name: Stop the instance - check_mode + rds_instance: + id: '{{ instance_id }}' + state: stopped + register: result + check_mode: yes + + - assert: + that: + - result.changed + + - name: Stop the instance + rds_instance: + id: '{{ instance_id }}' + state: stopped + register: result + + - assert: + that: + - result.changed + + - name: Stop the instance (idempotence) - check_mode + rds_instance: + id: '{{ instance_id }}' + state: stopped + register: result + check_mode: yes + + - assert: + that: + - not result.changed + + - name: Stop the instance (idempotence) + rds_instance: + id: '{{ instance_id }}' + state: stopped + register: result + + - assert: + that: + - not result.changed + + # ------------------------------------------------------------------------------------------ + + - name: Start the instance - check_mode + rds_instance: + id: '{{ instance_id }}' + state: started + register: result + check_mode: yes + + - assert: + that: + - result.changed + + - name: Start the instance + rds_instance: + id: '{{ instance_id }}' + state: started + register: result + + - assert: + that: + - result.changed + + - name: Start the instance (idempotence) - check_mode + rds_instance: + id: '{{ instance_id }}' + state: started + register: result + check_mode: yes + + - assert: + that: + - not result.changed + + - name: Start the instance (idempotence) + rds_instance: + id: '{{ instance_id }}' + state: started + register: result + + - assert: + that: + - not result.changed + + # ------------------------------------------------------------------------------------------ + # Test deletion protection / deletion + + - name: Ensure instance exists prior to deleting + rds_instance_info: + db_instance_identifier: '{{ instance_id }}' + register: db_info + + - assert: + that: + - db_info.instances | length == 1 + + - name: Attempt to delete DB instance with deletion protection (should fail) + rds_instance: + id: '{{ instance_id }}' + state: absent + skip_final_snapshot: true + register: result + ignore_errors: yes + + - assert: + that: + - result.failed + + - name: Turn off deletion protection + rds_instance: + id: '{{ instance_id }}' + deletion_protection: false + register: result + + - assert: + that: + - result.changed + - result.deletion_protection == False + + - name: Delete the instance - check_mode + rds_instance: + id: '{{ instance_id }}' + state: absent + skip_final_snapshot: true + register: result + check_mode: yes + + - assert: + that: + - result.changed + + - name: Delete the instance + rds_instance: + id: '{{ instance_id }}' + state: absent + skip_final_snapshot: true + register: result + + - assert: + that: + - result.changed + + - name: Ensure instance was deleted + rds_instance_info: + db_instance_identifier: '{{ instance_id }}' + register: db_info + + - assert: + that: + - db_info.instances | length == 0 + + - name: Delete the instance (idempotence) - check_mode + rds_instance: + id: '{{ instance_id }}' + state: absent +
skip_final_snapshot: true + register: result + check_mode: yes + + - assert: + that: + - not result.changed + + - name: Delete the instance (idempotence) + rds_instance: + id: '{{ instance_id }}' + state: absent + skip_final_snapshot: true + register: result + + - assert: + that: + - not result.changed + + always: + - name: Remove DB instance + rds_instance: + id: '{{ instance_id }}' + state: absent + skip_final_snapshot: true + wait: false + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/aliases new file mode 100644 index 000000000..777fbe40a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/aliases @@ -0,0 +1,3 @@ +cloud/aws +time=15m +rds_instance diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/defaults/main.yml new file mode 100644 index 000000000..d9fb41aa7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/defaults/main.yml @@ -0,0 +1,7 @@ +instance_id: ansible-test-{{ tiny_prefix }} +instance_id_gp3: ansible-test-{{ tiny_prefix }}-gp3 +modified_instance_id: '{{ instance_id }}-updated' +username: test +password: test12345678 +db_instance_class: db.t3.micro +allocated_storage: 20 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/tasks/main.yml new file mode 100644 index 000000000..14c1872d6 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/tasks/main.yml @@ -0,0 +1,202 @@ +- name: rds_instance / tagging integration tests + collections: + - community.aws + module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + - name: Test tagging db with storage type gp3 + import_tasks: test_tagging_gp3.yml + + - name: Ensure the resource doesn't exist + rds_instance: + id: '{{ instance_id }}' + state: absent + skip_final_snapshot: true + register: result + + - assert: + that: + - not result.changed + ignore_errors: yes + + # Test invalid bad options + - name: Create a DB instance with an invalid engine + rds_instance: + id: '{{ instance_id }}' + state: present + engine: thisisnotavalidengine + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + register: result + ignore_errors: true + + - assert: + that: + - result.failed + - '"value of engine must be one of" in result.msg' + + # Test creation, adding tags and enabling encryption + - name: Create a mariadb instance + rds_instance: + id: '{{ instance_id }}' + state: present + engine: mariadb + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + storage_encrypted: true + tags: + Name: '{{ instance_id }}' + Created_by: Ansible rds_instance tests + register: result + + - assert: + that: + - result.changed + - result.db_instance_identifier == '{{ instance_id }}' + - result.tags | length == 2 + - result.tags.Name == '{{ instance_id }}' + - 
result.tags.Created_by == 'Ansible rds_instance tests' + - result.kms_key_id + - result.storage_encrypted == true + + - name: Test idempotency omitting tags - check_mode + rds_instance: + id: '{{ instance_id }}' + state: present + engine: mariadb + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + register: result + check_mode: yes + + - assert: + that: + - not result.changed + + - name: Test idempotency omitting tags + rds_instance: + id: '{{ instance_id }}' + state: present + engine: mariadb + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + register: result + + - assert: + that: + - not result.changed + - result.db_instance_identifier + - result.tags | length == 2 + + - name: Idempotence with minimal options + rds_instance: + id: '{{ instance_id }}' + state: present + register: result + + - assert: + that: + - not result.changed + - result.db_instance_identifier + - result.tags | length == 2 + + - name: Test tags are not purged if purge_tags is False + rds_instance: + db_instance_identifier: '{{ instance_id }}' + state: present + engine: mariadb + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + tags: {} + purge_tags: false + register: result + + - assert: + that: + - not result.changed + - result.tags | length == 2 + + - name: Add a tag and remove a tag - check_mode + rds_instance: + db_instance_identifier: '{{ instance_id }}' + state: present + tags: + Name: '{{ instance_id }}-new' + Created_by: Ansible rds_instance tests + purge_tags: true + register: result + check_mode: yes + + - assert: + that: + - result.changed + + - name: Add a tag and remove a tag + rds_instance: + db_instance_identifier: '{{ instance_id }}' + state: present + tags: + Name: '{{ instance_id }}-new' + Created_by: Ansible rds_instance tests + purge_tags: true + register: result + + - assert: + that: + - result.changed + - result.tags | length == 2 + - result.tags.Name == '{{ instance_id }}-new' + + - name: Add a tag and remove a tag (idempotence) - check_mode + rds_instance: + db_instance_identifier: '{{ instance_id }}' + state: present + tags: + Name: '{{ instance_id }}-new' + Created_by: Ansible rds_instance tests + purge_tags: true + register: result + check_mode: yes + + - assert: + that: + - not result.changed + + - name: Add a tag and remove a tag (idempotence) + rds_instance: + db_instance_identifier: '{{ instance_id }}' + state: present + tags: + Name: '{{ instance_id }}-new' + Created_by: Ansible rds_instance tests + purge_tags: true + register: result + + - assert: + that: + - not result.changed + - result.tags | length == 2 + - result.tags.Name == '{{ instance_id }}-new' + + always: + - name: Remove DB instance + rds_instance: + id: '{{ instance_id }}' + state: absent + skip_final_snapshot: true + wait: false + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/tasks/test_tagging_gp3.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/tasks/test_tagging_gp3.yml new file mode 100644 index 000000000..5d4e6c883 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/tasks/test_tagging_gp3.yml @@ -0,0 +1,190 @@ +- block: + - name: Ensure the resource doesn't exist +
rds_instance: + id: '{{ instance_id_gp3 }}' + state: absent + skip_final_snapshot: true + register: result + + - assert: + that: + - not result.changed + ignore_errors: yes + + # Test invalid bad options + - name: Create a DB instance with an invalid engine + rds_instance: + id: '{{ instance_id_gp3 }}' + state: present + engine: thisisnotavalidengine + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + register: result + ignore_errors: true + + - assert: + that: + - result.failed + - '"value of engine must be one of" in result.msg' + + # Test creation, adding tags and enabling encryption + - name: Create a mariadb instance + rds_instance: + id: '{{ instance_id_gp3 }}' + state: present + engine: mariadb + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + storage_encrypted: true + tags: + Name: '{{ instance_id_gp3 }}' + Created_by: Ansible rds_instance tests + register: result + + - assert: + that: + - result.changed + - result.db_instance_identifier == '{{ instance_id_gp3 }}' + - result.tags | length == 2 + - result.tags.Name == '{{ instance_id_gp3 }}' + - result.tags.Created_by == 'Ansible rds_instance tests' + - result.kms_key_id + - result.storage_encrypted == true + + - name: Test idempotency omitting tags - check_mode + rds_instance: + id: '{{ instance_id_gp3 }}' + state: present + engine: mariadb + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + register: result + check_mode: yes + + - assert: + that: + - not result.changed + + - name: Test idempotency omitting tags + rds_instance: + id: '{{ instance_id_gp3 }}' + state: present + engine: mariadb + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + register: result + + - assert: + that: + - not result.changed + - result.db_instance_identifier == '{{ instance_id_gp3 }}' + - result.tags | length == 2 + + - name: Idempotence with minimal options + rds_instance: + id: '{{ instance_id_gp3 }}' + state: present + register: result + + - assert: + that: + - not result.changed + - result.db_instance_identifier == '{{ instance_id_gp3 }}' + - result.tags | length == 2 + + - name: Test tags are not purged if purge_tags is False + rds_instance: + db_instance_identifier: '{{ instance_id_gp3 }}' + state: present + engine: mariadb + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + tags: {} + purge_tags: false + register: result + + - assert: + that: + - not result.changed + - result.tags | length == 2 + + - name: Add a tag and remove a tag - check_mode + rds_instance: + db_instance_identifier: '{{ instance_id_gp3 }}' + state: present + tags: + Name: '{{ instance_id_gp3 }}-new' + Created_by: Ansible rds_instance tests + purge_tags: true + register: result + check_mode: yes + + - assert: + that: + - result.changed + + - name: Add a tag and remove a tag + rds_instance: + db_instance_identifier: '{{ instance_id_gp3 }}' + state: present + tags: + Name: '{{ instance_id_gp3 }}-new' + Created_by: Ansible rds_instance tests + purge_tags: true + register: result + + - assert: + that: + - result.changed + - result.tags | length == 2 + - result.tags.Name == '{{ 
instance_id_gp3 }}-new' + + - name: Add a tag and remove a tag (idempotence) - check_mode + rds_instance: + db_instance_identifier: '{{ instance_id_gp3 }}' + state: present + tags: + Name: '{{ instance_id_gp3 }}-new' + Created_by: Ansible rds_instance tests + purge_tags: true + register: result + check_mode: yes + + - assert: + that: + - not result.changed + + - name: Add a tag and remove a tag (idempotence) + rds_instance: + db_instance_identifier: '{{ instance_id_gp3 }}' + state: present + tags: + Name: '{{ instance_id_gp3 }}-new' + Created_by: Ansible rds_instance tests + purge_tags: true + register: result + + - assert: + that: + - not result.changed + - result.tags | length == 2 + - result.tags.Name == '{{ instance_id_gp3 }}-new' + + always: + - name: Remove DB instance + rds_instance: + id: '{{ instance_id_gp3 }}' + state: absent + skip_final_snapshot: true + wait: false + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_upgrade/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_upgrade/aliases new file mode 100644 index 000000000..777fbe40a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_upgrade/aliases @@ -0,0 +1,3 @@ +cloud/aws +time=15m +rds_instance diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_upgrade/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_upgrade/defaults/main.yml new file mode 100644 index 000000000..ff9bc3b47 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_upgrade/defaults/main.yml @@ -0,0 +1,10 @@ +instance_id: ansible-test-{{ tiny_prefix }} +modified_instance_id: '{{ instance_id }}-updated' +username: test +password: test12345678 +db_instance_class: db.t3.micro +allocated_storage: 20 + +# For mariadb tests +mariadb_engine_version: 10.5.17 +mariadb_engine_version_2: 10.6.10 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_upgrade/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_upgrade/tasks/main.yml new file mode 100644 index 000000000..5a2112543 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_upgrade/tasks/main.yml @@ -0,0 +1,128 @@ +- name: rds_instance / upgrade integration tests + collections: + - community.aws + module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + - name: Ensure the resource doesn't exist + rds_instance: + id: '{{ instance_id }}' + state: absent + skip_final_snapshot: true + register: result + + - assert: + that: + - not result.changed + ignore_errors: yes + + - name: Create a mariadb instance + rds_instance: + id: '{{ instance_id }}' + state: present + engine: mariadb + engine_version: '{{ mariadb_engine_version }}' + allow_major_version_upgrade: true + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + register: result + + - assert: + that: + - result.changed + - result.db_instance_identifier == '{{ instance_id }}' + + # Test upgrade of DB instance + + - name: Upgrade a mariadb instance - check_mode + rds_instance: + id: '{{ instance_id }}' + state: present + engine: mariadb + engine_version: '{{ mariadb_engine_version_2 }}' + 
allow_major_version_upgrade: true + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + apply_immediately: true + register: result + check_mode: yes + + - assert: + that: + - result.changed + + - name: Upgrade a mariadb instance + rds_instance: + id: '{{ instance_id }}' + state: present + engine: mariadb + engine_version: '{{ mariadb_engine_version_2 }}' + allow_major_version_upgrade: true + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + apply_immediately: true + register: result + + - assert: + that: + - result.changed + - '"engine_version" in result.pending_modified_values or result.engine_version + == mariadb_engine_version_2' + + - name: Idempotence upgrading a mariadb instance - check_mode + rds_instance: + id: '{{ instance_id }}' + state: present + engine: mariadb + engine_version: '{{ mariadb_engine_version_2 }}' + allow_major_version_upgrade: true + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + register: result + check_mode: yes + + ### Specifying allow_major_version_upgrade with check_mode will always result in changed=True + ### since it's not returned in describe_db_instances api call + # - assert: + # that: + # - not result.changed + + - name: Idempotence upgrading a mariadb instance + rds_instance: + id: '{{ instance_id }}' + state: present + engine: mariadb + engine_version: '{{ mariadb_engine_version_2 }}' + allow_major_version_upgrade: true + username: '{{ username }}' + password: '{{ password }}' + db_instance_class: '{{ db_instance_class }}' + allocated_storage: '{{ allocated_storage }}' + register: result + + - assert: + that: + - not result.changed + - '"engine_version" in result.pending_modified_values or result.engine_version + == mariadb_engine_version_2' + + always: + - name: Delete the instance + rds_instance: + id: '{{ instance_id }}' + state: absent + skip_final_snapshot: true + wait: false + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/aliases new file mode 100644 index 000000000..658684afb --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/aliases @@ -0,0 +1,3 @@ +cloud/aws + +rds_option_group_info \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/defaults/main.yml new file mode 100644 index 000000000..d99a37964 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/defaults/main.yml @@ -0,0 +1,17 @@ +option_group_name: '{{ resource_prefix }}rds-option-group' +engine_name: mysql +major_engine_version: 5.6 +option_group_description: '{{ resource_prefix }}rds-option-group test' +instance_id: '{{ resource_prefix }}' +username: test +password: test12345678 +db_instance_class: db.t2.small +storage_encrypted_db_instance_class: db.t2.small +allocated_storage: 20 +vpc_name: '{{ resource_prefix }}-vpc' +vpc_seed: '{{ resource_prefix }}' +vpc_cidr: 10.0.0.0/16 +subnet_cidr: 10.0.{{ 256 | random(seed=vpc_seed) }}.0/24 +sg_1_name: '{{ resource_prefix }}-sg-1' +sg_2_name: '{{ 
resource_prefix }}-sg-2' +sg_3_name: '{{ resource_prefix }}-sg-3' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/meta/main.yml new file mode 100644 index 000000000..32cf5dda7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/tasks/main.yml new file mode 100644 index 000000000..72981cd63 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/tasks/main.yml @@ -0,0 +1,948 @@ +- name: rds_option_group tests + module_defaults: + group/aws: + region: '{{ aws_region }}' + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + + + block: + - name: create a VPC + ec2_vpc_net: + name: '{{ vpc_name }}' + state: present + cidr_block: '{{ vpc_cidr }}' + register: vpc_result + + - name: Assert success + assert: + that: + - vpc_result is successful + - '"vpc" in vpc_result' + - '"cidr_block" in vpc_result.vpc' + - vpc_result.vpc.cidr_block == vpc_cidr + - '"id" in vpc_result.vpc' + - vpc_result.vpc.id.startswith("vpc-") + - '"state" in vpc_result.vpc' + - vpc_result.vpc.state == 'available' + - '"tags" in vpc_result.vpc' + + - name: 'set fact: VPC ID' + set_fact: + vpc_id: '{{ vpc_result.vpc.id }}' + + - name: create subnet + ec2_vpc_subnet: + cidr: '{{ subnet_cidr }}' + vpc_id: '{{ vpc_id }}' + state: present + register: subnet_result + + - name: Assert success + assert: + that: + - subnet_result is successful + - '"subnet" in subnet_result' + - '"cidr_block" in subnet_result.subnet' + - subnet_result.subnet.cidr_block == subnet_cidr + - '"id" in subnet_result.subnet' + - subnet_result.subnet.id.startswith("subnet-") + - '"state" in subnet_result.subnet' + - subnet_result.subnet.state == 'available' + - '"tags" in subnet_result.subnet' + - subnet_result.subnet.vpc_id == vpc_id + + - name: 'set fact: VPC subnet ID' + set_fact: + subnet_id: '{{ subnet_result.subnet.id }}' + + + - name: Create security groups + ec2_group: + name: '{{ item }}' + description: created by rds_instance integration tests + state: present + vpc_id: '{{ vpc_id }}' + register: sgs_result + loop: + - '{{ sg_1_name }}' + - '{{ sg_2_name }}' + - '{{ sg_3_name }}' + + - name: Assert success + assert: + that: + - sgs_result is successful + + - name: 'set fact: security groups ID' + set_fact: + sg_1: '{{ sgs_result.results.0.group_id }}' + sg_2: '{{ sgs_result.results.1.group_id }}' + sg_3: '{{ sgs_result.results.2.group_id }}' + + + - name: List all the option groups - CHECK_MODE + rds_option_group_info: + register: option_groups_result + check_mode: true + + - name: Assert success - CHECK_MODE + assert: + that: + - option_groups_result is successful + + + - name: List all the option groups + rds_option_group_info: + register: option_groups_result + + - name: Assert success + assert: + that: + - option_groups_result is successful + + - name: Create an RDS Mysql option group - CHECK_MODE + rds_option_group: + state: present + option_group_name: '{{ option_group_name }}' + engine_name: '{{ engine_name }}' + major_engine_version: '{{ major_engine_version }}' + option_group_description: '{{
option_group_description }}' + apply_immediately: true + options: + - option_name: MEMCACHED + port: 11211 + vpc_security_group_memberships: + - '{{ sg_1 }}' + option_settings: + - name: MAX_SIMULTANEOUS_CONNECTIONS + value: '20' + - name: CHUNK_SIZE_GROWTH_FACTOR + value: '1.25' + check_mode: true + register: new_rds_mysql_option_group + + - name: Assert success - CHECK_MODE + assert: + that: + - new_rds_mysql_option_group.changed + + + - name: Create an RDS Mysql option group + rds_option_group: + state: present + option_group_name: '{{ option_group_name }}' + engine_name: '{{ engine_name }}' + major_engine_version: '{{ major_engine_version }}' + option_group_description: '{{ option_group_description }}' + apply_immediately: true + options: + - option_name: MEMCACHED + port: 11211 + vpc_security_group_memberships: + - '{{ sg_1 }}' + option_settings: + - name: MAX_SIMULTANEOUS_CONNECTIONS + value: '20' + - name: CHUNK_SIZE_GROWTH_FACTOR + value: '1.25' + register: new_rds_mysql_option_group + + - assert: + that: + - new_rds_mysql_option_group.changed + - "'engine_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.engine_name == "{{ engine_name }}" + - "'major_engine_version' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.major_engine_version == "{{ major_engine_version + }}" + - "'option_group_arn' in new_rds_mysql_option_group" + - "'option_group_description' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_description == "{{ option_group_description + }}" + - "'option_group_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_name == "{{ option_group_name }}" + - "'vpc_id' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.vpc_id == vpc_id + - "'options' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.options | length) > 0 + - "'option_name' in option" + - option.option_name == "MEMCACHED" + - "'permanent' in option" + - "'persistent' in option" + - "'port' in option" + - option.port == 11211 + - "'vpc_security_group_memberships' in option" + - (option.vpc_security_group_memberships | length) == 1 + - option.vpc_security_group_memberships[0].vpc_security_group_id == "{{ sg_1 + }}" + - "'option_settings' in option" + - (option_settings | length) > 0 + - option_settings | selectattr('name','equalto','MAX_SIMULTANEOUS_CONNECTIONS') + | list | count > 0 + - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR') + | list | count > 0 + vars: + option: '{{ new_rds_mysql_option_group.options[0] }}' + option_settings: '{{ new_rds_mysql_option_group.options[0].option_settings }}' + + + - name: List specific option group + rds_option_group_info: + option_group_name: '{{ option_group_name }}' + register: option_groups_result + + - name: Assert success + assert: + that: + - option_groups_result is successful + - (option_groups_result.result | length) == 1 + - "'engine_name' in option_groups_list" + - option_groups_list.engine_name == "{{ engine_name }}" + - "'major_engine_version' in option_groups_list" + - option_groups_list.major_engine_version == "{{ major_engine_version }}" + - "'option_group_arn' in option_groups_list" + - "'option_group_description' in option_groups_list" + - option_groups_list.option_group_description == "{{ option_group_description + }}" + - "'option_group_name' in option_groups_list" + - option_groups_list.option_group_name == "{{ option_group_name }}" + - "'vpc_id' in option_groups_list" + - option_groups_list.vpc_id == vpc_id + -
"'options' in option_groups_list" + - (option_groups_list.options | length) > 0 + - "'option_name' in options" + - options.option_name == "MEMCACHED" + - "'permanent' in options" + - "'persistent' in options" + - "'port' in options" + - options.port == 11211 + - "'vpc_security_group_memberships' in options" + - (options.vpc_security_group_memberships | length) == 1 + - options.vpc_security_group_memberships[0].vpc_security_group_id == "{{ sg_1 + }}" + - "'option_settings' in options" + - (options.option_settings | length) > 0 + vars: + option_groups_list: '{{ option_groups_result.result[0] }}' + options: '{{ option_groups_result.result[0].options[0] }}' + + + - name: Create an RDS Mysql option group (idempotency) - CHECK_MODE + rds_option_group: + state: present + option_group_name: '{{ option_group_name }}' + engine_name: '{{ engine_name }}' + major_engine_version: '{{ major_engine_version }}' + option_group_description: '{{ option_group_description }}' + apply_immediately: true + options: + - option_name: MEMCACHED + port: 11211 + vpc_security_group_memberships: + - '{{ sg_1 }}' + option_settings: + - name: MAX_SIMULTANEOUS_CONNECTIONS + value: '20' + - name: CHUNK_SIZE_GROWTH_FACTOR + value: '1.25' + check_mode: true + register: new_rds_mysql_option_group + + - name: Assert success - CHECK_MODE + assert: + that: + - not new_rds_mysql_option_group.changed + - "'engine_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.engine_name == "{{ engine_name }}" + - "'major_engine_version' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.major_engine_version == "{{ major_engine_version + }}" + - "'option_group_arn' in new_rds_mysql_option_group" + - "'option_group_description' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_description == "{{ option_group_description + }}" + - "'option_group_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_name == "{{ option_group_name }}" + - "'vpc_id' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.vpc_id == vpc_id + - "'options' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.options | length) > 0 + - "'option_name' in option" + - option.option_name == "MEMCACHED" + - "'permanent' in option" + - "'persistent' in option" + - "'port' in option" + - option.port == 11211 + - "'vpc_security_group_memberships' in option" + - (option.vpc_security_group_memberships | length) == 1 + - option.vpc_security_group_memberships[0].vpc_security_group_id == "{{ sg_1 + }}" + - "'option_settings' in option" + - (option_settings | length) > 0 + - option_settings | selectattr('name','equalto','MAX_SIMULTANEOUS_CONNECTIONS') + | list | count > 0 + - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR') + | list | count > 0 + vars: + option: '{{ new_rds_mysql_option_group.options[0] }}' + option_settings: '{{ new_rds_mysql_option_group.options[0].option_settings }}' + - name: Create an RDS Mysql option group (idempotency) + rds_option_group: + state: present + option_group_name: '{{ option_group_name }}' + engine_name: '{{ engine_name }}' + major_engine_version: '{{ major_engine_version }}' + option_group_description: '{{ option_group_description }}' + apply_immediately: true + options: + - option_name: MEMCACHED + port: 11211 + vpc_security_group_memberships: + - '{{ sg_1 }}' + option_settings: + - name: MAX_SIMULTANEOUS_CONNECTIONS + value: '20' + - name: CHUNK_SIZE_GROWTH_FACTOR + value: '1.25' + register: new_rds_mysql_option_group + + - 
assert: + that: + - not new_rds_mysql_option_group.changed + - "'engine_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.engine_name == "{{ engine_name }}" + - "'major_engine_version' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.major_engine_version == "{{ major_engine_version + }}" + - "'option_group_arn' in new_rds_mysql_option_group" + - "'option_group_description' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_description == "{{ option_group_description + }}" + - "'option_group_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_name == "{{ option_group_name }}" + - "'vpc_id' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.vpc_id == vpc_id + - "'options' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.options | length) > 0 + - "'option_name' in option" + - option.option_name == "MEMCACHED" + - "'permanent' in option" + - "'persistent' in option" + - "'port' in option" + - option.port == 11211 + - "'vpc_security_group_memberships' in option" + - (option.vpc_security_group_memberships | length) == 1 + - option.vpc_security_group_memberships[0].vpc_security_group_id == "{{ sg_1 + }}" + - "'option_settings' in option" + - (option_settings | length) > 0 + - option_settings | selectattr('name','equalto','MAX_SIMULTANEOUS_CONNECTIONS') + | list | count > 0 + - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR') + | list | count > 0 + vars: + option: '{{ new_rds_mysql_option_group.options[0] }}' + option_settings: '{{ new_rds_mysql_option_group.options[0].option_settings }}' + + + - name: List option groups with specific (engine_name and major_engine_version) + rds_option_group_info: + engine_name: '{{ engine_name }}' + major_engine_version: '{{ major_engine_version }}' + register: option_groups_result + + - name: Assert success + assert: + that: + - option_groups_result is successful + - (option_groups_result.result | length) > 0 + + + - name: Create an RDS Mysql option group - apply different changes (expected changed=true) + rds_option_group: + state: present + option_group_name: '{{ option_group_name }}' + engine_name: '{{ engine_name }}' + major_engine_version: '{{ major_engine_version }}' + option_group_description: '{{ option_group_description }}' + apply_immediately: true + options: + - option_name: MEMCACHED + port: 11211 + vpc_security_group_memberships: + - '{{ sg_1 }}' + - '{{ sg_2 }}' + - '{{ sg_3 }}' + option_settings: + - name: MAX_SIMULTANEOUS_CONNECTIONS + value: '30' + register: new_rds_mysql_option_group + + - assert: + that: + - new_rds_mysql_option_group.changed + - "'engine_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.engine_name == "{{ engine_name }}" + - "'major_engine_version' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.major_engine_version == "{{ major_engine_version + }}" + - "'option_group_arn' in new_rds_mysql_option_group" + - "'option_group_description' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_description == "{{ option_group_description + }}" + - "'option_group_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_name == "{{ option_group_name }}" + - "'vpc_id' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.vpc_id == vpc_id + - "'options' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.options | length) > 0 + - "'option_name' in option" + - option.option_name == "MEMCACHED" + - "'permanent' 
in option" + - "'persistent' in option" + - "'port' in option" + - option.port == 11211 + - "'vpc_security_group_memberships' in option" + - (option.vpc_security_group_memberships | length) == 3 + - "'option_settings' in option" + - (option_settings | length) > 0 + - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR') + | list | count > 0 + vars: + option: '{{ new_rds_mysql_option_group.options[0] }}' + option_settings: '{{ new_rds_mysql_option_group.options[0].option_settings }}' + - name: Get info about an option group - CHECK_MODE + rds_option_group_info: + option_group_name: '{{ option_group_name }}' + check_mode: true + register: option_groups_result + + - name: Assert success - CHECK_MODE + assert: + that: + - option_groups_result is successful + - (option_groups_result.result | length) == 1 + - "'engine_name' in option_groups_list" + - option_groups_list.engine_name == "{{ engine_name }}" + - "'major_engine_version' in option_groups_list" + - option_groups_list.major_engine_version == "{{ major_engine_version }}" + - "'option_group_arn' in option_groups_list" + - "'option_group_description' in option_groups_list" + - option_groups_list.option_group_description == "{{ option_group_description + }}" + - "'option_group_name' in option_groups_list" + - option_groups_list.option_group_name == "{{ option_group_name }}" + - "'vpc_id' in option_groups_list" + - option_groups_list.vpc_id == vpc_id + - "'options' in option_groups_list" + - (option_groups_list.options | length) > 0 + - "'option_name' in options" + - options.option_name == "MEMCACHED" + - "'permanent' in options" + - "'persistent' in options" + - "'port' in options" + - options.port == 11211 + - "'vpc_security_group_memberships' in options" + - (options.vpc_security_group_memberships | length) == 3 + - "'option_settings' in options" + - (options.option_settings | length) > 0 + vars: + option_groups_list: '{{ option_groups_result.result[0] }}' + options: '{{ option_groups_result.result[0].options[0] }}' + + + - name: RDS Mysql option group - apply tags - CHECK_MODE + rds_option_group: + state: present + option_group_name: '{{ option_group_name }}' + engine_name: '{{ engine_name }}' + major_engine_version: '{{ major_engine_version }}' + option_group_description: '{{ option_group_description }}' + apply_immediately: true + options: + - option_name: MEMCACHED + port: 11211 + vpc_security_group_memberships: + - '{{ sg_1 }}' + - '{{ sg_2 }}' + - '{{ sg_3 }}' + option_settings: + - name: CHUNK_SIZE_GROWTH_FACTOR + value: '1.2' + tags: + tag_one: '{{ option_group_name }} One' + Tag Two: two {{ option_group_name }} + check_mode: true + register: new_rds_mysql_option_group + + - name: Assert success - CHECK_MODE + assert: + that: + - new_rds_mysql_option_group.changed + - "'engine_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.engine_name == "{{ engine_name }}" + - "'major_engine_version' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.major_engine_version == "{{ major_engine_version + }}" + - "'option_group_arn' in new_rds_mysql_option_group" + - "'option_group_description' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_description == "{{ option_group_description + }}" + - "'option_group_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_name == "{{ option_group_name }}" + - "'vpc_id' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.vpc_id == vpc_id + - "'tags' in new_rds_mysql_option_group" + -
"'options' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.options | length) > 0 + - "'option_name' in option" + - option.option_name == "MEMCACHED" + - "'permanent' in option" + - "'persistent' in option" + - "'port' in option" + - option.port == 11211 + - "'vpc_security_group_memberships' in option" + - (option.vpc_security_group_memberships | length) == 3 + - "'option_settings' in option" + - (option_settings | length) > 0 + - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR') + | list | count > 0 + vars: + option: '{{ new_rds_mysql_option_group.options[0] }}' + option_settings: '{{ new_rds_mysql_option_group.options[0].option_settings }}' + - name: RDS Mysql option group - apply tags + rds_option_group: + state: present + option_group_name: '{{ option_group_name }}' + engine_name: '{{ engine_name }}' + major_engine_version: '{{ major_engine_version }}' + option_group_description: '{{ option_group_description }}' + apply_immediately: true + options: + - option_name: MEMCACHED + port: 11211 + vpc_security_group_memberships: + - '{{ sg_1 }}' + - '{{ sg_2 }}' + - '{{ sg_3 }}' + option_settings: + - name: CHUNK_SIZE_GROWTH_FACTOR + value: '1.2' + tags: + tag_one: '{{ option_group_name }} One' + Tag Two: two {{ option_group_name }} + register: new_rds_mysql_option_group + + - assert: + that: + - new_rds_mysql_option_group.changed + - "'engine_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.engine_name == "{{ engine_name }}" + - "'major_engine_version' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.major_engine_version == "{{ major_engine_version + }}" + - "'option_group_arn' in new_rds_mysql_option_group" + - "'option_group_description' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_description == "{{ option_group_description + }}" + - "'option_group_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_name == "{{ option_group_name }}" + - "'vpc_id' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.vpc_id == vpc_id + - "'tags' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.tags | length) == 2 + - new_rds_mysql_option_group.tags["tag_one"] == "{{ option_group_name }} One" + - new_rds_mysql_option_group.tags["Tag Two"] == "two {{ option_group_name }}" + - "'options' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.options | length) > 0 + - "'option_name' in option" + - option.option_name == "MEMCACHED" + - "'permanent' in option" + - "'persistent' in option" + - "'port' in option" + - option.port == 11211 + - "'vpc_security_group_memberships' in option" + - (option.vpc_security_group_memberships | length) == 3 + - "'option_settings' in option" + - (option_settings | length) > 0 + - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR') + | list | count > 0 + vars: + option: '{{ new_rds_mysql_option_group.options[0] }}' + option_settings: '{{ new_rds_mysql_option_group.options[0].option_settings }}' + + + - name: RDS Mysql option group - apply tags (idempotency) + rds_option_group: + state: present + option_group_name: '{{ option_group_name }}' + engine_name: '{{ engine_name }}' + major_engine_version: '{{ major_engine_version }}' + option_group_description: '{{ option_group_description }}' + apply_immediately: true + options: + - option_name: MEMCACHED + port: 11211 + vpc_security_group_memberships: + - '{{ sg_1 }}' + - '{{ sg_2 }}' + - '{{ sg_3 }}' + option_settings: + - name: CHUNK_SIZE_GROWTH_FACTOR + 
value: '1.2' + tags: + tag_one: '{{ option_group_name }} One' + Tag Two: two {{ option_group_name }} + register: new_rds_mysql_option_group + + - assert: + that: + - not new_rds_mysql_option_group.changed + - "'engine_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.engine_name == "{{ engine_name }}" + - "'major_engine_version' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.major_engine_version == "{{ major_engine_version + }}" + - "'option_group_arn' in new_rds_mysql_option_group" + - "'option_group_description' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_description == "{{ option_group_description + }}" + - "'option_group_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_name == "{{ option_group_name }}" + - "'vpc_id' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.vpc_id == vpc_id + - "'tags' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.tags | length) == 2 + - new_rds_mysql_option_group.tags["tag_one"] == "{{ option_group_name }} One" + - new_rds_mysql_option_group.tags["Tag Two"] == "two {{ option_group_name }}" + - "'options' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.options | length) > 0 + - "'option_name' in option" + - option.option_name == "MEMCACHED" + - "'permanent' in option" + - "'persistent' in option" + - "'port' in option" + - option.port == 11211 + - "'vpc_security_group_memberships' in option" + - (option.vpc_security_group_memberships | length) == 3 + - "'option_settings' in option" + - (option_settings | length) > 0 + - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR') + | list | count > 0 + vars: + option: '{{ new_rds_mysql_option_group.options[0] }}' + option_settings: '{{ new_rds_mysql_option_group.options[0].option_settings }}' + + + - name: RDS Mysql option group - update tags + rds_option_group: + state: present + option_group_name: '{{ option_group_name }}' + engine_name: '{{ engine_name }}' + major_engine_version: '{{ major_engine_version }}' + option_group_description: '{{ option_group_description }}' + apply_immediately: true + options: + - option_name: MEMCACHED + port: 11211 + vpc_security_group_memberships: + - '{{ sg_1 }}' + - '{{ sg_2 }}' + - '{{ sg_3 }}' + option_settings: + - name: CHUNK_SIZE_GROWTH_FACTOR + value: '1.2' + tags: + tag_three: '{{ option_group_name }} Three' + Tag Two: two {{ option_group_name }} + register: new_rds_mysql_option_group + + - assert: + that: + - new_rds_mysql_option_group.changed + - "'engine_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.engine_name == "{{ engine_name }}" + - "'major_engine_version' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.major_engine_version == "{{ major_engine_version + }}" + - "'option_group_arn' in new_rds_mysql_option_group" + - "'option_group_description' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_description == "{{ option_group_description + }}" + - "'option_group_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_name == "{{ option_group_name }}" + - "'vpc_id' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.vpc_id == vpc_id + - "'tags' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.tags | length) == 2 + - new_rds_mysql_option_group.tags["tag_three"] == "{{ option_group_name }} Three" + - new_rds_mysql_option_group.tags["Tag Two"] == "two {{ option_group_name }}" + - "'options' in 
new_rds_mysql_option_group" + - (new_rds_mysql_option_group.options | length) > 0 + - "'option_name' in option" + - option.option_name == "MEMCACHED" + - "'permanent' in option" + - "'persistent' in option" + - "'port' in option" + - option.port == 11211 + - "'vpc_security_group_memberships' in option" + - (option.vpc_security_group_memberships | length) == 3 + - "'option_settings' in option" + - (option_settings | length) > 0 + - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR') + | list | count > 0 + vars: + option: '{{ new_rds_mysql_option_group.options[0] }}' + option_settings: '{{ new_rds_mysql_option_group.options[0].option_settings }}' + + + - name: RDS Mysql option group - update tags without purge (expected changed=true) + rds_option_group: + state: present + option_group_name: '{{ option_group_name }}' + engine_name: '{{ engine_name }}' + major_engine_version: '{{ major_engine_version }}' + option_group_description: '{{ option_group_description }}' + apply_immediately: true + options: + - option_name: MEMCACHED + port: 11211 + vpc_security_group_memberships: + - '{{ sg_1 }}' + - '{{ sg_2 }}' + - '{{ sg_3 }}' + option_settings: + - name: CHUNK_SIZE_GROWTH_FACTOR + value: '1.2' + purge_tags: no + tags: + tag_one: '{{ option_group_name }} One' + register: new_rds_mysql_option_group + + - assert: + that: + - new_rds_mysql_option_group.changed + - "'engine_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.engine_name == "{{ engine_name }}" + - "'major_engine_version' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.major_engine_version == "{{ major_engine_version + }}" + - "'option_group_arn' in new_rds_mysql_option_group" + - "'option_group_description' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_description == "{{ option_group_description + }}" + - "'option_group_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_name == "{{ option_group_name }}" + - "'vpc_id' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.vpc_id == vpc_id + - "'tags' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.tags | length) == 3 + - new_rds_mysql_option_group.tags["Tag Two"] == "two {{ option_group_name }}" + - new_rds_mysql_option_group.tags["tag_one"] == "{{ option_group_name }} One" + - new_rds_mysql_option_group.tags["tag_three"] == "{{ option_group_name }} Three" + - "'options' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.options | length) > 0 + - "'option_name' in option" + - option.option_name == "MEMCACHED" + - "'permanent' in option" + - "'persistent' in option" + - "'port' in option" + - option.port == 11211 + - "'vpc_security_group_memberships' in option" + - (option.vpc_security_group_memberships | length) == 3 + - "'option_settings' in option" + - (option_settings | length) > 0 + - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR') + | list | count > 0 + vars: + option: '{{ new_rds_mysql_option_group.options[0] }}' + option_settings: '{{ new_rds_mysql_option_group.options[0].option_settings }}' + + + - name: RDS Mysql option group - update with CamelCase tags (expected changed=true) + rds_option_group: + state: present + option_group_name: '{{ option_group_name }}' + engine_name: '{{ engine_name }}' + major_engine_version: '{{ major_engine_version }}' + option_group_description: '{{ option_group_description }}' + apply_immediately: true + options: + - option_name: MEMCACHED + port: 11211 + 
vpc_security_group_memberships: + - '{{ sg_1 }}' + - '{{ sg_2 }}' + - '{{ sg_3 }}' + option_settings: + - name: CHUNK_SIZE_GROWTH_FACTOR + value: '1.2' + tags: + lowercase spaced: hello cruel world + Title Case: Hello Cruel World + CamelCase: SimpleCamelCase + snake_case: simple_snake_case + register: new_rds_mysql_option_group + + - assert: + that: + - new_rds_mysql_option_group.changed + - "'engine_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.engine_name == "{{ engine_name }}" + - "'major_engine_version' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.major_engine_version == "{{ major_engine_version + }}" + - "'option_group_arn' in new_rds_mysql_option_group" + - "'option_group_description' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_description == "{{ option_group_description + }}" + - "'option_group_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_name == "{{ option_group_name }}" + - "'vpc_id' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.vpc_id == vpc_id + - "'tags' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.tags | length) == 4 + - new_rds_mysql_option_group.tags["lowercase spaced"] == 'hello cruel world' + - new_rds_mysql_option_group.tags["Title Case"] == 'Hello Cruel World' + - new_rds_mysql_option_group.tags["CamelCase"] == 'SimpleCamelCase' + - new_rds_mysql_option_group.tags["snake_case"] == 'simple_snake_case' + - "'options' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.options | length) > 0 + - "'option_name' in option" + - option.option_name == "MEMCACHED" + - "'permanent' in option" + - "'persistent' in option" + - "'port' in option" + - option.port == 11211 + - "'vpc_security_group_memberships' in option" + - (option.vpc_security_group_memberships | length) == 3 + - "'option_settings' in option" + - (option_settings | length) > 0 + - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR') + | list | count > 0 + vars: + option: '{{ new_rds_mysql_option_group.options[0] }}' + option_settings: '{{ new_rds_mysql_option_group.options[0].option_settings }}' + - name: RDS Mysql option group - do not specify any tag to ensure previous tags + are not removed + rds_option_group: + state: present + option_group_name: '{{ option_group_name }}' + engine_name: '{{ engine_name }}' + major_engine_version: '{{ major_engine_version }}' + option_group_description: '{{ option_group_description }}' + apply_immediately: true + options: + - option_name: MEMCACHED + port: 11211 + vpc_security_group_memberships: + - '{{ sg_1 }}' + - '{{ sg_2 }}' + - '{{ sg_3 }}' + option_settings: + - name: CHUNK_SIZE_GROWTH_FACTOR + value: '1.2' + register: new_rds_mysql_option_group + + - assert: + that: + - not new_rds_mysql_option_group.changed + - "'engine_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.engine_name == "{{ engine_name }}" + - "'major_engine_version' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.major_engine_version == "{{ major_engine_version + }}" + - "'option_group_arn' in new_rds_mysql_option_group" + - "'option_group_description' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_description == "{{ option_group_description + }}" + - "'option_group_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_name == "{{ option_group_name }}" + - "'vpc_id' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.vpc_id == vpc_id + - "'tags' 
in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.tags | length) == 4 + - new_rds_mysql_option_group.tags["lowercase spaced"] == 'hello cruel world' + - new_rds_mysql_option_group.tags["Title Case"] == 'Hello Cruel World' + - new_rds_mysql_option_group.tags["CamelCase"] == 'SimpleCamelCase' + - new_rds_mysql_option_group.tags["snake_case"] == 'simple_snake_case' + - "'options' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.options | length) > 0 + - "'option_name' in option" + - option.option_name == "MEMCACHED" + - "'permanent' in option" + - "'persistent' in option" + - "'port' in option" + - option.port == 11211 + - "'vpc_security_group_memberships' in option" + - (option.vpc_security_group_memberships | length) == 3 + - "'option_settings' in option" + - (option_settings | length) > 0 + - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR') + | list | count > 0 + vars: + option: '{{ new_rds_mysql_option_group.options[0] }}' + option_settings: '{{ new_rds_mysql_option_group.options[0].option_settings }}' + - name: Delete an RDS Mysql option group - CHECK_MODE + rds_option_group: + state: absent + option_group_name: '{{ option_group_name }}' + check_mode: yes + register: deleted_rds_mysql_option_group + + - name: Assert success - CHECK_MODE + assert: + that: + - deleted_rds_mysql_option_group.changed + + + - name: Delete an RDS Mysql option group + rds_option_group: + state: absent + option_group_name: '{{ option_group_name }}' + register: deleted_rds_mysql_option_group + + - name: Assert success + assert: + that: + - deleted_rds_mysql_option_group.changed + + + always: + + - name: Delete an RDS Mysql option group + rds_option_group: + state: absent + option_group_name: '{{ option_group_name }}' + register: deleted_rds_mysql_option_group + ignore_errors: yes + + - name: Remove security groups + ec2_group: + name: '{{ item }}' + description: created by rds_instance integration tests + state: absent + register: sgs_result + loop: + - '{{ sg_1_name }}' + - '{{ sg_2_name }}' + - '{{ sg_3_name }}' + ignore_errors: yes + + - name: remove subnet + ec2_vpc_subnet: + cidr: '{{ subnet_cidr }}' + vpc_id: '{{ vpc_id }}' + state: absent + ignore_errors: yes + + - name: Delete VPC + ec2_vpc_net: + name: '{{ vpc_name }}' + cidr_block: '{{ vpc_cidr }}' + state: absent + purge_cidrs: yes + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/aliases new file mode 100644 index 000000000..4ef4b2067 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/aliases @@ -0,0 +1 @@ +cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/defaults/main.yml new file mode 100644 index 000000000..d9636646b --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/defaults/main.yml @@ -0,0 +1,29 @@ +rds_param_group: + name: '{{ resource_prefix}}rds-param-group' + description: Test group for rds_param_group Ansible module + engine: postgres9.6 + +rds_long_param_list: + application_name: Test + logging_collector: on + log_directory: /var/log/postgresql + log_filename: postgresql.log.%Y-%m-%d-%H + log_file_mode: 0600 + event_source: RDS + log_min_messages: INFO + log_min_duration_statement: 500 + log_rotation_age: 60 + debug_print_parse: on 
+  debug_print_rewritten: on
+  debug_print_plan: on
+  debug_pretty_print: on
+  log_checkpoints: on
+  log_connections: on
+  log_disconnections: on
+  log_duration: on
+  log_error_verbosity: VERBOSE
+  log_lock_waits: on
+  log_temp_files: 10K
+  log_timezone: UTC
+  log_statement: all
+  log_replication_commands: on
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/tasks/main.yml
new file mode 100644
index 000000000..889bf876a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/tasks/main.yml
@@ -0,0 +1,517 @@
+# A Note about ec2 environment variable name preference:
+# - EC2_URL -> AWS_URL
+# - EC2_ACCESS_KEY -> AWS_ACCESS_KEY_ID -> AWS_ACCESS_KEY
+# - EC2_SECRET_KEY -> AWS_SECRET_ACCESS_KEY -> AWS_SECRET_KEY
+# - EC2_REGION -> AWS_REGION
+#
+# TODO - name: test 'region' parameter
+# TODO - name: test 'state=absent' parameter for existing group
+# TODO - name: test 'state=absent' parameter for missing group
+# TODO - name: test 'validate_certs' parameter
+
+# ============================================================
+
+- name: rds_param_group tests
+  module_defaults:
+    group/aws:
+      ec2_access_key: '{{ aws_access_key }}'
+      ec2_secret_key: '{{ aws_secret_key }}'
+      security_token: '{{ security_token | default(omit) }}'
+      region: '{{ ec2_region }}'
+  block:
+
+  # ============================================================
+  - name: test empty parameter group - CHECK_MODE
+    rds_param_group:
+      name: '{{ rds_param_group.name }}'
+      engine: '{{ rds_param_group.engine }}'
+      description: '{{ rds_param_group.description }}'
+      state: present
+    check_mode: true
+    register: result
+
+  - name: assert rds parameter group changed - CHECK_MODE
+    assert:
+      that:
+      - result.changed
+
+  - name: test empty parameter group
+    rds_param_group:
+      name: '{{ rds_param_group.name }}'
+      engine: '{{ rds_param_group.engine }}'
+      description: '{{ rds_param_group.description }}'
+      state: present
+    register: result
+
+  - name: assert rds parameter group changed
+    assert:
+      that:
+      - result.changed
+      - '"db_parameter_group_arn" in result'
+      - "'{{ result.db_parameter_group_name | lower }}' == '{{ rds_param_group.name | lower }}'"
+      - '"description" in result'
+      - result.tags == {}
+
+  # ============================================================
+  - name: test empty parameter group with no arguments changes nothing - CHECK_MODE
+    rds_param_group:
+      name: '{{ rds_param_group.name }}'
+      engine: '{{ rds_param_group.engine }}'
+      description: '{{ rds_param_group.description }}'
+      state: present
+    check_mode: true
+    register: result
+
+  - name: assert no change when running empty parameter group a second time - CHECK_MODE
+    assert:
+      that:
+      - not result.changed
+
+  - name: test empty parameter group with no arguments changes nothing
+    rds_param_group:
+      name: '{{ rds_param_group.name }}'
+      engine: '{{ rds_param_group.engine }}'
+      description: '{{ rds_param_group.description }}'
+      state: present
+    register: result
+
+  - name: assert no change when running empty parameter group a second time
+    assert:
+      that:
+      - not result.changed
+
+  # ============================================================
+  - name: test adding numeric tag - CHECK_MODE
+    rds_param_group:
+      name: '{{ rds_param_group.name }}'
+      engine: '{{ rds_param_group.engine }}'
+      description: '{{ rds_param_group.description }}'
+      state: present
+      tags:
+        Environment: test
+        Test: 123
+    check_mode: true
+    register: result
+
+  - name: adding numeric tag just silently converts - CHECK_MODE
+    assert:
+      that:
+      - result.changed
+  - name: test adding numeric tag
+    rds_param_group:
+      name: '{{ rds_param_group.name }}'
+      engine: '{{ rds_param_group.engine }}'
+      description: '{{ rds_param_group.description }}'
+      state: present
+      tags:
+        Environment: test
+        Test: 123
+    register: result
+
+  - name: adding numeric tag just silently converts
+    assert:
+      that:
+      - result.changed
+      - '"db_parameter_group_arn" in result'
+      - "'{{ result.db_parameter_group_name | lower }}' == '{{ rds_param_group.name | lower }}'"
+      - '"description" in result'
+      - '"tags" in result'
+      - result.tags | length == 2
+      - result.tags["Environment"] == 'test'
+      - result.tags["Test"] == '123'
+
+  # ============================================================
+  - name: test tagging existing group - CHECK_MODE
+    rds_param_group:
+      name: '{{ rds_param_group.name }}'
+      engine: '{{ rds_param_group.engine }}'
+      description: '{{ rds_param_group.description }}'
+      state: present
+      tags:
+        Environment: test
+        Test: '123'
+        NewTag: hello
+    check_mode: true
+    register: result
+
+  - name: assert tagging existing group changes it and adds tags - CHECK_MODE
+    assert:
+      that:
+      - result.changed
+  - name: test tagging existing group
+    rds_param_group:
+      name: '{{ rds_param_group.name }}'
+      engine: '{{ rds_param_group.engine }}'
+      description: '{{ rds_param_group.description }}'
+      state: present
+      tags:
+        Environment: test
+        Test: '123'
+        NewTag: hello
+    register: result
+
+  - name: assert tagging existing group changes it and adds tags
+    assert:
+      that:
+      - result.changed
+      - '"db_parameter_group_arn" in result'
+      - "'{{ result.db_parameter_group_name | lower }}' == '{{ rds_param_group.name | lower }}'"
+      - '"description" in result'
+      - '"tags" in result'
+      - result.tags | length == 3
+      - result.tags["Environment"] == 'test'
+      - result.tags["Test"] == '123'
+      - result.tags["NewTag"] == 'hello'
+
+  # ============================================================
+  - name: test repeating tagging existing group - CHECK_MODE
+    rds_param_group:
+      name: '{{ rds_param_group.name }}'
+      engine: '{{ rds_param_group.engine }}'
+      description: '{{ rds_param_group.description }}'
+      state: present
+      tags:
+        Environment: test
+        Test: '123'
+        NewTag: hello
+    check_mode: true
+    register: result
+
+  - name: assert repeating tagging existing group does not change it - CHECK_MODE
+    assert:
+      that:
+      - not result.changed
+      - '"db_parameter_group_arn" in result'
+      - "'{{ result.db_parameter_group_name | lower }}' == '{{ rds_param_group.name | lower }}'"
+      - '"description" in result'
+      - '"tags" in result'
+      - result.tags | length == 3
+      - result.tags["Environment"] == 'test'
+      - result.tags["Test"] == '123'
+      - result.tags["NewTag"] == 'hello'
+
+  - name: test repeating tagging existing group
+    rds_param_group:
+      name: '{{ rds_param_group.name }}'
+      engine: '{{ rds_param_group.engine }}'
+      description: '{{ rds_param_group.description }}'
+      state: present
+      tags:
+        Environment: test
+        Test: '123'
+        NewTag: hello
+    register: result
+
+  - name: assert repeating tagging existing group does not change it
+    assert:
+      that:
+      - not result.changed
+      - '"db_parameter_group_arn" in result'
+      - "'{{ result.db_parameter_group_name | lower }}' == '{{ rds_param_group.name | lower }}'"
+      - '"description" in result'
+      - '"tags" in result'
+      - result.tags | length == 3
+      - result.tags["Environment"] == 'test'
+      - result.tags["Test"] == '123'
+      - result.tags["NewTag"] == 'hello'
+
+  # ============================================================
+  - name: test deleting tags from existing group - CHECK_MODE
+    rds_param_group:
+      name: '{{ rds_param_group.name }}'
+      engine: '{{ rds_param_group.engine }}'
+      description: '{{ rds_param_group.description }}'
+      state: present
+      tags:
+        Environment: test
+      purge_tags: yes
+    check_mode: true
+    register: result
+
+  - name: assert removing tags from existing group changes it - CHECK_MODE
+    assert:
+      that:
+      - result.changed
+  - name: test deleting tags from existing group
+    rds_param_group:
+      name: '{{ rds_param_group.name }}'
+      engine: '{{ rds_param_group.engine }}'
+      description: '{{ rds_param_group.description }}'
+      state: present
+      tags:
+        Environment: test
+      purge_tags: yes
+    register: result
+
+  - name: assert removing tags from existing group changes it
+    assert:
+      that:
+      - result.changed
+      - '"db_parameter_group_arn" in result'
+      - "'{{ result.db_parameter_group_name | lower }}' == '{{ rds_param_group.name | lower }}'"
+      - '"description" in result'
+      - '"tags" in result'
+      - result.tags | length == 1
+      - result.tags["Environment"] == 'test'
+
+  # ============================================================
+  - name: test state=absent with engine defined (expect changed=true) - CHECK_MODE
+    rds_param_group:
+      name: '{{ rds_param_group.name }}'
+      engine: '{{ rds_param_group.engine }}'
+      state: absent
+    check_mode: true
+    register: result
+
+  - name: assert state=absent with engine defined (expect changed=true) - CHECK_MODE
+    assert:
+      that:
+      - result.changed
+
+  - name: test state=absent with engine defined (expect changed=true)
+    rds_param_group:
+      name: '{{ rds_param_group.name }}'
+      engine: '{{ rds_param_group.engine }}'
+      state: absent
+    register: result
+
+  - name: assert state=absent with engine defined (expect changed=true)
+    assert:
+      that:
+      - result.changed
+
+  # ============================================================
+  - name: test creating group with parameters - CHECK_MODE
+    rds_param_group:
+      name: '{{ rds_param_group.name }}'
+      engine: '{{ rds_param_group.engine }}'
+      description: '{{ rds_param_group.description }}'
+      state: present
+      params:
+        log_directory: /var/log/postgresql
+        log_statement: all
+        log_duration: on
+        this_param_does_not_exist: oh_no
+      tags:
+        Environment: test
+        Test: '123'
+    check_mode: true
+    register: result
+
+  - name: assert creating a new group with parameter changes it - CHECK_MODE
+    assert:
+      that:
+      - result.changed
+
+  - name: test creating group with parameters
+    rds_param_group:
+      name: '{{ rds_param_group.name }}'
+      engine: '{{ rds_param_group.engine }}'
+      description: '{{ rds_param_group.description }}'
+      state: present
+      params:
+        log_directory: /var/log/postgresql
+        log_statement: all
+        log_duration: on
+        this_param_does_not_exist: oh_no
+      tags:
+        Environment: test
+        Test: '123'
+    register: result
+
+  - name: assert creating a new group with parameter changes it
+    assert:
+      that:
+      - result.changed
+      - '"db_parameter_group_arn" in result'
+      - "'{{ result.db_parameter_group_name | lower }}' == '{{ rds_param_group.name | lower }}'"
+      - '"description" in result'
+      - '"tags" in result'
+      - result.tags | length == 2
+      - result.tags["Environment"] == 'test'
+      - result.tags["Test"] == '123'
+      - result.errors | length == 2
+
+  # ============================================================
+  - name: test repeating group with parameters - CHECK_MODE
+    rds_param_group:
+      name: '{{ rds_param_group.name }}'
+      engine: '{{ rds_param_group.engine }}'
+      description: '{{ rds_param_group.description }}'
+      state: present
+      params:
+        log_directory: /var/log/postgresql
+        log_statement: all
+        log_duration: on
+        this_param_does_not_exist: oh_no
+      tags:
+        Environment: test
+        Test: '123'
+    check_mode: true
+    register: result
+
+  - name: assert repeating group with parameters does not change it - CHECK_MODE
+    assert:
+      that:
+      - not result.changed
+
+  - name: test repeating group with parameters
+    rds_param_group:
+      name: '{{ rds_param_group.name }}'
+      engine: '{{ rds_param_group.engine }}'
+      description: '{{ rds_param_group.description }}'
+      state: present
+      params:
+        log_directory: /var/log/postgresql
+        log_statement: all
+        log_duration: on
+        this_param_does_not_exist: oh_no
+      tags:
+        Environment: test
+        Test: '123'
+    register: result
+
+  - name: assert repeating group with parameters does not change it
+    assert:
+      that:
+      - not result.changed
+      - '"db_parameter_group_arn" in result'
+      - "'{{ result.db_parameter_group_name | lower }}' == '{{ rds_param_group.name | lower }}'"
+      - '"description" in result'
+      - '"tags" in result'
+      - result.tags | length == 2
+      - result.tags["Environment"] == 'test'
+      - result.tags["Test"] == '123'
+      - result.errors | length == 2
+
+  # ============================================================
+  - name: test state=absent with engine defined (expect changed=true) - CHECK_MODE
+    rds_param_group:
+      name: '{{ rds_param_group.name }}'
+      engine: '{{ rds_param_group.engine }}'
+      state: absent
+    check_mode: true
+    register: result
+
+  - name: assert state=absent with engine defined (expect changed=true) - CHECK_MODE
+    assert:
+      that:
+      - result.changed
+  - name: test state=absent with engine defined (expect changed=true)
+    rds_param_group:
+      name: '{{ rds_param_group.name }}'
+      engine: '{{ rds_param_group.engine }}'
+      state: absent
+    register: result
+
+  - name: assert state=absent with engine defined (expect changed=true)
+    assert:
+      that:
+      - result.changed
+
+  # ============================================================
+  - name: test repeating state=absent (expect changed=false) - CHECK_MODE
+    rds_param_group:
+      name: '{{ rds_param_group.name }}'
+      engine: '{{ rds_param_group.engine }}'
+      state: absent
+    register: result
+    check_mode: true
+    ignore_errors: true
+
+  - name: assert repeating state=absent (expect changed=false) - CHECK_MODE
+    assert:
+      that:
+      - not result.changed
+  - name: test repeating state=absent (expect changed=false)
+    rds_param_group:
+      name: '{{ rds_param_group.name }}'
+      engine: '{{ rds_param_group.engine }}'
+      state: absent
+    register: result
+    ignore_errors: true
+
+  - name: assert repeating state=absent (expect changed=false)
+    assert:
+      that:
+      - not result.changed
+
+  # ============================================================
+  - name: test creating group with more than 20 parameters - CHECK_MODE
+    rds_param_group:
+      name: '{{ rds_param_group.name }}'
+      engine: '{{ rds_param_group.engine }}'
+      description: '{{ rds_param_group.description }}'
+      params: '{{ rds_long_param_list }}'
+      state: present
+    check_mode: true
+    register: result
+
+  - name: assert creating a new group with lots of parameter changes it - CHECK_MODE
+    assert:
+      that:
+      - result.changed
+  - name: test creating group with more than 20 parameters
+    rds_param_group:
+      name: '{{ rds_param_group.name }}'
+      engine: '{{ rds_param_group.engine }}'
+      description: '{{ rds_param_group.description }}'
+      params: '{{ rds_long_param_list }}'
+      state: present
+    register: result
+
+  - name: assert creating a new group with lots of parameter changes it
+    assert:
+      that:
+      - result.changed
+
+  # ============================================================
+  - name: test repeating group with more than 20 parameters - CHECK_MODE
+    rds_param_group:
+      name: '{{ rds_param_group.name }}'
+      engine: '{{ rds_param_group.engine }}'
+      description: '{{ rds_param_group.description }}'
+      params: '{{ rds_long_param_list }}'
+      state: present
+    check_mode: true
+    register: result
+
+  - name: assert repeating a group with lots of parameter does not change it - CHECK_MODE
+    assert:
+      that:
+      - not result.changed
+  - name: test repeating group with more than 20 parameters
+    rds_param_group:
+      name: '{{ rds_param_group.name }}'
+      engine: '{{ rds_param_group.engine }}'
+      description: '{{ rds_param_group.description }}'
+      params: '{{ rds_long_param_list }}'
+      state: present
+    register: result
+
+  - name: assert repeating a group with lots of parameter does not change it
+    assert:
+      that:
+      - not result.changed
+
+  always:
+  # ============================================================
+  - name: test state=absent (expect changed=true)
+    rds_param_group:
+      name: '{{ rds_param_group.name }}'
+      state: absent
+    register: result
+    ignore_errors: true
+
+  - name: assert state=absent (expect changed=true)
+    assert:
+      that:
+      - result.changed
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/aliases
new file mode 100644
index 000000000..4ef4b2067
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/aliases
@@ -0,0 +1 @@
+cloud/aws
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/defaults/main.yml
new file mode 100644
index 000000000..156c9f903
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/defaults/main.yml
@@ -0,0 +1,9 @@
+vpc_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.0.0/16
+subnet_a: 10.{{ 256 | random(seed=resource_prefix) }}.10.0/24
+subnet_b: 10.{{ 256 | random(seed=resource_prefix) }}.11.0/24
+subnet_c: 10.{{ 256 | random(seed=resource_prefix) }}.12.0/24
+subnet_d: 10.{{ 256 | random(seed=resource_prefix) }}.13.0/24
+
+group_description: 'Created by integration test : {{ resource_prefix }}'
+group_description_changed: 'Created by integration test : {{ resource_prefix }} - changed'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/main.yml
new file mode 100644
index 000000000..207b150af
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/main.yml
@@ -0,0 +1,112 @@
+# Tests for rds_subnet_group
+#
+# Note: (From Amazon's documentation)
+# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/rds.html#RDS.Client.modify_db_subnet_group
+# DB subnet groups must contain at least one subnet in at least two AZs in the
+# AWS Region.
+
+- module_defaults:
+    group/aws:
+      aws_access_key: '{{ aws_access_key }}'
+      aws_secret_key: '{{ aws_secret_key }}'
+      security_token: '{{ security_token | default(omit) }}'
+      region: '{{ aws_region }}'
+  block:
+
+  # ============================================================
+
+  - name: Fetch AZ availability
+    aws_az_info:
+    register: az_info
+
+  - name: Assert that we have multiple AZs available to us
+    assert:
+      that: az_info.availability_zones | length >= 2
+
+  - name: Pick AZs
+    set_fact:
+      az_one: '{{ az_info.availability_zones[0].zone_name }}'
+      az_two: '{{ az_info.availability_zones[1].zone_name }}'
+
+  # ============================================================
+
+  - name: Create a VPC
+    ec2_vpc_net:
+      state: present
+      cidr_block: '{{ vpc_cidr }}'
+      name: '{{ resource_prefix }}'
+    register: vpc
+
+  - name: Create subnets
+    ec2_vpc_subnet:
+      state: present
+      cidr: '{{ item.cidr }}'
+      az: '{{ item.az }}'
+      vpc_id: '{{ vpc.vpc.id }}'
+      tags:
+        Name: '{{ item.name }}'
+    with_items:
+    - cidr: '{{ subnet_a }}'
+      az: '{{ az_one }}'
+      name: '{{ resource_prefix }}-subnet-a'
+    - cidr: '{{ subnet_b }}'
+      az: '{{ az_two }}'
+      name: '{{ resource_prefix }}-subnet-b'
+    - cidr: '{{ subnet_c }}'
+      az: '{{ az_one }}'
+      name: '{{ resource_prefix }}-subnet-c'
+    - cidr: '{{ subnet_d }}'
+      az: '{{ az_two }}'
+      name: '{{ resource_prefix }}-subnet-d'
+    register: subnets
+
+  - set_fact:
+      subnet_ids: '{{ subnets.results | map(attribute="subnet.id") | list }}'
+
+  # ============================================================
+
+  - include_tasks: params.yml
+
+  - include_tasks: tests.yml
+
+  # ============================================================
+
+  always:
+  - name: Remove subnet group
+    rds_subnet_group:
+      state: absent
+      name: '{{ resource_prefix }}'
+    ignore_errors: yes
+
+  - name: Remove subnets
+    ec2_vpc_subnet:
+      state: absent
+      cidr: '{{ item.cidr }}'
+      vpc_id: '{{ vpc.vpc.id }}'
+    with_items:
+    - cidr: '{{ subnet_a }}'
+      name: '{{ resource_prefix }}-subnet-a'
+    - cidr: '{{ subnet_b }}'
+      name: '{{ resource_prefix }}-subnet-b'
+    - cidr: '{{ subnet_c }}'
+      name: '{{ resource_prefix }}-subnet-c'
+    - cidr: '{{ subnet_d }}'
+      name: '{{ resource_prefix }}-subnet-d'
+    ignore_errors: yes
+    register: removed_subnets
+    until: removed_subnets is succeeded
+    retries: 5
+    delay: 5
+
+  - name: Remove the VPC
+    ec2_vpc_net:
+      state: absent
+      cidr_block: '{{ vpc_cidr }}'
+      name: '{{ resource_prefix }}'
+    ignore_errors: yes
+    register: removed_vpc
+    until: removed_vpc is success
+    retries: 5
+    delay: 5
+
+  # ============================================================
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/params.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/params.yml
new file mode 100644
index 000000000..109703f38
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/params.yml
@@ -0,0 +1,29 @@
+# Try creating without a description
+- name: Create a subnet group (no description)
+  rds_subnet_group:
+    state: present
+    name: '{{ resource_prefix }}'
+    subnets:
+    - '{{ subnet_ids[0] }}'
+    - '{{ subnet_ids[1] }}'
+  ignore_errors: yes
+  register: create_missing_param
+- assert:
+    that:
+    - create_missing_param is failed
+    - "'description' in create_missing_param.msg"
+    - "'state is present but all of the following are missing' in create_missing_param.msg"
+
+# Try creating without subnets
+- name: Create a subnet group (no subnets)
+  rds_subnet_group:
+    state: present
+    name: '{{ resource_prefix }}'
+    description: '{{ group_description }}'
+  ignore_errors: yes
+  register: create_missing_param
+- assert:
+    that:
+    - create_missing_param is failed
+    - "'subnets' in create_missing_param.msg"
+    - "'state is present but all of the following are missing' in create_missing_param.msg"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/tests.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/tests.yml
new file mode 100644
index 000000000..ce710ed3b
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/tests.yml
@@ -0,0 +1,675 @@
+# ============================================================
+# Basic creation
+- name: Create a subnet group - CHECK_MODE
+  rds_subnet_group:
+    state: present
+    name: '{{ resource_prefix }}'
+    description: '{{ group_description }}'
+    subnets:
+    - '{{ subnet_ids[0] }}'
+    - '{{ subnet_ids[1] }}'
+  check_mode: true
+  register: result
+
+- assert:
+    that:
+    - result is changed
+
+- name: Create a subnet group
+  rds_subnet_group:
+    state: present
+    name: '{{ resource_prefix }}'
+    description: '{{ group_description }}'
+    subnets:
+    - '{{ subnet_ids[0] }}'
+    - '{{ subnet_ids[1] }}'
+  register: result
+
+- assert:
+    that:
+    - result is changed
+    - result.subnet_group.description == group_description
+    - result.subnet_group.name == resource_prefix
+    - result.subnet_group.vpc_id == vpc.vpc.id
+    - result.subnet_group.subnet_ids | length == 2
+    - subnet_ids[0] in result.subnet_group.subnet_ids
+    - subnet_ids[1] in result.subnet_group.subnet_ids
+
+- name: Create a subnet group (idempotency) - CHECK_MODE
+  rds_subnet_group:
+    state: present
+    name: '{{ resource_prefix }}'
+    description: '{{ group_description }}'
+    subnets:
+    - '{{ subnet_ids[0] }}'
+    - '{{ subnet_ids[1] }}'
+  check_mode: true
+  register: result
+
+- assert:
+    that:
+    - result is not changed
+
+- name: Create a subnet group (idempotency)
+  rds_subnet_group:
+    state: present
+    name: '{{ resource_prefix }}'
+    description: '{{ group_description }}'
+    subnets:
+    - '{{ subnet_ids[0] }}'
+    - '{{ subnet_ids[1] }}'
+  register: result
+
+- assert:
+    that:
+    - result is not changed
+    - result.subnet_group.description == group_description
+    - result.subnet_group.name == resource_prefix
+    - result.subnet_group.vpc_id == vpc.vpc.id
+    - result.subnet_group.subnet_ids | length == 2
+    - subnet_ids[0] in result.subnet_group.subnet_ids
+    - subnet_ids[1] in result.subnet_group.subnet_ids
+
+# ============================================================
+# Update description
+- name: Update subnet group description - CHECK_MODE
+  rds_subnet_group:
+    state: present
+    name: '{{ resource_prefix }}'
+    description: '{{ group_description_changed }}'
+    subnets:
+    - '{{ subnet_ids[0] }}'
+    - '{{ subnet_ids[1] }}'
+  check_mode: true
+  register: result
+
+- assert:
+    that:
+    - result is changed
+
+- name: Update subnet group description
+  rds_subnet_group:
+    state: present
+    name: '{{ resource_prefix }}'
+    description: '{{ group_description_changed }}'
+    subnets:
+    - '{{ subnet_ids[0] }}'
+    - '{{ subnet_ids[1] }}'
+  register: result
+
+- assert:
+    that:
+    - result is changed
+    - result.subnet_group.description == group_description_changed
+    - result.subnet_group.name == resource_prefix
+    - result.subnet_group.vpc_id == vpc.vpc.id
+    - result.subnet_group.subnet_ids | length == 2
+    - subnet_ids[0] in result.subnet_group.subnet_ids
+    - subnet_ids[1] in result.subnet_group.subnet_ids
+
+- name: Update subnet group description (idempotency) - CHECK_MODE
+  rds_subnet_group:
+    state: present
+    name: '{{ resource_prefix }}'
+    description: '{{ group_description_changed }}'
+    subnets:
+    - '{{ subnet_ids[0] }}'
+    - '{{ subnet_ids[1] }}'
+  check_mode: true
+  register: result
+
+- assert:
+    that:
+    - result is not changed
+
+- name: Update subnet group description (idempotency)
+  rds_subnet_group:
+    state: present
+    name: '{{ resource_prefix }}'
+    description: '{{ group_description_changed }}'
+    subnets:
+    - '{{ subnet_ids[0] }}'
+    - '{{ subnet_ids[1] }}'
+  register: result
+
+- assert:
+    that:
+    - result is not changed
+    - result.subnet_group.description == group_description_changed
+    - result.subnet_group.name == resource_prefix
+    - result.subnet_group.vpc_id == vpc.vpc.id
+    - result.subnet_group.subnet_ids | length == 2
+    - subnet_ids[0] in result.subnet_group.subnet_ids
+    - subnet_ids[1] in result.subnet_group.subnet_ids
+
+- name: Restore subnet group description - CHECK_MODE
+  rds_subnet_group:
+    state: present
+    name: '{{ resource_prefix }}'
+    description: '{{ group_description }}'
+    subnets:
+    - '{{ subnet_ids[0] }}'
+    - '{{ subnet_ids[1] }}'
+  check_mode: true
+  register: result
+
+- assert:
+    that:
+    - result is changed
+
+- name: Restore subnet group description
+  rds_subnet_group:
+    state: present
+    name: '{{ resource_prefix }}'
+    description: '{{ group_description }}'
+    subnets:
+    - '{{ subnet_ids[0] }}'
+    - '{{ subnet_ids[1] }}'
+  register: result
+
+- assert:
+    that:
+    - result is changed
+    - result.subnet_group.description == group_description
+    - result.subnet_group.name == resource_prefix
+    - result.subnet_group.vpc_id == vpc.vpc.id
+    - result.subnet_group.subnet_ids | length == 2
+    - subnet_ids[0] in result.subnet_group.subnet_ids
+    - subnet_ids[1] in result.subnet_group.subnet_ids
+
+# ============================================================
+# Update subnets
+- name: Update subnet group list - CHECK_MODE
+  rds_subnet_group:
+    state: present
+    name: '{{ resource_prefix }}'
+    description: '{{ group_description }}'
+    subnets:
+    - '{{ subnet_ids[2] }}'
+    - '{{ subnet_ids[3] }}'
+  check_mode: true
+  register: result
+
+- assert:
+    that:
+    - result is changed
+
+- name: Update subnet group list
+  rds_subnet_group:
+    state: present
+    name: '{{ resource_prefix }}'
+    description: '{{ group_description }}'
+    subnets:
+    - '{{ subnet_ids[2] }}'
+    - '{{ subnet_ids[3] }}'
+  register: result
+
+- assert:
+    that:
+    - result is changed
+    - result.subnet_group.description == group_description
+    - result.subnet_group.name == resource_prefix
+    - result.subnet_group.vpc_id == vpc.vpc.id
+    - result.subnet_group.subnet_ids | length == 2
+    - subnet_ids[2] in result.subnet_group.subnet_ids
+    - subnet_ids[3] in result.subnet_group.subnet_ids
+
+- name: Update subnet group list (idempotency) - CHECK_MODE
+  rds_subnet_group:
+    state: present
+    name: '{{ resource_prefix }}'
+    description: '{{ group_description }}'
+    subnets:
+    - '{{ subnet_ids[2] }}'
+    - '{{ subnet_ids[3] }}'
+  check_mode: true
+  register: result
+
+- assert:
+    that:
+    - result is not changed
+
+- name: Update subnet group list (idempotency)
+  rds_subnet_group:
+    state: present
+    name: '{{ resource_prefix }}'
+    description: '{{ group_description }}'
+    subnets:
+    - '{{ subnet_ids[2] }}'
+    - '{{ subnet_ids[3] }}'
+  register: result
+
+- assert:
+    that:
+    - result is not changed
+    - result.subnet_group.description == group_description
+    - result.subnet_group.name == resource_prefix
+    - result.subnet_group.vpc_id == vpc.vpc.id
+    - result.subnet_group.subnet_ids | length == 2
+    - subnet_ids[2] in result.subnet_group.subnet_ids
+    - subnet_ids[3] in result.subnet_group.subnet_ids
+
+- name: Add more subnets subnet group list - CHECK_MODE
+  rds_subnet_group:
+    state: present
+    name: '{{ resource_prefix }}'
+    description: '{{ group_description }}'
+    subnets:
+    - '{{ subnet_ids[0] }}'
+    - '{{ subnet_ids[1] }}'
+    - '{{ subnet_ids[2] }}'
+    - '{{ subnet_ids[3] }}'
+  check_mode: true
+  register: result
+
+- assert:
+    that:
+    - result is changed
+
+- name: Add more subnets subnet group list
+  rds_subnet_group:
+    state: present
+    name: '{{ resource_prefix }}'
+    description: '{{ group_description }}'
+    subnets:
+    - '{{ subnet_ids[0] }}'
+    - '{{ subnet_ids[1] }}'
+    - '{{ subnet_ids[2] }}'
+    - '{{ subnet_ids[3] }}'
+  register: result
+
+- assert:
+    that:
+    - result is changed
+    - result.subnet_group.description == group_description
+    - result.subnet_group.name == resource_prefix
+    - result.subnet_group.vpc_id == vpc.vpc.id
+    - result.subnet_group.subnet_ids | length == 4
+    - subnet_ids[0] in result.subnet_group.subnet_ids
+    - subnet_ids[1] in result.subnet_group.subnet_ids
+    - subnet_ids[2] in result.subnet_group.subnet_ids
+    - subnet_ids[3] in result.subnet_group.subnet_ids
+
+- name: Add more members to subnet group list (idempotency) - CHECK_MODE
+  rds_subnet_group:
+    state: present
+    name: '{{ resource_prefix }}'
+    description: '{{ group_description }}'
+    subnets:
+    - '{{ subnet_ids[0] }}'
+    - '{{ subnet_ids[1] }}'
+    - '{{ subnet_ids[2] }}'
+    - '{{ subnet_ids[3] }}'
+  check_mode: true
+  register: result
+
+- assert:
+    that:
+    - result is not changed
+
+- name: Add more members to subnet group list (idempotency)
+  rds_subnet_group:
+    state: present
+    name: '{{ resource_prefix }}'
+    description: '{{ group_description }}'
+    subnets:
+    - '{{ subnet_ids[0] }}'
+    - '{{ subnet_ids[1] }}'
+    - '{{ subnet_ids[2] }}'
+    - '{{ subnet_ids[3] }}'
+  register: result
+
+- assert:
+    that:
+    - result is not changed
+    - result.subnet_group.description == group_description
+    - result.subnet_group.name == resource_prefix
+    - result.subnet_group.vpc_id == vpc.vpc.id
+    - result.subnet_group.subnet_ids | length == 4
+    - subnet_ids[0] in result.subnet_group.subnet_ids
+    - subnet_ids[1] in result.subnet_group.subnet_ids
+    - subnet_ids[2] in result.subnet_group.subnet_ids
+    - subnet_ids[3] in result.subnet_group.subnet_ids
+
+# ============================================================
+# Add tags to subnets
+- name: Update subnet with tags - CHECK_MODE
+  rds_subnet_group:
+    state: present
+    name: '{{ resource_prefix }}'
+    description: '{{ group_description_changed }}'
+    subnets:
+    - '{{ subnet_ids[0] }}'
+    - '{{ subnet_ids[1] }}'
+    tags:
+      tag_one: '{{ resource_prefix }} One'
+      Tag Two: two {{ resource_prefix }}
+  check_mode: true
+  register: result
+
+- assert:
+    that:
+    - result is changed
+
+- name: Update subnet with tags
+  rds_subnet_group:
+    state: present
+    name: '{{ resource_prefix }}'
+    description: '{{ group_description_changed }}'
+    subnets:
+    - '{{ subnet_ids[0] }}'
+    - '{{ subnet_ids[1] }}'
+    tags:
+      tag_one: '{{ resource_prefix }} One'
+      Tag Two: two {{ resource_prefix }}
+  register: result
+
+- assert:
+    that:
+    - result is changed
+    - result.subnet_group.description == group_description_changed
+    - result.subnet_group.name == resource_prefix
+    - result.subnet_group.vpc_id == vpc.vpc.id
+    - result.subnet_group.subnet_ids | length == 2
+    - subnet_ids[0] in result.subnet_group.subnet_ids
+    - subnet_ids[1] in result.subnet_group.subnet_ids
+    - '"tags" in result.subnet_group'
+    - result.subnet_group.tags | length == 2
+    - result.subnet_group.tags["tag_one"] == '{{ resource_prefix }} One'
+    - result.subnet_group.tags["Tag Two"] == 'two {{ resource_prefix }}'
+
+- name: Update subnet with tags (idempotency) - CHECK_MODE
+  rds_subnet_group:
+    state: present
+    name: '{{ resource_prefix }}'
+    description: '{{ group_description_changed }}'
+    subnets:
+    - '{{ subnet_ids[0] }}'
+    - '{{ subnet_ids[1] }}'
+    tags:
+      tag_one: '{{ resource_prefix }} One'
+      Tag Two: two {{ resource_prefix }}
+  check_mode: true
+  register: result
+
+- assert:
+    that:
+    - result is not changed
+
+- name: Update subnet with tags (idempotency)
+  rds_subnet_group:
+    state: present
+    name: '{{ resource_prefix }}'
+    description: '{{ group_description_changed }}'
+    subnets:
+    - '{{ subnet_ids[0] }}'
+    - '{{ subnet_ids[1] }}'
+    tags:
+      tag_one: '{{ resource_prefix }} One'
+      Tag Two: two {{ resource_prefix }}
+  register: result
+
+- assert:
+    that:
+    - result is not changed
+    - result.subnet_group.description == group_description_changed
+    - result.subnet_group.name == resource_prefix
+    - result.subnet_group.vpc_id == vpc.vpc.id
+    - result.subnet_group.subnet_ids | length == 2
+    - subnet_ids[0] in result.subnet_group.subnet_ids
+    - subnet_ids[1] in result.subnet_group.subnet_ids
+    - '"tags" in result.subnet_group'
+    - result.subnet_group.tags | length == 2
+    - result.subnet_group.tags["tag_one"] == '{{ resource_prefix }} One'
+    - result.subnet_group.tags["Tag Two"] == 'two {{ resource_prefix }}'
+
+- name: Update (add/remove) tags - CHECK_MODE
+  rds_subnet_group:
+    state: present
+    name: '{{ resource_prefix }}'
+    description: '{{ group_description_changed }}'
+    subnets:
+    - '{{ subnet_ids[0] }}'
+    - '{{ subnet_ids[1] }}'
+    tags:
+      tag_three: '{{ resource_prefix }} Three'
+      Tag Two: two {{ resource_prefix }}
+  check_mode: true
+  register: result
+
+- assert:
+    that:
+    - result is changed
+
+- name: Update (add/remove) tags
+  rds_subnet_group:
+    state: present
+    name: '{{ resource_prefix }}'
+    description: '{{ group_description_changed }}'
+    subnets:
+    - '{{ subnet_ids[0] }}'
+    - '{{ subnet_ids[1] }}'
+    tags:
+      tag_three: '{{ resource_prefix }} Three'
+      Tag Two: two {{ resource_prefix }}
+  register: result
+
+- assert:
+    that:
+    - result is changed
+    - result.subnet_group.description == group_description_changed
+    - result.subnet_group.name == resource_prefix
+    - result.subnet_group.vpc_id == vpc.vpc.id
+    - result.subnet_group.subnet_ids | length == 2
+    - subnet_ids[0] in result.subnet_group.subnet_ids
+    - subnet_ids[1] in result.subnet_group.subnet_ids
+    - '"tags" in result.subnet_group'
+    - result.subnet_group.tags | length == 2
+    - result.subnet_group.tags["tag_three"] == '{{ resource_prefix }} Three'
+    - result.subnet_group.tags["Tag Two"] == 'two {{ resource_prefix }}'
+
+- name: Update tags without purge - CHECK_MODE
+  rds_subnet_group:
+    state: present
+    name: '{{ resource_prefix }}'
+    description: '{{ group_description_changed }}'
+    subnets:
+    - '{{ subnet_ids[0] }}'
+    - '{{ subnet_ids[1] }}'
+    purge_tags: no
+    tags:
+      tag_one: '{{ resource_prefix }} One'
+  check_mode: true
+  register: result
+
+- assert:
+    that:
+    - result is changed
+
+- name: Update tags without purge
+  rds_subnet_group:
+    state: present
+    name: '{{ resource_prefix }}'
+    description: '{{ group_description_changed }}'
+    subnets:
+    - '{{ subnet_ids[0] }}'
+    - '{{ subnet_ids[1] }}'
+    purge_tags: no
+    tags:
+      tag_one: '{{ resource_prefix }} One'
+  register: result
+
+- assert:
+    that:
+    - result is changed
+    - result.subnet_group.description == group_description_changed
+    - result.subnet_group.name == resource_prefix
+    - result.subnet_group.vpc_id == vpc.vpc.id
+    - result.subnet_group.subnet_ids | length == 2
+    - subnet_ids[0] in result.subnet_group.subnet_ids
+    - subnet_ids[1] in result.subnet_group.subnet_ids
+    - '"tags" in result.subnet_group'
+    - result.subnet_group.tags | length == 3
+    - result.subnet_group.tags["tag_three"] == '{{ resource_prefix }} Three'
+    - result.subnet_group.tags["Tag Two"] == 'two {{ resource_prefix }}'
+    - result.subnet_group.tags["tag_one"] == '{{ resource_prefix }} One'
+
+- name: Remove all the tags - CHECK_MODE
+  rds_subnet_group:
+    state: present
+    name: '{{ resource_prefix }}'
+    description: '{{ group_description_changed }}'
+    subnets:
+    - '{{ subnet_ids[0] }}'
+    - '{{ subnet_ids[1] }}'
+    tags: {}
+  check_mode: true
+  register: result
+
+- assert:
+    that:
+    - result is changed
+
+- name: Remove all the tags
+  rds_subnet_group:
+    state: present
+    name: '{{ resource_prefix }}'
+    description: '{{ group_description_changed }}'
+    subnets:
+    - '{{ subnet_ids[0] }}'
+    - '{{ subnet_ids[1] }}'
+    tags: {}
+  register: result
+
+- assert:
+    that:
+    - result is changed
+    - result.subnet_group.description == group_description_changed
+    - result.subnet_group.name == resource_prefix
+    - result.subnet_group.vpc_id == vpc.vpc.id
+    - result.subnet_group.subnet_ids | length == 2
+    - subnet_ids[0] in result.subnet_group.subnet_ids
+    - subnet_ids[1] in result.subnet_group.subnet_ids
+    - '"tags" in result.subnet_group'
+
+- name: Update with CamelCase tags - CHECK_MODE
+  rds_subnet_group:
+    state: present
+    name: '{{ resource_prefix }}'
+    description: '{{ group_description_changed }}'
+    subnets:
+    - '{{ subnet_ids[0] }}'
+    - '{{ subnet_ids[1] }}'
+    tags:
+      lowercase spaced: hello cruel world
+      Title Case: Hello Cruel World
+      CamelCase: SimpleCamelCase
+      snake_case: simple_snake_case
+  check_mode: true
+  register: result
+
+- assert:
+    that:
+    - result is changed
+
+- name: Update with CamelCase tags
+  rds_subnet_group:
+    state: present
+    name: '{{ resource_prefix }}'
+    description: '{{ group_description_changed }}'
+    subnets:
+    - '{{ subnet_ids[0] }}'
+    - '{{ subnet_ids[1] }}'
+    tags:
+      lowercase spaced: hello cruel world
+      Title Case: Hello Cruel World
+      CamelCase: SimpleCamelCase
+      snake_case: simple_snake_case
+  register: result
+
+- assert:
+    that:
+    - result is changed
+    - result.subnet_group.description == group_description_changed
+    - result.subnet_group.name == resource_prefix
+    - result.subnet_group.vpc_id == vpc.vpc.id
+    - result.subnet_group.subnet_ids | length == 2
+    - subnet_ids[0] in result.subnet_group.subnet_ids
+    - subnet_ids[1] in result.subnet_group.subnet_ids
+    - '"tags" in result.subnet_group'
+    - result.subnet_group.tags | length == 4
+    - result.subnet_group.tags["lowercase spaced"] == 'hello cruel world'
+    - result.subnet_group.tags["Title Case"] == 'Hello Cruel World'
+    - result.subnet_group.tags["CamelCase"] == 'SimpleCamelCase'
+    - result.subnet_group.tags["snake_case"] == 'simple_snake_case'
+
+- name: Do not specify any tag to ensure previous tags are not removed
+  rds_subnet_group:
+    state: present
+    name: '{{ resource_prefix }}'
+    description: '{{ group_description_changed }}'
+    subnets:
+    - '{{ subnet_ids[0] }}'
+    - '{{ subnet_ids[1] }}'
+  register: result
+
+- assert:
+    that:
+    - result is not changed
+    - result.subnet_group.description == group_description_changed
+    - result.subnet_group.name == resource_prefix
+    - result.subnet_group.vpc_id == vpc.vpc.id
+    - result.subnet_group.subnet_ids | length == 2
+    - subnet_ids[0] in result.subnet_group.subnet_ids
+    - subnet_ids[1] in result.subnet_group.subnet_ids
+    - '"tags" in result.subnet_group'
+    - result.subnet_group.tags | length == 4
+    - result.subnet_group.tags["lowercase spaced"] == 'hello cruel world'
+    - result.subnet_group.tags["Title Case"] == 'Hello Cruel World'
+    - result.subnet_group.tags["CamelCase"] == 'SimpleCamelCase'
+    - result.subnet_group.tags["snake_case"] == 'simple_snake_case'
+
+
+# ============================================================
+# Deletion
+- name: Delete a subnet group - CHECK_MODE
+  rds_subnet_group:
+    state: absent
+    name: '{{ resource_prefix }}'
+  check_mode: true
+  register: result
+
+- assert:
+    that:
+    - result is changed
+
+- name: Delete a subnet group
+  rds_subnet_group:
+    state: absent
+    name: '{{ resource_prefix }}'
+  register: result
+
+- assert:
+    that:
+    - result is changed
+
+- name: Delete a subnet group - CHECK_MODE (idempotency)
+  rds_subnet_group:
+    state: absent
+    name: '{{ resource_prefix }}'
+  check_mode: true
+  register: result
+
+- assert:
+    that:
+    - result is not changed
+
+- name: Delete a subnet group (idempotency)
+  rds_subnet_group:
+    state: absent
+    name: '{{ resource_prefix }}'
+  register: result
+
+- assert:
+    that:
+    - result is not changed
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53/aliases b/ansible_collections/amazon/aws/tests/integration/targets/route53/aliases
new file mode 100644
index 000000000..c6a082944
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/route53/aliases
@@ -0,0 +1,4 @@
+cloud/aws
+
+route53_info
+module_utils_route53
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/route53/defaults/main.yml
new file mode 100644
index 000000000..cc0d3b78d
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/route53/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+# defaults file for route53 tests
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/route53/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/route53/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/route53/tasks/main.yml
new file mode 100644
index 000000000..08ec59d93
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/route53/tasks/main.yml
@@ -0,0 +1,1126 @@
+# tasks file for Route53 integration tests
+
+- set_fact:
+    zone_one: '{{ resource_prefix | replace("-", "") }}.one.ansible.test.'
+    zone_two: '{{ resource_prefix | replace("-", "") }}.two.ansible.test.'
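+# Editor's annotation (not part of the upstream file): the zone names above
+# embed resource_prefix (with "-" removed) so that concurrent test runs get
+# unique, disposable hosted zones under ansible.test. For a hypothetical
+# resource_prefix of "ansible-test-1", zone_one renders as
+# "ansibletest1.one.ansible.test."; the trailing dot marks the name as fully
+# qualified, which is the form Route53 stores and returns for zone names.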
+- debug: + msg: Set zones {{ zone_one }} and {{ zone_two }} + +- name: Test basics (new zone, A and AAAA records) + module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + amazon.aws.route53: + # Route53 is explicitly a global service + region: + block: + - name: create VPC + ec2_vpc_net: + cidr_block: 192.0.2.0/24 + name: '{{ resource_prefix }}_vpc' + state: present + register: vpc + + - name: Create a zone + route53_zone: + zone: '{{ zone_one }}' + comment: Created in Ansible test {{ resource_prefix }} + tags: + TestTag: '{{ resource_prefix }}.z1' + register: z1 + - assert: + that: + - z1 is success + - z1 is changed + - z1.comment == 'Created in Ansible test {{ resource_prefix }}' + - z1.tags.TestTag == '{{ resource_prefix }}.z1' + + - name: Get zone details + route53_info: + query: hosted_zone + hosted_zone_id: '{{ z1.zone_id }}' + hosted_zone_method: details + register: hosted_zones + - name: Assert newly created hosted zone only has NS and SOA records + assert: + that: + - hosted_zones.HostedZone.ResourceRecordSetCount == 2 + + - name: Create a second zone + route53_zone: + zone: '{{ zone_two }}' + vpc_id: '{{ vpc.vpc.id }}' + vpc_region: '{{ aws_region }}' + comment: Created in Ansible test {{ resource_prefix }} + tags: + TestTag: '{{ resource_prefix }}.z2' + register: z2 + - assert: + that: + - z2 is success + - z2 is changed + - z2.comment == 'Created in Ansible test {{ resource_prefix }}' + - z2.tags.TestTag == '{{ resource_prefix }}.z2' + + - name: Get zone details + route53_info: + query: hosted_zone + hosted_zone_id: '{{ z2.zone_id }}' + hosted_zone_method: details + register: hosted_zones + + - name: Assert newly created hosted zone only has NS and SOA records + assert: + that: + - hosted_zones.HostedZone.ResourceRecordSetCount == 2 + - hosted_zones.HostedZone.Config.PrivateZone + + # Ensure that we can use the non-paginated list_by_name method with max_items + - name: Get zone 1 details only + route53_info: + query: hosted_zone + hosted_zone_method: list_by_name + dns_name: '{{ zone_one }}' + max_items: 1 + register: list_by_name_result + + - name: Assert that we found exactly one zone when querying by name + assert: + that: + - list_by_name_result.HostedZones | length == 1 + - list_by_name_result.HostedZones[0].Name == '{{ zone_one }}' + + - name: Create A record using zone fqdn + route53: + state: present + zone: '{{ zone_one }}' + record: qdn_test.{{ zone_one }} + type: A + value: 192.0.2.1 + register: qdn + - assert: + that: + - qdn is not failed + - qdn is changed + + - name: Get A record using "get" method of route53 module + route53: + state: get + zone: '{{ zone_one }}' + record: qdn_test.{{ zone_one }} + type: A + register: get_result + - name: Check boto3 type get data + assert: + that: + - get_result.nameservers | length > 0 + - get_result.resource_record_sets | length == 1 + - '"name" in record_set' + - record_set.name == qdn_record + - '"resource_records" in record_set' + - record_set.resource_records | length == 1 + - '"value" in record_set.resource_records[0]' + - record_set.resource_records[0].value == '192.0.2.1' + - '"ttl" in record_set' + - record_set.ttl == 3600 + - '"type" in record_set' + - record_set.type == 'A' + vars: + record_set: '{{ get_result.resource_record_sets[0] }}' + qdn_record: qdn_test.{{ zone_one }} + + - name: Check boto3 compat get data + assert: + that: + - '"set" in get_result' + - '"Name" in 
record_set' + - record_set.Name == qdn_record + - '"ResourceRecords" in record_set' + - record_set.ResourceRecords | length == 1 + - '"Value" in record_set.ResourceRecords[0]' + - record_set.ResourceRecords[0].Value == '192.0.2.1' + - '"TTL" in record_set' + - record_set.TTL == 3600 + - record_set.Type == 'A' + vars: + record_set: '{{ get_result.set }}' + qdn_record: qdn_test.{{ zone_one }} + + - name: Check boto2 compat get data + assert: + that: + - '"set" in get_result' + - '"alias" in record_set' + - record_set.alias == False + - '"failover" in record_set' + - '"health_check" in record_set' + - '"hosted_zone_id" in record_set' + - record_set.hosted_zone_id == z1.zone_id + - '"identifier" in record_set' + - '"record" in record_set' + - record_set.record == qdn_record + - '"ttl" in record_set' + - record_set.ttl == "3600" + - '"type" in record_set' + - record_set.type == 'A' + - '"value" in record_set' + - record_set.value == '192.0.2.1' + - '"values" in record_set' + - record_set['values'] | length == 1 + - record_set['values'][0] == '192.0.2.1' + - '"weight" in record_set' + - '"zone" in record_set' + - record_set.zone == zone_one + vars: + record_set: '{{ get_result.set }}' + qdn_record: qdn_test.{{ zone_one }} + + ## test A recordset creation and order adjustments + - name: Create same A record using zone non-qualified domain + route53: + state: present + zone: '{{ zone_one[:-1] }}' + record: qdn_test.{{ zone_one[:-1] }} + type: A + value: 192.0.2.1 + register: non_qdn + - assert: + that: + - non_qdn is not failed + - non_qdn is not changed + + - name: Create A record using zone ID + route53: + state: present + hosted_zone_id: '{{ z1.zone_id }}' + record: zid_test.{{ zone_one }} + type: A + value: 192.0.2.1 + register: zid + - assert: + that: + - zid is not failed + - zid is changed + + - name: Create a multi-value A record with values in different order + route53: + state: present + zone: '{{ zone_one }}' + record: order_test.{{ zone_one }} + type: A + value: + - 192.0.2.2 + - 192.0.2.1 + register: mv_a_record + - assert: + that: + - mv_a_record is not failed + - mv_a_record is changed + + - name: Create same multi-value A record with values in different order + route53: + state: present + zone: '{{ zone_one }}' + record: order_test.{{ zone_one }} + type: A + value: + - 192.0.2.2 + - 192.0.2.1 + register: mv_a_record + - assert: + that: + - mv_a_record is not failed + - mv_a_record is not changed + + # Get resulting A record and ensure max_items is applied + - name: get Route53 A record information + route53_info: + type: A + query: record_sets + hosted_zone_id: '{{ z1.zone_id }}' + start_record_name: order_test.{{ zone_one }} + max_items: 1 + register: records + + - assert: + that: + - records.ResourceRecordSets|length == 1 + - records.ResourceRecordSets[0].ResourceRecords|length == 2 + - records.ResourceRecordSets[0].ResourceRecords[0].Value == '192.0.2.2' + - records.ResourceRecordSets[0].ResourceRecords[1].Value == '192.0.2.1' + + - name: Remove a member from multi-value A record with values in different order + route53: + state: present + zone: '{{ zone_one }}' + record: order_test.{{ zone_one }} + type: A + value: + - 192.0.2.2 + register: del_a_record + ignore_errors: true + - name: This should fail, because `overwrite` is false + assert: + that: + - del_a_record is failed + + - name: Remove a member from multi-value A record with values in different order + route53: + state: present + zone: '{{ zone_one }}' + record: order_test.{{ zone_one }} + overwrite: true + type: A + 
value: + - 192.0.2.2 + register: del_a_record + ignore_errors: true + + - name: This should not fail, because `overwrite` is true + assert: + that: + - del_a_record is not failed + - del_a_record is changed + + - name: get Route53 zone A record information + route53_info: + type: A + query: record_sets + hosted_zone_id: '{{ z1.zone_id }}' + start_record_name: order_test.{{ zone_one }} + max_items: 50 + register: records + + - assert: + that: + - records.ResourceRecordSets|length == 3 + - records.ResourceRecordSets[0].ResourceRecords|length == 1 + - records.ResourceRecordSets[0].ResourceRecords[0].Value == '192.0.2.2' + + ## Test CNAME record creation and info retrieval + - name: Create CNAME record + route53: + state: present + zone: '{{ zone_one }}' + type: CNAME + record: cname_test.{{ zone_one }} + value: order_test.{{ zone_one }} + register: cname_record + + - assert: + that: + - cname_record is not failed + - cname_record is changed + + - name: Get Route53 CNAME record information + route53_info: + type: CNAME + query: record_sets + hosted_zone_id: '{{ z1.zone_id }}' + start_record_name: cname_test.{{ zone_one }} + max_items: 1 + register: cname_records + + - assert: + that: + - cname_records.ResourceRecordSets|length == 1 + - cname_records.ResourceRecordSets[0].ResourceRecords|length == 1 + - cname_records.ResourceRecordSets[0].ResourceRecords[0].Value == "order_test.{{ + zone_one }}" + + ## Test CAA record creation + - name: Create a LetsEncrypt CAA record + route53: + state: present + zone: '{{ zone_one }}' + record: '{{ zone_one }}' + type: CAA + value: + - 0 issue "letsencrypt.org;" + - 0 issuewild "letsencrypt.org;" + overwrite: true + register: caa + - assert: + that: + - caa is not failed + - caa is changed + + - name: Re-create the same LetsEncrypt CAA record + route53: + state: present + zone: '{{ zone_one }}' + record: '{{ zone_one }}' + type: CAA + value: + - 0 issue "letsencrypt.org;" + - 0 issuewild "letsencrypt.org;" + overwrite: true + register: caa + - assert: + that: + - caa is not failed + - caa is not changed + + - name: Re-create the same LetsEncrypt CAA record in opposite-order + route53: + state: present + zone: '{{ zone_one }}' + record: '{{ zone_one }}' + type: CAA + value: + - 0 issuewild "letsencrypt.org;" + - 0 issue "letsencrypt.org;" + overwrite: true + register: caa + - name: This should not be changed, as CAA records are not order sensitive + assert: + that: + - caa is not failed + - caa is not changed + + - name: Create an A record for a wildcard prefix + route53: + state: present + zone: '{{ zone_one }}' + record: '*.wildcard_test.{{ zone_one }}' + type: A + value: + - 192.0.2.1 + register: wc_a_record + - assert: + that: + - wc_a_record is not failed + - wc_a_record is changed + + - name: Create an A record for a wildcard prefix (idempotency) + route53: + state: present + zone: '{{ zone_one }}' + record: '*.wildcard_test.{{ zone_one }}' + type: A + value: + - 192.0.2.1 + register: wc_a_record + - assert: + that: + - wc_a_record is not failed + - wc_a_record is not changed + + - name: Create an A record for a wildcard prefix (change) + route53: + state: present + zone: '{{ zone_one }}' + record: '*.wildcard_test.{{ zone_one }}' + type: A + value: + - 192.0.2.2 + overwrite: true + register: wc_a_record + - assert: + that: + - wc_a_record is not failed + - wc_a_record is changed + + - name: Delete an A record for a wildcard prefix + route53: + state: absent + zone: '{{ zone_one }}' + record: '*.wildcard_test.{{ zone_one }}' + type: A + value: + - 
192.0.2.2 + register: wc_a_record + - assert: + that: + - wc_a_record is not failed + - wc_a_record is changed + - wc_a_record.diff.after == {} + + - name: create a record with different TTL + route53: + state: present + zone: '{{ zone_one }}' + record: localhost.{{ zone_one }} + type: A + value: 127.0.0.1 + ttl: 30 + register: ttl30 + - name: check return values + assert: + that: + - ttl30.diff.resource_record_sets[0].ttl == 30 + - ttl30 is changed + + - name: delete previous record without mentioning ttl and value + route53: + state: absent + zone: '{{ zone_one }}' + record: localhost.{{ zone_one }} + type: A + register: ttl30 + - name: check if record is deleted + assert: + that: + - ttl30 is changed + + - name: delete previous record again without mentioning ttl and value (idempotency) + route53: + state: absent + zone: '{{ zone_one }}' + record: localhost.{{ zone_one }} + type: A + register: ttl30 + - name: check that the repeated delete reports no change + assert: + that: + - ttl30 is not changed + + # Tests on zone two (private zone) + - name: Create A record using zone fqdn + route53: + state: present + zone: '{{ zone_two }}' + record: qdn_test.{{ zone_two }} + type: A + value: 192.0.2.1 + private_zone: true + register: qdn + - assert: + that: + - qdn is not failed + - qdn is changed + + - name: Get A record using 'get' method of route53 module + route53: + state: get + zone: '{{ zone_two }}' + record: qdn_test.{{ zone_two }} + type: A + private_zone: true + register: get_result + - assert: + that: + - get_result.nameservers|length > 0 + - get_result.set.Name == "qdn_test.{{ zone_two }}" + - get_result.set.ResourceRecords[0].Value == "192.0.2.1" + - get_result.set.Type == "A" + + - name: Get a record that does not exist + route53: + state: get + zone: '{{ zone_two }}' + record: notfound.{{ zone_two }} + type: A + private_zone: true + register: get_result + - assert: + that: + - get_result.nameservers|length > 0 + - get_result.set|length == 0 + - get_result.resource_record_sets|length == 0 + + - name: Create same A record using zone non-qualified domain + route53: + state: present + zone: '{{ zone_two[:-1] }}' + record: qdn_test.{{ zone_two[:-1] }} + type: A + value: 192.0.2.1 + private_zone: true + register: non_qdn + - assert: + that: + - non_qdn is not failed + - non_qdn is not changed + + - name: Create A record using zone ID + route53: + state: present + hosted_zone_id: '{{ z2.zone_id }}' + record: zid_test.{{ zone_two }} + type: A + value: 192.0.2.2 + private_zone: true + register: zid + - assert: + that: + - zid is not failed + - zid is changed + + - name: Create A record using zone fqdn and vpc_id + route53: + state: present + zone: '{{ zone_two }}' + record: qdn_test_vpc.{{ zone_two }} + type: A + value: 192.0.2.3 + private_zone: true + vpc_id: '{{ vpc.vpc.id }}' + register: qdn + - assert: + that: + - qdn is not failed + - qdn is changed + + - name: Create A record using zone ID and vpc_id + route53: + state: present + hosted_zone_id: '{{ z2.zone_id }}' + record: zid_test_vpc.{{ zone_two }} + type: A + value: 192.0.2.4 + private_zone: true + vpc_id: '{{ vpc.vpc.id }}' + register: zid + - assert: + that: + - zid is not failed + - zid is changed + + - name: Create an Alias record + route53: + state: present + zone: '{{ zone_one }}' + record: alias.{{ zone_one }} + type: A + alias: true + alias_hosted_zone_id: '{{ z1.zone_id }}' + value: zid_test.{{ zone_one }} + overwrite: true + register: alias_record + - name: This should be changed + assert: + that: + - alias_record is not failed + - alias_record is changed
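+    # Alias records are a Route53-specific extension: Route53 answers queries
+    # with the target record's values directly instead of returning a CNAME.
+    # Re-running the identical task below with the same alias target (value
+    # plus alias_hosted_zone_id) should therefore report no change, which is
+    # what the following assertion verifies.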
+ + - name: Re-Create an Alias record + route53: + state: present + zone: '{{ zone_one }}' + record: alias.{{ zone_one }} + type: A + alias: true + alias_hosted_zone_id: '{{ z1.zone_id }}' + value: zid_test.{{ zone_one }} + overwrite: true + register: alias_record + - name: This should not be changed + assert: + that: + - alias_record is not failed + - alias_record is not changed + + - name: Create a weighted record + route53: + state: present + zone: '{{ zone_one }}' + record: weighted.{{ zone_one }} + type: CNAME + value: zid_test.{{ zone_one }} + overwrite: true + identifier: host1@www + weight: 100 + region: '{{ omit }}' + register: weighted_record + - name: This should be changed + assert: + that: + - weighted_record is not failed + - weighted_record is changed + + - name: Re-Create a weighted record + route53: + state: present + zone: '{{ zone_one }}' + record: weighted.{{ zone_one }} + type: CNAME + value: zid_test.{{ zone_one }} + overwrite: true + identifier: host1@www + weight: 100 + region: '{{ omit }}' + register: weighted_record + - name: This should not be changed + assert: + that: + - weighted_record is not failed + - weighted_record is not changed + + - name: Create a zero weighted record + route53: + state: present + zone: '{{ zone_one }}' + record: zero_weighted.{{ zone_one }} + type: CNAME + value: zid_test.{{ zone_one }} + overwrite: true + identifier: host1@www + weight: 0 + region: '{{ omit }}' + register: weighted_record + - name: This should be changed + assert: + that: + - weighted_record is not failed + - weighted_record is changed + + - name: Re-Create a zero weighted record + route53: + state: present + zone: '{{ zone_one }}' + record: zero_weighted.{{ zone_one }} + type: CNAME + value: zid_test.{{ zone_one }} + overwrite: true + identifier: host1@www + weight: 0 + region: '{{ omit }}' + register: weighted_record + - name: This should not be changed + assert: + that: + - weighted_record is not failed + - weighted_record is not changed + +# Test Geo Location - Continent Code + - name: Create a record with geo_location - continent_code (check_mode) + route53: + state: present + zone: '{{ zone_one }}' + record: geo-test-1.{{ zone_one }} + identifier: geohost1@www + type: A + value: 127.0.0.1 + ttl: 30 + geo_location: + continent_code: NA + check_mode: true + register: create_geo_continent_check_mode + - assert: + that: + - create_geo_continent_check_mode is changed + - create_geo_continent_check_mode is not failed + - '"route53:ChangeResourceRecordSets" not in create_geo_continent_check_mode.resource_actions' + + - name: Create a record with geo_location - continent_code + route53: + state: present + zone: '{{ zone_one }}' + record: geo-test-1.{{ zone_one }} + identifier: geohost1@www + type: A + value: 127.0.0.1 + ttl: 30 + geo_location: + continent_code: NA + register: create_geo_continent + # Get resulting A record and ensure geo_location parameters are applied + - name: get Route53 A record information + route53_info: + type: A + query: record_sets + hosted_zone_id: '{{ z1.zone_id }}' + start_record_name: geo-test-1.{{ zone_one }} + max_items: 1 + register: result + + - assert: + that: + - create_geo_continent is changed + - create_geo_continent is not failed + - '"route53:ChangeResourceRecordSets" in create_geo_continent.resource_actions' + - result.ResourceRecordSets[0].GeoLocation.ContinentCode == "NA" + + - name: Create a record with geo_location - continent_code (idempotency) + route53: + state: present + zone: '{{ zone_one }}' + record: geo-test-1.{{ zone_one }} 
+ identifier: geohost1@www + type: A + value: 127.0.0.1 + ttl: 30 + geo_location: + continent_code: NA + register: create_geo_continent_idem + - assert: + that: + - create_geo_continent_idem is not changed + - create_geo_continent_idem is not failed + - '"route53:ChangeResourceRecordSets" not in create_geo_continent_idem.resource_actions' + + - name: Create a record with geo_location - continent_code (idempotency - check_mode) + route53: + state: present + zone: '{{ zone_one }}' + record: geo-test-1.{{ zone_one }} + identifier: geohost1@www + type: A + value: 127.0.0.1 + ttl: 30 + geo_location: + continent_code: NA + check_mode: true + register: create_geo_continent_idem_check + + - assert: + that: + - create_geo_continent_idem_check is not changed + - create_geo_continent_idem_check is not failed + - '"route53:ChangeResourceRecordSets" not in create_geo_continent_idem_check.resource_actions' + +# Test Geo Location - Country Code + - name: Create a record with geo_location - country_code (check_mode) + route53: + state: present + zone: '{{ zone_one }}' + record: geo-test-2.{{ zone_one }} + identifier: geohost2@www + type: A + value: 127.0.0.1 + ttl: 30 + geo_location: + country_code: US + check_mode: true + register: create_geo_country_check_mode + - assert: + that: + - create_geo_country_check_mode is changed + - create_geo_country_check_mode is not failed + - '"route53:ChangeResourceRecordSets" not in create_geo_country_check_mode.resource_actions' + + - name: Create a record with geo_location - country_code + route53: + state: present + zone: '{{ zone_one }}' + record: geo-test-2.{{ zone_one }} + identifier: geohost2@www + type: A + value: 127.0.0.1 + ttl: 30 + geo_location: + country_code: US + register: create_geo_country + # Get resulting A record and ensure geo_location parameters are applied + - name: get Route53 A record information + route53_info: + type: A + query: record_sets + hosted_zone_id: '{{ z1.zone_id }}' + start_record_name: geo-test-2.{{ zone_one }} + max_items: 1 + register: result + - assert: + that: + - create_geo_country is changed + - create_geo_country is not failed + - '"route53:ChangeResourceRecordSets" in create_geo_country.resource_actions' + - result.ResourceRecordSets[0].GeoLocation.CountryCode == "US" + + - name: Create a record with geo_location - country_code (idempotency) + route53: + state: present + zone: '{{ zone_one }}' + record: geo-test-2.{{ zone_one }} + identifier: geohost2@www + type: A + value: 127.0.0.1 + ttl: 30 + geo_location: + country_code: US + register: create_geo_country_idem + - assert: + that: + - create_geo_country_idem is not changed + - create_geo_country_idem is not failed + - '"route53:ChangeResourceRecordSets" not in create_geo_country_idem.resource_actions' + + - name: Create a record with geo_location - country_code (idempotency - check_mode) + route53: + state: present + zone: '{{ zone_one }}' + record: geo-test-2.{{ zone_one }} + identifier: geohost2@www + type: A + value: 127.0.0.1 + ttl: 30 + geo_location: + country_code: US + check_mode: true + register: create_geo_country_idem_check + + - assert: + that: + - create_geo_country_idem_check is not changed + - create_geo_country_idem_check is not failed + - '"route53:ChangeResourceRecordSets" not in create_geo_country_idem_check.resource_actions' + +# Test Geo Location - Subdivision Code + - name: Create a record with geo_location - subdivision_code (check_mode) + route53: + state: present + zone: '{{ zone_one }}' + record: geo-test-3.{{ zone_one }} + identifier: geohost3@www + 
type: A + value: 127.0.0.1 + ttl: 30 + geo_location: + country_code: US + subdivision_code: TX + check_mode: true + register: create_geo_subdivision_check_mode + - assert: + that: + - create_geo_subdivision_check_mode is changed + - create_geo_subdivision_check_mode is not failed + - '"route53:ChangeResourceRecordSets" not in create_geo_subdivision_check_mode.resource_actions' + + - name: Create a record with geo_location - subdivision_code + route53: + state: present + zone: '{{ zone_one }}' + record: geo-test-3.{{ zone_one }} + identifier: geohost3@www + type: A + value: 127.0.0.1 + ttl: 30 + geo_location: + country_code: US + subdivision_code: TX + register: create_geo_subdivision + # Get resulting A record and ensure geo_location parameters are applied + - name: get Route53 A record information + route53_info: + type: A + query: record_sets + hosted_zone_id: '{{ z1.zone_id }}' + start_record_name: geo-test-3.{{ zone_one }} + max_items: 1 + register: result + - assert: + that: + - create_geo_subdivision is changed + - create_geo_subdivision is not failed + - '"route53:ChangeResourceRecordSets" in create_geo_subdivision.resource_actions' + - result.ResourceRecordSets[0].GeoLocation.CountryCode == "US" + - result.ResourceRecordSets[0].GeoLocation.SubdivisionCode == "TX" + + - name: Create a record with geo_location - subdivision_code (idempotency) + route53: + state: present + zone: '{{ zone_one }}' + record: geo-test-3.{{ zone_one }} + identifier: geohost3@www + type: A + value: 127.0.0.1 + ttl: 30 + geo_location: + country_code: US + subdivision_code: TX + register: create_geo_subdivision_idem + - assert: + that: + - create_geo_subdivision_idem is not changed + - create_geo_subdivision_idem is not failed + - '"route53:ChangeResourceRecordSets" not in create_geo_subdivision_idem.resource_actions' + + - name: Create a record with geo_location - subdivision_code (idempotency - check_mode) + route53: + state: present + zone: '{{ zone_one }}' + record: geo-test-3.{{ zone_one }} + identifier: geohost3@www + type: A + value: 127.0.0.1 + ttl: 30 + geo_location: + country_code: US + subdivision_code: TX + check_mode: true + register: create_geo_subdivision_idem_check + + - assert: + that: + - create_geo_subdivision_idem_check is not changed + - create_geo_subdivision_idem_check is not failed + - '"route53:ChangeResourceRecordSets" not in create_geo_subdivision_idem_check.resource_actions' + +# Cleanup ----------------------------------------------------- + + always: + + - name: delete a record with geo_location - continent_code + route53: + state: absent + zone: '{{ zone_one }}' + record: geo-test-1.{{ zone_one }} + identifier: geohost1@www + type: A + value: 127.0.0.1 + ttl: 30 + geo_location: + continent_code: NA + ignore_errors: true + + - name: delete a record with geo_location - country_code + route53: + state: absent + zone: '{{ zone_one }}' + record: geo-test-2.{{ zone_one }} + identifier: geohost2@www + type: A + value: 127.0.0.1 + ttl: 30 + geo_location: + country_code: US + ignore_errors: true + + - name: delete a record with geo_location - subdivision_code + route53: + state: absent + zone: '{{ zone_one }}' + record: geo-test-3.{{ zone_one }} + identifier: geohost3@www + type: A + value: 127.0.0.1 + ttl: 30 + geo_location: + country_code: US + subdivision_code: TX + ignore_errors: true + + - route53_info: + query: record_sets + hosted_zone_id: '{{ z1.zone_id }}' + register: z1_records + + - name: Loop over A/AAAA/CNAME Alias records and delete them + route53: + state: absent + alias: true + 
alias_hosted_zone_id: '{{ item.AliasTarget.HostedZoneId }}' + zone: '{{ zone_one }}' + record: '{{ item.Name }}' + type: '{{ item.Type }}' + value: '{{ item.AliasTarget.DNSName }}' + ignore_errors: true + loop: '{{ z1_records.ResourceRecordSets | selectattr("Type", "in", ["A", "AAAA", + "CNAME", "CAA"]) | list }}' + when: + - '"AliasTarget" in item' + + - name: Loop over A/AAAA/CNAME records and delete them + route53: + state: absent + zone: '{{ zone_one }}' + record: '{{ item.Name }}' + type: '{{ item.Type }}' + value: '{{ item.ResourceRecords | map(attribute="Value") | join(",") }}' + weight: '{{ item.Weight | default(omit) }}' + identifier: '{{ item.SetIdentifier }}' + region: '{{ omit }}' + ignore_errors: true + loop: '{{ z1_records.ResourceRecordSets | selectattr("Type", "in", ["A", "AAAA", + "CNAME", "CAA"]) | list }}' + when: + - '"ResourceRecords" in item' + - '"SetIdentifier" in item' + + - name: Loop over A/AAAA/CNAME records and delete them + route53: + state: absent + zone: '{{ zone_one }}' + record: '{{ item.Name }}' + type: '{{ item.Type }}' + value: '{{ item.ResourceRecords | map(attribute="Value") | join(",") }}' + ignore_errors: true + loop: '{{ z1_records.ResourceRecordSets | selectattr("Type", "in", ["A", "AAAA", + "CNAME", "CAA"]) | list }}' + when: + - '"ResourceRecords" in item' + + - route53_info: + query: record_sets + hosted_zone_id: '{{ z2.zone_id }}' + register: z2_records + + - name: Loop over A/AAAA/CNAME Alias records and delete them + route53: + state: absent + alias: true + alias_hosted_zone_id: '{{ item.AliasTarget.HostedZoneId }}' + zone: '{{ zone_two }}' + record: '{{ item.Name }}' + type: '{{ item.Type }}' + value: '{{ item.AliasTarget.DNSName }}' + private_zone: true + ignore_errors: true + loop: '{{ z2_records.ResourceRecordSets | selectattr("Type", "in", ["A", "AAAA", + "CNAME", "CAA"]) | list }}' + when: + - '"AliasTarget" in item' + + - name: Loop over A/AAAA/CNAME records and delete them + route53: + state: absent + zone: '{{ zone_two }}' + record: '{{ item.Name }}' + type: '{{ item.Type }}' + value: '{{ item.ResourceRecords | map(attribute="Value") | join(",") }}' + identifier: '{{ item.SetIdentifier }}' + region: '{{ omit }}' + private_zone: true + ignore_errors: true + loop: '{{ z2_records.ResourceRecordSets | selectattr("Type", "in", ["A", "AAAA", + "CNAME", "CAA"]) | list }}' + when: + - '"ResourceRecords" in item' + - '"SetIdentifier" in item' + + - name: Loop over A/AAAA/CNAME records and delete them + route53: + state: absent + zone: '{{ zone_two }}' + record: '{{ item.Name }}' + type: '{{ item.Type }}' + value: '{{ item.ResourceRecords | map(attribute="Value") | join(",") }}' + private_zone: true + ignore_errors: true + loop: '{{ z2_records.ResourceRecordSets | selectattr("Type", "in", ["A", "AAAA", + "CNAME", "CAA"]) | list }}' + when: + - '"ResourceRecords" in item' + + - name: Delete test zone one {{ zone_one }} + route53_zone: + state: absent + zone: '{{ zone_one }}' + register: delete_one + ignore_errors: true + retries: 10 + until: delete_one is not failed + + - name: Delete test zone two {{ zone_two }} + route53_zone: + state: absent + zone: '{{ zone_two }}' + register: delete_two + ignore_errors: true + retries: 10 + until: delete_two is not failed + + - name: destroy VPC + ec2_vpc_net: + cidr_block: 192.0.2.0/24 + name: '{{ resource_prefix }}_vpc' + state: absent + register: remove_vpc + retries: 10 + delay: 5 + until: remove_vpc is success + ignore_errors: true diff --git 
a/ansible_collections/amazon/aws/tests/integration/targets/route53/vars/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/route53/vars/main.yml new file mode 100644 index 000000000..e69de29bb diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/aliases b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/aliases new file mode 100644 index 000000000..4ef4b2067 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/aliases @@ -0,0 +1 @@ +cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/defaults/main.yml new file mode 100644 index 000000000..769e5079d --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/defaults/main.yml @@ -0,0 +1,36 @@ +--- +# route53_health_check integration tests +# +# Module uses the following as an 'ID' +# (the real ID is automatically assigned after creation) +# - ip_address +# - fqdn +# - port +# - type +# - request_interval + +#ip_address: We allocate an EIP due to route53 restrictions +fqdn: '{{ tiny_prefix }}.route53-health.ansible.test' +fqdn_1: '{{ tiny_prefix }}-1.route53-health.ansible.test' +port: 8080 +type: 'TCP' +request_interval: 30 + +# modifiable +# - resource_path +# - string_match +# - failure_threshold + +failure_threshold: 5 +failure_threshold_updated: 1 + +# For resource_path we need an HTTP/HTTPS type check +# for string_match we need an _STR_MATCH type +type_https_match: 'HTTPS_STR_MATCH' +type_http_match: 'HTTP_STR_MATCH' +type_http: 'HTTP' +resource_path: '/health.php' +resource_path_1: '/new-health.php' +resource_path_updated: '/healthz' +string_match: 'Hello' +string_match_updated: 'Hello World' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/meta/main.yml new file mode 100644 index 000000000..1471b11f6 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_ec2_facts diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/create_multiple_health_checks.yml b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/create_multiple_health_checks.yml new file mode 100644 index 000000000..42bdb6562 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/create_multiple_health_checks.yml @@ -0,0 +1,134 @@ +--- +- block: + - name: 'Create multiple HTTP health checks with different resource_path - check_mode' + route53_health_check: + state: present + name: '{{ tiny_prefix }}-{{ item }}-test-hc-delete-if-found' + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_http }}' + resource_path: '{{ item }}' + use_unique_names: true + register: create_check + check_mode: true + with_items: + - '{{ resource_path }}' + - '{{ resource_path_1 }}' + + - name: 'Check result - Create multiple HTTP health checks - check_mode' + assert: + that: + - create_check is not failed + - create_check is changed + - '"route53:CreateHealthCheck" not in create_check.results[0].resource_actions' + - '"route53:CreateHealthCheck" not in create_check.results[1].resource_actions' + + - name: 'Create multiple HTTP 
health checks with different resource_path' + route53_health_check: + state: present + name: '{{ tiny_prefix }}-{{ item }}-test-hc-delete-if-found' + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_http }}' + resource_path: '{{ item }}' + use_unique_names: true + register: create_result + with_items: + - '{{ resource_path }}' + - '{{ resource_path_1 }}' + + - name: Get IDs for the health checks created in the above task + set_fact: + health_check_1_id: "{{ create_result.results[0].health_check.id }}" + health_check_2_id: "{{ create_result.results[1].health_check.id }}" + + - name: Get health_check 1 info + amazon.aws.route53_info: + query: health_check + health_check_id: "{{ health_check_1_id }}" + health_check_method: details + register: health_check_1_info + + - name: Get health_check 2 info + amazon.aws.route53_info: + query: health_check + health_check_id: "{{ health_check_2_id }}" + health_check_method: details + register: health_check_2_info + + - name: 'Check result - Create multiple HTTP health checks' + assert: + that: + - create_result is not failed + - create_result is changed + - '"route53:UpdateHealthCheck" not in create_result.results[0].resource_actions' + - '"route53:UpdateHealthCheck" not in create_result.results[1].resource_actions' + - health_check_1_id != health_check_2_id + - health_check_1_info.HealthCheck.HealthCheckConfig.ResourcePath == '{{ resource_path }}' + - health_check_2_info.HealthCheck.HealthCheckConfig.ResourcePath == '{{ resource_path_1 }}' + + - name: 'Create multiple HTTP health checks with different resource_path - idempotency - check_mode' + route53_health_check: + state: present + name: '{{ tiny_prefix }}-{{ item }}-test-hc-delete-if-found' + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_http }}' + resource_path: '{{ item }}' + use_unique_names: true + register: create_idem_check + check_mode: true + with_items: + - '{{ resource_path }}' + - '{{ resource_path_1 }}' + + - name: 'Check result - Create multiple HTTP health checks - idempotency - check_mode' + assert: + that: + - create_idem_check is not failed + - create_idem_check is not changed + - '"route53:CreateHealthCheck" not in create_idem_check.results[0].resource_actions' + - '"route53:CreateHealthCheck" not in create_idem_check.results[1].resource_actions' + - '"route53:UpdateHealthCheck" not in create_idem_check.results[0].resource_actions' + - '"route53:UpdateHealthCheck" not in create_idem_check.results[1].resource_actions' + + - name: 'Create multiple HTTP health checks with different resource_path - idempotency' + route53_health_check: + state: present + name: '{{ tiny_prefix }}-{{ item }}-test-hc-delete-if-found' + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_http }}' + resource_path: '{{ item }}' + use_unique_names: true + register: create_idem + with_items: + - '{{ resource_path }}' + - '{{ resource_path_1 }}' + + - name: 'Check result - Create multiple HTTP health checks - idempotency' + assert: + that: + - create_idem is not failed + - create_idem is not changed + - '"route53:CreateHealthCheck" not in create_idem.results[0].resource_actions' + - '"route53:CreateHealthCheck" not in create_idem.results[1].resource_actions' + - '"route53:UpdateHealthCheck" not in create_idem.results[0].resource_actions' + - '"route53:UpdateHealthCheck" not in create_idem.results[1].resource_actions' + + always: + # Cleanup starts here + - name: 'Delete multiple HTTP health checks with different resource_path' + 
route53_health_check: + state: absent + name: '{{ tiny_prefix }}-{{ item }}-test-hc-delete-if-found' + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_http }}' + resource_path: '{{ item }}' + use_unique_names: true + register: delete_result + with_items: + - '{{ resource_path }}' + - '{{ resource_path_1 }}' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/main.yml new file mode 100644 index 000000000..1b1ecd805 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/main.yml @@ -0,0 +1,1822 @@ +--- +# route53_health_check integration tests +# +# Module uses the following as an 'ID' +# (the real ID is automatically assigned after creation) +# - ip_address +# - fqdn +# - port +# - type (immutable) +# - request_interval (immutable) +# +# modifiable +# - resource_path +# - string_match +# - failure_threshold +# - disabled +# +- module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + # Route53 can only test against routable IPs. Request an EIP so some poor + # soul doesn't get randomly hit by our testing. + - name: Allocate an EIP we can test against + ec2_eip: + state: present + register: eip + + - set_fact: + ip_address: '{{ eip.public_ip }}' + + - name: Run tests for creating multiple health checks with name as unique identifier + include_tasks: create_multiple_health_checks.yml + + - name: Run tests for update and delete health check by ID + include_tasks: update_delete_by_id.yml + + # Minimum possible definition + - name: 'Create a TCP health check - check_mode' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type }}' + register: create_check + check_mode: true + + - name: 'Check result - Create a TCP health check - check_mode' + assert: + that: + - create_check is successful + - create_check is changed + + - name: 'Create a TCP health check' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type }}' + register: create_check + + - name: 'Check result - Create a TCP health check' + assert: + that: + - create_check is successful + - create_check is changed + - '"health_check" in create_check' + - '"id" in _health_check' + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - create_check.health_check.action == 'create' + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" not in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" not in _check_config' + - '"search_string" not in _check_config' + - _check_config.disabled == false + - _check_config.type == 'TCP' + - _check_config.failure_threshold == 3 + - _check_config.request_interval == 30 + - _check_config.ip_address == ip_address + - _check_config.port == port + vars: + _health_check: '{{ create_check.health_check }}' + _check_config: '{{ _health_check.health_check_config }}' + + - set_fact: + tcp_check_id: '{{ create_check.health_check.id }}' + + - name: 'Create a TCP health 
check - idempotency - check_mode' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type }}' + register: create_check + check_mode: true + + - name: 'Check result - Create a TCP health check - idempotency - check_mode' + assert: + that: + - create_check is successful + - create_check is not changed + + - name: 'Create a TCP health check - idempotency' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type }}' + register: create_check + + - name: 'Check result - Create a TCP health check - idempotency' + assert: + that: + - create_check is successful + - create_check is not changed + - '"health_check" in create_check' + - '"id" in create_check.health_check' + - _health_check.id == tcp_check_id + - '"id" in _health_check' + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" not in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" not in _check_config' + - '"search_string" not in _check_config' + - _check_config.disabled == false + - _check_config.type == 'TCP' + - _check_config.request_interval == 30 + - _check_config.failure_threshold == 3 + - _check_config.ip_address == ip_address + - _check_config.port == port + vars: + _health_check: '{{ create_check.health_check }}' + _check_config: '{{ _health_check.health_check_config }}' + + # Update an attribute + - name: 'Update TCP health check - set threshold - check_mode' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type }}' + failure_threshold: '{{ failure_threshold_updated }}' + register: update_threshold + check_mode: true + + - name: 'Check result - Update TCP health check - set threshold - check_mode' + assert: + that: + - update_threshold is successful + - update_threshold is changed + + - name: 'Update TCP health check - set threshold' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type }}' + failure_threshold: '{{ failure_threshold_updated }}' + register: update_threshold + + - name: 'Check result - Update TCP health check - set threshold' + assert: + that: + - update_threshold is successful + - update_threshold is changed + - '"health_check" in update_threshold' + - '"id" in _health_check' + - _health_check.id == tcp_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" not in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" not in _check_config' + - '"search_string" not in _check_config' + - _check_config.disabled == false + - _check_config.type == 'TCP' + - _check_config.request_interval == 30 + - _check_config.failure_threshold == failure_threshold_updated + - _check_config.ip_address == ip_address + - _check_config.port 
== port + vars: + _health_check: '{{ update_threshold.health_check }}' + _check_config: '{{ _health_check.health_check_config }}' + + - name: 'Update TCP health check - set threshold - idempotency - check_mode' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type }}' + failure_threshold: '{{ failure_threshold_updated }}' + register: update_threshold + check_mode: true + + - name: 'Check result - Update TCP health check - set threshold - idempotency - check_mode' + assert: + that: + - update_threshold is successful + - update_threshold is not changed + + - name: 'Update TCP health check - set threshold - idempotency' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type }}' + failure_threshold: '{{ failure_threshold_updated }}' + register: update_threshold + + - name: 'Check result - Update TCP health check - set threshold - idempotency' + assert: + that: + - update_threshold is successful + - update_threshold is not changed + - '"health_check" in update_threshold' + - '"id" in _health_check' + - _health_check.id == tcp_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" not in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" not in _check_config' + - '"search_string" not in _check_config' + - _check_config.disabled == false + - _check_config.type == 'TCP' + - _check_config.request_interval == 30 + - _check_config.failure_threshold == failure_threshold_updated + - _check_config.ip_address == ip_address + - _check_config.port == port + vars: + _health_check: '{{ update_threshold.health_check }}' + _check_config: '{{ _health_check.health_check_config }}' + + - name: 'Update TCP health check - set disabled - check_mode' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type }}' + disabled: true + register: update_disabled + check_mode: true + + - name: 'Check result - Update TCP health check - set disabled - check_mode' + assert: + that: + - update_disabled is successful + - update_disabled is changed + + - name: 'Update TCP health check - set disabled' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type }}' + disabled: true + register: update_disabled + + - name: 'Check result - Update TCP health check - set disabled' + assert: + that: + - update_disabled is successful + - update_disabled is changed + - '"health_check" in update_disabled' + - '"id" in _health_check' + - _health_check.id == tcp_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" not in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" not in _check_config' + - '"search_string" not in _check_config' + - 
_check_config.disabled == true + - _check_config.type == 'TCP' + - _check_config.request_interval == 30 + - _check_config.failure_threshold == failure_threshold_updated + - _check_config.ip_address == ip_address + - _check_config.port == port + vars: + _health_check: '{{ update_disabled.health_check }}' + _check_config: '{{ _health_check.health_check_config }}' + + - name: 'Update TCP health check - set disabled - idempotency - check_mode' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type }}' + disabled: true + register: update_disabled + check_mode: true + + - name: 'Check result - Update TCP health check - set disabled - idempotency - check_mode' + assert: + that: + - update_disabled is successful + - update_disabled is not changed + + - name: 'Update TCP health check - set disabled - idempotency' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type }}' + disabled: true + register: update_disabled + + - name: 'Check result - Update TCP health check - set disabled - idempotency' + assert: + that: + - update_disabled is successful + - update_disabled is not changed + - '"health_check" in update_disabled' + - '"id" in _health_check' + - _health_check.id == tcp_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" not in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" not in _check_config' + - '"search_string" not in _check_config' + - _check_config.disabled == true + - _check_config.type == 'TCP' + - _check_config.request_interval == 30 + - _check_config.failure_threshold == failure_threshold_updated + - _check_config.ip_address == ip_address + - _check_config.port == port + vars: + _health_check: '{{ update_disabled.health_check }}' + _check_config: '{{ _health_check.health_check_config }}' + + - name: 'Update TCP health check - set tags - check_mode' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type }}' + tags: + CamelCase: CamelCaseValue + snake_case: snake_case_value + "with space": Some value + purge_tags: false + register: update_tags + check_mode: true + + - name: 'Check result - Update TCP health check - set tags - check_mode' + assert: + that: + - update_tags is successful + - update_tags is changed + + - name: 'Update TCP health check - set tags' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type }}' + tags: + CamelCase: CamelCaseValue + snake_case: snake_case_value + "with space": Some value + purge_tags: false + register: update_tags + + - name: 'Check result - Update TCP health check - set tags' + assert: + that: + - update_tags is successful + - update_tags is changed + - '"health_check" in update_tags' + - '"id" in _health_check' + - _health_check.id == tcp_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - '"CamelCase" in _health_check.tags' + - _health_check.tags['CamelCase'] == 'CamelCaseValue' + - '"snake_case" in _health_check.tags' + - 
_health_check.tags['snake_case'] == 'snake_case_value' + - '"with space" in _health_check.tags' + - _health_check.tags['with space'] == 'Some value' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" not in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" not in _check_config' + - '"search_string" not in _check_config' + - _check_config.disabled == true + - _check_config.type == 'TCP' + - _check_config.request_interval == 30 + - _check_config.failure_threshold == failure_threshold_updated + - _check_config.ip_address == ip_address + - _check_config.port == port + vars: + _health_check: '{{ update_tags.health_check }}' + _check_config: '{{ _health_check.health_check_config }}' + + - name: 'Update TCP health check - set tags - idempotency - check_mode' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type }}' + tags: + CamelCase: CamelCaseValue + snake_case: snake_case_value + "with space": Some value + purge_tags: false + register: update_tags + check_mode: true + + - name: 'Check result - Update TCP health check - set tags - idempotency - check_mode' + assert: + that: + - update_tags is successful + - update_tags is not changed + + - name: 'Update TCP health check - set tags - idempotency' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type }}' + tags: + CamelCase: CamelCaseValue + snake_case: snake_case_value + "with space": Some value + purge_tags: false + register: update_tags + + - name: 'Check result - Update TCP health check - set tags - idempotency' + assert: + that: + - update_tags is successful + - update_tags is not changed + - '"health_check" in update_tags' + - '"id" in _health_check' + - _health_check.id == tcp_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - '"CamelCase" in _health_check.tags' + - _health_check.tags['CamelCase'] == 'CamelCaseValue' + - '"snake_case" in _health_check.tags' + - _health_check.tags['snake_case'] == 'snake_case_value' + - '"with space" in _health_check.tags' + - _health_check.tags['with space'] == 'Some value' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" not in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" not in _check_config' + - '"search_string" not in _check_config' + - _check_config.disabled == true + - _check_config.type == 'TCP' + - _check_config.request_interval == 30 + - _check_config.failure_threshold == failure_threshold_updated + - _check_config.ip_address == ip_address + - _check_config.port == port + vars: + _health_check: '{{ update_tags.health_check }}' + _check_config: '{{ _health_check.health_check_config }}' + + - name: 'Update TCP health check - add tags - check_mode' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type }}' + tags: + anotherTag: anotherValue + purge_tags: false + register: add_tags + 
check_mode: true + + - name: 'Check result - Update TCP health check - add tags - check_mode' + assert: + that: + - add_tags is successful + - add_tags is changed + + - name: 'Update TCP health check - add tags' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type }}' + tags: + anotherTag: anotherValue + purge_tags: false + register: add_tags + + - name: 'Check result - Update TCP health check - add tags' + assert: + that: + - add_tags is successful + - add_tags is changed + - '"health_check" in add_tags' + - '"id" in _health_check' + - _health_check.id == tcp_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - '"CamelCase" in _health_check.tags' + - _health_check.tags['CamelCase'] == 'CamelCaseValue' + - '"snake_case" in _health_check.tags' + - _health_check.tags['snake_case'] == 'snake_case_value' + - '"with space" in _health_check.tags' + - _health_check.tags['with space'] == 'Some value' + - '"anotherTag" in _health_check.tags' + - _health_check.tags['anotherTag'] == 'anotherValue' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" not in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" not in _check_config' + - '"search_string" not in _check_config' + - _check_config.disabled == true + - _check_config.type == 'TCP' + - _check_config.request_interval == 30 + - _check_config.failure_threshold == failure_threshold_updated + - _check_config.ip_address == ip_address + - _check_config.port == port + vars: + _health_check: '{{ add_tags.health_check }}' + _check_config: '{{ _health_check.health_check_config }}' + + - name: 'Update TCP health check - add tags - idempotency - check_mode' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type }}' + tags: + anotherTag: anotherValue + purge_tags: false + register: add_tags + check_mode: true + + - name: 'Check result - Update TCP health check - add tags - idempotency - check_mode' + assert: + that: + - add_tags is successful + - add_tags is not changed + + - name: 'Update TCP health check - add tags - idempotency' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type }}' + tags: + anotherTag: anotherValue + purge_tags: false + register: add_tags + + - name: 'Check result - Update TCP health check - add tags - idempotency' + assert: + that: + - add_tags is successful + - add_tags is not changed + - '"health_check" in add_tags' + - '"id" in _health_check' + - _health_check.id == tcp_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - '"CamelCase" in _health_check.tags' + - _health_check.tags['CamelCase'] == 'CamelCaseValue' + - '"snake_case" in _health_check.tags' + - _health_check.tags['snake_case'] == 'snake_case_value' + - '"with space" in _health_check.tags' + - _health_check.tags['with space'] == 'Some value' + - '"anotherTag" in _health_check.tags' + - _health_check.tags['anotherTag'] == 'anotherValue' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in 
_check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" not in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" not in _check_config' + - '"search_string" not in _check_config' + - _check_config.disabled == true + - _check_config.type == 'TCP' + - _check_config.request_interval == 30 + - _check_config.failure_threshold == failure_threshold_updated + - _check_config.ip_address == ip_address + - _check_config.port == port + vars: + _health_check: '{{ add_tags.health_check }}' + _check_config: '{{ _health_check.health_check_config }}' + + - name: 'Update TCP health check - purge tags - check_mode' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type }}' + tags: + anotherTag: anotherValue + purge_tags: true + register: purge_tags + check_mode: true + + - name: 'Check result - Update TCP health check - purge tags - check_mode' + assert: + that: + - purge_tags is successful + - purge_tags is changed + + - name: 'Update TCP health check - purge tags' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type }}' + tags: + anotherTag: anotherValue + purge_tags: true + register: purge_tags + + - name: 'Check result - Update TCP health check - purge tags' + assert: + that: + - purge_tags is successful + - purge_tags is changed + - '"health_check" in purge_tags' + - '"id" in _health_check' + - _health_check.id == tcp_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - '"CamelCase" not in _health_check.tags' + - '"snake_case" not in _health_check.tags' + - '"with space" not in _health_check.tags' + - '"anotherTag" in _health_check.tags' + - _health_check.tags['anotherTag'] == 'anotherValue' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" not in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" not in _check_config' + - '"search_string" not in _check_config' + - _check_config.disabled == true + - _check_config.type == 'TCP' + - _check_config.request_interval == 30 + - _check_config.failure_threshold == failure_threshold_updated + - _check_config.ip_address == ip_address + - _check_config.port == port + vars: + _health_check: '{{ purge_tags.health_check }}' + _check_config: '{{ _health_check.health_check_config }}' + + - name: 'Update TCP health check - purge tags - idempotency - check_mode' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type }}' + tags: + anotherTag: anotherValue + purge_tags: true + register: purge_tags + check_mode: true + + - name: 'Check result - Update TCP health check - purge tags - idempotency - check_mode' + assert: + that: + - purge_tags is successful + - purge_tags is not changed + + - name: 'Update TCP health check - purge tags - idempotency' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type }}' + tags: + anotherTag: anotherValue + purge_tags: true + register: purge_tags + + - name: 'Check result - Update TCP health check - purge tags - idempotency' + assert: + 
that: + - purge_tags is successful + - purge_tags is not changed + - '"health_check" in purge_tags' + - '"id" in _health_check' + - _health_check.id == tcp_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - '"CamelCase" not in _health_check.tags' + - '"snake_case" not in _health_check.tags' + - '"with space" not in _health_check.tags' + - '"anotherTag" in _health_check.tags' + - _health_check.tags['anotherTag'] == 'anotherValue' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" not in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" not in _check_config' + - '"search_string" not in _check_config' + - _check_config.disabled == true + - _check_config.type == 'TCP' + - _check_config.request_interval == 30 + - _check_config.failure_threshold == failure_threshold_updated + - _check_config.ip_address == ip_address + - _check_config.port == port + vars: + _health_check: '{{ purge_tags.health_check }}' + _check_config: '{{ _health_check.health_check_config }}' + + # Delete the check + - name: 'Delete TCP health check - check_mode' + route53_health_check: + state: absent + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type }}' + register: delete_tcp + check_mode: True + + - name: 'Check result - Delete TCP health check - check_mode' + assert: + that: + - delete_tcp is successful + - delete_tcp is changed + + - name: 'Delete TCP health check' + route53_health_check: + state: absent + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type }}' + register: delete_tcp + + - name: 'Check result - Delete TCP health check' + assert: + that: + - delete_tcp is successful + - delete_tcp is changed + + - name: 'Delete TCP health check - idempotency - check_mode' + route53_health_check: + state: absent + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type }}' + register: delete_tcp + check_mode: True + + - name: 'Check result - Delete TCP health check - idempotency - check_mode' + assert: + that: + - delete_tcp is successful + - delete_tcp is not changed + + - name: 'Delete TCP health check - idempotency' + route53_health_check: + state: absent + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type }}' + register: delete_tcp + + - name: 'Check result - Delete TCP health check - idempotency' + assert: + that: + - delete_tcp is successful + - delete_tcp is not changed + + # Create an HTTPS_STR_MATCH healthcheck so we can try out more settings + - name: 'Create a HTTPS_STR_MATCH health check - check_mode' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_https_match }}' + fqdn: '{{ fqdn }}' + request_interval: '{{ request_interval }}' + string_match: '{{ string_match }}' + register: create_match + check_mode: true + + - name: 'Check result - Create a HTTPS_STR_MATCH health check - check_mode' + assert: + that: + - create_match is successful + - create_match is changed + + - name: 'Create a HTTPS_STR_MATCH health check' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_https_match }}' + fqdn: '{{ fqdn }}' + request_interval: '{{ request_interval }}' + string_match: '{{ 
string_match }}' + register: create_match + + - name: 'Check result - Create a HTTPS_STR_MATCH health check' + assert: + that: + - create_match is successful + - create_match is changed + - '"health_check" in create_match' + - '"id" in _health_check' + - _health_check.id != tcp_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" not in _check_config' + - '"search_string" in _check_config' + - _check_config.disabled == false + - _check_config.type == 'HTTPS_STR_MATCH' + - _check_config.request_interval == request_interval + - _check_config.failure_threshold == 3 + - _check_config.fully_qualified_domain_name == fqdn + - _check_config.ip_address == ip_address + - _check_config.port == port + - _check_config.search_string == string_match + vars: + _health_check: '{{ create_match.health_check }}' + _check_config: '{{ _health_check.health_check_config }}' + + - set_fact: + match_check_id: '{{ create_match.health_check.id }}' + + - name: 'Create a HTTPS_STR_MATCH health check - idempotency - check_mode' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_https_match }}' + fqdn: '{{ fqdn }}' + request_interval: '{{ request_interval }}' + string_match: '{{ string_match }}' + register: create_match + check_mode: true + + - name: 'Check result - Create a HTTPS_STR_MATCH health check - idempotency - check_mode' + assert: + that: + - create_match is successful + - create_match is not changed + + - name: 'Create a HTTPS_STR_MATCH health check - idempotency' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_https_match }}' + fqdn: '{{ fqdn }}' + request_interval: '{{ request_interval }}' + string_match: '{{ string_match }}' + register: create_match + + - name: 'Check result - Create a HTTPS_STR_MATCH health check - idempotency' + assert: + that: + - create_match is successful + - create_match is not changed + - '"health_check" in create_match' + - '"id" in _health_check' + - _health_check.id == match_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" not in _check_config' + - '"search_string" in _check_config' + - _check_config.disabled == false + - _check_config.type == type_https_match + - _check_config.request_interval == request_interval + - _check_config.failure_threshold == 3 + - _check_config.fully_qualified_domain_name == fqdn + - _check_config.ip_address == ip_address + - _check_config.port == port + - _check_config.search_string == string_match + vars: + _health_check: '{{ create_match.health_check }}' + _check_config: '{{ _health_check.health_check_config }}' + + - name: 'Update 
HTTPS health check - set resource_path - check_mode' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_https_match }}' + fqdn: '{{ fqdn }}' + request_interval: '{{ request_interval }}' + resource_path: '{{ resource_path }}' + register: update_resource_path + check_mode: true + + - name: 'Check result - Update HTTPS health check - set resource_path - check_mode' + assert: + that: + - update_resource_path is successful + - update_resource_path is changed + + - name: 'Update HTTPS health check - set resource_path' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_https_match }}' + fqdn: '{{ fqdn }}' + request_interval: '{{ request_interval }}' + resource_path: '{{ resource_path }}' + register: update_resource_path + + - name: 'Check result - Update HTTPS health check - set resource_path' + assert: + that: + - update_resource_path is successful + - update_resource_path is changed + - '"health_check" in update_resource_path' + - '"id" in _health_check' + - _health_check.id == match_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" in _check_config' + - '"search_string" in _check_config' + - _check_config.disabled == false + - _check_config.type == type_https_match + - _check_config.request_interval == request_interval + - _check_config.failure_threshold == 3 + - _check_config.fully_qualified_domain_name == fqdn + - _check_config.ip_address == ip_address + - _check_config.port == port + - _check_config.resource_path == resource_path + - _check_config.search_string == string_match + vars: + _health_check: '{{ update_resource_path.health_check }}' + _check_config: '{{ _health_check.health_check_config }}' + + - name: 'Update HTTPS health check - set resource_path - idempotency - check_mode' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_https_match }}' + fqdn: '{{ fqdn }}' + request_interval: '{{ request_interval }}' + resource_path: '{{ resource_path }}' + register: update_resource_path + check_mode: true + + - name: 'Check result - Update HTTPS health check - set resource_path - idempotency - check_mode' + assert: + that: + - update_resource_path is successful + - update_resource_path is not changed + + - name: 'Update HTTPS health check - set resource_path - idempotency' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_https_match }}' + fqdn: '{{ fqdn }}' + request_interval: '{{ request_interval }}' + resource_path: '{{ resource_path }}' + register: update_resource_path + + - name: 'Check result - Update HTTPS health check - set resource_path - idempotency' + assert: + that: + - update_resource_path is successful + - update_resource_path is not changed + - '"health_check" in update_resource_path' + - '"id" in _health_check' + - _health_check.id == match_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - 
create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" in _check_config' + - '"search_string" in _check_config' + - _check_config.disabled == false + - _check_config.type == type_https_match + - _check_config.request_interval == request_interval + - _check_config.failure_threshold == 3 + - _check_config.fully_qualified_domain_name == fqdn + - _check_config.ip_address == ip_address + - _check_config.port == port + - _check_config.resource_path == resource_path + - _check_config.search_string == string_match + vars: + _health_check: '{{ update_resource_path.health_check }}' + _check_config: '{{ _health_check.health_check_config }}' + + - name: 'Update HTTPS health check - set string_match - check_mode' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_https_match }}' + fqdn: '{{ fqdn }}' + request_interval: '{{ request_interval }}' + string_match: '{{ string_match_updated }}' + register: update_string_match + check_mode: true + + - name: 'Check result - Update HTTPS health check - set string_match - check_mode' + assert: + that: + - update_string_match is successful + - update_string_match is changed + + - name: 'Update HTTPS health check - set string_match' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_https_match }}' + fqdn: '{{ fqdn }}' + request_interval: '{{ request_interval }}' + string_match: '{{ string_match_updated }}' + register: update_string_match + + - name: 'Check result - Update HTTPS health check - set string_match' + assert: + that: + - update_string_match is successful + - update_string_match is changed + - '"health_check" in update_string_match' + - '"id" in _health_check' + - _health_check.id == match_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" in _check_config' + - '"search_string" in _check_config' + - _check_config.disabled == false + - _check_config.type == type_https_match + - _check_config.request_interval == request_interval + - _check_config.failure_threshold == 3 + - _check_config.fully_qualified_domain_name == fqdn + - _check_config.ip_address == ip_address + - _check_config.port == port + - _check_config.resource_path == resource_path + - _check_config.search_string == string_match_updated + vars: + _health_check: '{{ update_string_match.health_check }}' + _check_config: '{{ _health_check.health_check_config }}' + + - name: 'Update HTTPS health check - set string_match - idempotency - check_mode' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_https_match }}' + fqdn: '{{ fqdn }}' + request_interval: '{{ request_interval }}' + string_match: '{{ string_match_updated }}' + register: 
update_string_match + check_mode: true + + - name: 'Check result - Update HTTPS health check - set string_match - idempotency - check_mode' + assert: + that: + - update_string_match is successful + - update_string_match is not changed + + - name: 'Update HTTPS health check - set string_match - idempotency' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_https_match }}' + fqdn: '{{ fqdn }}' + request_interval: '{{ request_interval }}' + string_match: '{{ string_match_updated }}' + register: update_string_match + + - name: 'Check result - Update HTTPS health check - set string_match - idempotency' + assert: + that: + - update_string_match is successful + - update_string_match is not changed + - '"health_check" in update_string_match' + - '"id" in _health_check' + - _health_check.id == match_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" in _check_config' + - '"search_string" in _check_config' + - _check_config.disabled == false + - _check_config.type == type_https_match + - _check_config.request_interval == request_interval + - _check_config.failure_threshold == 3 + - _check_config.fully_qualified_domain_name == fqdn + - _check_config.ip_address == ip_address + - _check_config.port == port + - _check_config.resource_path == resource_path + - _check_config.search_string == string_match_updated + vars: + _health_check: '{{ update_string_match.health_check }}' + _check_config: '{{ _health_check.health_check_config }}' + + # Test deletion + - name: 'Delete HTTPS health check - check_mode' + route53_health_check: + state: absent + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_https_match }}' + fqdn: '{{ fqdn }}' + request_interval: '{{ request_interval }}' + register: delete_match + check_mode: true + + - name: 'Check result - Delete HTTPS health check - check_mode' + assert: + that: + - delete_match is successful + - delete_match is changed + + - name: 'Delete HTTPS health check' + route53_health_check: + state: absent + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_https_match }}' + fqdn: '{{ fqdn }}' + request_interval: '{{ request_interval }}' + register: delete_match + + - name: 'Check result - Delete HTTPS health check' + assert: + that: + - delete_match is successful + - delete_match is changed + + - name: 'Delete HTTPS health check - idempotency - check_mode' + route53_health_check: + state: absent + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_https_match }}' + fqdn: '{{ fqdn }}' + request_interval: '{{ request_interval }}' + register: delete_match + check_mode: true + + - name: 'Check result - Delete HTTPS health check - idempotency - check_mode' + assert: + that: + - delete_match is successful + - delete_match is not changed + + - name: 'Delete HTTPS health check - idempotency' + route53_health_check: + state: absent + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_https_match }}' + fqdn: '{{ fqdn }}' + request_interval: '{{ request_interval }}' + register: delete_match + + - 
name: 'Check result - Delete HTTPS health check - idempotency' + assert: + that: + - delete_match is successful + - delete_match is not changed + + # Create an HTTP health check with lots of settings we can update + - name: 'Create Complex health check - check_mode' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_http_match }}' + fqdn: '{{ fqdn }}' + request_interval: '{{ request_interval }}' + string_match: '{{ string_match }}' + resource_path: '{{ resource_path }}' + failure_threshold: '{{ failure_threshold }}' + disabled: true + tags: + CamelCase: CamelCaseValue + snake_case: snake_case_value + "with space": Some value + purge_tags: false + register: create_complex + check_mode: true + + - name: 'Check result - Create Complex health check - check_mode' + assert: + that: + - create_complex is successful + - create_complex is changed + + - name: 'Create Complex health check' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_http_match }}' + fqdn: '{{ fqdn }}' + request_interval: '{{ request_interval }}' + string_match: '{{ string_match }}' + resource_path: '{{ resource_path }}' + failure_threshold: '{{ failure_threshold }}' + disabled: true + tags: + CamelCase: CamelCaseValue + snake_case: snake_case_value + "with space": Some value + purge_tags: false + register: create_complex + + - name: 'Check result - Create Complex health check' + assert: + that: + - create_complex is successful + - create_complex is changed + - '"health_check" in create_complex' + - '"id" in _health_check' + - _health_check.id != tcp_check_id + - _health_check.id != match_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - '"CamelCase" in _health_check.tags' + - _health_check.tags['CamelCase'] == 'CamelCaseValue' + - '"snake_case" in _health_check.tags' + - _health_check.tags['snake_case'] == 'snake_case_value' + - '"with space" in _health_check.tags' + - _health_check.tags['with space'] == 'Some value' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" in _check_config' + - '"search_string" in _check_config' + - _check_config.disabled == true + - _check_config.type == type_http_match + - _check_config.request_interval == request_interval + - _check_config.failure_threshold == failure_threshold + - _check_config.fully_qualified_domain_name == fqdn + - _check_config.ip_address == ip_address + - _check_config.port == port + - _check_config.resource_path == resource_path + - _check_config.search_string == string_match + vars: + _health_check: '{{ create_complex.health_check }}' + _check_config: '{{ _health_check.health_check_config }}' + + - set_fact: + complex_check_id: '{{ create_complex.health_check.id }}' + + - name: 'Create Complex health check - idempotency - check_mode' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_http_match }}' + fqdn: '{{ fqdn }}' + request_interval: '{{ request_interval }}' + string_match: '{{ string_match }}' + resource_path: '{{ resource_path }}' + failure_threshold: '{{ failure_threshold }}' + 
disabled: true + tags: + CamelCase: CamelCaseValue + snake_case: snake_case_value + "with space": Some value + purge_tags: false + register: create_complex + check_mode: true + + - name: 'Check result - Create Complex health check - idempotency - check_mode' + assert: + that: + - create_complex is successful + - create_complex is not changed + + - name: 'Create Complex health check - idempotency' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_http_match }}' + fqdn: '{{ fqdn }}' + request_interval: '{{ request_interval }}' + string_match: '{{ string_match }}' + resource_path: '{{ resource_path }}' + failure_threshold: '{{ failure_threshold }}' + disabled: true + tags: + CamelCase: CamelCaseValue + snake_case: snake_case_value + "with space": Some value + purge_tags: false + register: create_complex + + - name: 'Check result - Create Complex health check - idempotency' + assert: + that: + - create_complex is successful + - create_complex is not changed + - '"health_check" in create_complex' + - '"id" in _health_check' + - _health_check.id == complex_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - '"CamelCase" in _health_check.tags' + - _health_check.tags['CamelCase'] == 'CamelCaseValue' + - '"snake_case" in _health_check.tags' + - _health_check.tags['snake_case'] == 'snake_case_value' + - '"with space" in _health_check.tags' + - _health_check.tags['with space'] == 'Some value' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" in _check_config' + - '"search_string" in _check_config' + - _check_config.disabled == true + - _check_config.type == type_http_match + - _check_config.request_interval == request_interval + - _check_config.failure_threshold == failure_threshold + - _check_config.fully_qualified_domain_name == fqdn + - _check_config.ip_address == ip_address + - _check_config.port == port + - _check_config.resource_path == resource_path + - _check_config.search_string == string_match + vars: + _health_check: '{{ create_complex.health_check }}' + _check_config: '{{ _health_check.health_check_config }}' + + - name: 'Update Complex health check - check_mode' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_http_match }}' + fqdn: '{{ fqdn }}' + request_interval: '{{ request_interval }}' + string_match: '{{ string_match_updated }}' + resource_path: '{{ resource_path_updated }}' + failure_threshold: '{{ failure_threshold_updated }}' + register: update_complex + check_mode: true + + - name: 'Check result - Update Complex health check - check_mode' + assert: + that: + - update_complex is successful + - update_complex is changed + + - name: 'Update Complex health check' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_http_match }}' + fqdn: '{{ fqdn }}' + request_interval: '{{ request_interval }}' + string_match: '{{ string_match_updated }}' + resource_path: '{{ resource_path_updated }}' + failure_threshold: '{{ failure_threshold_updated }}' + register: update_complex + + - name: 'Check result - Update 
Complex health check' + assert: + that: + - update_complex is successful + - update_complex is changed + - '"health_check" in update_complex' + - '"id" in _health_check' + - _health_check.id == complex_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - '"CamelCase" in _health_check.tags' + - _health_check.tags['CamelCase'] == 'CamelCaseValue' + - '"snake_case" in _health_check.tags' + - _health_check.tags['snake_case'] == 'snake_case_value' + - '"with space" in _health_check.tags' + - _health_check.tags['with space'] == 'Some value' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" in _check_config' + - '"search_string" in _check_config' + - _check_config.disabled == true + - _check_config.type == type_http_match + - _check_config.request_interval == request_interval + - _check_config.failure_threshold == failure_threshold_updated + - _check_config.fully_qualified_domain_name == fqdn + - _check_config.ip_address == ip_address + - _check_config.port == port + - _check_config.resource_path == resource_path_updated + - _check_config.search_string == string_match_updated + vars: + _health_check: '{{ update_complex.health_check }}' + _check_config: '{{ _health_check.health_check_config }}' + + - name: 'Update Complex health check - idempotency - check_mode' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_http_match }}' + fqdn: '{{ fqdn }}' + request_interval: '{{ request_interval }}' + string_match: '{{ string_match_updated }}' + resource_path: '{{ resource_path_updated }}' + failure_threshold: '{{ failure_threshold_updated }}' + register: update_complex + check_mode: true + + - name: 'Check result - Update Complex health check - idempotency - check_mode' + assert: + that: + - update_complex is successful + - update_complex is not changed + + - name: 'Update Complex health check - idempotency' + route53_health_check: + state: present + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_http_match }}' + fqdn: '{{ fqdn }}' + request_interval: '{{ request_interval }}' + string_match: '{{ string_match_updated }}' + resource_path: '{{ resource_path_updated }}' + failure_threshold: '{{ failure_threshold_updated }}' + register: update_complex + + - name: 'Check result - Update Complex health check - idempotency' + assert: + that: + - update_complex is successful + - update_complex is not changed + - '"health_check" in update_complex' + - '"id" in _health_check' + - _health_check.id == complex_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - '"CamelCase" in _health_check.tags' + - _health_check.tags['CamelCase'] == 'CamelCaseValue' + - '"snake_case" in _health_check.tags' + - _health_check.tags['snake_case'] == 'snake_case_value' + - '"with space" in _health_check.tags' + - _health_check.tags['with space'] == 'Some value' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - 
'"request_interval" in _check_config' + - '"fully_qualified_domain_name" in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" in _check_config' + - '"search_string" in _check_config' + - _check_config.disabled == true + - _check_config.type == type_http_match + - _check_config.request_interval == request_interval + - _check_config.failure_threshold == failure_threshold_updated + - _check_config.fully_qualified_domain_name == fqdn + - _check_config.ip_address == ip_address + - _check_config.port == port + - _check_config.resource_path == resource_path_updated + - _check_config.search_string == string_match_updated + vars: + _health_check: '{{ update_complex.health_check }}' + _check_config: '{{ _health_check.health_check_config }}' + + - name: 'Delete Complex health check - check_mode' + route53_health_check: + state: absent + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_http_match }}' + fqdn: '{{ fqdn }}' + request_interval: '{{ request_interval }}' + register: delete_complex + check_mode: true + + - name: 'Check result - Delete Complex health check - check_mode' + assert: + that: + - delete_complex is successful + - delete_complex is changed + + - name: 'Delete Complex health check' + route53_health_check: + state: absent + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_http_match }}' + fqdn: '{{ fqdn }}' + request_interval: '{{ request_interval }}' + register: delete_complex + + - name: 'Check result - Delete Complex health check' + assert: + that: + - delete_complex is successful + - delete_complex is changed + + - name: 'Delete Complex health check - idempotency - check_mode' + route53_health_check: + state: absent + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_http_match }}' + fqdn: '{{ fqdn }}' + request_interval: '{{ request_interval }}' + register: delete_complex + check_mode: true + + - name: 'Check result - Delete Complex health check - idempotency - check_mode' + assert: + that: + - delete_complex is successful + - delete_complex is not changed + + - name: 'Delete Complex health check - idempotency' + route53_health_check: + state: absent + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_http_match }}' + fqdn: '{{ fqdn }}' + request_interval: '{{ request_interval }}' + register: delete_complex + + - name: 'Check result - Delete Complex health check - idempotency' + assert: + that: + - delete_complex is successful + - delete_complex is not changed + + # Minimum possible definition + - name: 'Create a TCP health check with latency graphs enabled' + route53_health_check: + state: present + health_check_name: '{{ tiny_prefix }}-hc-latency-graph' + use_unique_names: true + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type }}' + measure_latency: true + register: create_check + + - name: Get health check info + amazon.aws.route53_info: + query: health_check + health_check_id: "{{ create_check.health_check.id }}" + health_check_method: details + register: health_check_info + + - name: 'Check result - Create a TCP health check with latency graphs enabled' + assert: + that: + - create_check is successful + - create_check is changed + - health_check_info.health_check.health_check_config.measure_latency == true + + - pause: + seconds: 20 + + # test route53_info for health_check_method=status + - name: Get health check status + amazon.aws.route53_info: + query: health_check + health_check_id: "{{ create_check.health_check.id }}" + 
health_check_method: status + register: health_check_status_info + + - assert: + that: + - health_check_status_info is not failed + - '"health_check_observations" in health_check_status_info' + + # test route53_info for health_check_method=failure_reason + - name: Get health check failure_reason + amazon.aws.route53_info: + query: health_check + health_check_id: "{{ create_check.health_check.id }}" + health_check_method: failure_reason + register: health_check_failure_reason_info + + - assert: + that: + - health_check_failure_reason_info is not failed + - '"health_check_observations" in health_check_failure_reason_info' + + + - name: 'Update above health check to disable latency graphs - immutable, no change' + route53_health_check: + state: present + health_check_name: '{{ tiny_prefix }}-hc-latency-graph' + use_unique_names: true + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type }}' + measure_latency: false + register: update_check + + - name: 'Check result - Update TCP health check to disable latency graphs' + assert: + that: + - update_check is successful + - update_check is not changed + - health_check_info.health_check.health_check_config.measure_latency == true + + always: + + ################################################ + # TEARDOWN STARTS HERE + ################################################ + + - name: 'Delete TCP health check with latency graphs enabled' + route53_health_check: + state: absent + health_check_name: '{{ tiny_prefix }}-hc-latency-graph' + use_unique_names: true + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type }}' + measure_latency: true + ignore_errors: true + + - name: 'Delete TCP health check' + route53_health_check: + state: absent + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type }}' + ignore_errors: true + + - name: 'Delete HTTPS health check' + route53_health_check: + state: absent + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_https_match }}' + fqdn: '{{ fqdn }}' + request_interval: '{{ request_interval }}' + ignore_errors: true + + - name: 'Delete Complex health check' + route53_health_check: + state: absent + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_http_match }}' + fqdn: '{{ fqdn }}' + request_interval: '{{ request_interval }}' + ignore_errors: true + + - name: release EIP + ec2_eip: + state: absent + public_ip: '{{ ip_address }}' + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/update_delete_by_id.yml b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/update_delete_by_id.yml new file mode 100644 index 000000000..e4d242a20 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/update_delete_by_id.yml @@ -0,0 +1,303 @@ +--- +- block: + - name: 'Create HTTP health check for use in this test' + route53_health_check: + state: present + name: '{{ tiny_prefix }}-test-update-delete-by-id' + ip_address: '{{ ip_address }}' + port: '{{ port }}' + type: '{{ type_http }}' + resource_path: '{{ resource_path }}' + fqdn: '{{ fqdn }}' + use_unique_names: true + register: create_result + + - name: 'Check result - Create HTTP health check' + assert: + that: + - create_result is not failed + - create_result is changed + - '"route53:CreateHealthCheck" in create_result.resource_actions' + + - name: Get ID for health_checks created in above task + set_fact: + health_check_id: "{{ 
create_result.health_check.id }}" + + - name: Get health_check info + amazon.aws.route53_info: + query: health_check + health_check_id: "{{ health_check_id }}" + health_check_method: details + register: health_check_info + + # Update Health Check by ID Tests + - name: 'Update Health Check by ID - Update Port - check_mode' + route53_health_check: + id: "{{ health_check_id }}" + port: 8888 + register: update_result + check_mode: true + + - name: 'Check result - Update Health Check Port - check_mode' + assert: + that: + - update_result is not failed + - update_result is changed + - '"route53:UpdateHealthCheck" not in update_result.resource_actions' + + - name: 'Update Health Check by ID - Update Port' + route53_health_check: + id: "{{ health_check_id }}" + port: 8888 + register: update_result + + - name: Get health_check info + amazon.aws.route53_info: + query: health_check + health_check_id: "{{ health_check_id }}" + health_check_method: details + register: health_check_info + + - name: 'Check result - Update Health Check Port' + assert: + that: + - update_result is not failed + - update_result is changed + - health_check_info.HealthCheck.HealthCheckConfig.Port == 8888 + + + - name: 'Update Health Check by ID - Update Port - idempotency - check_mode' + route53_health_check: + id: "{{ health_check_id }}" + port: 8888 + register: update_result + check_mode: true + + - name: 'Check result - Update Health Check Port - idempotency - check_mode' + assert: + that: + - update_result is not failed + - update_result is not changed + - '"route53:UpdateHealthCheck" not in update_result.resource_actions' + + - name: 'Update Health Check by ID - Update Port - idempotency' + route53_health_check: + id: "{{ health_check_id }}" + port: 8888 + register: update_result + + - name: 'Check result - Update Health Check Port - idempotency' + assert: + that: + - update_result is not failed + - update_result is not changed + - '"route53:UpdateHealthCheck" not in update_result.resource_actions' + + ## + - name: 'Update Health Check by ID - Update IP address and FQDN - check_mode' + route53_health_check: + id: "{{ health_check_id }}" + ip_address: 1.2.3.4 + fqdn: '{{ fqdn_1 }}' + register: update_result + check_mode: true + + - name: 'Check result - Update Health Check IP address and FQDN - check_mode' + assert: + that: + - update_result is not failed + - update_result is changed + - '"route53:UpdateHealthCheck" not in update_result.resource_actions' + + - name: 'Update Health Check by ID - Update IP address and FQDN' + route53_health_check: + id: "{{ health_check_id }}" + ip_address: 1.2.3.4 + fqdn: '{{ fqdn_1 }}' + register: update_result + + - name: Get health_check info + amazon.aws.route53_info: + query: health_check + health_check_id: "{{ health_check_id }}" + health_check_method: details + register: health_check_info + + - name: 'Check result - Update Health Check IP address and FQDN' + assert: + that: + - update_result is not failed + - update_result is changed + - health_check_info.HealthCheck.HealthCheckConfig.IPAddress == '1.2.3.4' + - health_check_info.HealthCheck.HealthCheckConfig.FullyQualifiedDomainName == "{{ fqdn_1 }}" + + + - name: 'Update Health Check by ID - Update IP address and FQDN - idempotency - check_mode' + route53_health_check: + id: "{{ health_check_id }}" + ip_address: 1.2.3.4 + fqdn: '{{ fqdn_1 }}' + register: update_result + check_mode: true + + - name: 'Check result - Update Health Check IP address and FQDN - idempotency - check_mode' + assert: + that: + - update_result is not failed + 
- update_result is not changed + - '"route53:UpdateHealthCheck" not in update_result.resource_actions' + + - name: 'Update Health Check by ID - Update IP address and FQDN - idempotency' + route53_health_check: + id: "{{ health_check_id }}" + ip_address: 1.2.3.4 + fqdn: '{{ fqdn_1 }}' + register: update_result + + - name: 'Check result - Update Health Check IP address and FQDN - idempotency' + assert: + that: + - update_result is not failed + - update_result is not changed + - '"route53:UpdateHealthCheck" not in update_result.resource_actions' + + # Update Health Check (Port) by name + + - name: 'Update Health Check by name - Update Port - check_mode' + route53_health_check: + state: present + port: 8080 + type: '{{ type_http }}' + fqdn: '{{ fqdn }}' + health_check_name: '{{ tiny_prefix }}-test-update-delete-by-id' + use_unique_names: true + register: update_result + check_mode: true + + - name: 'Check result - Update Health Check Port - check_mode' + assert: + that: + - update_result is not failed + - update_result is changed + - '"route53:UpdateHealthCheck" not in update_result.resource_actions' + + - name: 'Update Health Check by name - Update Port' + route53_health_check: + state: present + port: 8080 + type: '{{ type_http }}' + fqdn: '{{ fqdn }}' + health_check_name: '{{ tiny_prefix }}-test-update-delete-by-id' + use_unique_names: true + register: update_result + + - name: Get health_check info + amazon.aws.route53_info: + query: health_check + health_check_id: "{{ health_check_id }}" + health_check_method: details + register: health_check_info + + - name: 'Check result - Update Health Check Port' + assert: + that: + - update_result is not failed + - update_result is changed + - health_check_info.HealthCheck.HealthCheckConfig.Port == 8080 + + - name: 'Update Health Check by name - Update Port - idempotency - check_mode' + route53_health_check: + state: present + port: 8080 + type: '{{ type_http }}' + fqdn: '{{ fqdn }}' + health_check_name: '{{ tiny_prefix }}-test-update-delete-by-id' + use_unique_names: true + register: update_result + check_mode: true + + - name: 'Check result - Update Health Check Port - idempotency - check_mode' + assert: + that: + - update_result is not failed + - update_result is not changed + - '"route53:UpdateHealthCheck" not in update_result.resource_actions' + + - name: 'Update Health Check by name - Update Port - idempotency' + route53_health_check: + state: present + port: 8080 + type: '{{ type_http }}' + fqdn: '{{ fqdn }}' + health_check_name: '{{ tiny_prefix }}-test-update-delete-by-id' + use_unique_names: true + register: update_result + + - name: 'Check result - Update Health Check Port - idempotency' + assert: + that: + - update_result is not failed + - update_result is not changed + - '"route53:UpdateHealthCheck" not in update_result.resource_actions' + + # Delete Health Check by ID Tests + - name: Delete Health check by ID - check_mode + route53_health_check: + state: absent + id: "{{ health_check_id }}" + register: delete_result + check_mode: true + + - name: 'Check result - Delete Health Check by ID - check_mode' + assert: + that: + - delete_result is not failed + - delete_result is changed + - '"route53:DeleteHealthCheck" not in delete_result.resource_actions' + + - name: Delete Health check by ID + route53_health_check: + state: absent + id: "{{ health_check_id }}" + register: delete_result + + - name: 'Check result - Delete Health Check by ID' + assert: + that: + - delete_result is not failed + - delete_result is changed + -
'"route53:DeleteHealthCheck" in delete_result.resource_actions' + + - name: Delete Health check by ID - idempotency - check_mode + route53_health_check: + state: absent + id: "{{ health_check_id }}" + register: delete_result + check_mode: true + + - name: 'Check result - Delete Health Check by ID -idempotency -check_mode' + assert: + that: + - delete_result is not failed + - delete_result is not changed + - '"route53:DeleteHealthCheck" not in delete_result.resource_actions' + + - name: Delete Health check by ID - idempotency + route53_health_check: + state: absent + id: "{{ health_check_id }}" + register: delete_result + + - name: 'Check result - Delete Health Check by ID -idempotency' + assert: + that: + - delete_result is not failed + - delete_result is not changed + - '"route53:DeleteHealthCheck" not in delete_result.resource_actions' + + # cleanup + always: + - name: Delete Health check by ID + route53_health_check: + state: absent + id: "{{ health_check_id }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53_zone/aliases b/ansible_collections/amazon/aws/tests/integration/targets/route53_zone/aliases new file mode 100644 index 000000000..4ef4b2067 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/route53_zone/aliases @@ -0,0 +1 @@ +cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53_zone/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/route53_zone/meta/main.yml new file mode 100644 index 000000000..32cf5dda7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/route53_zone/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53_zone/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/route53_zone/tasks/main.yml new file mode 100644 index 000000000..4aea981d0 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/route53_zone/tasks/main.yml @@ -0,0 +1,617 @@ +--- +- name: 'route53_zone integration tests' + collections: + - amazon.aws + module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + block: + + # ============================================================ + + - name: Create VPC for use in testing + ec2_vpc_net: + name: "{{ resource_prefix }}-vpc" + cidr_block: 10.22.32.0/23 + tags: + Name: Ansible ec2_instance Testing VPC + tenancy: default + register: testing_vpc + + # ============================================================ + - name: Create a public zone + route53_zone: + zone: "{{ resource_prefix }}.public" + comment: original comment + state: present + tags: + TestTag: "{{ resource_prefix }}" + another_tag: "{{ resource_prefix }} again" + register: output + + - assert: + that: + - output.changed + - output.comment == 'original comment' + - output.name == '{{ resource_prefix }}.public.' 
+ - output.tags.TestTag == '{{ resource_prefix }}' + - output.tags.another_tag == '{{ resource_prefix }} again' + - not output.private_zone + + # ============================================================ + - name: Create a public zone (CHECK MODE) + route53_zone: + zone: "{{ resource_prefix }}.check.public" + comment: original comment + state: present + tags: + TestTag: "{{ resource_prefix }}" + another_tag: "{{ resource_prefix }} again" + register: output + check_mode: yes + + - assert: + that: + - output.changed + - output.comment == 'original comment' + - output.name == '{{ resource_prefix }}.check.public.' + - output.tags.TestTag == '{{ resource_prefix }}' + - output.tags.another_tag == '{{ resource_prefix }} again' + - not output.private_zone + + # ============================================================ + - name: Do an idempotent update of a public zone + route53_zone: + zone: "{{ resource_prefix }}.public" + comment: original comment + state: present + tags: + TestTag: "{{ resource_prefix }}" + another_tag: "{{ resource_prefix }} again" + register: output + + - assert: + that: + - not output.changed + - output.comment == 'original comment' + - output.name == '{{ resource_prefix }}.public.' + - output.tags.TestTag == '{{ resource_prefix }}' + - output.tags.another_tag == '{{ resource_prefix }} again' + - not output.private_zone + + - name: Do an idempotent update of a public zone (CHECK MODE) + route53_zone: + zone: "{{ resource_prefix }}.public" + comment: original comment + state: present + tags: + TestTag: "{{ resource_prefix }}" + another_tag: "{{ resource_prefix }} again" + register: output + check_mode: yes + + - assert: + that: + - not output.changed + - output.comment == 'original comment' + - output.name == '{{ resource_prefix }}.public.'
+ - output.tags.TestTag == '{{ resource_prefix }}' + - output.tags.another_tag == '{{ resource_prefix }} again' + - not output.private_zone + + # ============================================================ + - name: Modify tags on a public zone + route53_zone: + zone: "{{ resource_prefix }}.public" + comment: original comment + state: present + tags: + AnotherTag: "{{ resource_prefix }}.anothertag" + purge_tags: true + register: output + + - assert: + that: + - output.changed + - "'TestTag' not in output.tags" + - output.tags.AnotherTag == '{{ resource_prefix }}.anothertag' + + # ============================================================ + - name: Update comment and remove tags of a public zone + route53_zone: + zone: "{{ resource_prefix }}.public" + comment: updated comment + state: present + purge_tags: true + tags: {} + register: output + + - assert: + that: + - output.changed + - output.result.comment == "updated comment" + - not output.tags + + - name: Update comment and remove tags of a public zone (CHECK MODE) + route53_zone: + zone: "{{ resource_prefix }}.public" + comment: updated comment for check + state: present + purge_tags: true + tags: {} + register: output + check_mode: yes + + - assert: + that: + - output.changed + - output.result.comment == "updated comment for check" + - not output.tags + + # ============================================================ + - name: Delete public zone (CHECK MODE) + route53_zone: + zone: "{{ resource_prefix }}.public" + state: absent + register: output + check_mode: yes + + - assert: + that: + - output.changed + - "'Successfully deleted' in output.result" + + - name: Delete public zone + route53_zone: + zone: "{{ resource_prefix }}.public" + state: absent + register: output + + - assert: + that: + - output.changed + - "'Successfully deleted' in output.result" + + # ============================================================ + - name: Create a private zone (CHECK MODE) + route53_zone: + vpc_id: "{{ testing_vpc.vpc.id }}" + vpc_region: "{{ aws_region }}" + zone: "{{ resource_prefix }}.private" + comment: original comment + state: present + register: output + check_mode: yes + + - assert: + that: + - output.changed + + - name: Create a private zone + route53_zone: + vpc_id: "{{ testing_vpc.vpc.id }}" + vpc_region: "{{ aws_region }}" + zone: "{{ resource_prefix }}.private" + comment: original comment + state: present + register: output + + - assert: + that: + - output.changed + # ============================================================ + - name: Idempotent update of a private zone + route53_zone: + vpc_id: "{{ testing_vpc.vpc.id }}" + vpc_region: "{{ aws_region }}" + zone: "{{ resource_prefix }}.private" + comment: original comment + state: present + register: output + + - assert: + that: + - not output.changed + - "'There is already a private hosted zone in the same region with the same VPC' in output.msg" + + - name: Idempotent update of a private zone (CHECK MODE) + route53_zone: + vpc_id: "{{ testing_vpc.vpc.id }}" + vpc_region: "{{ aws_region }}" + zone: "{{ resource_prefix }}.private" + comment: original comment + state: present + register: output + check_mode: yes + + - assert: + that: + - not output.changed + - "'There is already a private hosted zone in the same region with the same VPC' in output.msg" + + # ============================================================ + - name: Update private zone comment + route53_zone: + vpc_id: "{{ testing_vpc.vpc.id }}" + vpc_region: "{{ aws_region }}" + zone: "{{ resource_prefix }}.private" +
comment: updated_comment + state: present + register: output + + - assert: + that: + - output.changed + - output.result.comment == "updated_comment" + + - name: Update private zone comment (CHECK MODE) + route53_zone: + vpc_id: "{{ testing_vpc.vpc.id }}" + vpc_region: "{{ aws_region }}" + zone: "{{ resource_prefix }}.private" + comment: updated_comment check + state: present + register: output + check_mode: yes + + - assert: + that: + - output.changed + - output.result.comment == "updated_comment check" + + # ============================================================ + - name: Try to delete private zone without setting vpc_id and vpc_region + route53_zone: + zone: "{{ resource_prefix }}.private" + state: absent + register: output + + - assert: + that: + - not output.changed + - "output.result == 'No zone to delete.'" + + - name: Try to delete private zone without setting vpc_id and vpc_region (CHECK MODE) + route53_zone: + zone: "{{ resource_prefix }}.private" + state: absent + register: output + check_mode: yes + + - assert: + that: + - not output.changed + - "output.result == 'No zone to delete.'" + + # ============================================================ + - name: Try to delete a public zone that does not exist + route53_zone: + zone: "{{ resource_prefix }}.publicfake" + comment: original comment + state: absent + register: output + + - assert: + that: + - not output.changed + - "output.result == 'No zone to delete.'" + + - name: Try to delete a public zone that does not exist (CHECK MODE) + route53_zone: + zone: "{{ resource_prefix }}.publicfake" + comment: original comment + state: absent + register: output + check_mode: yes + + - assert: + that: + - not output.changed + - "output.result == 'No zone to delete.'" + + # ============================================================ + - name: Delete private zone (CHECK MODE) + route53_zone: + vpc_id: "{{ testing_vpc.vpc.id }}" + vpc_region: "{{ aws_region }}" + zone: "{{ resource_prefix }}.private" + state: absent + register: output + check_mode: yes + + - assert: + that: + - output.changed + - "'Successfully deleted' in output.result" + + - name: Delete private zone + route53_zone: + vpc_id: "{{ testing_vpc.vpc.id }}" + vpc_region: "{{ aws_region }}" + zone: "{{ resource_prefix }}.private" + state: absent + register: output + + - assert: + that: + - output.changed + - "'Successfully deleted' in output.result" + + # ============================================================ + - name: Create a private zone (new format) (CHECK MODE) + route53_zone: + vpcs: + - id: "{{ testing_vpc.vpc.id }}" + region: "{{ aws_region }}" + zone: "{{ resource_prefix }}.private" + comment: original comment + state: present + register: output + check_mode: yes + + - assert: + that: + - output.changed + + - name: Create a private zone (new format) + route53_zone: + vpcs: + - id: "{{ testing_vpc.vpc.id }}" + region: "{{ aws_region }}" + zone: "{{ resource_prefix }}.private" + comment: original comment + state: present + register: output + + - assert: + that: + - output.changed + + # ============================================================ + - name: Idempotent update of a private zone (new format) (CHECK MODE) + route53_zone: + vpcs: + - id: "{{ testing_vpc.vpc.id }}" + region: "{{ aws_region }}" + zone: "{{ resource_prefix }}.private" + comment: original comment + state: present + register: output + check_mode: yes + + - assert: + that: + - not output.changed + - "'There is already a private hosted zone in the same region with the same VPC' in
output.msg" + + - name: Idemptotent update a private zone (new format) + route53_zone: + vpcs: + - id: "{{ testing_vpc.vpc.id }}" + region: "{{ aws_region }}" + zone: "{{ resource_prefix }}.private" + comment: original comment + state: present + register: output + + - assert: + that: + - not output.changed + - "'There is already a private hosted zone in the same region with the same VPC' in output.msg" + + # ============================================================ + - name: Update a private zone comment (new format) (CHECK MODE) + route53_zone: + vpcs: + - id: "{{ testing_vpc.vpc.id }}" + region: "{{ aws_region }}" + zone: "{{ resource_prefix }}.private" + comment: new comment + state: present + register: output + check_mode: yes + + - assert: + that: + - output.changed + + - name: Update a private zone comment (new format) + route53_zone: + vpcs: + - id: "{{ testing_vpc.vpc.id }}" + region: "{{ aws_region }}" + zone: "{{ resource_prefix }}.private" + comment: new comment + state: present + register: output + + - assert: + that: + - output.changed + + # ============================================================ + - name: Delete private zone (new format) (CHECK MODE) + route53_zone: + vpcs: + - id: "{{ testing_vpc.vpc.id }}" + region: "{{ aws_region }}" + zone: "{{ resource_prefix }}.private" + state: absent + register: output + check_mode: yes + + - assert: + that: + - output.changed + - "'Successfully deleted' in output.result" + + - name: Delete private zone (new format) + route53_zone: + vpcs: + - id: "{{ testing_vpc.vpc.id }}" + region: "{{ aws_region }}" + zone: "{{ resource_prefix }}.private" + state: absent + register: output + + # ============================================================ + - block: + - name: Create second VPC for use in testing + ec2_vpc_net: + name: "{{ resource_prefix }}-vpc2" + cidr_block: 10.22.34.0/23 + tags: + Name: Ansible ec2_instance Testing VPC 2 + tenancy: default + register: second_testing_vpc + + - name: Create a private zone with multiple VPCs (CHECK MODE) + route53_zone: + vpcs: + - id: "{{ testing_vpc.vpc.id }}" + region: "{{ aws_region }}" + - id: "{{ second_testing_vpc.vpc.id }}" + region: "{{ aws_region }}" + zone: "{{ resource_prefix }}.private" + comment: original comment + state: present + register: output + check_mode: yes + + - assert: + that: + - output.changed + + - name: Create a private zone with multiple VPCs + route53_zone: + vpcs: + - id: "{{ testing_vpc.vpc.id }}" + region: "{{ aws_region }}" + - id: "{{ second_testing_vpc.vpc.id }}" + region: "{{ aws_region }}" + zone: "{{ resource_prefix }}.private" + comment: original comment + state: present + register: output + + - assert: + that: + - output.changed + - output.vpc_id == testing_vpc.vpc.id # The first one for backwards compatibility + - output.vpc_region == aws_region + - (output.vpcs | length) == 2 + - output.vpcs.1.id == second_testing_vpc.vpc.id + - output.vpcs.1.region == aws_region + + # ============================================================ + - name: Delete private zone with multiple VPCs (CHECK MODE) + route53_zone: + vpcs: + - id: "{{ testing_vpc.vpc.id }}" + region: "{{ aws_region }}" + - id: "{{ second_testing_vpc.vpc.id }}" + region: "{{ aws_region }}" + zone: "{{ resource_prefix }}.private" + state: absent + register: output + check_mode: yes + + - assert: + that: + - output.changed + - "'Successfully deleted' in output.result" + + - name: Delete private zone with multiple VPCs + route53_zone: + vpcs: + - id: "{{ testing_vpc.vpc.id }}" + region: "{{ 
aws_region }}" + - id: "{{ second_testing_vpc.vpc.id }}" + region: "{{ aws_region }}" + zone: "{{ resource_prefix }}.private" + state: absent + register: output + + - assert: + that: + - output.changed + - "'Successfully deleted' in output.result" + + always: + - name: Delete second VPC for use in testing + ec2_vpc_net: + name: "{{ resource_prefix }}-vpc2" + cidr_block: 10.22.34.0/23 + state: absent + + # ============================================================ + - name: Create a public zone + route53_zone: + zone: "{{ resource_prefix }}.public2" + comment: this is an example + state: present + register: new_zone + + # Delete zone using its id + - name: Delete zone using attribute hosted_zone_id (CHECK MODE) + route53_zone: + zone: "{{ resource_prefix }}.public2" + hosted_zone_id: "{{new_zone.zone_id}}" + state: absent + register: output + check_mode: yes + + - assert: + that: + - output.changed + - "'Successfully deleted' in output.result" + + - name: Delete zone using attribute hosted_zone_id + route53_zone: + zone: "{{ resource_prefix }}.public2" + hosted_zone_id: "{{new_zone.zone_id}}" + state: absent + register: output + + - assert: + that: + - output.changed + - "'Successfully deleted' in output.result" + + # ============================================================ + always: + - name: Ensure public zone is deleted + route53_zone: + zone: "{{ item }}" + state: absent + register: removed + until: removed is not failed + ignore_errors: yes + retries: 10 + with_items: + - "{{ resource_prefix }}.public" + - "{{ resource_prefix }}.public2" + + - name: Ensure private zone is deleted + route53_zone: + vpc_id: "{{ testing_vpc.vpc.id }}" + vpc_region: "{{ aws_region }}" + zone: "{{ resource_prefix }}.private" + state: absent + register: removed + until: removed is not failed + ignore_errors: yes + retries: 10 + + - name: remove the VPC + ec2_vpc_net: + name: "{{ resource_prefix }}-vpc" + cidr_block: 10.22.32.0/23 + state: absent + register: removed + until: removed is not failed + ignore_errors: yes + retries: 10 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/aliases b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/aliases new file mode 100644 index 000000000..4ef4b2067 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/aliases @@ -0,0 +1 @@ +cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/inventory b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/inventory new file mode 100644 index 000000000..db31e4a9b --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/inventory @@ -0,0 +1,17 @@ +[tests] +ownership_controls +missing +simple +complex +dotted +tags +encryption_kms +encryption_bucket_key +encryption_sse +public_access +acl +object_lock + +[all:vars] +ansible_connection=local +ansible_python_interpreter="{{ ansible_playbook_python }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/main.yml new file mode 100644 index 000000000..22fc0d64f --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/main.yml @@ -0,0 +1,12 @@ +--- +# Beware: most of our tests here are run in parallel. 
+# To add new tests you'll need to add a new host to the inventory and a matching +# '{{ inventory_hostname }}'.yml file in roles/s3_bucket/tasks/ + +# VPC should get cleaned up once all hosts have run +- hosts: all + gather_facts: no + strategy: free + #serial: 10 + roles: + - s3_bucket diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/meta/main.yml new file mode 100644 index 000000000..67c81ac7f --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - role: setup_botocore_pip diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/defaults/main.yml new file mode 100644 index 000000000..ef5c13907 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/defaults/main.yml @@ -0,0 +1,2 @@ +--- +bucket_name: '{{ resource_prefix }}' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/meta/main.yml new file mode 100644 index 000000000..32cf5dda7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/acl.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/acl.yml new file mode 100644 index 000000000..f924af173 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/acl.yml @@ -0,0 +1,68 @@ +--- +- module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - set_fact: + local_bucket_name: "{{ bucket_name | hash('md5')}}acl" + + - name: 'Create a simple bucket' + s3_bucket: + name: '{{ local_bucket_name }}' + object_ownership: BucketOwnerPreferred + public_access: + block_public_acls: true + block_public_policy: true + ignore_public_acls: true + restrict_public_buckets: true + + - name: 'Update bucket ACL, new value = private' + s3_bucket: + name: '{{ local_bucket_name }}' + acl: private + state: present + register: private_acl + + - assert: + that: + - private_acl.changed + + - name: 'Update bucket ACL, new value = public-read' + s3_bucket: + name: '{{ local_bucket_name }}' + acl: public-read + state: present + ignore_errors: true + register: public_read_acl + + - assert: + that: + - public_read_acl is failed + + - name: 'Update bucket ACL, new value = public-read' + s3_bucket: + name: '{{ local_bucket_name }}' + acl: public-read + state: present + public_access: + block_public_acls: false + block_public_policy: true + ignore_public_acls: true + restrict_public_buckets: true + ignore_errors: true + register: public_read_acl + + - assert: + that: + - public_read_acl.changed + + # ============================================================ + always: + - name: Ensure all buckets are deleted + s3_bucket: + name: '{{ local_bucket_name }}' + state: absent + ignore_errors: yes diff --git 
a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/complex.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/complex.yml new file mode 100644 index 000000000..8b8a8bdca --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/complex.yml @@ -0,0 +1,150 @@ +--- +- block: + - set_fact: + local_bucket_name: "{{ bucket_name | hash('md5')}}complex" + - name: 'Create more complex s3_bucket' + s3_bucket: + name: "{{ local_bucket_name }}" + state: present + policy: "{{ lookup('template','policy.json') }}" + requester_pays: yes + versioning: yes + public_access: + block_public_acls: false + tags: + example: tag1 + another: tag2 + register: output + + - assert: + that: + - output is changed + - output.name == '{{ local_bucket_name }}' + - output.requester_pays + - output.versioning.MfaDelete == 'Disabled' + - output.versioning.Versioning == 'Enabled' + - output.tags.example == 'tag1' + - output.tags.another == 'tag2' + - output.policy.Statement[0].Action == 's3:GetObject' + - output.policy.Statement[0].Effect == 'Allow' + - output.policy.Statement[0].Principal == '*' + - output.policy.Statement[0].Resource == 'arn:aws:s3:::{{ local_bucket_name }}/*' + - output.policy.Statement[0].Sid == 'AddPerm' + + # ============================================================ + + - name: 'Pause to help with s3 bucket eventual consistency' + wait_for: + timeout: 10 + delegate_to: localhost + + - name: 'Try to update the same complex s3_bucket' + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + policy: "{{ lookup('template','policy.json') }}" + requester_pays: yes + versioning: yes + tags: + example: tag1 + another: tag2 + register: output + + - assert: + that: + - output is not changed + - output.name == '{{ local_bucket_name }}' + - output.requester_pays + - output.versioning.MfaDelete == 'Disabled' + - output.versioning.Versioning == 'Enabled' + - output.tags.example == 'tag1' + - output.tags.another == 'tag2' + - output.policy.Statement[0].Action == 's3:GetObject' + - output.policy.Statement[0].Effect == 'Allow' + - output.policy.Statement[0].Principal == '*' + - output.policy.Statement[0].Resource == 'arn:aws:s3:::{{ local_bucket_name }}/*' + - output.policy.Statement[0].Sid == 'AddPerm' + + # ============================================================ + - name: 'Update bucket policy on complex bucket' + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + policy: "{{ lookup('template','policy-updated.json') }}" + requester_pays: yes + versioning: yes + tags: + example: tag1 + another: tag2 + register: output + + - assert: + that: + - output is changed + - output.policy.Statement[0].Action == 's3:GetObject' + - output.policy.Statement[0].Effect == 'Deny' + - output.policy.Statement[0].Principal.AWS == '*' + - output.policy.Statement[0].Resource == 'arn:aws:s3:::{{ local_bucket_name }}/*' + - output.policy.Statement[0].Sid == 'AddPerm' + + # ============================================================ + + - name: 'Pause to help with s3 bucket eventual consistency' + wait_for: + timeout: 10 + delegate_to: localhost + + - name: Update attributes for s3_bucket + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + policy: "{{ lookup('template','policy.json') }}" + requester_pays: no + versioning: no + tags: + example: tag1-updated + another: tag2 + register: output + + - assert: + that: + - output is changed + - output.name == '{{ 
local_bucket_name }}' + - not output.requester_pays + - output.versioning.MfaDelete == 'Disabled' + - output.versioning.Versioning in ['Suspended', 'Disabled'] + - output.tags.example == 'tag1-updated' + - output.tags.another == 'tag2' + - output.policy.Statement[0].Action == 's3:GetObject' + - output.policy.Statement[0].Effect == 'Allow' + - output.policy.Statement[0].Principal == '*' + - output.policy.Statement[0].Resource == 'arn:aws:s3:::{{ local_bucket_name }}/*' + - output.policy.Statement[0].Sid == 'AddPerm' + + - name: 'Delete complex test bucket' + s3_bucket: + name: '{{ local_bucket_name }}' + state: absent + register: output + + - assert: + that: + - output is changed + + - name: 'Re-delete complex test bucket' + s3_bucket: + name: '{{ local_bucket_name }}' + state: absent + register: output + + - assert: + that: + - output is not changed + + # ============================================================ + always: + - name: 'Ensure all buckets are deleted' + s3_bucket: + name: '{{ local_bucket_name }}' + state: absent + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/dotted.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/dotted.yml new file mode 100644 index 000000000..1461b51bc --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/dotted.yml @@ -0,0 +1,55 @@ +--- +- block: + - name: 'Ensure bucket_name contains a .' + set_fact: + local_bucket_name: "{{ bucket_name | hash('md5')}}.dotted" + + + # ============================================================ + # + - name: 'Create bucket with dot in name' + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + register: output + + - assert: + that: + - output is changed + - output.name == '{{ local_bucket_name }}' + + + # ============================================================ + + - name: 'Pause to help with s3 bucket eventual consistency' + wait_for: + timeout: 10 + delegate_to: localhost + + - name: 'Delete s3_bucket with dot in name' + s3_bucket: + name: '{{ local_bucket_name }}' + state: absent + register: output + + - assert: + that: + - output is changed + + - name: 'Re-delete s3_bucket with dot in name' + s3_bucket: + name: '{{ local_bucket_name }}' + state: absent + register: output + + - assert: + that: + - output is not changed + + # ============================================================ + always: + - name: 'Ensure all buckets are deleted' + s3_bucket: + name: '{{ local_bucket_name }}' + state: absent + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_bucket_key.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_bucket_key.yml new file mode 100644 index 000000000..66a54c1e0 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_bucket_key.yml @@ -0,0 +1,103 @@ +--- +- module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - name: Set facts for encryption_bucket_key test + set_fact: + local_bucket_name: "{{ bucket_name | hash('md5') }}-bucket-key" + # ============================================================ + + - name: "Create a simple bucket" + s3_bucket: + name: "{{ 
local_bucket_name }}" + state: present + register: output + + - name: "Enable aws:kms encryption with KMS master key" + s3_bucket: + name: "{{ local_bucket_name }}" + state: present + encryption: "aws:kms" + register: output + + - name: "Enable bucket key for bucket with aws:kms encryption" + s3_bucket: + name: "{{ local_bucket_name }}" + state: present + encryption: "aws:kms" + bucket_key_enabled: true + register: output + + - name: "Assert for 'Enable bucket key for bucket with aws:kms encryption'" + assert: + that: + - output.changed + - output.encryption + + - name: "Re-enable bucket key for bucket with aws:kms encryption (idempotent)" + s3_bucket: + name: "{{ local_bucket_name }}" + encryption: "aws:kms" + bucket_key_enabled: true + register: output + + - name: "Assert for 'Re-enable bucket key for bucket with aws:kms encryption (idempotent)'" + assert: + that: + - not output.changed + - output.encryption + + ## # ============================================================ + ## + ## AWS S3 no longer supports disabling S3 encryption + ## https://docs.aws.amazon.com/AmazonS3/latest/userguide/default-encryption-faq.html + ## + ## - name: Disable encryption from bucket + ## s3_bucket: + ## name: "{{ local_bucket_name }}" + ## encryption: none + ## bucket_key_enabled: false + ## register: output + ## + ## - name: Assert for 'Disable encryption from bucket' + ## assert: + ## that: + ## - output.changed + ## - not output.encryption + ## + ## - name: Disable encryption from bucket (idempotent) + ## s3_bucket: + ## name: "{{ local_bucket_name }}" + ## bucket_key_enabled: true + ## register: output + ## + ## - name: Assert for 'Disable encryption from bucket (idempotent)' + ## assert: + ## that: + ## - output is not changed + ## - not output.encryption + ## + ## # ============================================================ + + - name: Delete encryption test s3 bucket + s3_bucket: + name: "{{ local_bucket_name }}" + state: absent + register: output + + - name: Assert for 'Delete encryption test s3 bucket' + assert: + that: + - output.changed + + # ============================================================ + always: + - name: Ensure all buckets are deleted + s3_bucket: + name: "{{ local_bucket_name }}" + state: absent + failed_when: false diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_kms.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_kms.yml new file mode 100644 index 000000000..75cdb4c6f --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_kms.yml @@ -0,0 +1,92 @@ +--- +- module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - set_fact: + local_bucket_name: "{{ bucket_name | hash('md5')}}e-kms" + # ============================================================ + + - name: 'Create a simple bucket' + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + register: output + + - name: 'Enable aws:kms encryption with KMS master key' + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + encryption: "aws:kms" + register: output + + - assert: + that: + - output.changed + - output.encryption + - output.encryption.SSEAlgorithm == 'aws:kms' + + - name: 'Re-enable aws:kms encryption with KMS master key (idempotent)' + s3_bucket: + name: '{{ 
local_bucket_name }}' + state: present + encryption: "aws:kms" + register: output + + - assert: + that: + - not output.changed + - output.encryption + - output.encryption.SSEAlgorithm == 'aws:kms' + + ## # ============================================================ + ## + ## AWS S3 no longer supports disabling S3 encryption + ## https://docs.aws.amazon.com/AmazonS3/latest/userguide/default-encryption-faq.html + ## + ## - name: Disable encryption from bucket + ## s3_bucket: + ## name: '{{ local_bucket_name }}' + ## state: present + ## encryption: "none" + ## register: output + ## + ## - assert: + ## that: + ## - output.changed + ## - not output.encryption + ## + ## - name: Disable encryption from bucket + ## s3_bucket: + ## name: '{{ local_bucket_name }}' + ## state: present + ## encryption: "none" + ## register: output + ## + ## - assert: + ## that: + ## - output is not changed + ## - not output.encryption + ## + ## # ============================================================ + + - name: Delete encryption test s3 bucket + s3_bucket: + name: '{{ local_bucket_name }}' + state: absent + register: output + + - assert: + that: + - output.changed + + # ============================================================ + always: + - name: Ensure all buckets are deleted + s3_bucket: + name: '{{ local_bucket_name }}' + state: absent + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_sse.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_sse.yml new file mode 100644 index 000000000..60ee26009 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_sse.yml @@ -0,0 +1,93 @@ +--- +- module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - set_fact: + local_bucket_name: "{{ bucket_name | hash('md5')}}e-sse" + # ============================================================ + + - name: 'Create a simple bucket' + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + register: output + + - name: 'Enable AES256 encryption' + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + encryption: 'AES256' + register: output + + - assert: + that: + # SSE is now enabled by default + # - output.changed + - output.encryption + - output.encryption.SSEAlgorithm == 'AES256' + + - name: 'Re-enable AES256 encryption (idempotency)' + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + encryption: 'AES256' + register: output + + - assert: + that: + - not output.changed + - output.encryption + - output.encryption.SSEAlgorithm == 'AES256' + + ## # ============================================================ + ## + ## AWS S3 no longer supports disabling S3 encryption + ## https://docs.aws.amazon.com/AmazonS3/latest/userguide/default-encryption-faq.html + ## + ## - name: Disable encryption from bucket + ## s3_bucket: + ## name: '{{ local_bucket_name }}' + ## state: present + ## encryption: "none" + ## register: output + ## + ## - assert: + ## that: + ## - output.changed + ## - not output.encryption + ## + ## - name: Disable encryption from bucket + ## s3_bucket: + ## name: '{{ local_bucket_name }}' + ## state: present + ## encryption: "none" + ## register: output + ## + ## - assert: + ## that: + ## - output is not changed + ## - not output.encryption + 
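## Since the FAQ linked above says buckets now always carry at least SSE-S3
## default encryption, a plausible replacement check (a sketch, not upstream
## code) would assert on the algorithm a bare bucket reports instead of
## attempting to disable encryption:
##
## - s3_bucket:
##     name: '{{ local_bucket_name }}'
##     state: present
##   register: output
##
## - assert:
##     that:
##       - output.encryption.SSEAlgorithm in ['AES256', 'aws:kms']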
## + ## # ============================================================ + + - name: Delete encryption test s3 bucket + s3_bucket: + name: '{{ local_bucket_name }}' + state: absent + register: output + + - assert: + that: + - output.changed + + # ============================================================ + always: + - name: Ensure all buckets are deleted + s3_bucket: + name: '{{ local_bucket_name }}' + state: absent + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/main.yml new file mode 100644 index 000000000..8eba03ba1 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/main.yml @@ -0,0 +1,20 @@ +--- +# Beware: most of our tests here are run in parallel. +# To add new tests you'll need to add a new host to the inventory and a matching +# '{{ inventory_hostname }}'.yml file in roles/s3_bucket/tasks/ +# +# ############################################################################### + +- name: "Wrap up all tests and set up AWS credentials" + module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - debug: + msg: "{{ inventory_hostname }} start: {{ lookup('pipe','date') }}" + - include_tasks: '{{ inventory_hostname }}.yml' + - debug: + msg: "{{ inventory_hostname }} finish: {{ lookup('pipe','date') }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/missing.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/missing.yml new file mode 100644 index 000000000..eaac3ea79 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/missing.yml @@ -0,0 +1,28 @@ +--- +- name: 'Attempt to delete non-existent buckets' + block: + - set_fact: + local_bucket_name: "{{ bucket_name | hash('md5')}}-missing" + # ============================================================ + # + # While in theory the 'simple' test case covers this, there are + # ways in which eventual-consistency could catch us out. 
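# For instance (a sketch of the general pattern, not one of the cases below):
# a bucket that was just deleted can still show up in listings for a short
# window, so callers that must be robust against this typically retry the
# delete until it reports success:
#
#   - s3_bucket:
#       name: '{{ local_bucket_name }}'
#       state: absent
#     register: removed
#     until: removed is not failed
#     retries: 10
#     delay: 3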
+ # + - name: 'Delete non-existent s3_bucket (never created)' + s3_bucket: + name: '{{ local_bucket_name }}' + state: absent + register: output + + - assert: + that: + - output is success + - output is not changed + + # ============================================================ + always: + - name: 'Ensure all buckets are deleted' + s3_bucket: + name: '{{ local_bucket_name }}' + state: absent + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/object_lock.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/object_lock.yml new file mode 100644 index 000000000..9140a566b --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/object_lock.yml @@ -0,0 +1,131 @@ +--- +- module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - set_fact: + local_bucket_name: "{{ bucket_name | hash('md5')}}-objectlock" + + # ============================================================ + + - name: 'Create a simple bucket' + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + register: output + + - assert: + that: + - output.changed + - not output.object_lock_enabled + + - name: 'Re-disable object lock (idempotency)' + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + object_lock_enabled: false + register: output + + - assert: + that: + - not output.changed + - not output.object_lock_enabled + + - name: 'Enable object lock' + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + object_lock_enabled: true + register: output + ignore_errors: true + + - assert: + that: + - output is failed + + - name: Delete test s3 bucket + s3_bucket: + name: '{{ local_bucket_name }}' + state: absent + register: output + + - assert: + that: + - output.changed + + # ============================================================ + + - name: 'Create a bucket with object lock enabled' + s3_bucket: + name: '{{ local_bucket_name }}-2' + state: present + object_lock_enabled: true + register: output + + - assert: + that: + - output.changed + - output.object_lock_enabled + + - name: 'Disable object lock' + s3_bucket: + name: '{{ local_bucket_name }}-2' + state: present + object_lock_enabled: false + register: output + ignore_errors: true + + - assert: + that: + - output is failed + + - name: 'Re-enable object lock (idempotency)' + s3_bucket: + name: '{{ local_bucket_name }}-2' + state: present + object_lock_enabled: true + register: output + + - assert: + that: + - not output.changed + - output.object_lock_enabled + + - name: 'Touch bucket with object lock enabled (idempotency)' + s3_bucket: + name: '{{ local_bucket_name }}-2' + state: present + object_lock_enabled: true + register: output + + - assert: + that: + - not output.changed + - output.object_lock_enabled + + - name: Delete test s3 bucket + s3_bucket: + name: '{{ local_bucket_name }}-2' + state: absent + register: output + + - assert: + that: + - output.changed + + # ============================================================ + always: + - name: Ensure all buckets are deleted + s3_bucket: + name: '{{ local_bucket_name }}' + state: absent + ignore_errors: yes + + - name: Ensure all buckets are deleted + s3_bucket: + name: '{{ local_bucket_name }}-2' + state: absent + ignore_errors: yes diff --git 
a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/ownership_controls.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/ownership_controls.yml new file mode 100644 index 000000000..683ff0659 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/ownership_controls.yml @@ -0,0 +1,143 @@ +--- +- module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - set_fact: + local_bucket_name: "{{ bucket_name | hash('md5')}}ownership" + + - name: 'Create a simple bucket with a bad value for ownership controls' + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + object_ownership: default + ignore_errors: true + register: output + + - assert: + that: + - output.failed + + - name: 'Create bucket without object_ownership controls' + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + ignore_errors: true + register: output + + - assert: + that: + - output.changed + - not output.object_ownership|bool + + - name: delete s3 bucket + s3_bucket: + name: '{{ local_bucket_name }}' + state: absent + + - name: 'create s3 bucket with object ownership controls' + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + object_ownership: ObjectWriter + register: output + + - assert: + that: + - output.changed + - output.object_ownership + - output.object_ownership == 'ObjectWriter' + + - name: 'update s3 bucket ownership preferred controls' + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + object_ownership: BucketOwnerPreferred + register: output + + - assert: + that: + - output.changed + - output.object_ownership + - output.object_ownership == 'BucketOwnerPreferred' + + - name: 'test idempotency update s3 bucket ownership preferred controls' + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + object_ownership: BucketOwnerPreferred + register: output + + - assert: + that: + - output.changed is false + - output.object_ownership + - output.object_ownership == 'BucketOwnerPreferred' + + - name: 'update s3 bucket ownership enforced controls' + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + object_ownership: BucketOwnerEnforced + register: output + + - assert: + that: + - output.changed + - output.object_ownership + - output.object_ownership == 'BucketOwnerEnforced' + + - name: 'test idempotency update s3 bucket ownership enforced controls' + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + object_ownership: BucketOwnerEnforced + register: output + + - assert: + that: + - output.changed is false + - output.object_ownership + - output.object_ownership == 'BucketOwnerEnforced' + + - name: 'delete s3 bucket ownership controls' + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + delete_object_ownership: true + register: output + + - assert: + that: + - output.changed + - not output.object_ownership|bool + + - name: 'delete s3 bucket ownership controls once again (idempotency)' + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + delete_object_ownership: true + register: idempotency + + - assert: + that: + - not idempotency.changed + - not idempotency.object_ownership|bool + + # ============================================================ + always: + - name: delete s3 bucket ownership controls + s3_bucket: + name: '{{ 
local_bucket_name }}' + state: present + delete_object_ownership: true + ignore_errors: yes + + - name: Ensure all buckets are deleted + s3_bucket: + name: '{{ local_bucket_name }}' + state: absent + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/public_access.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/public_access.yml new file mode 100644 index 000000000..743a2ce4d --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/public_access.yml @@ -0,0 +1,115 @@ +--- +- module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - set_fact: + local_bucket_name: "{{ bucket_name | hash('md5')}}-public" + # ============================================================ + + - name: 'Create a simple bucket with public access block configuration' + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + public_access: + block_public_acls: true + block_public_policy: true + ignore_public_acls: true + restrict_public_buckets: true + register: output + + - assert: + that: + - output.changed + - output.public_access_block + - output.public_access_block.BlockPublicAcls + - output.public_access_block.BlockPublicPolicy + - output.public_access_block.IgnorePublicAcls + - output.public_access_block.RestrictPublicBuckets + + - name: 'Re-configure public access block configuration' + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + public_access: + block_public_acls: true + block_public_policy: false + ignore_public_acls: true + restrict_public_buckets: false + register: output + + - assert: + that: + - output.changed + - output.public_access_block + - output.public_access_block.BlockPublicAcls + - not output.public_access_block.BlockPublicPolicy + - output.public_access_block.IgnorePublicAcls + - not output.public_access_block.RestrictPublicBuckets + + - name: 'Re-configure public access block configuration (idempotency)' + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + public_access: + block_public_acls: true + block_public_policy: false + ignore_public_acls: true + restrict_public_buckets: false + register: output + + - assert: + that: + - output is not changed + - output.public_access_block + - output.public_access_block.BlockPublicAcls + - not output.public_access_block.BlockPublicPolicy + - output.public_access_block.IgnorePublicAcls + - not output.public_access_block.RestrictPublicBuckets + + - name: 'Delete public access block configuration' + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + delete_public_access: true + register: output + + - assert: + that: + - output is changed + - not output.public_access_block|bool + + - name: 'Delete public access block configuration (idempotency)' + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + delete_public_access: true + register: output + + - assert: + that: + - output is not changed + - not output.public_access_block|bool + + # ============================================================ + + - name: Delete testing s3 bucket + s3_bucket: + name: '{{ local_bucket_name }}' + state: absent + register: output + + - assert: + that: + - output.changed + + # ============================================================ + always: + - name: Ensure all buckets are deleted + 
s3_bucket: + name: '{{ local_bucket_name }}' + state: absent + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/simple.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/simple.yml new file mode 100644 index 000000000..7a2f3a4e2 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/simple.yml @@ -0,0 +1,67 @@ +--- +- name: 'Run simple tests' + block: + - set_fact: + local_bucket_name: "{{ bucket_name | hash('md5')}}-simple" + # Note: s3_bucket doesn't support check_mode + + # ============================================================ + - name: 'Create a simple s3_bucket' + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + register: output + + - assert: + that: + - output is success + - output is changed + - output.name == '{{ local_bucket_name }}' + - not output.requester_pays + - output.public_access is undefined + + # ============================================================ + - name: 'Try to update the simple bucket with the same values' + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + register: output + + - assert: + that: + - output is success + - output is not changed + - output.name == '{{ local_bucket_name }}' + - not output.requester_pays + + # ============================================================ + - name: 'Delete the simple s3_bucket' + s3_bucket: + name: '{{ local_bucket_name }}' + state: absent + register: output + + - assert: + that: + - output is success + - output is changed + + # ============================================================ + - name: 'Re-delete the simple s3_bucket (idempotency)' + s3_bucket: + name: '{{ local_bucket_name }}' + state: absent + register: output + + - assert: + that: + - output is success + - output is not changed + + # ============================================================ + always: + - name: 'Ensure all buckets are deleted' + s3_bucket: + name: '{{ local_bucket_name }}' + state: absent + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/tags.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/tags.yml new file mode 100644 index 000000000..1df4e5c9c --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/tags.yml @@ -0,0 +1,257 @@ +--- +- name: 'Run tagging tests' + block: + - set_fact: + local_bucket_name: "{{ bucket_name | hash('md5')}}-tags" + # ============================================================ + - name: 'Create simple s3_bucket for testing tagging' + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + register: output + + - assert: + that: + - output.changed + - output.name == '{{ local_bucket_name }}' + + # ============================================================ + + - name: 'Add tags to s3 bucket' + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + tags: + example: tag1 + another: tag2 + register: output + + - assert: + that: + - output.changed + - output.name == '{{ local_bucket_name }}' + - output.tags.example == 'tag1' + - output.tags.another == 'tag2' + + - name: 'Re-Add tags to s3 bucket' + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + tags: + example: tag1 + another: tag2 + register: output + + - assert: + that: + - output is not changed + - output.name == '{{ local_bucket_name }}' + - 
output.tags.example == 'tag1' + - output.tags.another == 'tag2' + + # ============================================================ + + - name: Remove a tag from an s3_bucket + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + tags: + example: tag1 + register: output + + - assert: + that: + - output.changed + - output.name == '{{ local_bucket_name }}' + - output.tags.example == 'tag1' + - "'another' not in output.tags" + + - name: Re-remove the tag from an s3_bucket + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + tags: + example: tag1 + register: output + + - assert: + that: + - output is not changed + - output.name == '{{ local_bucket_name }}' + - output.tags.example == 'tag1' + - "'another' not in output.tags" + + ## ============================================================ + + #- name: 'Pause to help with s3 bucket eventual consistency' + # wait_for: + # timeout: 10 + # delegate_to: localhost + + ## ============================================================ + + - name: 'Add a tag for s3_bucket with purge_tags False' + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + purge_tags: no + tags: + anewtag: here + register: output + + - assert: + that: + - output.changed + - output.name == '{{ local_bucket_name }}' + - output.tags.example == 'tag1' + - output.tags.anewtag == 'here' + + - name: 'Re-add a tag for s3_bucket with purge_tags False' + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + purge_tags: no + tags: + anewtag: here + register: output + + - assert: + that: + - output is not changed + - output.name == '{{ local_bucket_name }}' + - output.tags.example == 'tag1' + - output.tags.anewtag == 'here' + + ## ============================================================ + + #- name: 'Pause to help with s3 bucket eventual consistency' + # wait_for: + # timeout: 10 + # delegate_to: localhost + + ## ============================================================ + + - name: Update a tag for s3_bucket with purge_tags False + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + purge_tags: no + tags: + anewtag: next + register: output + + - assert: + that: + - output.changed + - output.name == '{{ local_bucket_name }}' + - output.tags.example == 'tag1' + - output.tags.anewtag == 'next' + + - name: Re-update a tag for s3_bucket with purge_tags False + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + purge_tags: no + tags: + anewtag: next + register: output + + - assert: + that: + - output is not changed + - output.name == '{{ local_bucket_name }}' + - output.tags.example == 'tag1' + - output.tags.anewtag == 'next' + + ## ============================================================ + + #- name: 'Pause to help with s3 bucket eventual consistency' + # wait_for: + # timeout: 10 + # delegate_to: localhost + + ## ============================================================ + + - name: Pass empty tags dict for s3_bucket with purge_tags False + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + purge_tags: no + tags: {} + register: output + + - assert: + that: + - output is not changed + - output.name == '{{ local_bucket_name }}' + - output.tags.example == 'tag1' + - output.tags.anewtag == 'next' + + ## ============================================================ + + #- name: 'Pause to help with s3 bucket eventual consistency' + # wait_for: + # timeout: 10 + # delegate_to: localhost + + ## ============================================================ + + - name: Do not specify any tag to ensure previous 
tags are not removed + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + register: output + + - assert: + that: + - not output.changed + - output.name == '{{ local_bucket_name }}' + - output.tags.example == 'tag1' + + # ============================================================ + + - name: Remove all tags + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + tags: {} + register: output + + - assert: + that: + - output.changed + - output.name == '{{ local_bucket_name }}' + - output.tags == {} + + - name: Re-remove all tags + s3_bucket: + name: '{{ local_bucket_name }}' + state: present + tags: {} + register: output + + - assert: + that: + - output is not changed + - output.name == '{{ local_bucket_name }}' + - output.tags == {} + + # ============================================================ + + - name: Delete bucket + s3_bucket: + name: '{{ local_bucket_name }}' + state: absent + register: output + + - assert: + that: + - output.changed + + # ============================================================ + always: + - name: Ensure all buckets are deleted + s3_bucket: + name: '{{ local_bucket_name }}' + state: absent + ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/templates/policy-updated.json b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/templates/policy-updated.json new file mode 100644 index 000000000..e0b10273f --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/templates/policy-updated.json @@ -0,0 +1,12 @@ +{ + "Version":"2012-10-17", + "Statement":[ + { + "Sid":"AddPerm", + "Effect":"Deny", + "Principal": {"AWS": "*"}, + "Action":["s3:GetObject"], + "Resource":["arn:aws:s3:::{{local_bucket_name}}/*"] + } + ] +} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/templates/policy.json b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/templates/policy.json new file mode 100644 index 000000000..0f7c4968f --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/templates/policy.json @@ -0,0 +1,12 @@ +{ + "Version":"2012-10-17", + "Statement":[ + { + "Sid":"AddPerm", + "Effect":"Allow", + "Principal": "*", + "Action":["s3:GetObject"], + "Resource":["arn:aws:s3:::{{local_bucket_name}}/*"] + } + ] +} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/runme.sh b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/runme.sh new file mode 100755 index 000000000..aa324772b --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/runme.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash +# +# Beware: most of our tests here are run in parallel. 
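# The trailing "$@" on the ansible-playbook call below forwards any extra
# arguments, so a single test from the inventory can be run with a host limit,
# e.g. `./runme.sh -l simple` (assuming the standard ansible-playbook
# -l/--limit behaviour).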
+# To add new tests you'll need to add a new host to the inventory and a matching +# '{{ inventory_hostname }}'.yml file in roles/s3_bucket/tasks/ + + +set -eux + +export ANSIBLE_ROLES_PATH=../ + +ansible-playbook main.yml -i inventory "$@" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/aliases b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/aliases new file mode 100644 index 000000000..d34fac48d --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/aliases @@ -0,0 +1,3 @@ +cloud/aws +aws_s3 +s3_object_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/defaults/main.yml new file mode 100644 index 000000000..d408adb49 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/defaults/main.yml @@ -0,0 +1,5 @@ +--- +# defaults file for s3 +bucket_name: '{{ resource_prefix | hash("md5") }}' +bucket_name_acl: "{{ bucket_name + '-with-acl' }}" +bucket_name_with_dot: "{{ bucket_name + '.bucket' }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/files/hello.txt b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/files/hello.txt new file mode 100644 index 000000000..8ab686eaf --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/files/hello.txt @@ -0,0 +1 @@ +Hello, World! diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/files/test.png b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/files/test.png new file mode 100644 index 000000000..1dc64bab8 Binary files /dev/null and b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/files/test.png differ diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/meta/main.yml new file mode 100644 index 000000000..60f81883a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/meta/main.yml @@ -0,0 +1,6 @@ +dependencies: + - setup_remote_tmp_dir + # required for s3.get_object_attributes + - role: setup_botocore_pip + vars: + botocore_version: '1.24.7' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_object.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_object.yml new file mode 100644 index 000000000..aff38eba1 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_object.yml @@ -0,0 +1,135 @@ +- block: + - name: define bucket name used for tests + set_fact: + copy_bucket: + src: "{{ bucket_name }}-copysrc" + dst: "{{ bucket_name }}-copydst" + + - name: create source bucket + s3_object: + bucket: "{{ copy_bucket.src }}" + mode: create + + - name: Create content + set_fact: + content: "{{ lookup('password', '/dev/null chars=ascii_letters,digits,hexdigits,punctuation') }}" + + - name: Put content in the source bucket + s3_object: + bucket: "{{ copy_bucket.src }}" + mode: put + content: "{{ content }}" + object: source.txt + tags: + ansible_release: '2.0.0' + ansible_team: cloud + retries: 3 + delay: 3 + register: put_result + until: "put_result.msg == 'PUT operation complete'" + + - name: Copy the content of the source bucket into dest bucket + s3_object: + bucket: "{{ copy_bucket.dst }}" + mode: copy + object: 
destination.txt + copy_src: + bucket: "{{ copy_bucket.src }}" + object: source.txt + + - name: Get the content copied into {{ copy_bucket.dst }} + s3_object: + bucket: "{{ copy_bucket.dst }}" + mode: getstr + object: destination.txt + register: copy_content + + - name: assert that the content matches the source + assert: + that: + - content == copy_content.contents + + - name: Get the download url for object copied into {{ copy_bucket.dst }} + s3_object: + bucket: "{{ copy_bucket.dst }}" + mode: geturl + object: destination.txt + register: copy_url + + - name: assert that tags are the same in the destination bucket + assert: + that: + - put_result.tags == copy_url.tags + + - name: Copy the same content from the source bucket into dest bucket (idempotency) + s3_object: + bucket: "{{ copy_bucket.dst }}" + mode: copy + object: destination.txt + copy_src: + bucket: "{{ copy_bucket.src }}" + object: source.txt + register: copy_idempotency + + - name: assert that no change was made + assert: + that: + - copy_idempotency is not changed + - "copy_idempotency.msg == 'ETag from source and destination are the same'" + + - name: Copy object with tags + s3_object: + bucket: "{{ copy_bucket.dst }}" + mode: copy + object: destination.txt + tags: + ansible_release: "2.0.1" + copy_src: + bucket: "{{ copy_bucket.src }}" + object: source.txt + register: copy_result + + - name: assert that tags were updated + assert: + that: + - copy_result is changed + - copy_result.tags['ansible_release'] == '2.0.1' + + - name: Copy object with tags (idempotency) + s3_object: + bucket: "{{ copy_bucket.dst }}" + mode: copy + object: destination.txt + tags: + ansible_release: "2.0.1" + copy_src: + bucket: "{{ copy_bucket.src }}" + object: source.txt + register: copy_result + + - name: assert that no change was made + assert: + that: + - copy_result is not changed + + - name: Copy from a non-existent key should not succeed + s3_object: + bucket: "{{ copy_bucket.dst }}" + mode: copy + object: missing_key.txt + copy_src: + bucket: "{{ copy_bucket.src }}" + object: this_key_does_not_exist.txt + register: result + + - name: Validate result when copying missing key + assert: + that: + - result is not changed + - 'result.msg == "Key this_key_does_not_exist.txt does not exist in bucket {{ copy_bucket.src }}."' + + always: + - include_tasks: delete_bucket.yml + with_items: + - "{{ copy_bucket.dst }}" + - "{{ copy_bucket.src }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_object_acl_disabled_bucket.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_object_acl_disabled_bucket.yml new file mode 100644 index 000000000..7fbd8b786 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_object_acl_disabled_bucket.yml @@ -0,0 +1,111 @@ +- name: test copying objects to bucket with ACL disabled + block: + - name: Create a bucket with ACL disabled for the test + s3_bucket: + name: "{{ bucket_name }}-acl-disabled" + object_ownership: BucketOwnerEnforced + state: present + register: create_result + + - name: Ensure bucket creation + assert: + that: + - create_result is changed + - create_result is not failed + - create_result.object_ownership == "BucketOwnerEnforced" + + - name: Create content + set_fact: + content: "{{ lookup('password', '/dev/null chars=ascii_letters,digits,hexdigits,punctuation') }}" + + - name: Create local acl_disabled_upload_test.txt + copy: + content: "{{ content }}" + dest: "{{ remote_tmp_dir 
}}/acl_disabled_upload_test.txt" + + - name: Upload a file to the bucket (check_mode) + amazon.aws.s3_object: + bucket: "{{ bucket_name }}-acl-disabled" + src: "{{ remote_tmp_dir }}/acl_disabled_upload_test.txt" + object: "acl_disabled_upload_test.txt" + mode: put + check_mode: true + register: upload_file_result + + - assert: + that: + - upload_file_result is changed + - upload_file_result is not failed + - upload_file_result.msg == "PUT operation skipped - running in check mode" + - '"s3:PutObject" not in upload_file_result.resource_actions' + + - name: Upload a file to the bucket + amazon.aws.s3_object: + bucket: "{{ bucket_name }}-acl-disabled" + src: "{{ remote_tmp_dir }}/acl_disabled_upload_test.txt" + object: "acl_disabled_upload_test.txt" + mode: put + register: upload_file_result + + - assert: + that: + - upload_file_result is changed + - upload_file_result is not failed + - upload_file_result.msg == "PUT operation complete" + - '"s3:PutObject" in upload_file_result.resource_actions' + + - name: Upload a file to the bucket (check_mode - idempotency) + amazon.aws.s3_object: + bucket: "{{ bucket_name }}-acl-disabled" + src: "{{ remote_tmp_dir }}/acl_disabled_upload_test.txt" + object: "acl_disabled_upload_test.txt" + mode: put + check_mode: true + register: upload_file_result + + - assert: + that: + - upload_file_result is not changed + - upload_file_result is not failed + - upload_file_result.msg != "PUT operation complete" + - '"s3:PutObject" not in upload_file_result.resource_actions' + + - name: Upload a file to the bucket (idempotency) + amazon.aws.s3_object: + bucket: "{{ bucket_name }}-acl-disabled" + src: "{{ remote_tmp_dir }}/acl_disabled_upload_test.txt" + object: "acl_disabled_upload_test.txt" + mode: put + register: upload_file_result + + - assert: + that: + - upload_file_result is not changed + - upload_file_result is not failed + - upload_file_result.msg != "PUT operation complete" + - '"s3:PutObject" not in upload_file_result.resource_actions' + + always: + + - name: Delete the file in the bucket + amazon.aws.s3_object: + bucket: "{{ bucket_name }}-acl-disabled" + src: "{{ remote_tmp_dir }}/acl_disabled_upload_test.txt" + object: "acl_disabled_upload_test.txt" + mode: delobj + retries: 3 + delay: 3 + ignore_errors: true + + - name: Delete bucket created in this test + s3_bucket: + name: "{{ bucket_name }}-acl-disabled" + object_ownership: BucketOwnerEnforced + state: absent + register: delete_result + + - name: Ensure bucket deletion + assert: + that: + - delete_result is changed + - delete_result is not failed diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/delete_bucket.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/delete_bucket.yml new file mode 100644 index 000000000..d285c7a95 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/delete_bucket.yml @@ -0,0 +1,25 @@ +- name: delete bucket at the end of Integration tests + block: + - name: list bucket object + s3_object: + bucket: "{{ item }}" + mode: list + register: objects + ignore_errors: true + + - name: remove objects from bucket + s3_object: + bucket: "{{ item }}" + mode: delobj + object: "{{ obj }}" + with_items: "{{ objects.s3_keys }}" + loop_control: + loop_var: obj + when: "'s3_keys' in objects" + ignore_errors: true + + - name: delete the bucket + s3_object: + bucket: "{{ item }}" + mode: delete + ignore_errors: true diff --git 
a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/main.yml new file mode 100644 index 000000000..e85fd7886 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/main.yml @@ -0,0 +1,1092 @@ +--- +# Integration tests for s3_object +- module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + + block: + - name: get ARN of calling user + aws_caller_info: + register: aws_caller_info + + - name: register account id + set_fact: + aws_account: "{{ aws_caller_info.account }}" + + - name: check that temp directory was made + assert: + that: + - remote_tmp_dir is defined + + - name: Create content + set_fact: + content: "{{ lookup('password', '/dev/null chars=ascii_letters,digits,hexdigits,punctuation') }}" + + - name: test create bucket without permissions + module_defaults: { group/aws: {} } + s3_object: + bucket: "{{ bucket_name }}" + mode: create + register: result + ignore_errors: true + + - assert: + that: + - result is failed + - "result.msg != 'MODULE FAILURE'" + + - name: test create bucket with an invalid name + s3_object: + bucket: "{{ bucket_name }}-" + mode: create + register: result + ignore_errors: true + + - assert: + that: + - result is failed + + - name: test create bucket + s3_object: + bucket: "{{ bucket_name }}" + mode: create + register: result + + - assert: + that: + - result is changed + + - name: trying to create a bucket name that already exists + s3_object: + bucket: "{{ bucket_name }}" + mode: create + register: result + + - assert: + that: + - result is not changed + + - name: Create local upload.txt + copy: + content: "{{ content }}" + dest: "{{ remote_tmp_dir }}/upload.txt" + + - name: stat the file + stat: + path: "{{ remote_tmp_dir }}/upload.txt" + get_checksum: yes + register: upload_file + + - name: test putting an object in the bucket + s3_object: + bucket: "{{ bucket_name }}" + mode: put + src: "{{ remote_tmp_dir }}/upload.txt" + object: delete.txt + tags: + "lowercase spaced": "hello cruel world" + "Title Case": "Hello Cruel World" + retries: 3 + delay: 3 + register: result + + - assert: + that: + - result is changed + - result.msg == "PUT operation complete" + + - name: List keys + amazon.aws.s3_object_info: + bucket_name: "{{ bucket_name }}" + register: list_keys_result + + - name: Get s3 object info without specific details + amazon.aws.s3_object_info: + bucket_name: "{{ bucket_name }}" + object_name: "{{ list_keys_result.s3_keys[0] }}" + register: info_result + + - assert: + that: + - info_result is not failed + - info_result is not changed + - '"object_info" in info_result' + - info_result.object_info[0] | length != 0 + - '"object_data" in info_result.object_info[0]' + - '"e_tag" in info_result.object_info[0].object_data' + - '"last_modified" in info_result.object_info[0].object_data' + - '"content_type" in info_result.object_info[0].object_data' + + - name: Get s3 object info with specific details + amazon.aws.s3_object_info: + bucket_name: "{{ bucket_name }}" + object_name: "{{ list_keys_result.s3_keys[0] }}" + object_details: + object_acl: true + object_tagging: true + object_lock_configuration: true + object_attributes: true + object_retention: false + attributes_list: + - ETag + - ObjectSize + - StorageClass + - Checksum + - ObjectParts + register: info_detail_result + 
vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + + - assert: + that: + - info_detail_result is not failed + - info_detail_result is not changed + - '"object_info" in info_detail_result' + - info_detail_result.object_info[0] | length != 0 + - '"object_acl" in info_detail_result.object_info[0]' + - '"object_tagging" in info_detail_result.object_info[0]' + - info_detail_result.object_info[0].object_tagging | length == 2 + - '"object_legal_hold" not in info_detail_result.object_info[0]' + - '"object_lock_configuration" in info_detail_result.object_info[0]' + - '"object_attributes" in info_detail_result.object_info[0]' + - '"object_retention" not in info_detail_result.object_info[0]' + - '"e_tag" in info_result.object_info[0].object_data' + - '"last_modified" in info_result.object_info[0].object_data' + - '"content_type" in info_result.object_info[0].object_data' + + - name: test using s3_object with async + s3_object: + bucket: "{{ bucket_name }}" + mode: put + src: "{{ remote_tmp_dir }}/upload.txt" + object: delete.txt + register: test_async + async: 30 + poll: 0 + + - name: ensure it completed + async_status: + jid: "{{ test_async.ansible_job_id }}" + register: status + until: status is finished + retries: 30 + + - name: test put with overwrite=different and unmodified object + s3_object: + bucket: "{{ bucket_name }}" + mode: put + src: "{{ remote_tmp_dir }}/upload.txt" + object: delete.txt + retries: 3 + delay: 3 + register: result + + - assert: + that: + - result is not changed + + - name: check that roles file lookups work as expected + s3_object: + bucket: "{{ bucket_name }}" + mode: put + src: hello.txt + object: delete.txt + retries: 3 + delay: 3 + register: result + + - assert: + that: + - result is changed + - result.msg == "PUT operation complete" + + # s3_object (and its old alias) use an action plugin to support using the + # 'file' lookup path or a remote path. 
Keeping this working depends on + # having a redirect for both the module and the action plugin + - name: check that roles file lookups work as expected when using old name + aws_s3: + bucket: "{{ bucket_name }}" + mode: put + src: hello.txt + object: delete.txt + overwrite: always + retries: 3 + delay: 3 + register: result + + - assert: + that: + - result is changed + - result.msg == "PUT operation complete" + + - name: test put with overwrite=never + s3_object: + bucket: "{{ bucket_name }}" + mode: put + src: "{{ remote_tmp_dir }}/upload.txt" + object: delete.txt + overwrite: never + retries: 3 + delay: 3 + register: result + + - assert: + that: + - result is not changed + + - name: test put with overwrite=different and modified object + s3_object: + bucket: "{{ bucket_name }}" + mode: put + src: "{{ remote_tmp_dir }}/upload.txt" + object: delete.txt + overwrite: different + retries: 3 + delay: 3 + register: result + + - assert: + that: + - result is changed + + - name: test put with overwrite=always + s3_object: + bucket: "{{ bucket_name }}" + mode: put + src: "{{ remote_tmp_dir }}/upload.txt" + object: delete.txt + overwrite: always + retries: 3 + delay: 3 + register: result + + - assert: + that: + - result is changed + + - name: test get object + s3_object: + bucket: "{{ bucket_name }}" + mode: get + dest: "{{ remote_tmp_dir }}/download.txt" + object: delete.txt + retries: 3 + delay: 3 + register: result + until: "result.msg == 'GET operation complete'" + + - name: stat the file so we can compare the checksums + stat: + path: "{{ remote_tmp_dir }}/download.txt" + get_checksum: yes + register: download_file + + - assert: + that: + - upload_file.stat.checksum == download_file.stat.checksum + + - name: test get with overwrite=different and identical files + s3_object: + bucket: "{{ bucket_name }}" + mode: get + dest: "{{ remote_tmp_dir }}/download.txt" + object: delete.txt + retries: 3 + delay: 3 + register: result + + - assert: + that: + - result is not changed + + - name: modify destination + copy: + dest: "{{ remote_tmp_dir }}/download.txt" + src: hello.txt + + - name: test get with overwrite=never + s3_object: + bucket: "{{ bucket_name }}" + mode: get + dest: "{{ remote_tmp_dir }}/download.txt" + object: delete.txt + overwrite: never + retries: 3 + delay: 3 + register: result + + - assert: + that: + - result is not changed + + - name: test get with overwrite=different and modified file + s3_object: + bucket: "{{ bucket_name }}" + mode: get + dest: "{{ remote_tmp_dir }}/download.txt" + object: delete.txt + retries: 3 + delay: 3 + register: result + + - assert: + that: + - result is changed + + - name: test get with overwrite=always + s3_object: + bucket: "{{ bucket_name }}" + mode: get + dest: "{{ remote_tmp_dir }}/download.txt" + object: delete.txt + overwrite: always + retries: 3 + delay: 3 + register: result + + - assert: + that: + - result is changed + + - name: test get with overwrite=latest and identical files + s3_object: + bucket: "{{ bucket_name }}" + mode: get + dest: "{{ remote_tmp_dir }}/download.txt" + object: delete.txt + overwrite: latest + retries: 3 + delay: 3 + register: result + + - assert: + that: + - result is not changed + + - name: set the local file's mtime to the past + shell: touch -mt 197001010900.00 "{{ remote_tmp_dir }}/download.txt" + + - name: test get with overwrite=latest and files whose mtimes differ + s3_object: + bucket: "{{ bucket_name }}" + mode: get + dest: "{{ remote_tmp_dir }}/download.txt" + object: delete.txt + overwrite: latest + 
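+ # overwrite=latest only re-downloads when the S3 object's LastModified
+ # timestamp is newer than the local file's mtime; the 1970 touch above
+ # guarantees that here. As a sketch of the comparison (not the module's
+ # literal code):
+ #   if s3_last_modified > os.stat(dest).st_mtime: download()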
retries: 3 + delay: 3 + register: result + + - assert: + that: + - result is changed + + - name: test geturl of the object + s3_object: + bucket: "{{ bucket_name }}" + mode: geturl + object: delete.txt + retries: 3 + delay: 3 + register: result + until: result is changed + + - assert: + that: + - "'Download url:' in result.msg" + - result is changed + + - name: test geturl of the object with sigv4 + s3_object: + bucket: "{{ bucket_name }}" + mode: geturl + sig_v4: true + object: delete.txt + retries: 3 + delay: 3 + register: result + until: result is changed + + - assert: + that: + - "'Download url:' in result.msg" + - result is changed + + - name: test getstr of the object + s3_object: + bucket: "{{ bucket_name }}" + mode: getstr + object: delete.txt + retries: 3 + delay: 3 + register: result + + - assert: + that: + - result.msg == "GET operation complete" + - result.contents == content + + - name: test list to get all objects in the bucket + s3_object: + bucket: "{{ bucket_name }}" + mode: list + retries: 3 + delay: 3 + register: result + + - assert: + that: + - "'delete.txt' in result.s3_keys" + - result.msg == "LIST operation complete" + + - name: test delobj to just delete an object in the bucket + s3_object: + bucket: "{{ bucket_name }}" + mode: delobj + object: delete.txt + retries: 3 + delay: 3 + register: result + + - assert: + that: + - "'Object deleted from bucket' in result.msg" + - result is changed + + - name: test putting an encrypted object in the bucket + s3_object: + bucket: "{{ bucket_name }}" + mode: put + src: "{{ remote_tmp_dir }}/upload.txt" + encrypt: yes + object: delete_encrypt.txt + retries: 3 + delay: 3 + register: result + + - assert: + that: + - result is changed + - result.msg == "PUT operation complete" + + - name: test get encrypted object + s3_object: + bucket: "{{ bucket_name }}" + mode: get + dest: "{{ remote_tmp_dir }}/download_encrypted.txt" + object: delete_encrypt.txt + retries: 3 + delay: 3 + register: result + until: "result.msg == 'GET operation complete'" + + - name: stat the file so we can compare the checksums + stat: + path: "{{ remote_tmp_dir }}/download_encrypted.txt" + get_checksum: yes + register: download_file + + - assert: + that: + - upload_file.stat.checksum == download_file.stat.checksum + + - name: delete encrypted file + s3_object: + bucket: "{{ bucket_name }}" + mode: delobj + object: delete_encrypt.txt + retries: 3 + delay: 3 + + - name: test putting an aws:kms encrypted object in the bucket + s3_object: + bucket: "{{ bucket_name }}" + mode: put + src: "{{ remote_tmp_dir }}/upload.txt" + encrypt: yes + encryption_mode: aws:kms + object: delete_encrypt_kms.txt + retries: 3 + delay: 3 + register: result + + - assert: + that: + - result is changed + - result.msg == "PUT operation complete" + + - name: test get KMS encrypted object + s3_object: + bucket: "{{ bucket_name }}" + mode: get + dest: "{{ remote_tmp_dir }}/download_kms.txt" + object: delete_encrypt_kms.txt + retries: 3 + delay: 3 + register: result + until: "result.msg == 'GET operation complete'" + + - name: get the stat of the file so we can compare the checksums + stat: + path: "{{ remote_tmp_dir }}/download_kms.txt" + get_checksum: yes + register: download_file + + - assert: + that: + - upload_file.stat.checksum == download_file.stat.checksum + + # FIXME - could use a test that checks uploaded file is *actually* aws:kms encrypted + + - name: delete KMS encrypted file + s3_object: + bucket: "{{ bucket_name }}" + mode: delobj + object: delete_encrypt_kms.txt + retries: 3 + 
delay: 3 + + # FIXME: could use a test that checks non standard KMS key + # but that would require ability to create and remove such keys. + # PRs exist for that, but propose deferring until after merge. + + - name: test creation of empty path + s3_object: + bucket: "{{ bucket_name }}" + mode: create + object: foo/bar/baz/ + retries: 3 + delay: 3 + register: result + + - assert: + that: + - "'Virtual directory foo/bar/baz/ created' in result.msg" + - result is changed + + - name: test deletion of empty path + s3_object: + bucket: "{{ bucket_name }}" + mode: delobj + object: foo/bar/baz/ + retries: 3 + delay: 3 + + - name: test delete bucket + s3_object: + bucket: "{{ bucket_name }}" + mode: delete + register: result + retries: 3 + delay: 3 + until: result is changed + + - assert: + that: + - result is changed + + - name: test create a bucket with a dot in the name + s3_object: + bucket: "{{ bucket_name_with_dot }}" + mode: create + register: result + + - assert: + that: + - result is changed + + - name: test delete a bucket with a dot in the name + s3_object: + bucket: "{{ bucket_name_with_dot }}" + mode: delete + register: result + + - assert: + that: + - result is changed + + - name: test delete a nonexistent bucket + s3_object: + bucket: "{{ bucket_name_with_dot }}" + mode: delete + register: result + + - assert: + that: + - result is not changed + + - name: make tempfile 4 GB for OSX + command: + _raw_params: "dd if=/dev/zero of={{ remote_tmp_dir }}/largefile bs=1m count=4096" + when: ansible_distribution == 'MacOSX' + + - name: make tempfile 4 GB for linux + command: + _raw_params: "dd if=/dev/zero of={{ remote_tmp_dir }}/largefile bs=1M count=4096" + when: ansible_system == 'Linux' + + - name: test multipart download - platform specific + block: + - name: make a bucket to upload the file + s3_object: + bucket: "{{ bucket_name }}" + mode: create + + - name: upload the file to the bucket + s3_object: + bucket: "{{ bucket_name }}" + mode: put + src: "{{ remote_tmp_dir }}/largefile" + object: multipart.txt + + - name: download file once + s3_object: + bucket: "{{ bucket_name }}" + mode: get + dest: "{{ remote_tmp_dir }}/download.txt" + object: multipart.txt + overwrite: different + retries: 3 + delay: 3 + until: "result.msg == 'GET operation complete'" + register: result + + - assert: + that: + - result is changed + + - name: download file again + s3_object: + bucket: "{{ bucket_name }}" + mode: get + dest: "{{ remote_tmp_dir }}/download.txt" + object: multipart.txt + overwrite: different + register: result + + - assert: + that: + - result is not changed + when: ansible_system == 'Linux' or ansible_distribution == 'MacOSX' + + - name: make a bucket with the bucket-owner-full-control ACL + s3_bucket: + name: "{{ bucket_name_acl }}" + state: present + policy: "{{ lookup('template', 'policy.json.j2') }}" + register: bucket_with_policy + + - assert: + that: + - bucket_with_policy is changed + + # # XXX Doesn't fail... ( ? Eventual consistency ? 
) + # - name: fail to upload the file to the bucket with an ACL + # s3_object: + # bucket: "{{ bucket_name_acl }}" + # mode: put + # src: "{{ tmpdir.path }}/upload.txt" + # object: file-with-permissions.txt + # permission: private + # ignore_nonexistent_bucket: True + # register: upload_private + # ignore_errors: True + # + # - assert: + # that: + # - upload_private is failed + + - name: upload the file to the bucket with an ACL + s3_object: + bucket: "{{ bucket_name_acl }}" + mode: put + src: "{{ remote_tmp_dir }}/upload.txt" + object: file-with-permissions.txt + permission: bucket-owner-full-control + ignore_nonexistent_bucket: True + register: upload_owner + + - assert: + that: + - upload_owner is changed + + - name: create an object from static content + s3_object: + bucket: "{{ bucket_name }}" + object: put-content.txt + mode: put + content: >- + test content + register: result + + - assert: + that: + - result is changed + + - name: ensure idempotency on static content + s3_object: + bucket: "{{ bucket_name }}" + object: put-content.txt + mode: put + content: >- + test content + register: result + + - assert: + that: + - result is not changed + + - name: fetch test content + s3_object: + bucket: "{{ bucket_name }}" + mode: getstr + object: put-content.txt + register: result + + - assert: + that: + - result.contents == "test content" + + - set_fact: + put_template_text: test template + + - name: create an object from a template + s3_object: + bucket: "{{ bucket_name }}" + object: put-template.txt + mode: put + content: "{{ lookup('template', 'templates/put-template.txt.j2')|replace('\n', '') }}" + register: result + + - assert: + that: + - result is changed + + - name: create an object from a template (idempotency) + aws_s3: + bucket: "{{ bucket_name }}" + object: put-template.txt + mode: put + content: "{{ lookup('template', 'templates/put-template.txt.j2')|replace('\n', '') }}" + register: result + + - assert: + that: + - result is not changed + + - name: fetch template content + s3_object: + bucket: "{{ bucket_name }}" + mode: getstr + object: put-template.txt + register: result + + - assert: + that: + - result.contents == "template:test template" + + # at present, there is no lookup that can process binary data, so we use slurp instead + - slurp: + src: "{{ role_path }}/files/test.png" + register: put_binary + + - name: create an object from binary data + s3_object: + bucket: "{{ bucket_name }}" + object: put-binary.bin + mode: put + content_base64: "{{ put_binary.content }}" + register: result + + - assert: + that: + - result is changed + + - name: create an object from binary data (idempotency) + aws_s3: + bucket: "{{ bucket_name }}" + object: put-binary.bin + mode: put + content_base64: "{{ put_binary.content }}" + register: result + + - assert: + that: + - result is not changed + + - name: fetch binary content + s3_object: + bucket: "{{ bucket_name }}" + mode: get + dest: "{{ remote_tmp_dir }}/download_binary.bin" + object: put-binary.bin + register: result + + - name: stat the files so we can compare the checksums + stat: + path: "{{ item }}" + get_checksum: yes + loop: + - "{{ role_path }}/files/test.png" + - "{{ remote_tmp_dir }}/download_binary.bin" + register: binary_files + + - assert: + that: + - binary_files.results[0].stat.checksum == binary_files.results[1].stat.checksum + + - include_tasks: copy_object.yml + + - include_tasks: copy_object_acl_disabled_bucket.yml + + # ============================================================ + - name: 'Run tagging tests' + block: 
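+ # The tasks below exercise the put-mode tagging contract: omitting `tags`
+ # leaves existing object tags untouched, `purge_tags: false` merges new tags
+ # into the existing set, and an explicit `tags: {}` with the default purge
+ # behaviour removes every tag.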
+ # ============================================================ + - name: create an object from static content + s3_object: + bucket: "{{ bucket_name }}" + object: put-content.txt + mode: put + content: >- + test content + tags: + tag_one: '{{ resource_prefix }} One' + "Tag Two": 'two {{ resource_prefix }}' + register: result + + - assert: + that: + - result is changed + - "'tags' in result" + - (result.tags | length) == 2 + - result.tags["tag_one"] == '{{ resource_prefix }} One' + - result.tags["Tag Two"] == 'two {{ resource_prefix }}' + + - name: ensure idempotency on static content + s3_object: + bucket: "{{ bucket_name }}" + object: put-content.txt + mode: put + content: >- + test content + tags: + tag_one: '{{ resource_prefix }} One' + "Tag Two": 'two {{ resource_prefix }}' + register: result + + - assert: + that: + - result is not changed + - "'tags' in result" + - (result.tags | length) == 2 + - result.tags["tag_one"] == '{{ resource_prefix }} One' + - result.tags["Tag Two"] == 'two {{ resource_prefix }}' + + - name: Remove a tag from an S3 object + s3_object: + bucket: "{{ bucket_name }}" + object: put-content.txt + mode: put + content: >- + test content + tags: + tag_one: '{{ resource_prefix }} One' + register: result + + - assert: + that: + - result is changed + - "'tags' in result" + - (result.tags | length) == 1 + - result.tags["tag_one"] == "{{ resource_prefix }} One" + - "'Tag Two' not in result.tags" + + - name: Remove the tag from an S3 object (idempotency) + s3_object: + bucket: "{{ bucket_name }}" + object: put-content.txt + mode: put + content: >- + test content + tags: + tag_one: '{{ resource_prefix }} One' + register: result + + - assert: + that: + - result is not changed + - "'tags' in result" + - (result.tags | length) == 1 + - result.tags["tag_one"] == "{{ resource_prefix }} One" + - "'Tag Two' not in result.tags" + + - name: Add a tag for an S3 object with purge_tags False + s3_object: + bucket: "{{ bucket_name }}" + object: put-content.txt + mode: put + content: >- + test content + tags: + tag_three: '{{ resource_prefix }} Three' + purge_tags: false + register: result + + - assert: + that: + - result is changed + - "'tags' in result" + - (result.tags | length) == 2 + - result.tags["tag_three"] == '{{ resource_prefix }} Three' + - result.tags["tag_one"] == '{{ resource_prefix }} One' + + - name: Add a tag for an S3 object with purge_tags False (idempotency) + s3_object: + bucket: "{{ bucket_name }}" + object: put-content.txt + mode: put + content: >- + test content + tags: + tag_three: '{{ resource_prefix }} Three' + purge_tags: false + register: result + + - assert: + that: + - result is not changed + - "'tags' in result" + - (result.tags | length) == 2 + - result.tags["tag_three"] == '{{ resource_prefix }} Three' + - result.tags["tag_one"] == '{{ resource_prefix }} One' + + - name: Update tags for an S3 object with purge_tags False + s3_object: + bucket: "{{ bucket_name }}" + object: put-content.txt + mode: put + content: >- + test content + tags: + "TagFour": '{{ resource_prefix }} tag_four' + purge_tags: false + register: result + + - assert: + that: + - result is changed + - "'tags' in result" + - (result.tags | length) == 3 + - result.tags["tag_one"] == '{{ resource_prefix }} One' + - result.tags["tag_three"] == '{{ resource_prefix }} Three' + - result.tags["TagFour"] == '{{ resource_prefix }} tag_four' + + - name: Update tags for an S3 object with purge_tags False (idempotency) + s3_object: + bucket: "{{ bucket_name }}" + object: put-content.txt + mode: put 
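+ # `>-` is a folded block scalar: line breaks collapse to spaces and the
+ # trailing newline is stripped, so the body uploaded here is exactly
+ # "test content", matching the getstr assertion earlier in this file.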
+ content: >- + test content + tags: + "TagFour": '{{ resource_prefix }} tag_four' + purge_tags: false + register: result + + - assert: + that: + - result is not changed + - "'tags' in result" + - (result.tags | length) == 3 + - result.tags["tag_one"] == '{{ resource_prefix }} One' + - result.tags["tag_three"] == '{{ resource_prefix }} Three' + - result.tags["TagFour"] == '{{ resource_prefix }} tag_four' + + - name: Specify empty tags for an S3 object with purge_tags False + s3_object: + bucket: "{{ bucket_name }}" + object: put-content.txt + mode: put + content: >- + test content + tags: {} + purge_tags: false + register: result + + - assert: + that: + - result is not changed + - "'tags' in result" + - (result.tags | length) == 3 + - result.tags["tag_one"] == '{{ resource_prefix }} One' + - result.tags["tag_three"] == '{{ resource_prefix }} Three' + - result.tags["TagFour"] == '{{ resource_prefix }} tag_four' + + - name: Do not specify any tag to ensure previous tags are not removed + s3_object: + bucket: "{{ bucket_name }}" + object: put-content.txt + mode: put + content: >- + test content + register: result + + - assert: + that: + - result is not changed + - "'tags' in result" + - (result.tags | length) == 3 + - result.tags["tag_one"] == '{{ resource_prefix }} One' + - result.tags["tag_three"] == '{{ resource_prefix }} Three' + - result.tags["TagFour"] == '{{ resource_prefix }} tag_four' + + - name: Remove all tags + s3_object: + bucket: "{{ bucket_name }}" + object: put-content.txt + mode: put + overwrite: different + content: >- + test content + tags: {} + register: result + + - assert: + that: + - result is changed + - "'tags' in result" + - (result.tags | length) == 0 + + - name: Remove all tags (idempotency) + s3_object: + bucket: "{{ bucket_name }}" + object: put-content.txt + mode: put + content: >- + test content + tags: {} + register: result + + - assert: + that: + - result is not changed + - "'tags' in result" + - (result.tags | length) == 0 + + always: + + - name: delete temporary files + file: + state: absent + path: "{{ remote_tmp_dir }}" + ignore_errors: true + + - include_tasks: delete_bucket.yml + with_items: + - "{{ bucket_name }}" + - "{{ bucket_name_with_dot }}" + - "{{ bucket_name_acl }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/templates/policy.json.j2 b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/templates/policy.json.j2 new file mode 100644 index 000000000..4af2e0713 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/templates/policy.json.j2 @@ -0,0 +1,21 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "Only allow writes to my bucket with bucket owner full control", + "Effect": "Allow", + "Principal": { "AWS":"{{ aws_account }}" }, + "Action": [ + "s3:PutObject" + ], + "Resource": [ + "arn:aws:s3:::{{ bucket_name_acl }}/*" + ], + "Condition": { + "StringEquals": { + "s3:x-amz-acl": "bucket-owner-full-control" + } + } + } + ] +} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/templates/put-template.txt.j2 b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/templates/put-template.txt.j2 new file mode 100644 index 000000000..2a75e9f2d --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/templates/put-template.txt.j2 @@ -0,0 +1,2 @@ +template: +{{ put_template_text }} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/defaults/main.yml 
b/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/defaults/main.yml new file mode 100644 index 000000000..16ad00270 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/defaults/main.yml @@ -0,0 +1,2 @@ +default_botocore_version: '1.21.0' +default_boto3_version: '1.18.0' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/handlers/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/handlers/main.yml new file mode 100644 index 000000000..2536d1ac7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/handlers/main.yml @@ -0,0 +1,2 @@ +- name: 'Delete temporary pip environment' + include_tasks: cleanup.yml diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/meta/main.yml new file mode 100644 index 000000000..32cf5dda7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/tasks/cleanup.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/tasks/cleanup.yml new file mode 100644 index 000000000..25b3ec27e --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/tasks/cleanup.yml @@ -0,0 +1,5 @@ +- name: 'Delete temporary pip environment' + file: + path: "{{ botocore_pip_directory }}" + state: absent + no_log: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/tasks/main.yml new file mode 100644 index 000000000..1a0d7c4fb --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/tasks/main.yml @@ -0,0 +1,43 @@ +- name: 'Ensure that we have virtualenv available to us' + pip: + name: virtualenv + +- name: 'Create temporary directory for pip environment' + tempfile: + path: /var/tmp + state: directory + prefix: botocore + suffix: .test + register: botocore_pip_directory + notify: + - 'Delete temporary pip environment' + +- name: 'Record temporary directory' + set_fact: + botocore_pip_directory: "{{ botocore_pip_directory.path }}" + +- set_fact: + botocore_virtualenv: "{{ botocore_pip_directory }}/virtualenv" + botocore_virtualenv_command: "{{ ansible_python_interpreter }} -m virtualenv" + +- set_fact: + botocore_virtualenv_interpreter: "{{ botocore_virtualenv }}/bin/python" + +- pip: + name: + - 'boto3{{ _boto3_comparison }}{{ _boto3_version }}' + - 'botocore{{ _botocore_comparison }}{{ _botocore_version }}' + - 'coverage<5' + virtualenv: "{{ botocore_virtualenv }}" + virtualenv_command: "{{ botocore_virtualenv_command }}" + virtualenv_site_packages: no + vars: + _boto3_version: '{{ boto3_version | default(default_boto3_version) }}' + _botocore_version: '{{ botocore_version | default(default_botocore_version) }}' + _is_default_boto3: '{{ _boto3_version == default_boto3_version }}' + _is_default_botocore: '{{ _botocore_version == default_botocore_version }}' + # Only set the default to >= if the other dep has been updated and the dep has not been set + _default_boto3_comparison: '{% if _is_default_boto3 and not _is_default_botocore %}>={% else %}=={% endif %}' + 
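+    # Worked example (illustrative values): requesting botocore_version=1.25.0
+    # while leaving boto3 at its default yields botocore==1.25.0 together with
+    # boto3>=1.18.0, so pip is free to resolve whichever boto3 release
+    # satisfies the newer botocore.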
_default_botocore_comparison: '{% if _is_default_botocore and not _is_default_boto3 %}>={% else %}=={% endif %}' + _boto3_comparison: '{{ boto3_comparison | default(_default_boto3_comparison) }}' + _botocore_comparison: '{{ botocore_comparison | default(_default_botocore_comparison) }}' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/defaults/main.yml new file mode 100644 index 000000000..172a10a20 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/defaults/main.yml @@ -0,0 +1,4 @@ +ec2_ami_name: 'Fedora-Cloud-Base-*.x86_64*' +# CentOS Community Platform Engineering (CPE) +ec2_ami_owner_id: '125523088429' +ec2_ami_ssh_user: 'fedora' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/meta/main.yml new file mode 100644 index 000000000..32cf5dda7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/tasks/main.yml new file mode 100644 index 000000000..f41791073 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/tasks/main.yml @@ -0,0 +1,53 @@ +--- +# Set up a couple of common facts about the AWS Region +# +# Information about availability zones +# - ec2_availability_zone_names +# +# An EC2 AMI that can be used for spinning up instances. This performs a search +# rather than hardcoding the IDs so we're not limited to specific Regions +# - ec2_ami_id +# +- module_defaults: + group/aws: + aws_access_key: '{{ aws_access_key }}' + aws_secret_key: '{{ aws_secret_key }}' + security_token: '{{ security_token | default(omit) }}' + region: '{{ aws_region }}' + + run_once: True + block: + # ============================================================ + + - name: Get available AZs + aws_az_info: + filters: + region-name: '{{ aws_region }}' + register: _az_info + + - name: Pick an AZ + set_fact: + ec2_availability_zone_names: '{{ _az_info.availability_zones | selectattr("zone_name", "defined") | map(attribute="zone_name") | list }}' + + # ============================================================ + + - name: Get a list of images + ec2_ami_info: + filters: + name: '{{ ec2_ami_name }}' + owner-id: '{{ ec2_ami_owner_id }}' + architecture: x86_64 + virtualization-type: hvm + root-device-type: ebs + register: _images_info + # Very spammy + no_log: True + + - name: Set Fact for latest AMI + vars: + latest_image: '{{ _images_info.images | sort(attribute="creation_date") | reverse | first }}' + set_fact: + ec2_ami_id: '{{ latest_image.image_id }}' + ec2_ami_details: '{{ latest_image }}' + ec2_ami_root_disk: '{{ latest_image.block_device_mappings[0].device_name }}' + ec2_ami_ssh_user: '{{ ec2_ami_ssh_user }}' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/defaults/main.yml new file mode 100644 index 000000000..e73afad8f --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/defaults/main.yml @@ -0,0 +1,24 @@ +--- +# defaults file for 
ec2_instance tests +ec2_instance_test_name: 'ec2_instance' + +ec2_instance_owner: 'integration-run-{{ ec2_instance_test_name }}' +ec2_instance_type: 't3.micro' +ec2_instance_tag_TestId: '{{ resource_prefix }}-{{ ec2_instance_test_name }}' + +vpc_name: '{{ resource_prefix }}-{{ ec2_instance_test_name }}' +vpc_seed: '{{ resource_prefix }}-{{ ec2_instance_test_name }}' + +vpc_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.0.0/16' + +subnet_a_az: '{{ ec2_availability_zone_names[0] }}' +subnet_a_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.32.0/24' +subnet_a_startswith: '10.{{ 256 | random(seed=vpc_seed) }}.32.' +subnet_a_name: '{{ resource_prefix }}-{{ ec2_instance_test_name }}-a' +subnet_b_az: '{{ ec2_availability_zone_names[1] }}' +subnet_b_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.33.0/24' +subnet_b_startswith: '10.{{ 256 | random(seed=vpc_seed) }}.33.' +subnet_b_name: '{{ resource_prefix }}-{{ ec2_instance_test_name }}-b' + +security_group_name_1: '{{ resource_prefix }}-{{ ec2_instance_test_name }}-1' +security_group_name_2: '{{ resource_prefix }}-{{ ec2_instance_test_name }}-2' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/handlers/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/handlers/main.yml new file mode 100644 index 000000000..b8dee611d --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/handlers/main.yml @@ -0,0 +1,2 @@ +- name: 'Delete ec2_instance environment' + include_tasks: cleanup.yml diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/meta/main.yml new file mode 100644 index 000000000..32cf5dda7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/tasks/cleanup.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/tasks/cleanup.yml new file mode 100644 index 000000000..0a0aa1eed --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/tasks/cleanup.yml @@ -0,0 +1,118 @@ +- module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - name: Set termination protection to false (so we can terminate instance) (cleanup) + ec2_instance: + filters: + instance-state-name: ['pending', 'running', 'stopping', 'stopped'] + vpc-id: '{{ testing_vpc.vpc.id }}' + termination_protection: false + ignore_errors: yes + + - name: "(Cleanup) Find all remaining Instances" + ec2_instance_info: + filters: + vpc-id: '{{ testing_vpc.vpc.id }}' + instance-state-name: ['pending', 'running', 'shutting-down', 'stopping', 'stopped'] + register: instances + + - name: "(Cleanup) Remove Instances (start)" + ec2_instance: + state: absent + instance_ids: '{{ item.instance_id }}' + wait: no + ignore_errors: yes + loop: '{{ instances.instances }}' + + - name: "(Cleanup) Remove Instances (wait for completion)" + ec2_instance: + state: absent + instance_ids: '{{ item.instance_id }}' + filters: + instance-state-name: ['pending', 'running', 'shutting-down', 'stopping', 'stopped'] + vpc-id: '{{ testing_vpc.vpc.id }}' + wait: yes + ignore_errors: 
yes + loop: '{{ instances.instances }}' + + - name: "(Cleanup) Find all remaining ENIs" + ec2_eni_info: + filters: + vpc-id: "{{ testing_vpc.vpc.id }}" + register: enis + + - name: "(Cleanup) delete all ENIs" + ec2_eni: + state: absent + eni_id: "{{ item.id }}" + register: eni_removed + until: eni_removed is not failed + with_items: "{{ enis.network_interfaces }}" + ignore_errors: yes + retries: 10 + + - name: "(Cleanup) Find all remaining Security Groups" + ec2_security_group_info: + filters: + vpc-id: '{{ testing_vpc.vpc.id }}' + register: security_groups + + - name: "(Cleanup) Remove the security group rules" + ec2_security_group: + state: present + name: '{{ item.group_name }}' + description: '{{ item.description }}' + vpc_id: '{{ testing_vpc.vpc.id }}' + rules: [] + egress_rules: [] + loop: '{{ security_groups.security_groups }}' + register: sg_removed + until: sg_removed is not failed + ignore_errors: yes + retries: 10 + + - name: "(Cleanup) Remove the security groups" + ec2_security_group: + state: absent + group_id: '{{ item.group_id }}' + loop: '{{ security_groups.security_groups }}' + when: + - item.group_name != 'default' + register: sg_removed + until: sg_removed is not failed + ignore_errors: yes + retries: 10 + + - name: "(Cleanup) Find all remaining Subnets" + ec2_vpc_subnet_info: + filters: + vpc-id: '{{ testing_vpc.vpc.id }}' + register: subnets + + - name: "(Cleanup) Remove subnets" + ec2_vpc_subnet: + state: absent + vpc_id: "{{ testing_vpc.vpc.id }}" + cidr: "{{ item.cidr_block }}" + register: removed + loop: '{{ subnets.subnets }}' + until: removed is not failed + ignore_errors: yes + retries: 10 + + - name: "(Cleanup) Remove the VPC" + ec2_vpc_net: + state: absent + name: "{{ vpc_name }}" + cidr_block: "{{ vpc_cidr }}" + tags: + Name: Ansible Testing VPC + tenancy: default + register: removed + until: removed is not failed + ignore_errors: yes + retries: 10 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/tasks/main.yml new file mode 100644 index 000000000..fa12818c1 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/tasks/main.yml @@ -0,0 +1,88 @@ +- run_once: '{{ setup_run_once | default("no") | bool }}' + module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - name: "Create VPC for use in testing" + ec2_vpc_net: + state: present + name: "{{ vpc_name }}" + cidr_block: "{{ vpc_cidr }}" + tags: + Name: "{{ vpc_name }}" + tenancy: default + register: testing_vpc + notify: + - 'Delete ec2_instance environment' + + - name: "Create default subnet in zone A" + ec2_vpc_subnet: + state: present + vpc_id: "{{ testing_vpc.vpc.id }}" + cidr: "{{ subnet_a_cidr }}" + az: "{{ subnet_a_az }}" + resource_tags: + Name: "{{ subnet_a_name }}" + register: testing_subnet_a + + - name: "Create secondary subnet in zone B" + ec2_vpc_subnet: + state: present + vpc_id: "{{ testing_vpc.vpc.id }}" + cidr: "{{ subnet_b_cidr }}" + az: "{{ subnet_b_az }}" + resource_tags: + Name: "{{ subnet_b_name }}" + register: testing_subnet_b + + - name: "create a security group with the vpc" + ec2_group: + state: present + name: "{{ security_group_name_1 }}" + description: a security group for ansible tests + vpc_id: "{{ testing_vpc.vpc.id }}" + rules: + - proto: tcp + 
from_port: 22 + to_port: 22 + cidr_ip: 0.0.0.0/0 + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: 0.0.0.0/0 + register: sg + + - name: "create secondary security group with the vpc" + ec2_group: + name: "{{ security_group_name_2 }}" + description: a secondary security group for ansible tests + vpc_id: "{{ testing_vpc.vpc.id }}" + rules: + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: 0.0.0.0/0 + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: 0.0.0.0/0 + register: sg2 + + - name: Preserve defaults for other roles + set_fact: + # Ensure variables are available outside of this role + vpc_cidr: '{{ vpc_cidr }}' + vpc_name: '{{ vpc_name }}' + subnet_a_az: '{{ subnet_a_az }}' + subnet_a_cidr: '{{ subnet_a_cidr }}' + subnet_a_startswith: '{{ subnet_a_startswith }}' + subnet_a_name: '{{ subnet_a_name }}' + subnet_b_az: '{{ subnet_b_az }}' + subnet_b_cidr: '{{ subnet_b_cidr }}' + subnet_b_startswith: '{{ subnet_b_startswith }}' + subnet_b_name: '{{ subnet_b_name }}' + security_group_name_1: '{{ security_group_name_1 }}' + security_group_name_2: '{{ security_group_name_2 }}' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml new file mode 100644 index 000000000..229037c8b --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml @@ -0,0 +1,5 @@ +- name: delete temporary directory + include_tasks: default-cleanup.yml + +- name: delete temporary directory (windows) + include_tasks: windows-cleanup.yml diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/meta/main.yml new file mode 100644 index 000000000..32cf5dda7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml new file mode 100644 index 000000000..39872d749 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml @@ -0,0 +1,5 @@ +- name: delete temporary directory + file: + path: "{{ remote_tmp_dir }}" + state: absent + no_log: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml new file mode 100644 index 000000000..00877dca0 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml @@ -0,0 +1,12 @@ +- name: create temporary directory + tempfile: + path: /var/tmp + state: directory + suffix: .test + register: remote_tmp_dir + notify: + - delete temporary directory + +- name: record temporary directory + set_fact: + remote_tmp_dir: "{{ remote_tmp_dir.path }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml new file mode 100644 index 000000000..f8df391b5 --- /dev/null +++ 
b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml @@ -0,0 +1,10 @@ +- name: make sure we have the ansible_os_family and ansible_distribution_version facts + setup: + gather_subset: distribution + when: ansible_facts == {} + +- include_tasks: "{{ lookup('first_found', files)}}" + vars: + files: + - "{{ ansible_os_family | lower }}.yml" + - "default.yml" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows-cleanup.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows-cleanup.yml new file mode 100644 index 000000000..32f372d0f --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows-cleanup.yml @@ -0,0 +1,4 @@ +- name: delete temporary directory (windows) + ansible.windows.win_file: + path: '{{ remote_tmp_dir }}' + state: absent diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows.yml new file mode 100644 index 000000000..317c146db --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows.yml @@ -0,0 +1,10 @@ +- name: create temporary directory + register: remote_tmp_dir + notify: + - delete temporary directory (windows) + ansible.windows.win_tempfile: + state: directory + suffix: .test +- name: record temporary directory + set_fact: + remote_tmp_dir: '{{ remote_tmp_dir.path }}' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/files/ec2-fingerprint.py b/ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/files/ec2-fingerprint.py new file mode 100644 index 000000000..827856386 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/files/ec2-fingerprint.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python +""" +Reads an OpenSSH Public key and spits out the 'AWS' MD5 sum The equivalent of + +ssh-keygen -f id_rsa.pub -e -m PKCS8 | openssl pkey -pubin -outform DER | openssl md5 -c | cut -f 2 -d ' ' + +(but without needing the OpenSSL CLI) +""" + + +import hashlib +import sys +from cryptography.hazmat.primitives import serialization + +if len(sys.argv) < 2: + ssh_public_key = "id_rsa.pub" +else: + ssh_public_key = sys.argv[1] + +with open(ssh_public_key, "rb") as key_file: + public_key = serialization.load_ssh_public_key( + key_file.read(), + ) +pub_der = public_key.public_bytes( + encoding=serialization.Encoding.DER, + format=serialization.PublicFormat.SubjectPublicKeyInfo, +) +md5digest = hashlib.md5(pub_der).hexdigest() +# Format the md5sum into the normal format +pairs = zip(md5digest[::2], md5digest[1::2]) +md5string = ":".join(["".join(pair) for pair in pairs]) + +print(md5string) diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/meta/main.yml new file mode 100644 index 000000000..32cf5dda7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/tasks/main.yml new file mode 100644 index 000000000..31bd2176e --- /dev/null +++ 
b/ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/tasks/main.yml @@ -0,0 +1,71 @@ +# (c) 2014, James Laska + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +- name: create a temp dir + tempfile: + state: directory + register: sshkey_dir + tags: + - prepare + +- name: ensure script is available + copy: + src: ec2-fingerprint.py + dest: '{{ sshkey_dir.path }}/ec2-fingerprint.py' + mode: 0700 + tags: + - prepare + +- name: Set location of SSH keys + set_fact: + sshkey: '{{ sshkey_dir.path }}/key_one' + another_sshkey: '{{ sshkey_dir.path }}/key_two' + sshkey_pub: '{{ sshkey_dir.path }}/key_one.pub' + another_sshkey_pub: '{{ sshkey_dir.path }}/key_two.pub' + +- name: generate sshkey + shell: echo 'y' | ssh-keygen -P '' -f '{{ sshkey }}' + tags: + - prepare + +- name: record fingerprint + shell: '{{ sshkey_dir.path }}/ec2-fingerprint.py {{ sshkey_pub }}' + register: fingerprint + tags: + - prepare + +- name: generate another_sshkey + shell: echo 'y' | ssh-keygen -P '' -f {{ another_sshkey }} + tags: + - prepare + +- name: record another fingerprint + shell: '{{ sshkey_dir.path }}/ec2-fingerprint.py {{ another_sshkey_pub }}' + register: another_fingerprint + tags: + - prepare + +- name: set facts for future roles + set_fact: + # Public SSH keys (OpenSSH format) + key_material: "{{ lookup('file', sshkey_pub) }}" + another_key_material: "{{ lookup('file', another_sshkey_pub) }}" + # AWS 'fingerprint' (md5digest) + fingerprint: '{{ fingerprint.stdout }}' + another_fingerprint: '{{ another_fingerprint.stdout }}' + tags: + - prepare diff --git a/ansible_collections/amazon/aws/tests/sanity/ignore-2.10.txt b/ansible_collections/amazon/aws/tests/sanity/ignore-2.10.txt new file mode 100644 index 000000000..09a7e9cbb --- /dev/null +++ b/ansible_collections/amazon/aws/tests/sanity/ignore-2.10.txt @@ -0,0 +1 @@ +plugins/modules/route53.py validate-modules:parameter-state-invalid-choice # route53_info needs improvements before we can deprecate this diff --git a/ansible_collections/amazon/aws/tests/sanity/ignore-2.11.txt b/ansible_collections/amazon/aws/tests/sanity/ignore-2.11.txt new file mode 100644 index 000000000..9f9adc33c --- /dev/null +++ b/ansible_collections/amazon/aws/tests/sanity/ignore-2.11.txt @@ -0,0 +1 @@ +plugins/modules/route53.py validate-modules:parameter-state-invalid-choice # route53_info needs improvements before we can deprecate this \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/sanity/ignore-2.12.txt b/ansible_collections/amazon/aws/tests/sanity/ignore-2.12.txt new file mode 100644 index 000000000..9f9adc33c --- /dev/null +++ b/ansible_collections/amazon/aws/tests/sanity/ignore-2.12.txt @@ -0,0 +1 @@ +plugins/modules/route53.py validate-modules:parameter-state-invalid-choice # route53_info needs improvements before we can deprecate this \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/sanity/ignore-2.13.txt 
b/ansible_collections/amazon/aws/tests/sanity/ignore-2.13.txt new file mode 100644 index 000000000..9f9adc33c --- /dev/null +++ b/ansible_collections/amazon/aws/tests/sanity/ignore-2.13.txt @@ -0,0 +1 @@ +plugins/modules/route53.py validate-modules:parameter-state-invalid-choice # route53_info needs improvements before we can deprecate this \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/sanity/ignore-2.14.txt b/ansible_collections/amazon/aws/tests/sanity/ignore-2.14.txt new file mode 100644 index 000000000..9f9adc33c --- /dev/null +++ b/ansible_collections/amazon/aws/tests/sanity/ignore-2.14.txt @@ -0,0 +1 @@ +plugins/modules/route53.py validate-modules:parameter-state-invalid-choice # route53_info needs improvements before we can deprecate this \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/sanity/ignore-2.15.txt b/ansible_collections/amazon/aws/tests/sanity/ignore-2.15.txt new file mode 100644 index 000000000..09a7e9cbb --- /dev/null +++ b/ansible_collections/amazon/aws/tests/sanity/ignore-2.15.txt @@ -0,0 +1 @@ +plugins/modules/route53.py validate-modules:parameter-state-invalid-choice # route53_info needs improvements before we can deprecate this diff --git a/ansible_collections/amazon/aws/tests/sanity/ignore-2.9.txt b/ansible_collections/amazon/aws/tests/sanity/ignore-2.9.txt new file mode 100644 index 000000000..b491a9e7a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/sanity/ignore-2.9.txt @@ -0,0 +1,7 @@ +plugins/modules/ec2_vpc_dhcp_option.py pylint:ansible-deprecated-no-version # We use dates for deprecations, Ansible 2.9 only supports this for compatibility +plugins/modules/ec2_vpc_endpoint.py pylint:ansible-deprecated-no-version # We use dates for deprecations, Ansible 2.9 only supports this for compatibility +plugins/modules/ec2_vpc_endpoint_info.py pylint:ansible-deprecated-no-version # We use dates for deprecations, Ansible 2.9 only supports this for compatibility +plugins/modules/ec2_instance.py pylint:ansible-deprecated-no-version # We use dates for deprecations, Ansible 2.9 only supports this for compatibility +plugins/modules/iam_policy.py pylint:ansible-deprecated-no-version +plugins/modules/route53.py validate-modules:parameter-state-invalid-choice # route53_info needs improvements before we can deprecate this +plugins/modules/iam_user.py pylint:ansible-deprecated-no-version diff --git a/ansible_collections/amazon/aws/tests/unit/compat/__init__.py b/ansible_collections/amazon/aws/tests/unit/compat/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ansible_collections/amazon/aws/tests/unit/compat/builtins.py b/ansible_collections/amazon/aws/tests/unit/compat/builtins.py new file mode 100644 index 000000000..349d310e8 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/compat/builtins.py @@ -0,0 +1,33 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
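+# This shim exposes BUILTINS, the import name of the builtins module
+# ('__builtin__' on Python 2, 'builtins' on Python 3), so tests can refer to
+# built-ins portably; for example (a sketch, not a call site from this repo):
+#   with mock.patch(BUILTINS + '.open', mock_open(read_data='data')):
+#       ...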
+ + # Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +# +# Compat for python2.7 +# + +# One unittest needs to import builtins via __import__() so we need to have +# the string that represents it +try: + import __builtin__ # pylint: disable=unused-import +except ImportError: + BUILTINS = 'builtins' +else: + BUILTINS = '__builtin__' diff --git a/ansible_collections/amazon/aws/tests/unit/compat/mock.py b/ansible_collections/amazon/aws/tests/unit/compat/mock.py new file mode 100644 index 000000000..0972cd2e8 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/compat/mock.py @@ -0,0 +1,122 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +''' +Compat module for Python3.x's unittest.mock module +''' +import sys + +# Python 2.7 + +# Note: Could use the pypi mock library on python3.x as well as python2.x. It +# is the same as the python3 stdlib mock library + +try: + # Allow wildcard import because we really do want to import all of mock's + # symbols into this compat shim + # pylint: disable=wildcard-import,unused-wildcard-import + from unittest.mock import * +except ImportError: + # Python 2 + # pylint: disable=wildcard-import,unused-wildcard-import + try: + from mock import * + except ImportError: + print('You need the mock library installed on python2.x to run tests') + + +# Prior to 3.4.4, mock_open cannot handle binary read_data +if sys.version_info >= (3,) and sys.version_info < (3, 4, 4): + file_spec = None + + def _iterate_read_data(read_data): + # Helper for mock_open: + # Retrieve lines from read_data via a generator so that separate calls to + # readline, read, and readlines are properly interleaved + sep = b'\n' if isinstance(read_data, bytes) else '\n' + data_as_list = [l + sep for l in read_data.split(sep)] + + if data_as_list[-1] == sep: + # If the last line ended in a newline, the list comprehension will have an + # extra entry that's just a newline. Remove this. + data_as_list = data_as_list[:-1] + else: + # If there wasn't an extra newline by itself, then the file being + # emulated doesn't have a newline to end the last line, so remove the + # newline that our naive format() added + data_as_list[-1] = data_as_list[-1][:-1] + + for line in data_as_list: + yield line + + def mock_open(mock=None, read_data=''): + """ + A helper function to create a mock to replace the use of `open`. It works + for `open` called directly or used as a context manager. + + The `mock` argument is the mock object to configure. If `None` (the + default) then a `MagicMock` will be created for you, with the API limited + to methods or attributes available on standard file handles. + + `read_data` is a string for the `read`, `readline`, and `readlines` + methods of the file handle to return. 
This is an empty string by default. """ + def _readlines_side_effect(*args, **kwargs): + if handle.readlines.return_value is not None: + return handle.readlines.return_value + return list(_data) + + def _read_side_effect(*args, **kwargs): + if handle.read.return_value is not None: + return handle.read.return_value + return type(read_data)().join(_data) + + def _readline_side_effect(): + if handle.readline.return_value is not None: + while True: + yield handle.readline.return_value + for line in _data: + yield line + + global file_spec + if file_spec is None: + import _io + file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO)))) + + if mock is None: + mock = MagicMock(name='open', spec=open) + + handle = MagicMock(spec=file_spec) + handle.__enter__.return_value = handle + + _data = _iterate_read_data(read_data) + + handle.write.return_value = None + handle.read.return_value = None + handle.readline.return_value = None + handle.readlines.return_value = None + + handle.read.side_effect = _read_side_effect + handle.readline.side_effect = _readline_side_effect() + handle.readlines.side_effect = _readlines_side_effect + + mock.return_value = handle + return mock diff --git a/ansible_collections/amazon/aws/tests/unit/compat/unittest.py b/ansible_collections/amazon/aws/tests/unit/compat/unittest.py new file mode 100644 index 000000000..98f08ad6a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/compat/unittest.py @@ -0,0 +1,38 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +''' +Compat module for Python2.7's unittest module +''' + +import sys + +# Allow wildcard import because we really do want to import all of +# unittest's symbols into this compat shim +# pylint: disable=wildcard-import,unused-wildcard-import +if sys.version_info < (2, 7): + try: + # Need unittest2 on python2.6 + from unittest2 import * + except ImportError: + print('You need unittest2 installed on python2.6.x to run tests') +else: + from unittest import * diff --git a/ansible_collections/amazon/aws/tests/unit/constraints.txt b/ansible_collections/amazon/aws/tests/unit/constraints.txt new file mode 100644 index 000000000..cd546e7c2 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/constraints.txt @@ -0,0 +1,7 @@ +# Specifically run tests against the oldest versions that we support +boto3==1.18.0 +botocore==1.21.0 + +# AWS CLI has `botocore==` dependencies, provide the one that matches botocore +# to avoid needing to download over a year's worth of awscli wheels. 
diff --git a/ansible_collections/amazon/aws/tests/unit/mock/loader.py b/ansible_collections/amazon/aws/tests/unit/mock/loader.py
new file mode 100644
index 000000000..00a584127
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/mock/loader.py
@@ -0,0 +1,116 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.errors import AnsibleParserError
+from ansible.parsing.dataloader import DataLoader
+from ansible.module_utils._text import to_bytes, to_text
+
+
+class DictDataLoader(DataLoader):
+
+    def __init__(self, file_mapping=None):
+        file_mapping = {} if file_mapping is None else file_mapping
+        assert isinstance(file_mapping, dict)
+
+        super(DictDataLoader, self).__init__()
+
+        self._file_mapping = file_mapping
+        self._build_known_directories()
+        self._vault_secrets = None
+
+    def load_from_file(self, path, cache=True, unsafe=False):
+        path = to_text(path)
+        if path in self._file_mapping:
+            return self.load(self._file_mapping[path], path)
+        return None
+
+    # TODO: the real _get_file_contents returns a bytestring, so we actually convert the
+    # unicode/text it's created with to utf-8
+    def _get_file_contents(self, file_name):
+        file_name = to_text(file_name)
+        if file_name in self._file_mapping:
+            return (to_bytes(self._file_mapping[file_name]), False)
+        else:
+            raise AnsibleParserError("file not found: %s" % file_name)
+
+    def path_exists(self, path):
+        path = to_text(path)
+        return path in self._file_mapping or path in self._known_directories
+
+    def is_file(self, path):
+        path = to_text(path)
+        return path in self._file_mapping
+
+    def is_directory(self, path):
+        path = to_text(path)
+        return path in self._known_directories
+
+    def list_directory(self, path):
+        ret = []
+        path = to_text(path)
+        for x in (list(self._file_mapping.keys()) + self._known_directories):
+            if x.startswith(path):
+                if os.path.dirname(x) == path:
+                    ret.append(os.path.basename(x))
+        return ret
+
+    def is_executable(self, path):
+        # FIXME: figure out a way to make paths return true for this
+        return False
+
+    def _add_known_directory(self, directory):
+        if directory not in self._known_directories:
+            self._known_directories.append(directory)
+
+    def _build_known_directories(self):
+        self._known_directories = []
+        for path in self._file_mapping:
+            dirname = os.path.dirname(path)
+            while dirname not in ('/', ''):
+                self._add_known_directory(dirname)
+                dirname = os.path.dirname(dirname)
+
+    def push(self, path, content):
+        rebuild_dirs = False
+        if path not in self._file_mapping:
+            rebuild_dirs = True
+
+        self._file_mapping[path] = content
+
+        if rebuild_dirs:
+            self._build_known_directories()
+
+    def pop(self, path):
+        if path in self._file_mapping:
+            del self._file_mapping[path]
+            self._build_known_directories()
+
+    def clear(self):
+        self._file_mapping = dict()
+        self._known_directories = []
+
+    def get_basedir(self):
+        return os.getcwd()
+
+    def set_vault_secrets(self, vault_secrets):
+        self._vault_secrets = vault_secrets
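A short sketch of how a test might drive DictDataLoader (hypothetical paths and content, not part of the patch):

    from ansible_collections.amazon.aws.tests.unit.mock.loader import DictDataLoader

    loader = DictDataLoader({'/etc/ansible/play.yml': '- hosts: all'})
    assert loader.path_exists('/etc/ansible/play.yml')   # mapped file exists
    assert loader.is_directory('/etc/ansible')           # parent directories are inferred
    assert loader.load_from_file('/etc/ansible/play.yml') == [{'hosts': 'all'}]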
diff --git a/ansible_collections/amazon/aws/tests/unit/mock/path.py b/ansible_collections/amazon/aws/tests/unit/mock/path.py
new file mode 100644
index 000000000..8de2aec25
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/mock/path.py
@@ -0,0 +1,8 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.amazon.aws.tests.unit.compat.mock import MagicMock
+from ansible.utils.path import unfrackpath
+
+
+mock_unfrackpath_noop = MagicMock(spec_set=unfrackpath, side_effect=lambda x, *args, **kwargs: x)
diff --git a/ansible_collections/amazon/aws/tests/unit/mock/procenv.py b/ansible_collections/amazon/aws/tests/unit/mock/procenv.py
new file mode 100644
index 000000000..273959e4b
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/mock/procenv.py
@@ -0,0 +1,90 @@
+# (c) 2016, Matt Davis
+# (c) 2016, Toshio Kuratomi
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+import json
+
+from contextlib import contextmanager
+from io import BytesIO, StringIO
+from ansible_collections.amazon.aws.tests.unit.compat import unittest
+from ansible.module_utils.six import PY3
+from ansible.module_utils._text import to_bytes
+
+
+@contextmanager
+def swap_stdin_and_argv(stdin_data='', argv_data=tuple()):
+    """
+    context manager that temporarily masks the test runner's values for stdin and argv
+    """
+    real_stdin = sys.stdin
+    real_argv = sys.argv
+
+    if PY3:
+        fake_stream = StringIO(stdin_data)
+        fake_stream.buffer = BytesIO(to_bytes(stdin_data))
+    else:
+        fake_stream = BytesIO(to_bytes(stdin_data))
+
+    try:
+        sys.stdin = fake_stream
+        sys.argv = argv_data
+
+        yield
+    finally:
+        sys.stdin = real_stdin
+        sys.argv = real_argv
+
+
+@contextmanager
+def swap_stdout():
+    """
+    context manager that temporarily replaces stdout for tests that need to verify output
+    """
+    old_stdout = sys.stdout
+
+    if PY3:
+        fake_stream = StringIO()
+    else:
+        fake_stream = BytesIO()
+
+    try:
+        sys.stdout = fake_stream
+
+        yield fake_stream
+    finally:
+        sys.stdout = old_stdout
+
+
+class ModuleTestCase(unittest.TestCase):
+    def setUp(self, module_args=None):
+        if module_args is None:
+            module_args = {'_ansible_remote_tmp': '/tmp', '_ansible_keep_remote_files': False}
+
+        args = json.dumps(dict(ANSIBLE_MODULE_ARGS=module_args))
+
+        # unittest doesn't have a clean place to use a context manager, so we have to enter/exit manually
+        self.stdin_swap = swap_stdin_and_argv(stdin_data=args)
+        self.stdin_swap.__enter__()
+
+    def tearDown(self):
+        # unittest doesn't have a clean place to use a context manager, so we have to enter/exit manually
+        self.stdin_swap.__exit__(None, None, None)
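For orientation, a sketch of how these helpers combine in a test (hypothetical test case, not part of the patch):

    from ansible_collections.amazon.aws.tests.unit.mock.procenv import ModuleTestCase, swap_stdout

    class TestMyModule(ModuleTestCase):
        def test_output(self):
            # setUp() has already staged AnsibleModule-style args on a fake stdin
            with swap_stdout() as fake_stream:
                print('captured')
            assert 'captured' in fake_stream.getvalue()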
diff --git a/ansible_collections/amazon/aws/tests/unit/mock/vault_helper.py b/ansible_collections/amazon/aws/tests/unit/mock/vault_helper.py
new file mode 100644
index 000000000..dcce9c784
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/mock/vault_helper.py
@@ -0,0 +1,39 @@
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils._text import to_bytes
+
+from ansible.parsing.vault import VaultSecret
+
+
+class TextVaultSecret(VaultSecret):
+    '''A secret piece of text, i.e., a password. Tracks the text's encoding.
+
+    The text encoding of the text may not be the default text encoding, so
+    we keep track of the encoding so that we can encode it back to the same bytes.'''
+
+    def __init__(self, text, encoding=None, errors=None, _bytes=None):
+        super(TextVaultSecret, self).__init__()
+        self.text = text
+        self.encoding = encoding or 'utf-8'
+        self._bytes = _bytes
+        self.errors = errors or 'strict'
+
+    @property
+    def bytes(self):
+        '''The text encoded with encoding, unless we specifically set _bytes.'''
+        return self._bytes or to_bytes(self.text, encoding=self.encoding, errors=self.errors)
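A brief illustration of the behaviour described in the docstring (invented values, not part of the patch):

    secret = TextVaultSecret(u'p\u00e2ssword', encoding='utf-8')
    # encoded lazily, using the encoding recorded at construction time
    assert secret.bytes == u'p\u00e2ssword'.encode('utf-8')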
diff --git a/ansible_collections/amazon/aws/tests/unit/mock/yaml_helper.py b/ansible_collections/amazon/aws/tests/unit/mock/yaml_helper.py
new file mode 100644
index 000000000..1ef172159
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/mock/yaml_helper.py
@@ -0,0 +1,124 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import io
+import yaml
+
+from ansible.module_utils.six import PY3
+from ansible.parsing.yaml.loader import AnsibleLoader
+from ansible.parsing.yaml.dumper import AnsibleDumper
+
+
+class YamlTestUtils(object):
+    """Mixin class to combine with a unittest.TestCase subclass."""
+    def _loader(self, stream):
+        """Vault related tests will want to override this.
+
+        Vault cases should set up an AnsibleLoader that has the vault password."""
+        return AnsibleLoader(stream)
+
+    def _dump_stream(self, obj, stream, dumper=None):
+        """Dump to a py2-unicode or py3-string stream."""
+        if PY3:
+            return yaml.dump(obj, stream, Dumper=dumper)
+        else:
+            return yaml.dump(obj, stream, Dumper=dumper, encoding=None)
+
+    def _dump_string(self, obj, dumper=None):
+        """Dump to a py2-unicode or py3-string"""
+        if PY3:
+            return yaml.dump(obj, Dumper=dumper)
+        else:
+            return yaml.dump(obj, Dumper=dumper, encoding=None)
+
+    def _dump_load_cycle(self, obj):
+        # Each pass through a dump or load revs the 'generation'
+        # obj to yaml string
+        string_from_object_dump = self._dump_string(obj, dumper=AnsibleDumper)
+
+        # wrap a stream/file like StringIO around that yaml
+        stream_from_object_dump = io.StringIO(string_from_object_dump)
+        loader = self._loader(stream_from_object_dump)
+        # load the yaml stream to create a new instance of the object (gen 2)
+        obj_2 = loader.get_data()
+
+        # dump the gen 2 object directly to strings
+        string_from_object_dump_2 = self._dump_string(obj_2,
+                                                      dumper=AnsibleDumper)
+
+        # The gen 1 and gen 2 yaml strings should match
+        self.assertEqual(string_from_object_dump, string_from_object_dump_2)
+        # the gen 1 (orig) and gen 2 py objects should match
+        self.assertEqual(obj, obj_2)
+
+        # again! gen 3... load strings into py objects
+        stream_3 = io.StringIO(string_from_object_dump_2)
+        loader_3 = self._loader(stream_3)
+        obj_3 = loader_3.get_data()
+
+        string_from_object_dump_3 = self._dump_string(obj_3, dumper=AnsibleDumper)
+
+        self.assertEqual(obj, obj_3)
+        # should be transitive, but...
+        self.assertEqual(obj_2, obj_3)
+        self.assertEqual(string_from_object_dump, string_from_object_dump_3)
+
+    def _old_dump_load_cycle(self, obj):
+        '''Dump the passed in object to yaml, load it back up, dump again, compare.'''
+        stream = io.StringIO()
+
+        yaml_string = self._dump_string(obj, dumper=AnsibleDumper)
+        self._dump_stream(obj, stream, dumper=AnsibleDumper)
+
+        yaml_string_from_stream = stream.getvalue()
+
+        # reset stream
+        stream.seek(0)
+
+        loader = self._loader(stream)
+        # loader = AnsibleLoader(stream, vault_password=self.vault_password)
+        obj_from_stream = loader.get_data()
+
+        stream_from_string = io.StringIO(yaml_string)
+        loader2 = self._loader(stream_from_string)
+        # loader2 = AnsibleLoader(stream_from_string, vault_password=self.vault_password)
+        obj_from_string = loader2.get_data()
+
+        stream_obj_from_stream = io.StringIO()
+        stream_obj_from_string = io.StringIO()
+
+        if PY3:
+            yaml.dump(obj_from_stream, stream_obj_from_stream, Dumper=AnsibleDumper)
+            yaml.dump(obj_from_stream, stream_obj_from_string, Dumper=AnsibleDumper)
+        else:
+            yaml.dump(obj_from_stream, stream_obj_from_stream, Dumper=AnsibleDumper, encoding=None)
+            yaml.dump(obj_from_stream, stream_obj_from_string, Dumper=AnsibleDumper, encoding=None)
+
+        yaml_string_stream_obj_from_stream = stream_obj_from_stream.getvalue()
+        yaml_string_stream_obj_from_string = stream_obj_from_string.getvalue()
+
+        stream_obj_from_stream.seek(0)
+        stream_obj_from_string.seek(0)
+
+        if PY3:
+            yaml_string_obj_from_stream = yaml.dump(obj_from_stream, Dumper=AnsibleDumper)
+            yaml_string_obj_from_string = yaml.dump(obj_from_string, Dumper=AnsibleDumper)
+        else:
+            yaml_string_obj_from_stream = yaml.dump(obj_from_stream, Dumper=AnsibleDumper, encoding=None)
+            yaml_string_obj_from_string = yaml.dump(obj_from_string, Dumper=AnsibleDumper, encoding=None)
+
+        assert yaml_string == yaml_string_obj_from_stream
+        assert yaml_string ==
yaml_string_obj_from_stream == yaml_string_obj_from_string + assert (yaml_string == yaml_string_obj_from_stream == yaml_string_obj_from_string == yaml_string_stream_obj_from_stream == + yaml_string_stream_obj_from_string) + assert obj == obj_from_stream + assert obj == obj_from_string + assert obj == yaml_string_obj_from_stream + assert obj == yaml_string_obj_from_string + assert obj == obj_from_stream == obj_from_string == yaml_string_obj_from_stream == yaml_string_obj_from_string + return {'obj': obj, + 'yaml_string': yaml_string, + 'yaml_string_from_stream': yaml_string_from_stream, + 'obj_from_stream': obj_from_stream, + 'obj_from_string': obj_from_string, + 'yaml_string_obj_from_string': yaml_string_obj_from_string} diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/arn/test_is_outpost_arn.py b/ansible_collections/amazon/aws/tests/unit/module_utils/arn/test_is_outpost_arn.py new file mode 100644 index 000000000..7c2e21eb2 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/arn/test_is_outpost_arn.py @@ -0,0 +1,27 @@ +# (c) 2022 Red Hat Inc. +# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from ansible_collections.amazon.aws.plugins.module_utils.arn import is_outpost_arn + +outpost_arn_test_inputs = [ + ("arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", True), + ("arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0123", False), + ("arn:aws:outpost:us-east-1:123456789012:outpost/op-1234567890abcdef0", False), + ("ars:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", False), + ("arn:was:outposts:us-east-1:123456789012:outpost/ op-1234567890abcdef0", False), + ("arn:aws:outpost:us-east-1: 123456789012:outpost/ op-1234567890abcdef0", False), + ("ars:aws:outposts:us-east-1: 123456789012:outpost/ op-1234567890abcdef0", False), + ("arn:was:outposts:us-east-1: 123456789012:outpost/ op-1234567890abcdef0", False), +] + + +@pytest.mark.parametrize("outpost_arn, result", outpost_arn_test_inputs) +def test_is_outpost_arn(outpost_arn, result): + assert is_outpost_arn(outpost_arn) == result diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/arn/test_parse_aws_arn.py b/ansible_collections/amazon/aws/tests/unit/module_utils/arn/test_parse_aws_arn.py new file mode 100644 index 000000000..87dada4a9 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/arn/test_parse_aws_arn.py @@ -0,0 +1,95 @@ +# (c) 2022 Red Hat Inc. 
+# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from ansible_collections.amazon.aws.plugins.module_utils.arn import parse_aws_arn + +arn_bad_values = [ + ("arn:aws:outpost:us-east-1: 123456789012:outpost/op-1234567890abcdef0"), + ("arn:aws:out post:us-east-1:123456789012:outpost/op-1234567890abcdef0"), + ("arn:aws:outpost:us east 1:123456789012:outpost/op-1234567890abcdef0"), + ("invalid:aws:outpost:us-east-1:123456789012:outpost/op-1234567890abcdef0"), + ("arn:junk:outpost:us-east-1:123456789012:outpost/op-1234567890abcdef0"), + ("arn:aws:outpost:us-east-1:junk:outpost/op-1234567890abcdef0"), +] + +arn_good_values = [ + # Play about with partition name in valid ways + dict(partition='aws', service='outpost', region='us-east-1', account_id='123456789012', + resource='outpost/op-1234567890abcdef0'), + dict(partition='aws-gov', service='outpost', region='us-gov-east-1', account_id='123456789012', + resource='outpost/op-1234567890abcdef0'), + dict(partition='aws-cn', service='outpost', region='us-east-1', account_id='123456789012', + resource='outpost/op-1234567890abcdef0'), + # Start the account ID with 0s, it's a 12 digit *string*, if someone treats + # it as an integer the leading 0s can disappear. + dict(partition='aws-cn', service='outpost', region='us-east-1', account_id='000123000123', + resource='outpost/op-1234567890abcdef0'), + # S3 doesn't "need" region/account_id as bucket names are globally unique + dict(partition='aws', service='s3', region='', account_id='', resource='bucket/object'), + # IAM is a 'global' service, so the ARNs don't have regions + dict(partition='aws', service='iam', region='', account_id='123456789012', + resource='policy/foo/bar/PolicyName'), + dict(partition='aws', service='iam', region='', account_id='123456789012', + resource='instance-profile/ExampleProfile'), + dict(partition='aws', service='iam', region='', account_id='123456789012', resource='root'), + # Some examples with different regions + dict(partition='aws', service='sqs', region='eu-west-3', account_id='123456789012', + resource='example-queue'), + dict(partition='aws', service='sqs', region='us-gov-east-1', account_id='123456789012', + resource='example-queue'), + dict(partition='aws', service='sqs', region='sa-east-1', account_id='123456789012', + resource='example-queue'), + dict(partition='aws', service='sqs', region='ap-northeast-2', account_id='123456789012', + resource='example-queue'), + dict(partition='aws', service='sqs', region='ca-central-1', account_id='123456789012', + resource='example-queue'), + # Some more unusual service names + dict(partition='aws', service='network-firewall', region='us-east-1', account_id='123456789012', + resource='stateful-rulegroup/ExampleDomainList'), + dict(partition='aws', service='resource-groups', region='us-east-1', account_id='123456789012', + resource='group/group-name'), + # A special case for resources AWS curate + dict(partition='aws', service='network-firewall', region='us-east-1', account_id='aws-managed', + resource='stateful-rulegroup/BotNetCommandAndControlDomainsActionOrder'), + dict(partition='aws', service='iam', region='', account_id='aws', + resource='policy/AWSDirectConnectReadOnlyAccess'), + # Examples merged in from test_arn.py + dict(partition="aws-us-gov", service="iam", region="", account_id="0123456789", + resource="role/foo-role"), + 
dict(partition="aws", service='iam', region="", account_id="123456789012", + resource="user/dev/*"), + dict(partition="aws", service="iam", region="", account_id="123456789012", + resource="user:test"), + dict(partition="aws-cn", service="iam", region="", account_id="123456789012", + resource="user:test"), + dict(partition="aws", service="iam", region="", account_id="123456789012", + resource="user"), + dict(partition="aws", service="s3", region="", account_id="", + resource="my_corporate_bucket/*"), + dict(partition="aws", service="s3", region="", account_id="", + resource="my_corporate_bucket/Development/*"), + dict(partition="aws", service="rds", region="es-east-1", account_id="000000000000", + resource="snapshot:rds:my-db-snapshot"), + dict(partition="aws", service="cloudformation", region="us-east-1", account_id="012345678901", + resource="changeSet/Ansible-StackName-c6884247ede41eb0"), +] + + +@pytest.mark.parametrize("arn", arn_bad_values) +def test_parse_aws_arn_bad_values(arn): + # Make sure we get the expected 'None' for various 'bad' ARNs. + assert parse_aws_arn(arn) is None + + +@pytest.mark.parametrize("result", arn_good_values) +def test_parse_aws_arn_good_values(result): + # Something of a cheat, but build the ARN from the result we expect + arn = 'arn:{partition}:{service}:{region}:{account_id}:{resource}'.format(**result) + assert parse_aws_arn(arn) == result diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_is_boto3_error_code.py b/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_is_boto3_error_code.py new file mode 100644 index 000000000..627ae4cb3 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_is_boto3_error_code.py @@ -0,0 +1,214 @@ +# -*- coding: utf-8 -*- +# (c) 2020 Red Hat Inc. +# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +try: + import botocore +except ImportError: + # Handled by HAS_BOTO3 + pass + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3 + +if not HAS_BOTO3: + pytestmark = pytest.mark.skip("test_is_boto3_error_code.py requires the python modules 'boto3' and 'botocore'") + + +class TestIsBoto3ErrorCode(): + + def _make_denied_exception(self): + return botocore.exceptions.ClientError( + { + "Error": { + "Code": "AccessDenied", + "Message": "User: arn:aws:iam::123456789012:user/ExampleUser " + + "is not authorized to perform: iam:GetUser on resource: user ExampleUser" + }, + "ResponseMetadata": { + "RequestId": "01234567-89ab-cdef-0123-456789abcdef" + } + }, 'getUser') + + def _make_unexpected_exception(self): + return botocore.exceptions.ClientError( + { + "Error": { + "Code": "SomeThingWentWrong", + "Message": "Boom!" + }, + "ResponseMetadata": { + "RequestId": "01234567-89ab-cdef-0123-456789abcdef" + } + }, 'someCall') + + def _make_encoded_exception(self): + return botocore.exceptions.ClientError( + { + "Error": { + "Code": "PermissionDenied", + "Message": "You are not authorized to perform this operation. 
Encoded authorization failure message: " + + "fEwXX6llx3cClm9J4pURgz1XPnJPrYexEbrJcLhFkwygMdOgx_-aEsj0LqRM6Kxt2HVI6prUhDwbJqBo9U2V7iRKZ" + + "T6ZdJvHH02cXmD0Jwl5vrTsf0PhBcWYlH5wl2qME7xTfdolEUr4CzumCiti7ETiO-RDdHqWlasBOW5bWsZ4GSpPdU" + + "06YAX0TfwVBs48uU5RpCHfz1uhSzez-3elbtp9CmTOHLt5pzJodiovccO55BQKYLPtmJcs6S9YLEEogmpI4Cb1D26" + + "fYahDh51jEmaohPnW5pb1nQe2yPEtuIhtRzNjhFCOOMwY5DBzNsymK-Gj6eJLm7FSGHee4AHLU_XmZMe_6bcLAiOx" + + "6Zdl65Kdd0hLcpwVxyZMi27HnYjAdqRlV3wuCW2PkhAW14qZQLfiuHZDEwnPe2PBGSlFcCmkQvJvX-YLoA7Uyc2wf" + + "NX5RJm38STwfiJSkQaNDhHKTWKiLOsgY4Gze6uZoG7zOcFXFRyaA4cbMmI76uyBO7j-9uQUCtBYqYto8x_9CUJcxI" + + "VC5SPG_C1mk-WoDMew01f0qy-bNaCgmJ9TOQGd08FyuT1SaMpCC0gX6mHuOnEgkFw3veBIowMpp9XcM-yc42fmIOp" + + "FOdvQO6uE9p55Qc-uXvsDTTvT3A7EeFU8a_YoAIt9UgNYM6VTvoprLz7dBI_P6C-bdPPZCY2amm-dJNVZelT6TbJB" + + "H_Vxh0fzeiSUBersy_QzB0moc-vPWgnB-IkgnYLV-4L3K0L2" + }, + "ResponseMetadata": { + "RequestId": "01234567-89ab-cdef-0123-456789abcdef" + } + }, 'someCall') + + def _make_botocore_exception(self): + return botocore.exceptions.EndpointConnectionError(endpoint_url='junk.endpoint') + + ### + # Test that is_boto3_error_code does what's expected when used in a try/except block + # (where we don't explicitly pass an exception to the function) + ### + + def _do_try_code(self, exception, codes): + try: + raise exception + except is_boto3_error_code(codes) as e: + return e + + def test_is_boto3_error_code_single__raise__client(self): + # 'AccessDenied' error, should be caught in our try/except in _do_try_code + thrown_exception = self._make_denied_exception() + codes_to_catch = 'AccessDenied' + + caught_exception = self._do_try_code(thrown_exception, codes_to_catch) + assert caught_exception == thrown_exception + + def test_is_boto3_error_code_single__raise__unexpected(self): + # 'SomeThingWentWrong' error, shouldn't be caught because the Code doesn't match + thrown_exception = self._make_unexpected_exception() + codes_to_catch = 'AccessDenied' + + with pytest.raises(botocore.exceptions.ClientError) as context: + self._do_try_code(thrown_exception, codes_to_catch) + assert context.value == thrown_exception + + def test_is_boto3_error_code_single__raise__botocore(self): + # BotoCoreExceptions don't have an error code, so shouldn't be caught (and shouldn't throw + # some other error due to the missing 'Code' data on the exception) + thrown_exception = self._make_botocore_exception() + codes_to_catch = 'AccessDenied' + + with pytest.raises(botocore.exceptions.BotoCoreError) as context: + self._do_try_code(thrown_exception, codes_to_catch) + + assert context.value == thrown_exception + + def test_is_boto3_error_code_multiple__raise__client(self): + # 'AccessDenied' error, should be caught in our try/except in _do_try_code + # test with multiple possible codes to catch + thrown_exception = self._make_denied_exception() + codes_to_catch = ['AccessDenied', 'NotAccessDenied'] + + caught_exception = self._do_try_code(thrown_exception, codes_to_catch) + assert caught_exception == thrown_exception + + thrown_exception = self._make_denied_exception() + codes_to_catch = ['NotAccessDenied', 'AccessDenied'] + + caught_exception = self._do_try_code(thrown_exception, codes_to_catch) + assert caught_exception == thrown_exception + + def test_is_boto3_error_code_multiple__raise__unexpected(self): + # 'SomeThingWentWrong' error, shouldn't be caught because the Code doesn't match + # test with multiple possible codes to catch + thrown_exception = self._make_unexpected_exception() + codes_to_catch = ['NotAccessDenied', 
'AccessDenied'] + + with pytest.raises(botocore.exceptions.ClientError) as context: + self._do_try_code(thrown_exception, codes_to_catch) + assert context.value == thrown_exception + + def test_is_boto3_error_code_multiple__raise__botocore(self): + # BotoCoreErrors don't have an error code, so shouldn't be caught (and shouldn't throw + # some other error due to the missing 'Code' data on the exception) + # test with multiple possible codes to catch + thrown_exception = self._make_botocore_exception() + codes_to_catch = ['NotAccessDenied', 'AccessDenied'] + + with pytest.raises(botocore.exceptions.BotoCoreError) as context: + self._do_try_code(thrown_exception, codes_to_catch) + assert context.value == thrown_exception + + ### + # Test that is_boto3_error_code returns what we expect when explicitly passed an exception + ### + + def test_is_boto3_error_code_single__pass__client(self): + passed_exception = self._make_denied_exception() + returned_exception = is_boto3_error_code('AccessDenied', e=passed_exception) + assert isinstance(passed_exception, returned_exception) + assert issubclass(returned_exception, botocore.exceptions.ClientError) + assert not issubclass(returned_exception, botocore.exceptions.BotoCoreError) + assert issubclass(returned_exception, Exception) + assert returned_exception.__name__ != "NeverEverRaisedException" + + def test_is_boto3_error_code_single__pass__unexpected(self): + passed_exception = self._make_unexpected_exception() + returned_exception = is_boto3_error_code('AccessDenied', e=passed_exception) + assert not isinstance(passed_exception, returned_exception) + assert not issubclass(returned_exception, botocore.exceptions.ClientError) + assert not issubclass(returned_exception, botocore.exceptions.BotoCoreError) + assert issubclass(returned_exception, Exception) + assert returned_exception.__name__ == "NeverEverRaisedException" + + def test_is_boto3_error_code_single__pass__botocore(self): + passed_exception = self._make_botocore_exception() + returned_exception = is_boto3_error_code('AccessDenied', e=passed_exception) + assert not isinstance(passed_exception, returned_exception) + assert not issubclass(returned_exception, botocore.exceptions.ClientError) + assert not issubclass(returned_exception, botocore.exceptions.BotoCoreError) + assert issubclass(returned_exception, Exception) + assert returned_exception.__name__ == "NeverEverRaisedException" + + def test_is_boto3_error_code_multiple__pass__client(self): + passed_exception = self._make_denied_exception() + returned_exception = is_boto3_error_code(['NotAccessDenied', 'AccessDenied'], e=passed_exception) + assert isinstance(passed_exception, returned_exception) + assert issubclass(returned_exception, botocore.exceptions.ClientError) + assert not issubclass(returned_exception, botocore.exceptions.BotoCoreError) + assert issubclass(returned_exception, Exception) + assert returned_exception.__name__ != "NeverEverRaisedException" + + returned_exception = is_boto3_error_code(['AccessDenied', 'NotAccessDenied'], e=passed_exception) + assert isinstance(passed_exception, returned_exception) + assert issubclass(returned_exception, botocore.exceptions.ClientError) + assert not issubclass(returned_exception, botocore.exceptions.BotoCoreError) + assert issubclass(returned_exception, Exception) + assert returned_exception.__name__ != "NeverEverRaisedException" + + def test_is_boto3_error_code_multiple__pass__unexpected(self): + passed_exception = self._make_unexpected_exception() + returned_exception = 
is_boto3_error_code(['NotAccessDenied', 'AccessDenied'], e=passed_exception)
+        assert not isinstance(passed_exception, returned_exception)
+        assert not issubclass(returned_exception, botocore.exceptions.ClientError)
+        assert not issubclass(returned_exception, botocore.exceptions.BotoCoreError)
+        assert issubclass(returned_exception, Exception)
+        assert returned_exception.__name__ == "NeverEverRaisedException"
+
+    def test_is_boto3_error_code_multiple__pass__botocore(self):
+        passed_exception = self._make_botocore_exception()
+        returned_exception = is_boto3_error_code(['NotAccessDenied', 'AccessDenied'], e=passed_exception)
+        assert not isinstance(passed_exception, returned_exception)
+        assert not issubclass(returned_exception, botocore.exceptions.ClientError)
+        assert not issubclass(returned_exception, botocore.exceptions.BotoCoreError)
+        assert issubclass(returned_exception, Exception)
+        assert returned_exception.__name__ == "NeverEverRaisedException"
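The calling convention these tests exercise looks like this in module code (a sketch — `client` is an assumed boto3 IAM client and the error code is illustrative):

    try:
        user = client.get_user(UserName='ExampleUser')
    except is_boto3_error_code('NoSuchEntity'):
        user = None  # only this error code is swallowed; any other ClientError still propagates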
diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_is_boto3_error_message.py b/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_is_boto3_error_message.py
new file mode 100644
index 000000000..cd40a58dd
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_is_boto3_error_message.py
@@ -0,0 +1,145 @@
+# -*- coding: utf-8 -*-
+# (c) 2020 Red Hat Inc.
+#
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+try:
+    import botocore
+except ImportError:
+    # Handled by HAS_BOTO3
+    pass
+
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_message
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3
+
+if not HAS_BOTO3:
+    pytestmark = pytest.mark.skip("test_is_boto3_error_message.py requires the python modules 'boto3' and 'botocore'")
+
+
+class TestIsBoto3ErrorMessage():
+
+    def _make_denied_exception(self):
+        return botocore.exceptions.ClientError(
+            {
+                "Error": {
+                    "Code": "AccessDenied",
+                    "Message": "User: arn:aws:iam::123456789012:user/ExampleUser " +
+                               "is not authorized to perform: iam:GetUser on resource: user ExampleUser"
+                },
+                "ResponseMetadata": {
+                    "RequestId": "01234567-89ab-cdef-0123-456789abcdef"
+                }
+            }, 'getUser')
+
+    def _make_unexpected_exception(self):
+        return botocore.exceptions.ClientError(
+            {
+                "Error": {
+                    "Code": "SomeThingWentWrong",
+                    "Message": "Boom!"
+                },
+                "ResponseMetadata": {
+                    "RequestId": "01234567-89ab-cdef-0123-456789abcdef"
+                }
+            }, 'someCall')
+
+    def _make_encoded_exception(self):
+        return botocore.exceptions.ClientError(
+            {
+                "Error": {
+                    "Code": "AccessDenied",
+                    "Message": "You are not authorized to perform this operation. Encoded authorization failure message: " +
+                               "fEwXX6llx3cClm9J4pURgz1XPnJPrYexEbrJcLhFkwygMdOgx_-aEsj0LqRM6Kxt2HVI6prUhDwbJqBo9U2V7iRKZ" +
+                               "T6ZdJvHH02cXmD0Jwl5vrTsf0PhBcWYlH5wl2qME7xTfdolEUr4CzumCiti7ETiO-RDdHqWlasBOW5bWsZ4GSpPdU" +
+                               "06YAX0TfwVBs48uU5RpCHfz1uhSzez-3elbtp9CmTOHLt5pzJodiovccO55BQKYLPtmJcs6S9YLEEogmpI4Cb1D26" +
+                               "fYahDh51jEmaohPnW5pb1nQe2yPEtuIhtRzNjhFCOOMwY5DBzNsymK-Gj6eJLm7FSGHee4AHLU_XmZMe_6bcLAiOx" +
+                               "6Zdl65Kdd0hLcpwVxyZMi27HnYjAdqRlV3wuCW2PkhAW14qZQLfiuHZDEwnPe2PBGSlFcCmkQvJvX-YLoA7Uyc2wf" +
+                               "NX5RJm38STwfiJSkQaNDhHKTWKiLOsgY4Gze6uZoG7zOcFXFRyaA4cbMmI76uyBO7j-9uQUCtBYqYto8x_9CUJcxI" +
+                               "VC5SPG_C1mk-WoDMew01f0qy-bNaCgmJ9TOQGd08FyuT1SaMpCC0gX6mHuOnEgkFw3veBIowMpp9XcM-yc42fmIOp" +
+                               "FOdvQO6uE9p55Qc-uXvsDTTvT3A7EeFU8a_YoAIt9UgNYM6VTvoprLz7dBI_P6C-bdPPZCY2amm-dJNVZelT6TbJB" +
+                               "H_Vxh0fzeiSUBersy_QzB0moc-vPWgnB-IkgnYLV-4L3K0L2"
+                },
+                "ResponseMetadata": {
+                    "RequestId": "01234567-89ab-cdef-0123-456789abcdef"
+                }
+            }, 'someCall')
+
+    def _make_botocore_exception(self):
+        return botocore.exceptions.EndpointConnectionError(endpoint_url='junk.endpoint')
+
+    def _do_try_message(self, exception, messages):
+        try:
+            raise exception
+        except is_boto3_error_message(messages) as e:
+            return e
+
+    ###
+    # Test that is_boto3_error_message does what's expected when used in a try/except block
+    # (where we don't explicitly pass an exception to the function)
+    ###
+
+    def test_is_boto3_error_message_single__raise__client(self):
+        # error with 'is not authorized to perform' in the message, should be caught in our try/except in _do_try_message
+        thrown_exception = self._make_denied_exception()
+        messages_to_catch = 'is not authorized to perform'
+
+        caught_exception = self._do_try_message(thrown_exception, messages_to_catch)
+
+        assert caught_exception == thrown_exception
+
+    def test_is_boto3_error_message_single__raise__unexpected(self):
+        # error with 'Boom!'
as the message, shouldn't match and should fall through + thrown_exception = self._make_unexpected_exception() + messages_to_catch = 'is not authorized to perform' + + with pytest.raises(botocore.exceptions.ClientError) as context: + self._do_try_message(thrown_exception, messages_to_catch) + + assert context.value == thrown_exception + + def test_is_boto3_error_message_single__raise__botocore(self): + # Test that we don't catch BotoCoreError + thrown_exception = self._make_botocore_exception() + messages_to_catch = 'is not authorized to perform' + + with pytest.raises(botocore.exceptions.BotoCoreError) as context: + self._do_try_message(thrown_exception, messages_to_catch) + + assert context.value == thrown_exception + + ### + # Test that is_boto3_error_message returns what we expect when explicitly passed an exception + ### + + def test_is_boto3_error_message_single__pass__client(self): + passed_exception = self._make_denied_exception() + returned_exception = is_boto3_error_message('is not authorized to perform', e=passed_exception) + assert isinstance(passed_exception, returned_exception) + assert issubclass(returned_exception, botocore.exceptions.ClientError) + assert not issubclass(returned_exception, botocore.exceptions.BotoCoreError) + assert issubclass(returned_exception, Exception) + assert returned_exception.__name__ != "NeverEverRaisedException" + + def test_is_boto3_error_message_single__pass__unexpected(self): + passed_exception = self._make_unexpected_exception() + returned_exception = is_boto3_error_message('is not authorized to perform', e=passed_exception) + assert not isinstance(passed_exception, returned_exception) + assert not issubclass(returned_exception, botocore.exceptions.ClientError) + assert not issubclass(returned_exception, botocore.exceptions.BotoCoreError) + assert issubclass(returned_exception, Exception) + assert returned_exception.__name__ == "NeverEverRaisedException" + + def test_is_boto3_error_message_single__pass__botocore(self): + passed_exception = self._make_botocore_exception() + returned_exception = is_boto3_error_message('is not authorized to perform', e=passed_exception) + assert not isinstance(passed_exception, returned_exception) + assert not issubclass(returned_exception, botocore.exceptions.ClientError) + assert not issubclass(returned_exception, botocore.exceptions.BotoCoreError) + assert issubclass(returned_exception, Exception) + assert returned_exception.__name__ == "NeverEverRaisedException" diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_normalize_boto3_result.py b/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_normalize_boto3_result.py new file mode 100644 index 000000000..71da9d66d --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_normalize_boto3_result.py @@ -0,0 +1,59 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import normalize_boto3_result + +example_date_txt = '2020-12-30T00:00:00.000Z' +example_date_iso = '2020-12-30T00:00:00+00:00' + +try: + from dateutil import parser as date_parser + example_date = date_parser.parse(example_date_txt) +except ImportError: + example_date = None + pytestmark = pytest.mark.skip("test_normalize_boto3_result.py requires the python module dateutil (python-dateutil)") + + +normalize_boto3_result_data = [ + (dict(), + dict() + ), + # Bool + (dict(param1=False), + 
dict(param1=False)
+     ),
+    # Simple string (shouldn't be touched)
+    (dict(date_example=example_date_txt),
+     dict(date_example=example_date_txt)
+     ),
+    (dict(date_example=example_date_iso),
+     dict(date_example=example_date_iso)
+     ),
+    # Datetime -> String
+    (dict(date_example=example_date),
+     dict(date_example=example_date_iso)
+     ),
+    (list(),
+     list()
+     ),
+    (list([False]),
+     list([False])
+     ),
+    (list([example_date_txt]),
+     list([example_date_txt])
+     ),
+    (list([example_date_iso]),
+     list([example_date_iso])
+     ),
+    (list([example_date]),
+     list([example_date_iso])
+     ),
+]
+
+
+@pytest.mark.parametrize("input_params, output_params", normalize_boto3_result_data)
+def test_normalize_boto3_result(input_params, output_params):
+
+    assert normalize_boto3_result(input_params) == output_params
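Concretely, the conversion exercised here (a sketch reusing the example date above):

    from datetime import datetime, timezone

    normalize_boto3_result({'Date': datetime(2020, 12, 30, tzinfo=timezone.utc)})
    # -> {'Date': '2020-12-30T00:00:00+00:00'}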
diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_backoff_iterator.py b/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_backoff_iterator.py
new file mode 100644
index 000000000..5fee115c2
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_backoff_iterator.py
@@ -0,0 +1,45 @@
+# (c) 2021 Red Hat Inc.
+#
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.amazon.aws.plugins.module_utils.cloud import BackoffIterator
+
+
+def test_backoff_value_generator():
+    max_delay = 60
+    initial = 3
+    backoff = 2
+
+    min_sleep = initial
+    counter = 0
+    for sleep in BackoffIterator(delay=initial, backoff=backoff, max_delay=max_delay):
+        if counter > 4:
+            assert sleep == max_delay
+        else:
+            assert sleep == min_sleep
+            min_sleep *= backoff
+        counter += 1
+        if counter == 10:
+            break
+
+
+def test_backoff_value_generator_with_jitter():
+    max_delay = 60
+    initial = 3
+    backoff = 2
+
+    min_sleep = initial
+    counter = 0
+    for sleep in BackoffIterator(delay=initial, backoff=backoff, max_delay=max_delay, jitter=True):
+        if counter > 4:
+            assert sleep <= max_delay
+        else:
+            assert sleep <= min_sleep
+            min_sleep *= backoff
+        counter += 1
+        if counter == 10:
+            break
diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_cloud_retry.py b/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_cloud_retry.py
new file mode 100644
index 000000000..ce5f03f11
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_cloud_retry.py
@@ -0,0 +1,236 @@
+# (c) 2021 Red Hat Inc.
+#
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import random
+from datetime import datetime
+import pytest
+
+from ansible_collections.amazon.aws.plugins.module_utils.cloud import CloudRetry
+
+
+class TestCloudRetry():
+
+    error_codes = [400, 500, 600]
+    custom_error_codes = [100, 200, 300]
+
+    class OurTestException(Exception):
+        """
+        custom exception class for testing
+        """
+        def __init__(self, status):
+            self.status = status
+
+        def __str__(self):
+            return "TestException with status: {0}".format(self.status)
+
+    class UnitTestsRetry(CloudRetry):
+        base_class = Exception
+
+        @staticmethod
+        def status_code_from_exception(error):
+            return getattr(error, "status") if hasattr(error, "status") else None
+
+    class CustomRetry(CloudRetry):
+        base_class = Exception
+
+        @staticmethod
+        def status_code_from_exception(error):
+            return error.status['response']['status']
+
+        @staticmethod
+        def found(response_code, catch_extra_error_codes=None):
+            if catch_extra_error_codes:
+                return response_code in catch_extra_error_codes + TestCloudRetry.custom_error_codes
+            else:
+                return response_code in TestCloudRetry.custom_error_codes
+
+    class KeyRetry(CloudRetry):
+        base_class = KeyError
+
+        @staticmethod
+        def status_code_from_exception(error):
+            return True
+
+        @staticmethod
+        def found(response_code, catch_extra_error_codes=None):
+            return True
+
+    class KeyAndIndexRetry(CloudRetry):
+        base_class = (KeyError, IndexError)
+
+        @staticmethod
+        def status_code_from_exception(error):
+            return True
+
+        @staticmethod
+        def found(response_code, catch_extra_error_codes=None):
+            return True
+
+    # ========================================================
+    # retry original backoff
+    # ========================================================
+    def test_retry_backoff(self):
+
+        @TestCloudRetry.UnitTestsRetry.backoff(tries=3, delay=1, backoff=1.1,
+                                               catch_extra_error_codes=TestCloudRetry.error_codes)
+        def test_retry_func():
+            if test_retry_func.counter < 2:
+                test_retry_func.counter += 1
+                raise self.OurTestException(status=random.choice(TestCloudRetry.error_codes))
+            else:
+                return True
+
+        test_retry_func.counter = 0
+        ret = test_retry_func()
+        assert ret is True
+
+    # ========================================================
+    # retry exponential backoff
+    # ========================================================
+    def test_retry_exponential_backoff(self):
+
+        @TestCloudRetry.UnitTestsRetry.exponential_backoff(retries=3, delay=1, backoff=1.1, max_delay=3,
+                                                           catch_extra_error_codes=TestCloudRetry.error_codes)
+        def test_retry_func():
+            if test_retry_func.counter < 2:
+                test_retry_func.counter += 1
+                raise self.OurTestException(status=random.choice(TestCloudRetry.error_codes))
+            else:
+                return True
+
+        test_retry_func.counter = 0
+        ret = test_retry_func()
+        assert ret is True
+
+    def test_retry_exponential_backoff_with_unexpected_exception(self):
+        unexpected_except = self.OurTestException(status=100)
+
+        @TestCloudRetry.UnitTestsRetry.exponential_backoff(retries=3, delay=1, backoff=1.1, max_delay=3,
+                                                           catch_extra_error_codes=TestCloudRetry.error_codes)
+        def test_retry_func():
+            if test_retry_func.counter == 0:
+                test_retry_func.counter += 1
+                raise self.OurTestException(status=random.choice(TestCloudRetry.error_codes))
+            else:
+                raise unexpected_except
+
+        test_retry_func.counter = 0
+        with pytest.raises(self.OurTestException) as context:
+            test_retry_func()
+
+        assert context.value.status == unexpected_except.status
+
+    # ========================================================
+    # retry jittered backoff
+    # ========================================================
+    def test_retry_jitter_backoff(self):
+        @TestCloudRetry.UnitTestsRetry.jittered_backoff(retries=3, delay=1, max_delay=3,
+                                                        catch_extra_error_codes=TestCloudRetry.error_codes)
+        def test_retry_func():
+            if test_retry_func.counter < 2:
+                test_retry_func.counter += 1
+                raise self.OurTestException(status=random.choice(TestCloudRetry.error_codes))
+            else:
+                return True
+
+        test_retry_func.counter = 0
+        ret = test_retry_func()
+        assert ret is True
+
+    def test_retry_jittered_backoff_with_unexpected_exception(self):
+        unexpected_except = self.OurTestException(status=100)
+
+        @TestCloudRetry.UnitTestsRetry.jittered_backoff(retries=3, delay=1, max_delay=3,
+                                                        catch_extra_error_codes=TestCloudRetry.error_codes)
+        def test_retry_func():
+            if test_retry_func.counter == 0:
+                test_retry_func.counter += 1
+                raise self.OurTestException(status=random.choice(TestCloudRetry.error_codes))
+            else:
+                raise unexpected_except
+
+        test_retry_func.counter = 0
+        with pytest.raises(self.OurTestException) as context:
+            test_retry_func()
+
+        assert context.value.status == unexpected_except.status
+
+    # ========================================================
+    # retry with custom class
+    # ========================================================
+    def test_retry_exponential_backoff_custom_class(self):
+        def build_response():
+            return dict(response=dict(status=random.choice(TestCloudRetry.custom_error_codes)))
+
+        @self.CustomRetry.exponential_backoff(retries=3, delay=1, backoff=1.1, max_delay=3,
+                                              catch_extra_error_codes=TestCloudRetry.error_codes)
+        def test_retry_func():
+            if test_retry_func.counter < 2:
+                test_retry_func.counter += 1
+                raise self.OurTestException(build_response())
+            else:
+                return True
+
+        test_retry_func.counter = 0
+
+        ret = test_retry_func()
+        assert ret is True
+
+    # =============================================================
+    # Test that calling the wrapped function multiple times restarts the sleep
+    # =============================================================
+    def test_wrapped_function_called_several_times(self):
+        @TestCloudRetry.UnitTestsRetry.exponential_backoff(retries=2, delay=2, backoff=4, max_delay=100,
+                                                           catch_extra_error_codes=TestCloudRetry.error_codes)
+        def _fail():
+            raise self.OurTestException(status=random.choice(TestCloudRetry.error_codes))
+
+        # run the method 3 times and assert that each time it retries after 2 secs;
+        # the elapsed execution time should be close to 2 secs
+        for _i in range(3):
+            start = datetime.now()
+            with pytest.raises(self.OurTestException):
+                _fail()
+            duration = (datetime.now() - start).seconds
+            assert duration == 2
+
+    def test_only_base_exception(self):
+        def _fail_index():
+            my_list = list()
+            return my_list[5]
+
+        def _fail_key():
+            my_dict = dict()
+            return my_dict['invalid_key']
+
+        def _fail_exception():
+            raise Exception('bang')
+
+        key_retry_decorator = TestCloudRetry.KeyRetry.exponential_backoff(retries=2, delay=2, backoff=4, max_delay=100)
+        key_and_index_retry_decorator = TestCloudRetry.KeyAndIndexRetry.exponential_backoff(retries=2, delay=2, backoff=4, max_delay=100)
+
+        expectations = [
+            [key_retry_decorator, _fail_exception, 0, Exception],
+            [key_retry_decorator, _fail_index, 0, IndexError],
+            [key_retry_decorator, _fail_key, 2, KeyError],
+            [key_and_index_retry_decorator, _fail_exception, 0, Exception],
[key_and_index_retry_decorator, _fail_index, 2, IndexError],
+            [key_and_index_retry_decorator, _fail_key, 2, KeyError],
+        ]
+
+        for expectation in expectations:
+            decorator = expectation[0]
+            function = expectation[1]
+            duration = expectation[2]
+            exception = expectation[3]
+
+            start = datetime.now()
+            with pytest.raises(exception):
+                decorator(function)()
+            _duration = (datetime.now() - start).seconds
+            assert duration == _duration
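For context, a sketch of how a concrete CloudRetry subclass such as the ones above is used in practice (invented names, Python 3 syntax):

    class ServiceError(Exception):  # stand-in for a real SDK exception
        def __init__(self, status):
            super().__init__(status)
            self.status = status

    class ServiceRetry(CloudRetry):
        base_class = ServiceError

        @staticmethod
        def status_code_from_exception(error):
            return error.status

    @ServiceRetry.exponential_backoff(retries=5, delay=2, catch_extra_error_codes=[503])
    def fetch_widgets():
        ...  # a ServiceError(503) raised here would be retried up to 5 times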
diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_decorator_generation.py b/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_decorator_generation.py
new file mode 100644
index 000000000..23b446763
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_decorator_generation.py
@@ -0,0 +1,156 @@
+# (c) 2022 Red Hat Inc.
+#
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+import sys
+
+from ansible_collections.amazon.aws.plugins.module_utils.cloud import CloudRetry
+from ansible_collections.amazon.aws.plugins.module_utils.cloud import BackoffIterator
+from ansible_collections.amazon.aws.tests.unit.compat.mock import MagicMock
+from ansible_collections.amazon.aws.tests.unit.compat.mock import sentinel
+
+if sys.version_info < (3, 8):
+    pytest.skip("accessing call_args.kwargs by keyword (instead of index) was introduced in Python 3.8", allow_module_level=True)
+
+
+@pytest.fixture
+def patch_cloud_retry(monkeypatch):
+    """
+    replaces CloudRetry.base_decorator with a MagicMock so that we can exercise the generation of
+    the various "public" decorators. We can then check that base_decorator was called as expected.
+    Note: this doesn't test the operation of CloudRetry.base_decorator itself, but does make sure
+    we can fully exercise the various wrapper functions built over the top of it.
+    """
+    def perform_patch():
+        decorator_generator = MagicMock()
+        decorator_generator.return_value = sentinel.decorator
+        monkeypatch.setattr(CloudRetry, 'base_decorator', decorator_generator)
+        return CloudRetry, decorator_generator
+
+    return perform_patch
+
+
+def check_common_side_effects(decorator_generator):
+    """
+    By invoking CloudRetry.(exponential_backoff|jittered_backoff|backoff) we expect certain things
+    to have happened, specifically CloudRetry.base_decorator should have been called *once* with a
+    number of keyword arguments.
+
+    "found" should be CloudRetry.found
+    "status_code_from_exception" should be CloudRetry.status_code_from_exception (this is replaced when the abstract class is realised)
+    "sleep_time_generator" should be an instance of BackoffIterator
+    """
+
+    assert decorator_generator.called is True
+    assert decorator_generator.call_count == 1
+
+    gen_kw_args = decorator_generator.call_args.kwargs
+    assert gen_kw_args['found'] is CloudRetry.found
+    assert gen_kw_args['status_code_from_exception'] is CloudRetry.status_code_from_exception
+
+    sleep_time_generator = gen_kw_args['sleep_time_generator']
+    assert isinstance(sleep_time_generator, BackoffIterator)
+
+    # Return the KW args used when CloudRetry.base_decorator was called and the sleep_time_generator
+    # passed; these are what should change between the different decorators
+    return gen_kw_args, sleep_time_generator
+
+
+def test_create_exponential_backoff_with_defaults(patch_cloud_retry):
+    cloud_retry, decorator_generator = patch_cloud_retry()
+
+    decorator = cloud_retry.exponential_backoff()
+
+    assert decorator is sentinel.decorator
+
+    gen_kw_args, sleep_time_generator = check_common_side_effects(decorator_generator)
+
+    assert gen_kw_args['retries'] == 10
+    assert gen_kw_args['catch_extra_error_codes'] is None
+    assert sleep_time_generator.delay == 3
+    assert sleep_time_generator.backoff == 2
+    assert sleep_time_generator.max_delay == 60
+    assert sleep_time_generator.jitter is False
+
+
+def test_create_exponential_backoff_with_args(patch_cloud_retry):
+    cloud_retry, decorator_generator = patch_cloud_retry()
+
+    decorator = cloud_retry.exponential_backoff(retries=11, delay=4, backoff=3, max_delay=61, catch_extra_error_codes=[42])
+    assert decorator is sentinel.decorator
+
+    gen_kw_args, sleep_time_generator = check_common_side_effects(decorator_generator)
+
+    assert gen_kw_args['catch_extra_error_codes'] == [42]
+    assert gen_kw_args['retries'] == 11
+    assert sleep_time_generator.delay == 4
+    assert sleep_time_generator.backoff == 3
+    assert sleep_time_generator.max_delay == 61
+    assert sleep_time_generator.jitter is False
+
+
+def test_create_jittered_backoff_with_defaults(patch_cloud_retry):
+    cloud_retry, decorator_generator = patch_cloud_retry()
+
+    decorator = cloud_retry.jittered_backoff()
+    assert decorator is sentinel.decorator
+
+    gen_kw_args, sleep_time_generator = check_common_side_effects(decorator_generator)
+
+    assert gen_kw_args['catch_extra_error_codes'] is None
+    assert gen_kw_args['retries'] == 10
+    assert sleep_time_generator.delay == 3
+    assert sleep_time_generator.backoff == 2
+    assert sleep_time_generator.max_delay == 60
+    assert sleep_time_generator.jitter is True
+
+
+def test_create_jittered_backoff_with_args(patch_cloud_retry):
+    cloud_retry, decorator_generator = patch_cloud_retry()
+
+    decorator = cloud_retry.jittered_backoff(retries=11, delay=4, backoff=3, max_delay=61, catch_extra_error_codes=[42])
+    assert decorator is sentinel.decorator
+
+    gen_kw_args, sleep_time_generator = check_common_side_effects(decorator_generator)
+
+    assert gen_kw_args['catch_extra_error_codes'] == [42]
+    assert gen_kw_args['retries'] == 11
+    assert sleep_time_generator.delay == 4
+    assert sleep_time_generator.backoff == 3
+    assert sleep_time_generator.max_delay == 61
+    assert sleep_time_generator.jitter is True
+
+
+def test_create_legacy_backoff_with_defaults(patch_cloud_retry):
+    cloud_retry, decorator_generator = patch_cloud_retry()
+
+    decorator = cloud_retry.backoff()
+
+    gen_kw_args, sleep_time_generator =
check_common_side_effects(decorator_generator)
+
+    assert gen_kw_args['catch_extra_error_codes'] is None
+    assert gen_kw_args['retries'] == 10
+    assert sleep_time_generator.delay == 3
+    assert sleep_time_generator.backoff == 1.1
+    assert sleep_time_generator.max_delay is None
+    assert sleep_time_generator.jitter is False
+
+
+def test_create_legacy_backoff_with_args(patch_cloud_retry):
+    cloud_retry, decorator_generator = patch_cloud_retry()
+
+    # Note: the Keyword Args have different names here, and not all of them can be passed...
+    decorator = cloud_retry.backoff(tries=11, delay=4, backoff=3, catch_extra_error_codes=[42])
+
+    gen_kw_args, sleep_time_generator = check_common_side_effects(decorator_generator)
+
+    assert gen_kw_args['catch_extra_error_codes'] == [42]
+    assert gen_kw_args['retries'] == 11
+    assert sleep_time_generator.delay == 4
+    assert sleep_time_generator.backoff == 3
+    assert sleep_time_generator.max_delay is None
+    assert sleep_time_generator.jitter is False
diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_retries_found.py b/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_retries_found.py
new file mode 100644
index 000000000..21ad74d42
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_retries_found.py
@@ -0,0 +1,34 @@
+# (c) 2022 Red Hat Inc.
+#
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.amazon.aws.plugins.module_utils.cloud import CloudRetry
+
+
+def test_found_not_iterable():
+    assert CloudRetry.found('404', 5) is False
+    assert CloudRetry.found('404', None) is False
+    assert CloudRetry.found('404', 404) is False
+    # This seems counter-intuitive, but the second argument is supposed to be iterable...
+    assert CloudRetry.found(404, 404) is False
+
+
+def test_found_no_match():
+    assert CloudRetry.found('404', ['403']) is False
+    assert CloudRetry.found('404', ['500', '403']) is False
+    assert CloudRetry.found('404', {'403'}) is False
+    assert CloudRetry.found('404', {'500', '403'}) is False
+
+
+def test_found_match():
+    assert CloudRetry.found('404', ['404']) is True
+    assert CloudRetry.found('404', ['403', '404']) is True
+    assert CloudRetry.found('404', ['404', '403']) is True
+    assert CloudRetry.found('404', {'404'}) is True
+    assert CloudRetry.found('404', {'403', '404'}) is True
+    # Beware, this will generally only work with strings (they're iterable)
+    assert CloudRetry.found('404', '404') is True
diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_retry_func.py b/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_retry_func.py
new file mode 100644
index 000000000..609c0718b
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_retry_func.py
@@ -0,0 +1,129 @@
+# (c) 2022 Red Hat Inc.
+# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest +import sys + +import ansible_collections.amazon.aws.plugins.module_utils.cloud as cloud_utils +from ansible_collections.amazon.aws.tests.unit.compat.mock import Mock +from ansible_collections.amazon.aws.tests.unit.compat.mock import sentinel + +if sys.version_info < (3, 8): + pytest.skip("accessing call_args.kwargs by keyword (instead of index) was introduced in Python 3.8", allow_module_level=True) + + +class ExceptionA(Exception): + def __init__(self): + pass + + +class ExceptionB(Exception): + def __init__(self): + pass + + +@pytest.fixture +def retrier(): + def do_retry( + func=None, + sleep_generator=None, + retries=4, + catch_extra_error_codes=None, + found_f=None, + extract_code=None, + base_class=None, + ): + if not func: + func = Mock(return_value=sentinel.successful_run) + if not sleep_generator: + sleep_generator = cloud_utils.BackoffIterator(0, 0) + if not found_f: + found_f = Mock(return_value=False) + if not extract_code: + extract_code = Mock(return_value=sentinel.extracted_code) + if not base_class: + base_class = ExceptionA + + result = cloud_utils._retry_func( + func, + sleep_generator, + retries, + catch_extra_error_codes, + found_f, + extract_code, + base_class, + ) + return func, result + + return do_retry + + +def test_success(retrier): + func, result = retrier() + assert result is sentinel.successful_run + assert func.called is True + assert func.call_count == 1 + + +def test_not_base(retrier): + func = Mock(side_effect=ExceptionB) + with pytest.raises(ExceptionB): + _f, _result = retrier(func=func) + assert func.called is True + assert func.call_count == 1 + + +def test_no_match(retrier): + found_f = Mock(return_value=False) + func = Mock(side_effect=ExceptionA) + + with pytest.raises(ExceptionA): + _f, _result = retrier(func=func, found_f=found_f) + assert func.called is True + assert func.call_count == 1 + assert found_f.called is True + assert found_f.call_count == 1 + assert found_f.call_args.args[0] is sentinel.extracted_code + assert found_f.call_args.args[1] is None + + +def test_no_match_with_extra_error_codes(retrier): + found_f = Mock(return_value=False) + func = Mock(side_effect=ExceptionA) + catch_extra_error_codes = sentinel.extra_codes + + with pytest.raises(ExceptionA): + _f, _result = retrier( + func=func, found_f=found_f, catch_extra_error_codes=catch_extra_error_codes + ) + assert func.called is True + assert func.call_count == 1 + assert found_f.called is True + assert found_f.call_count == 1 + assert found_f.call_args.args[0] is sentinel.extracted_code + assert found_f.call_args.args[1] is sentinel.extra_codes + + +def test_simple_retries_4_times(retrier): + found_f = Mock(return_value=True) + func = Mock(side_effect=ExceptionA) + + with pytest.raises(ExceptionA): + _f, _result = retrier(func=func, found_f=found_f) + assert func.called is True + assert func.call_count == 4 + + +def test_simple_retries_2_times(retrier): + found_f = Mock(return_value=True) + func = Mock(side_effect=ExceptionA) + + with pytest.raises(ExceptionA): + _f, _result = retrier(func=func, found_f=found_f, retries=2) + assert func.called is True + assert func.call_count == 2 diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/conftest.py b/ansible_collections/amazon/aws/tests/unit/module_utils/conftest.py new file mode 
100644
index 000000000..f90055615
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/module_utils/conftest.py
@@ -0,0 +1,81 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import sys
+from io import BytesIO
+import warnings
+
+import pytest
+
+import ansible.module_utils.basic
+import ansible.module_utils.common
+from ansible.module_utils.six import PY3, string_types
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.common._collections_compat import MutableMapping
+
+
+@pytest.fixture
+def stdin(mocker, request):
+    old_args = ansible.module_utils.basic._ANSIBLE_ARGS
+    ansible.module_utils.basic._ANSIBLE_ARGS = None
+    old_argv = sys.argv
+    sys.argv = ['ansible_unittest']
+
+    for var in ["_global_warnings", "_global_deprecations"]:
+        if hasattr(ansible.module_utils.common.warnings, var):
+            setattr(ansible.module_utils.common.warnings, var, [])
+        else:
+            # No need to reset the value
+            pass
+
+    if isinstance(request.param, string_types):
+        args = request.param
+    elif isinstance(request.param, MutableMapping):
+        if 'ANSIBLE_MODULE_ARGS' not in request.param:
+            request.param = {'ANSIBLE_MODULE_ARGS': request.param}
+        if '_ansible_remote_tmp' not in request.param['ANSIBLE_MODULE_ARGS']:
+            request.param['ANSIBLE_MODULE_ARGS']['_ansible_remote_tmp'] = '/tmp'
+        if '_ansible_keep_remote_files' not in request.param['ANSIBLE_MODULE_ARGS']:
+            request.param['ANSIBLE_MODULE_ARGS']['_ansible_keep_remote_files'] = False
+        args = json.dumps(request.param)
+    else:
+        raise Exception('Malformed data to the stdin pytest fixture')
+
+    fake_stdin = BytesIO(to_bytes(args, errors='surrogate_or_strict'))
+    if PY3:
+        mocker.patch('ansible.module_utils.basic.sys.stdin', mocker.MagicMock())
+        mocker.patch('ansible.module_utils.basic.sys.stdin.buffer', fake_stdin)
+    else:
+        mocker.patch('ansible.module_utils.basic.sys.stdin', fake_stdin)
+
+    yield fake_stdin
+
+    ansible.module_utils.basic._ANSIBLE_ARGS = old_args
+    sys.argv = old_argv
+
+
+@pytest.fixture
+def am(stdin, request):
+    old_args = ansible.module_utils.basic._ANSIBLE_ARGS
+    ansible.module_utils.basic._ANSIBLE_ARGS = None
+    old_argv = sys.argv
+    sys.argv = ['ansible_unittest']
+
+    argspec = {}
+    if hasattr(request, 'param'):
+        if isinstance(request.param, dict):
+            argspec = request.param
+
+    am = ansible.module_utils.basic.AnsibleModule(
+        argument_spec=argspec,
+    )
+    am._name = 'ansible_unittest'
+
+    yield am
+
+    ansible.module_utils.basic._ANSIBLE_ARGS = old_args
+    sys.argv = old_argv
diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/elbv2/test_prune.py b/ansible_collections/amazon/aws/tests/unit/module_utils/elbv2/test_prune.py
new file mode 100644
index 000000000..3a02b9e2e
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/module_utils/elbv2/test_prune.py
@@ -0,0 +1,188 @@
+#
+# (c) 2021 Red Hat Inc.
+# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from ansible_collections.amazon.aws.plugins.module_utils import elbv2 + +example_arn = 'arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/nlb-123456789abc/abcdef0123456789' +example_arn2 = 'arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/nlb-0123456789ab/0123456789abcdef' + +one_action = [ + dict( + ForwardConfig=dict( + TargetGroupStickinessConfig=dict(Enabled=False), + TargetGroups=[ + dict(TargetGroupArn=example_arn, Weight=1), + ] + ), + TargetGroupArn=example_arn, Type='forward', + ) +] + +one_action_two_tg = [ + dict( + ForwardConfig=dict( + TargetGroupStickinessConfig=dict(Enabled=False), + TargetGroups=[ + dict(TargetGroupArn=example_arn, Weight=1), + dict(TargetGroupArn=example_arn2, Weight=1), + ] + ), + TargetGroupArn=example_arn, Type='forward', + ) +] + +simplified_action = dict(Type='forward', TargetGroupArn=example_arn) +# Examples of various minimalistic actions which are all the same +simple_actions = [ + dict(Type='forward', TargetGroupArn=example_arn), + + dict(Type='forward', TargetGroupArn=example_arn, ForwardConfig=dict(TargetGroups=[dict(TargetGroupArn=example_arn)])), + dict(Type='forward', ForwardConfig=dict(TargetGroups=[dict(TargetGroupArn=example_arn)])), + dict(Type='forward', TargetGroupArn=example_arn, ForwardConfig=dict(TargetGroups=[dict(TargetGroupArn=example_arn, Weight=1)])), + dict(Type='forward', ForwardConfig=dict(TargetGroups=[dict(TargetGroupArn=example_arn, Weight=1)])), + dict(Type='forward', TargetGroupArn=example_arn, ForwardConfig=dict(TargetGroups=[dict(TargetGroupArn=example_arn, Weight=42)])), + dict(Type='forward', ForwardConfig=dict(TargetGroups=[dict(TargetGroupArn=example_arn, Weight=42)])), + + dict(Type='forward', TargetGroupArn=example_arn, ForwardConfig=dict(TargetGroupStickinessConfig=dict(Enabled=False), + TargetGroups=[dict(TargetGroupArn=example_arn)])), + dict(Type='forward', ForwardConfig=dict(TargetGroupStickinessConfig=dict(Enabled=False), TargetGroups=[dict(TargetGroupArn=example_arn)])), + dict(Type='forward', TargetGroupArn=example_arn, ForwardConfig=dict(TargetGroupStickinessConfig=dict(Enabled=False), + TargetGroups=[dict(TargetGroupArn=example_arn, Weight=1)])), + dict(Type='forward', ForwardConfig=dict(TargetGroupStickinessConfig=dict(Enabled=False), TargetGroups=[dict(TargetGroupArn=example_arn, Weight=1)])), + dict(Type='forward', TargetGroupArn=example_arn, ForwardConfig=dict(TargetGroupStickinessConfig=dict(Enabled=False), + TargetGroups=[dict(TargetGroupArn=example_arn, Weight=42)])), + dict(Type='forward', ForwardConfig=dict(TargetGroupStickinessConfig=dict(Enabled=False), TargetGroups=[dict(TargetGroupArn=example_arn, Weight=42)])), +] + +# Test that _prune_ForwardConfig() doesn't mangle things we don't expect +complex_actions = [ + # Non-Forwarding + dict( + Type='authenticate-oidc', TargetGroupArn=example_arn, + AuthenticateOidcConfig=dict( + Issuer='https://idp.ansible.test/oidc-config', + AuthorizationEndpoint='https://idp.ansible.test/authz', + TokenEndpoint='https://idp.ansible.test/token', + UserInfoEndpoint='https://idp.ansible.test/user', + ClientId='ExampleClient', + UseExistingClientSecret=False, + ), + ), + dict( + Type='redirect', + RedirectConfig=dict(Protocol='HTTPS', Port=443, Host='redirect.ansible.test', Path='/', 
StatusCode='HTTP_302'), + ), + # Multiple TGs + dict( + TargetGroupArn=example_arn, Type='forward', + ForwardConfig=dict( + TargetGroupStickinessConfig=dict(Enabled=False), + TargetGroups=[ + dict(TargetGroupArn=example_arn, Weight=1), + dict(TargetGroupArn=example_arn2, Weight=1), + ] + ), + ), + # Sticky-Sessions + dict( + Type='forward', TargetGroupArn=example_arn, + ForwardConfig=dict( + TargetGroupStickinessConfig=dict(Enabled=True, DurationSeconds=3600), + TargetGroups=[dict(TargetGroupArn=example_arn)] + ) + ), +] + +simplified_oidc_action = dict( + Type='authenticate-oidc', TargetGroupArn=example_arn, + AuthenticateOidcConfig=dict( + Issuer='https://idp.ansible.test/oidc-config', + AuthorizationEndpoint='https://idp.ansible.test/authz', + TokenEndpoint='https://idp.ansible.test/token', + UserInfoEndpoint='https://idp.ansible.test/user', + ClientId='ExampleClient', + Scope='openid', + SessionTimeout=604800, + UseExistingClientSecret=True, + ), +) +oidc_actions = [ + dict( + Type='authenticate-oidc', TargetGroupArn=example_arn, + AuthenticateOidcConfig=dict( + Issuer='https://idp.ansible.test/oidc-config', + AuthorizationEndpoint='https://idp.ansible.test/authz', + TokenEndpoint='https://idp.ansible.test/token', + UserInfoEndpoint='https://idp.ansible.test/user', + ClientId='ExampleClient', + UseExistingClientSecret=True, + Scope='openid', + SessionTimeout=604800 + ), + ), + dict( + Type='authenticate-oidc', TargetGroupArn=example_arn, + AuthenticateOidcConfig=dict( + Issuer='https://idp.ansible.test/oidc-config', + AuthorizationEndpoint='https://idp.ansible.test/authz', + TokenEndpoint='https://idp.ansible.test/token', + UserInfoEndpoint='https://idp.ansible.test/user', + ClientId='ExampleClient', + ClientSecret='MyVerySecretString', + UseExistingClientSecret=True, + ), + ), +] + + +#### + + +# Original tests +def test__prune_secret(): + assert elbv2._prune_secret(one_action[0]) == one_action[0] + + +def test__prune_ForwardConfig(): + expectation = {"TargetGroupArn": example_arn, "Type": "forward"} + pruned_config = elbv2._prune_ForwardConfig(one_action[0]) + assert pruned_config == expectation + + # https://github.com/ansible-collections/community.aws/issues/1089 + pruned_config = elbv2._prune_ForwardConfig(one_action_two_tg[0]) + assert pruned_config == one_action_two_tg[0] + + +#### + + +@pytest.mark.parametrize("action", simple_actions) +def test__prune_ForwardConfig_simplifiable_actions(action): + pruned_config = elbv2._prune_ForwardConfig(action) + assert pruned_config == simplified_action + + +@pytest.mark.parametrize("action", complex_actions) +def test__prune_ForwardConfig_non_simplifiable_actions(action): + pruned_config = elbv2._prune_ForwardConfig(action) + assert pruned_config == action + + +@pytest.mark.parametrize("action", oidc_actions) +def test__prune_secret_simplifiable_actions(action): + pruned_config = elbv2._prune_secret(action) + assert pruned_config == simplified_oidc_action + + +@pytest.mark.parametrize("action", complex_actions) +def test__prune_secret_non_simplifiable_actions(action): + pruned_config = elbv2._prune_secret(action) + assert pruned_config == action diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/test_fail_json_aws.py b/ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/test_fail_json_aws.py new file mode 100644 index 000000000..51e64490f --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/test_fail_json_aws.py @@ 
-0,0 +1,330 @@ +# (c) 2020 Red Hat Inc. +# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import pytest + +try: + import botocore + import boto3 +except ImportError: + pass + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3 +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule + +if not HAS_BOTO3: + pytestmark = pytest.mark.skip("test_fail_json_aws.py requires the python modules 'boto3' and 'botocore'") + + +class TestFailJsonAwsTestSuite(object): + # ======================================================== + # Prepare some data for use in our testing + # ======================================================== + def setup_method(self): + # Basic information that ClientError needs to spawn off an error + self.EXAMPLE_EXCEPTION_DATA = { + "Error": { + "Code": "InvalidParameterValue", + "Message": "The filter 'exampleFilter' is invalid" + }, + "ResponseMetadata": { + "RequestId": "01234567-89ab-cdef-0123-456789abcdef", + "HTTPStatusCode": 400, + "HTTPHeaders": { + "transfer-encoding": "chunked", + "date": "Fri, 13 Nov 2020 00:00:00 GMT", + "connection": "close", + "server": "AmazonEC2" + }, + "RetryAttempts": 0 + } + } + self.CAMEL_RESPONSE = camel_dict_to_snake_dict(self.EXAMPLE_EXCEPTION_DATA.get("ResponseMetadata")) + self.CAMEL_ERROR = camel_dict_to_snake_dict(self.EXAMPLE_EXCEPTION_DATA.get("Error")) + # ClientError(EXAMPLE_EXCEPTION_DATA, "testCall") will generate this + self.EXAMPLE_MSG = "An error occurred (InvalidParameterValue) when calling the testCall operation: The filter 'exampleFilter' is invalid" + self.DEFAULT_CORE_MSG = "An unspecified error occurred" + self.FAIL_MSG = "I Failed!" 
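+
+    # For reference, a minimal sketch of how EXAMPLE_MSG above comes about
+    # (illustrative only; these lines are not executed by the tests):
+    #
+    #     err = botocore.exceptions.ClientError(self.EXAMPLE_EXCEPTION_DATA, "testCall")
+    #     assert str(err) == self.EXAMPLE_MSG
+    #
+    # fail_json_aws() is then expected to reuse that text, prefixing any
+    # caller-supplied message as "<msg>: <ClientError text>".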
+ + # ======================================================== + # Passing fail_json_aws nothing more than a ClientError + # ======================================================== + @pytest.mark.parametrize("stdin", [{}], indirect=["stdin"]) + def test_fail_client_minimal(self, monkeypatch, stdin, capfd): + monkeypatch.setattr(botocore, "__version__", "1.2.3") + monkeypatch.setattr(boto3, "__version__", "1.2.4") + + # Create a minimal module that we can call + module = AnsibleAWSModule(argument_spec=dict()) + try: + raise botocore.exceptions.ClientError(self.EXAMPLE_EXCEPTION_DATA, "testCall") + except botocore.exceptions.ClientError as e: + with pytest.raises(SystemExit) as ctx: + module.fail_json_aws(e) + assert ctx.value.code == 1 + out, _err = capfd.readouterr() + return_val = json.loads(out) + + assert return_val.get("msg") == self.EXAMPLE_MSG + assert return_val.get("boto3_version") == "1.2.4" + assert return_val.get("botocore_version") == "1.2.3" + assert return_val.get("exception") is not None + assert return_val.get("failed") + assert return_val.get("response_metadata") == self.CAMEL_RESPONSE + assert return_val.get("error") == self.CAMEL_ERROR + + # ======================================================== + # Passing fail_json_aws a ClientError and a message + # ======================================================== + @pytest.mark.parametrize("stdin", [{}], indirect=["stdin"]) + def test_fail_client_msg(self, monkeypatch, stdin, capfd): + monkeypatch.setattr(botocore, "__version__", "1.2.3") + monkeypatch.setattr(boto3, "__version__", "1.2.4") + + # Create a minimal module that we can call + module = AnsibleAWSModule(argument_spec=dict()) + try: + raise botocore.exceptions.ClientError(self.EXAMPLE_EXCEPTION_DATA, "testCall") + except botocore.exceptions.ClientError as e: + with pytest.raises(SystemExit) as ctx: + module.fail_json_aws(e, msg=self.FAIL_MSG) + assert ctx.value.code == 1 + out, _err = capfd.readouterr() + return_val = json.loads(out) + + assert return_val.get("msg") == self.FAIL_MSG + ": " + self.EXAMPLE_MSG + assert return_val.get("boto3_version") == "1.2.4" + assert return_val.get("botocore_version") == "1.2.3" + assert return_val.get("exception") is not None + assert return_val.get("failed") + assert return_val.get("response_metadata") == self.CAMEL_RESPONSE + assert return_val.get("error") == self.CAMEL_ERROR + + # ======================================================== + # Passing fail_json_aws a ClientError and a message as a positional argument + # ======================================================== + @pytest.mark.parametrize("stdin", [{}], indirect=["stdin"]) + def test_fail_client_positional_msg(self, monkeypatch, stdin, capfd): + monkeypatch.setattr(botocore, "__version__", "1.2.3") + monkeypatch.setattr(boto3, "__version__", "1.2.4") + + # Create a minimal module that we can call + module = AnsibleAWSModule(argument_spec=dict()) + try: + raise botocore.exceptions.ClientError(self.EXAMPLE_EXCEPTION_DATA, "testCall") + except botocore.exceptions.ClientError as e: + with pytest.raises(SystemExit) as ctx: + module.fail_json_aws(e, self.FAIL_MSG) + assert ctx.value.code == 1 + out, _err = capfd.readouterr() + return_val = json.loads(out) + + assert return_val.get("msg") == self.FAIL_MSG + ": " + self.EXAMPLE_MSG + assert return_val.get("boto3_version") == "1.2.4" + assert return_val.get("botocore_version") == "1.2.3" + assert return_val.get("exception") is not None + assert return_val.get("failed") + assert return_val.get("response_metadata") == 
self.CAMEL_RESPONSE
+        assert return_val.get("error") == self.CAMEL_ERROR
+
+    # ========================================================
+    #   Passing fail_json_aws a ClientError and an arbitrary key
+    # ========================================================
+    @pytest.mark.parametrize("stdin", [{}], indirect=["stdin"])
+    def test_fail_client_key(self, monkeypatch, stdin, capfd):
+        monkeypatch.setattr(botocore, "__version__", "1.2.3")
+        monkeypatch.setattr(boto3, "__version__", "1.2.4")
+
+        # Create a minimal module that we can call
+        module = AnsibleAWSModule(argument_spec=dict())
+        try:
+            raise botocore.exceptions.ClientError(self.EXAMPLE_EXCEPTION_DATA, "testCall")
+        except botocore.exceptions.ClientError as e:
+            with pytest.raises(SystemExit) as ctx:
+                module.fail_json_aws(e, extra_key="Some Value")
+            assert ctx.value.code == 1
+        out, _err = capfd.readouterr()
+        return_val = json.loads(out)
+
+        assert return_val.get("msg") == self.EXAMPLE_MSG
+        assert return_val.get("extra_key") == "Some Value"
+        assert return_val.get("boto3_version") == "1.2.4"
+        assert return_val.get("botocore_version") == "1.2.3"
+        assert return_val.get("exception") is not None
+        assert return_val.get("failed")
+        assert return_val.get("response_metadata") == self.CAMEL_RESPONSE
+        assert return_val.get("error") == self.CAMEL_ERROR
+
+    # ========================================================
+    #   Passing fail_json_aws a ClientError, an arbitrary key and a message
+    # ========================================================
+    @pytest.mark.parametrize("stdin", [{}], indirect=["stdin"])
+    def test_fail_client_msg_and_key(self, monkeypatch, stdin, capfd):
+        monkeypatch.setattr(botocore, "__version__", "1.2.3")
+        monkeypatch.setattr(boto3, "__version__", "1.2.4")
+
+        # Create a minimal module that we can call
+        module = AnsibleAWSModule(argument_spec=dict())
+        try:
+            raise botocore.exceptions.ClientError(self.EXAMPLE_EXCEPTION_DATA, "testCall")
+        except botocore.exceptions.ClientError as e:
+            with pytest.raises(SystemExit) as ctx:
+                module.fail_json_aws(e, extra_key="Some Value", msg=self.FAIL_MSG)
+            assert ctx.value.code == 1
+        out, _err = capfd.readouterr()
+        return_val = json.loads(out)
+
+        assert return_val.get("msg") == self.FAIL_MSG + ": " + self.EXAMPLE_MSG
+        assert return_val.get("extra_key") == "Some Value"
+        assert return_val.get("boto3_version") == "1.2.4"
+        assert return_val.get("botocore_version") == "1.2.3"
+        assert return_val.get("exception") is not None
+        assert return_val.get("failed")
+        assert return_val.get("response_metadata") == self.CAMEL_RESPONSE
+        assert return_val.get("error") == self.CAMEL_ERROR
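+
+    # The remaining cases switch from ClientError to BotoCoreError; since a
+    # BotoCoreError carries no AWS response, the failure JSON is expected to
+    # omit the response_metadata and error keys asserted on above.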
return_val.get("botocore_version") == "1.2.3" + assert return_val.get("exception") is not None + assert return_val.get("failed") + assert "response_metadata" not in return_val + assert "error" not in return_val + + # ======================================================== + # Passing fail_json_aws BotoCoreError and a message + # ======================================================== + @pytest.mark.parametrize("stdin", [{}], indirect=["stdin"]) + def test_fail_botocore_msg(self, monkeypatch, stdin, capfd): + monkeypatch.setattr(botocore, "__version__", "1.2.3") + monkeypatch.setattr(boto3, "__version__", "1.2.4") + + # Create a minimal module that we can call + module = AnsibleAWSModule(argument_spec=dict()) + try: + raise botocore.exceptions.BotoCoreError() + except botocore.exceptions.BotoCoreError as e: + with pytest.raises(SystemExit) as ctx: + module.fail_json_aws(e, msg=self.FAIL_MSG) + assert ctx.value.code == 1 + out, _err = capfd.readouterr() + return_val = json.loads(out) + + assert return_val.get("msg") == self.FAIL_MSG + ": " + self.DEFAULT_CORE_MSG + assert return_val.get("boto3_version") == "1.2.4" + assert return_val.get("botocore_version") == "1.2.3" + assert return_val.get("exception") is not None + assert return_val.get("failed") + assert "response_metadata" not in return_val + assert "error" not in return_val + + # ======================================================== + # Passing fail_json_aws BotoCoreError and a message as a positional + # argument + # ======================================================== + @pytest.mark.parametrize("stdin", [{}], indirect=["stdin"]) + def test_fail_botocore_positional_msg(self, monkeypatch, stdin, capfd): + monkeypatch.setattr(botocore, "__version__", "1.2.3") + monkeypatch.setattr(boto3, "__version__", "1.2.4") + + # Create a minimal module that we can call + module = AnsibleAWSModule(argument_spec=dict()) + try: + raise botocore.exceptions.BotoCoreError() + except botocore.exceptions.BotoCoreError as e: + with pytest.raises(SystemExit) as ctx: + module.fail_json_aws(e, self.FAIL_MSG) + assert ctx.value.code == 1 + out, _err = capfd.readouterr() + return_val = json.loads(out) + + assert return_val.get("msg") == self.FAIL_MSG + ": " + self.DEFAULT_CORE_MSG + assert return_val.get("boto3_version") == "1.2.4" + assert return_val.get("botocore_version") == "1.2.3" + assert return_val.get("exception") is not None + assert return_val.get("failed") + assert "response_metadata" not in return_val + assert "error" not in return_val + + # ======================================================== + # Passing fail_json_aws a BotoCoreError and an arbitrary key + # ======================================================== + @pytest.mark.parametrize("stdin", [{}], indirect=["stdin"]) + def test_fail_botocore_key(self, monkeypatch, stdin, capfd): + monkeypatch.setattr(botocore, "__version__", "1.2.3") + monkeypatch.setattr(boto3, "__version__", "1.2.4") + + # Create a minimal module that we can call + module = AnsibleAWSModule(argument_spec=dict()) + try: + raise botocore.exceptions.BotoCoreError() + except botocore.exceptions.BotoCoreError as e: + with pytest.raises(SystemExit) as ctx: + module.fail_json_aws(e, extra_key="Some Value") + assert ctx.value.code == 1 + out, _err = capfd.readouterr() + return_val = json.loads(out) + + assert return_val.get("msg") == self.DEFAULT_CORE_MSG + assert return_val.get("extra_key") == "Some Value" + assert return_val.get("boto3_version") == "1.2.4" + assert return_val.get("botocore_version") == "1.2.3" + 
assert return_val.get("exception") is not None + assert return_val.get("failed") + assert "response_metadata" not in return_val + assert "error" not in return_val + + # ======================================================== + # Passing fail_json_aws BotoCoreError, an arbitry key and a message + # ======================================================== + @pytest.mark.parametrize("stdin", [{}], indirect=["stdin"]) + def test_fail_botocore_msg_and_key(self, monkeypatch, stdin, capfd): + monkeypatch.setattr(botocore, "__version__", "1.2.3") + monkeypatch.setattr(boto3, "__version__", "1.2.4") + + # Create a minimal module that we can call + module = AnsibleAWSModule(argument_spec=dict()) + try: + raise botocore.exceptions.BotoCoreError() + except botocore.exceptions.BotoCoreError as e: + with pytest.raises(SystemExit) as ctx: + module.fail_json_aws(e, extra_key="Some Value", msg=self.FAIL_MSG) + assert ctx.value.code == 1 + out, _err = capfd.readouterr() + return_val = json.loads(out) + + assert return_val.get("msg") == self.FAIL_MSG + ": " + self.DEFAULT_CORE_MSG + assert return_val.get("extra_key") == "Some Value" + assert return_val.get("boto3_version") == "1.2.4" + assert return_val.get("botocore_version") == "1.2.3" + assert return_val.get("exception") is not None + assert return_val.get("failed") + assert "response_metadata" not in return_val + assert "error" not in return_val diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/test_minimal_versions.py b/ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/test_minimal_versions.py new file mode 100644 index 000000000..17e69ecb5 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/test_minimal_versions.py @@ -0,0 +1,191 @@ +# (c) 2020 Red Hat Inc. 
+# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from pprint import pprint +import pytest +import json +import warnings + +try: + import botocore + import boto3 +except ImportError: + pass + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3 +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule + +if not HAS_BOTO3: + pytestmark = pytest.mark.skip("test_minimal_versions.py requires the python modules 'boto3' and 'botocore'") + + +class TestMinimalVersionTestSuite(object): + # ======================================================== + # Prepare some data for use in our testing + # ======================================================== + def setup_method(self): + self.MINIMAL_BOTO3 = '1.18.0' + self.MINIMAL_BOTOCORE = '1.21.0' + self.OLD_BOTO3 = '1.17.999' + self.OLD_BOTOCORE = '1.20.999' + + # ======================================================== + # Test we don't warn when using valid versions + # ======================================================== + @pytest.mark.parametrize("stdin", [{}], indirect=["stdin"]) + def test_no_warn(self, monkeypatch, stdin, capfd): + monkeypatch.setattr(botocore, "__version__", self.MINIMAL_BOTOCORE) + monkeypatch.setattr(boto3, "__version__", self.MINIMAL_BOTO3) + + # Create a minimal module that we can call + module = AnsibleAWSModule(argument_spec=dict()) + + with pytest.raises(SystemExit): + module.exit_json() + + out, _err = capfd.readouterr() + return_val = json.loads(out) + + assert return_val.get("exception") is None + assert return_val.get("invocation") is not None + assert return_val.get("failed") is None + assert return_val.get("error") is None + assert return_val.get("warnings") is None + + # ======================================================== + # Test we don't warn when botocore/boto3 isn't required + # ======================================================== + @pytest.mark.parametrize("stdin", [{}], indirect=["stdin"]) + def test_no_check(self, monkeypatch, stdin, capfd): + monkeypatch.setattr(botocore, "__version__", self.OLD_BOTOCORE) + monkeypatch.setattr(boto3, "__version__", self.OLD_BOTO3) + + # Create a minimal module that we can call + module = AnsibleAWSModule(argument_spec=dict(), check_boto3=False) + + with pytest.raises(SystemExit): + module.exit_json() + + out, _err = capfd.readouterr() + return_val = json.loads(out) + + assert return_val.get("exception") is None + assert return_val.get("invocation") is not None + assert return_val.get("failed") is None + assert return_val.get("error") is None + assert return_val.get("warnings") is None + + # ======================================================== + # Test we warn when using an old version of boto3 + # ======================================================== + @pytest.mark.parametrize("stdin", [{}], indirect=["stdin"]) + def test_warn_boto3(self, monkeypatch, stdin, capfd): + monkeypatch.setattr(botocore, "__version__", self.MINIMAL_BOTOCORE) + monkeypatch.setattr(boto3, "__version__", self.OLD_BOTO3) + + # Create a minimal module that we can call + module = AnsibleAWSModule(argument_spec=dict()) + + with pytest.raises(SystemExit): + module.exit_json() + + out, err = capfd.readouterr() + return_val = json.loads(out) + + pprint(out) + pprint(err) + pprint(return_val) + + assert return_val.get("exception") is None + assert 
return_val.get("invocation") is not None + assert return_val.get("failed") is None + assert return_val.get("error") is None + assert return_val.get("warnings") is not None + warnings = return_val.get("warnings") + assert len(warnings) == 1 + # Assert that we have a warning about the version but be + # relaxed about the exact message + assert 'boto3' in warnings[0] + assert self.MINIMAL_BOTO3 in warnings[0] + + # ======================================================== + # Test we warn when using an old version of botocore + # ======================================================== + @pytest.mark.parametrize("stdin", [{}], indirect=["stdin"]) + def test_warn_botocore(self, monkeypatch, stdin, capfd): + monkeypatch.setattr(botocore, "__version__", self.OLD_BOTOCORE) + monkeypatch.setattr(boto3, "__version__", self.MINIMAL_BOTO3) + + # Create a minimal module that we can call + module = AnsibleAWSModule(argument_spec=dict()) + + with pytest.raises(SystemExit): + module.exit_json() + + out, err = capfd.readouterr() + return_val = json.loads(out) + + pprint(out) + pprint(err) + pprint(return_val) + + assert return_val.get("exception") is None + assert return_val.get("invocation") is not None + assert return_val.get("failed") is None + assert return_val.get("error") is None + assert return_val.get("warnings") is not None + warnings = return_val.get("warnings") + assert len(warnings) == 1 + # Assert that we have a warning about the version but be + # relaxed about the exact message + assert 'botocore' in warnings[0] + assert self.MINIMAL_BOTOCORE in warnings[0] + + # ======================================================== + # Test we warn when using an old version of botocore and boto3 + # ======================================================== + @pytest.mark.parametrize("stdin", [{}], indirect=["stdin"]) + def test_warn_boto3_and_botocore(self, monkeypatch, stdin, capfd): + monkeypatch.setattr(botocore, "__version__", self.OLD_BOTOCORE) + monkeypatch.setattr(boto3, "__version__", self.OLD_BOTO3) + + # Create a minimal module that we can call + module = AnsibleAWSModule(argument_spec=dict()) + + with pytest.raises(SystemExit): + module.exit_json() + + out, err = capfd.readouterr() + return_val = json.loads(out) + + pprint(out) + pprint(err) + pprint(return_val) + + assert return_val.get("exception") is None + assert return_val.get("invocation") is not None + assert return_val.get("failed") is None + assert return_val.get("error") is None + assert return_val.get("warnings") is not None + + warnings = return_val.get("warnings") + assert len(warnings) == 2 + + warning_dict = dict() + for warning in warnings: + if 'boto3' in warning: + warning_dict['boto3'] = warning + if 'botocore' in warning: + warning_dict['botocore'] = warning + + # Assert that we have a warning about the version but be + # relaxed about the exact message + assert warning_dict.get('boto3') is not None + assert self.MINIMAL_BOTO3 in warning_dict.get('boto3') + assert warning_dict.get('botocore') is not None + assert self.MINIMAL_BOTOCORE in warning_dict.get('botocore') diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/test_require_at_least.py b/ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/test_require_at_least.py new file mode 100644 index 000000000..adf2bf558 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/test_require_at_least.py @@ -0,0 +1,220 @@ +# (c) 2021 Red Hat Inc. 
+# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import pytest + +try: + import botocore + import boto3 +except ImportError: + # Handled by HAS_BOTO3 + pass + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3 +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule + +DUMMY_VERSION = '5.5.5.5' + +TEST_VERSIONS = [ + ['1.1.1', '2.2.2', True], + ['1.1.1', '0.0.1', False], + ['9.9.9', '9.9.9', True], + ['9.9.9', '9.9.10', True], + ['9.9.9', '9.10.9', True], + ['9.9.9', '10.9.9', True], + ['9.9.9', '9.9.8', False], + ['9.9.9', '9.8.9', False], + ['9.9.9', '8.9.9', False], + ['10.10.10', '10.10.10', True], + ['10.10.10', '10.10.11', True], + ['10.10.10', '10.11.10', True], + ['10.10.10', '11.10.10', True], + ['10.10.10', '10.10.9', False], + ['10.10.10', '10.9.10', False], + ['10.10.10', '9.19.10', False], +] + +if not HAS_BOTO3: + pytestmark = pytest.mark.skip("test_require_at_least.py requires the python modules 'boto3' and 'botocore'") + + +class TestRequireAtLeastTestSuite(object): + # ======================================================== + # Prepare some data for use in our testing + # ======================================================== + def setup_method(self): + pass + + # ======================================================== + # Test botocore_at_least + # ======================================================== + @pytest.mark.parametrize("stdin, desired_version, compare_version, at_least", [({}, *d) for d in TEST_VERSIONS], indirect=["stdin"]) + def test_botocore_at_least(self, monkeypatch, stdin, desired_version, compare_version, at_least, capfd): + monkeypatch.setattr(botocore, "__version__", compare_version) + # Set boto3 version to a known value (tests are on both sides) to make + # sure we're comparing the right library + monkeypatch.setattr(boto3, "__version__", DUMMY_VERSION) + + # Create a minimal module that we can call + module = AnsibleAWSModule(argument_spec=dict()) + + assert at_least == module.botocore_at_least(desired_version) + + # ======================================================== + # Test boto3_at_least + # ======================================================== + @pytest.mark.parametrize("stdin, desired_version, compare_version, at_least", [({}, *d) for d in TEST_VERSIONS], indirect=["stdin"]) + def test_boto3_at_least(self, monkeypatch, stdin, desired_version, compare_version, at_least, capfd): + # Set botocore version to a known value (tests are on both sides) to make + # sure we're comparing the right library + monkeypatch.setattr(botocore, "__version__", DUMMY_VERSION) + monkeypatch.setattr(boto3, "__version__", compare_version) + + # Create a minimal module that we can call + module = AnsibleAWSModule(argument_spec=dict()) + + assert at_least == module.boto3_at_least(desired_version) + + # ======================================================== + # Test require_botocore_at_least + # ======================================================== + @pytest.mark.parametrize("stdin, desired_version, compare_version, at_least", [({}, *d) for d in TEST_VERSIONS], indirect=["stdin"]) + def test_require_botocore_at_least(self, monkeypatch, stdin, desired_version, compare_version, at_least, capfd): + monkeypatch.setattr(botocore, "__version__", compare_version) + # Set boto3 version to a known value (tests 
are on both sides) to make
+        # sure we're comparing the right library
+        monkeypatch.setattr(boto3, "__version__", DUMMY_VERSION)
+
+        # Create a minimal module that we can call
+        module = AnsibleAWSModule(argument_spec=dict())
+
+        with pytest.raises(SystemExit):
+            module.require_botocore_at_least(desired_version)
+            module.exit_json()
+
+        out, _err = capfd.readouterr()
+        return_val = json.loads(out)
+
+        assert return_val.get("exception") is None
+        assert return_val.get("invocation") is not None
+        if at_least:
+            assert return_val.get("failed") is None
+        else:
+            assert return_val.get("failed")
+            # The message is generated by Ansible, don't test for an exact
+            # message
+            assert desired_version in return_val.get("msg")
+            assert "botocore" in return_val.get("msg")
+            assert return_val.get("boto3_version") == DUMMY_VERSION
+            assert return_val.get("botocore_version") == compare_version
+
+    # ========================================================
+    #   Test require_boto3_at_least
+    # ========================================================
+    @pytest.mark.parametrize("stdin, desired_version, compare_version, at_least", [({}, *d) for d in TEST_VERSIONS], indirect=["stdin"])
+    def test_require_boto3_at_least(self, monkeypatch, stdin, desired_version, compare_version, at_least, capfd):
+        # Set botocore version to a known value (tests are on both sides) to make
+        # sure we're comparing the right library
+        monkeypatch.setattr(botocore, "__version__", DUMMY_VERSION)
+        monkeypatch.setattr(boto3, "__version__", compare_version)
+
+        # Create a minimal module that we can call
+        module = AnsibleAWSModule(argument_spec=dict())
+
+        with pytest.raises(SystemExit):
+            module.require_boto3_at_least(desired_version)
+            module.exit_json()
+
+        out, _err = capfd.readouterr()
+        return_val = json.loads(out)
+
+        assert return_val.get("exception") is None
+        assert return_val.get("invocation") is not None
+        if at_least:
+            assert return_val.get("failed") is None
+        else:
+            assert return_val.get("failed")
+            # The message is generated by Ansible, don't test for an exact
+            # message
+            assert desired_version in return_val.get("msg")
+            assert "boto3" in return_val.get("msg")
+            assert return_val.get("botocore_version") == DUMMY_VERSION
+            assert return_val.get("boto3_version") == compare_version
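+
+    # Note on the parametrize idiom used throughout this class: prepending an
+    # empty dict to each TEST_VERSIONS entry via ({}, *d) supplies the (empty)
+    # module arguments consumed by the indirect "stdin" fixture, leaving the
+    # remaining three values as desired_version, compare_version and at_least.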
+
+    # ========================================================
+    #   Test require_botocore_at_least with reason
+    # ========================================================
+    @pytest.mark.parametrize("stdin, desired_version, compare_version, at_least", [({}, *d) for d in TEST_VERSIONS], indirect=["stdin"])
+    def test_require_botocore_at_least_with_reason(self, monkeypatch, stdin, desired_version, compare_version, at_least, capfd):
+        monkeypatch.setattr(botocore, "__version__", compare_version)
+        # Set boto3 version to a known value (tests are on both sides) to make
+        # sure we're comparing the right library
+        monkeypatch.setattr(boto3, "__version__", DUMMY_VERSION)
+
+        reason = 'testing in progress'
+
+        # Create a minimal module that we can call
+        module = AnsibleAWSModule(argument_spec=dict())
+
+        with pytest.raises(SystemExit):
+            module.require_botocore_at_least(desired_version, reason=reason)
+            module.exit_json()
+
+        out, _err = capfd.readouterr()
+        return_val = json.loads(out)
+
+        assert return_val.get("exception") is None
+        assert return_val.get("invocation") is not None
+        if at_least:
+            assert return_val.get("failed") is None
+        else:
+            assert return_val.get("failed")
+            # The message is generated by Ansible, don't test for an exact
+            # message
+            assert desired_version in return_val.get("msg")
+            assert " {0}".format(reason) in return_val.get("msg")
+            assert "botocore" in return_val.get("msg")
+            assert return_val.get("boto3_version") == DUMMY_VERSION
+            assert return_val.get("botocore_version") == compare_version
+
+    # ========================================================
+    #   Test require_boto3_at_least with reason
+    # ========================================================
+    @pytest.mark.parametrize("stdin, desired_version, compare_version, at_least", [({}, *d) for d in TEST_VERSIONS], indirect=["stdin"])
+    def test_require_boto3_at_least_with_reason(self, monkeypatch, stdin, desired_version, compare_version, at_least, capfd):
+        # Set botocore version to a known value (tests are on both sides) to make
+        # sure we're comparing the right library
+        monkeypatch.setattr(botocore, "__version__", DUMMY_VERSION)
+        monkeypatch.setattr(boto3, "__version__", compare_version)
+
+        reason = 'testing in progress'
+
+        # Create a minimal module that we can call
+        module = AnsibleAWSModule(argument_spec=dict())
+
+        with pytest.raises(SystemExit):
+            module.require_boto3_at_least(desired_version, reason=reason)
+            module.exit_json()
+
+        out, _err = capfd.readouterr()
+        return_val = json.loads(out)
+
+        assert return_val.get("exception") is None
+        assert return_val.get("invocation") is not None
+        if at_least:
+            assert return_val.get("failed") is None
+        else:
+            assert return_val.get("failed")
+            # The message is generated by Ansible, don't test for an exact
+            # message
+            assert desired_version in return_val.get("msg")
+            assert " {0}".format(reason) in return_val.get("msg")
+            assert "boto3" in return_val.get("msg")
+            assert return_val.get("botocore_version") == DUMMY_VERSION
+            assert return_val.get("boto3_version") == compare_version
diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/policy/test_compare_policies.py b/ansible_collections/amazon/aws/tests/unit/module_utils/policy/test_compare_policies.py
new file mode 100644
index 000000000..eb6de22db
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/module_utils/policy/test_compare_policies.py
@@ -0,0 +1,339 @@
+# (c) 2017 Red Hat Inc.
+# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies + + +class TestComparePolicy(): + + # ======================================================== + # Setup some initial data that we can use within our tests + # ======================================================== + def setup_method(self): + # A pair of simple IAM Trust relationships using bools, the first a + # native bool the second a quoted string + self.bool_policy_bool = { + 'Version': '2012-10-17', + 'Statement': [ + { + "Action": "sts:AssumeRole", + "Condition": { + "Bool": {"aws:MultiFactorAuthPresent": True} + }, + "Effect": "Allow", + "Principal": {"AWS": "arn:aws:iam::XXXXXXXXXXXX:root"}, + "Sid": "AssumeRoleWithBoolean" + } + ] + } + + self.bool_policy_string = { + 'Version': '2012-10-17', + 'Statement': [ + { + "Action": "sts:AssumeRole", + "Condition": { + "Bool": {"aws:MultiFactorAuthPresent": "true"} + }, + "Effect": "Allow", + "Principal": {"AWS": "arn:aws:iam::XXXXXXXXXXXX:root"}, + "Sid": "AssumeRoleWithBoolean" + } + ] + } + + # A pair of simple bucket policies using numbers, the first a + # native int the second a quoted string + self.numeric_policy_number = { + 'Version': '2012-10-17', + 'Statement': [ + { + "Action": "s3:ListBucket", + "Condition": { + "NumericLessThanEquals": {"s3:max-keys": 15} + }, + "Effect": "Allow", + "Resource": "arn:aws:s3:::examplebucket", + "Sid": "s3ListBucketWithNumericLimit" + } + ] + } + + self.numeric_policy_string = { + 'Version': '2012-10-17', + 'Statement': [ + { + "Action": "s3:ListBucket", + "Condition": { + "NumericLessThanEquals": {"s3:max-keys": "15"} + }, + "Effect": "Allow", + "Resource": "arn:aws:s3:::examplebucket", + "Sid": "s3ListBucketWithNumericLimit" + } + ] + } + + self.small_policy_one = { + 'Version': '2012-10-17', + 'Statement': [ + { + 'Action': 's3:PutObjectAcl', + 'Sid': 'AddCannedAcl2', + 'Resource': 'arn:aws:s3:::test_policy/*', + 'Effect': 'Allow', + 'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']} + } + ] + } + + # The same as small_policy_one, except the single resource is in a list and the contents of Statement are jumbled + self.small_policy_two = { + 'Version': '2012-10-17', + 'Statement': [ + { + 'Effect': 'Allow', + 'Action': 's3:PutObjectAcl', + 'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']}, + 'Resource': ['arn:aws:s3:::test_policy/*'], + 'Sid': 'AddCannedAcl2' + } + ] + } + + self.version_policy_missing = { + 'Statement': [ + { + 'Action': 's3:PutObjectAcl', + 'Sid': 'AddCannedAcl2', + 'Resource': 'arn:aws:s3:::test_policy/*', + 'Effect': 'Allow', + 'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']} + } + ] + } + + self.version_policy_old = { + 'Version': '2008-10-17', + 'Statement': [ + { + 'Action': 's3:PutObjectAcl', + 'Sid': 'AddCannedAcl2', + 'Resource': 'arn:aws:s3:::test_policy/*', + 'Effect': 'Allow', + 'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']} + } + ] + } + + self.version_policy_new = { + 'Version': '2012-10-17', + 'Statement': [ + { + 'Action': 's3:PutObjectAcl', + 'Sid': 'AddCannedAcl2', + 'Resource': 
'arn:aws:s3:::test_policy/*', + 'Effect': 'Allow', + 'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']} + } + ] + } + + self.larger_policy_one = { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "Test", + "Effect": "Allow", + "Principal": { + "AWS": [ + "arn:aws:iam::XXXXXXXXXXXX:user/testuser1", + "arn:aws:iam::XXXXXXXXXXXX:user/testuser2" + ] + }, + "Action": "s3:PutObjectAcl", + "Resource": "arn:aws:s3:::test_policy/*" + }, + { + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam::XXXXXXXXXXXX:user/testuser2" + }, + "Action": [ + "s3:PutObject", + "s3:PutObjectAcl" + ], + "Resource": "arn:aws:s3:::test_policy/*" + } + ] + } + + # The same as larger_policy_one, except having a list of length 1 and jumbled contents + self.larger_policy_two = { + "Version": "2012-10-17", + "Statement": [ + { + "Principal": { + "AWS": ["arn:aws:iam::XXXXXXXXXXXX:user/testuser2"] + }, + "Effect": "Allow", + "Resource": "arn:aws:s3:::test_policy/*", + "Action": [ + "s3:PutObject", + "s3:PutObjectAcl" + ] + }, + { + "Action": "s3:PutObjectAcl", + "Principal": { + "AWS": [ + "arn:aws:iam::XXXXXXXXXXXX:user/testuser1", + "arn:aws:iam::XXXXXXXXXXXX:user/testuser2" + ] + }, + "Sid": "Test", + "Resource": "arn:aws:s3:::test_policy/*", + "Effect": "Allow" + } + ] + } + + # Different than larger_policy_two: a different principal is given + self.larger_policy_three = { + "Version": "2012-10-17", + "Statement": [ + { + "Principal": { + "AWS": ["arn:aws:iam::XXXXXXXXXXXX:user/testuser2"] + }, + "Effect": "Allow", + "Resource": "arn:aws:s3:::test_policy/*", + "Action": [ + "s3:PutObject", + "s3:PutObjectAcl"] + }, + { + "Action": "s3:PutObjectAcl", + "Principal": { + "AWS": [ + "arn:aws:iam::XXXXXXXXXXXX:user/testuser1", + "arn:aws:iam::XXXXXXXXXXXX:user/testuser3" + ] + }, + "Sid": "Test", + "Resource": "arn:aws:s3:::test_policy/*", + "Effect": "Allow" + } + ] + } + + # Minimal policy using wildcarded Principal + self.wildcard_policy_one = { + "Version": "2012-10-17", + "Statement": [ + { + "Principal": { + "AWS": ["*"] + }, + "Effect": "Allow", + "Resource": "arn:aws:s3:::test_policy/*", + "Action": [ + "s3:PutObject", + "s3:PutObjectAcl"] + } + ] + } + + # Minimal policy using wildcarded Principal + self.wildcard_policy_two = { + "Version": "2012-10-17", + "Statement": [ + { + "Principal": "*", + "Effect": "Allow", + "Resource": "arn:aws:s3:::test_policy/*", + "Action": [ + "s3:PutObject", + "s3:PutObjectAcl"] + } + ] + } + + # ======================================================== + # ec2.compare_policies + # ======================================================== + + def test_compare_small_policies_without_differences(self): + """ Testing two small policies which are identical except for: + * The contents of the statement are in different orders + * The second policy contains a list of length one whereas in the first it is a string + """ + assert compare_policies(self.small_policy_one, self.small_policy_two) is False + + def test_compare_large_policies_without_differences(self): + """ Testing two larger policies which are identical except for: + * The statements are in different orders + * The contents of the statements are also in different orders + * The second contains a list of length one for the Principal whereas in the first it is a string + """ + assert compare_policies(self.larger_policy_one, self.larger_policy_two) is False + + def test_compare_larger_policies_with_difference(self): + """ Testing two larger policies which are 
identical except for:
+            * one different principal
+        """
+        assert compare_policies(self.larger_policy_two, self.larger_policy_three) is True
+
+    def test_compare_smaller_policy_with_larger(self):
+        """ Testing two policies of different sizes """
+        assert compare_policies(self.larger_policy_one, self.small_policy_one) is True
+
+    def test_compare_boolean_policy_bool_and_string_are_equal(self):
+        """ Testing two policies one using a quoted boolean, the other a bool """
+        assert compare_policies(self.bool_policy_string, self.bool_policy_bool) is False
+
+    def test_compare_numeric_policy_number_and_string_are_equal(self):
+        """ Testing two policies one using a quoted number, the other an int """
+        assert compare_policies(self.numeric_policy_string, self.numeric_policy_number) is False
+
+    def test_compare_version_policies_defaults_old(self):
+        """ Testing that a policy without Version is considered identical to one
+        with the 'old' Version (by default)
+        """
+        assert compare_policies(self.version_policy_old, self.version_policy_missing) is False
+        assert compare_policies(self.version_policy_new, self.version_policy_missing) is True
+
+    def test_compare_version_policies_default_disabled(self):
+        """ Testing that a policy without Version is not considered identical when default_version=None
+        """
+        assert compare_policies(self.version_policy_missing, self.version_policy_missing, default_version=None) is False
+        assert compare_policies(self.version_policy_old, self.version_policy_missing, default_version=None) is True
+        assert compare_policies(self.version_policy_new, self.version_policy_missing, default_version=None) is True
+
+    def test_compare_version_policies_default_set(self):
+        """ Testing that a policy without Version is treated as having the
+        explicitly requested default_version
+        """
+        assert compare_policies(self.version_policy_missing, self.version_policy_missing, default_version="2012-10-17") is False
+        assert compare_policies(self.version_policy_old, self.version_policy_missing, default_version="2012-10-17") is True
+        assert compare_policies(self.version_policy_old, self.version_policy_missing, default_version="2008-10-17") is False
+        assert compare_policies(self.version_policy_new, self.version_policy_missing, default_version="2012-10-17") is False
+        assert compare_policies(self.version_policy_new, self.version_policy_missing, default_version="2008-10-17") is True
+
+    def test_compare_version_policies_with_none(self):
+        """ Testing that comparing with no policy works
+        """
+        assert compare_policies(self.small_policy_one, None) is True
+        assert compare_policies(None, self.small_policy_one) is True
+        assert compare_policies(None, None) is False
+
+    def test_compare_wildcard_policies_without_differences(self):
+        """ Testing two small wildcard policies which are identical except for:
+            * Principal: "*" vs Principal: {"AWS": ["*"]}
+        """
+        assert compare_policies(self.wildcard_policy_one, self.wildcard_policy_two) is False
diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/retries/test_awsretry.py b/ansible_collections/amazon/aws/tests/unit/module_utils/retries/test_awsretry.py
new file mode 100644
index 000000000..e08700382
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/module_utils/retries/test_awsretry.py
@@ -0,0 +1,96 @@
+# -*- coding: utf-8 -*-
+# (c) 2015, Allen Sanabria
+#
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, 
print_function) +__metaclass__ = type + +try: + import botocore +except ImportError: + pass + +import pytest + +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3 + +if not HAS_BOTO3: + pytestmark = pytest.mark.skip("test_awsretry.py requires the python modules 'boto3' and 'botocore'") + + +class TestAWSRetry(): + + def test_no_failures(self): + self.counter = 0 + + @AWSRetry.backoff(tries=2, delay=0.1) + def no_failures(): + self.counter += 1 + + no_failures() + assert self.counter == 1 + + def test_extend_boto3_failures(self): + self.counter = 0 + err_response = {'Error': {'Code': 'MalformedPolicyDocument'}} + + @AWSRetry.backoff(tries=2, delay=0.1, catch_extra_error_codes=['MalformedPolicyDocument']) + def extend_failures(): + self.counter += 1 + if self.counter < 2: + raise botocore.exceptions.ClientError(err_response, 'You did something wrong.') + else: + return 'success' + + result = extend_failures() + assert result == 'success' + assert self.counter == 2 + + def test_retry_once(self): + self.counter = 0 + err_response = {'Error': {'Code': 'InternalFailure'}} + + @AWSRetry.backoff(tries=2, delay=0.1) + def retry_once(): + self.counter += 1 + if self.counter < 2: + raise botocore.exceptions.ClientError(err_response, 'Something went wrong!') + else: + return 'success' + + result = retry_once() + assert result == 'success' + assert self.counter == 2 + + def test_reached_limit(self): + self.counter = 0 + err_response = {'Error': {'Code': 'RequestLimitExceeded'}} + + @AWSRetry.backoff(tries=4, delay=0.1) + def fail(): + self.counter += 1 + raise botocore.exceptions.ClientError(err_response, 'toooo fast!!') + + with pytest.raises(botocore.exceptions.ClientError) as context: + fail() + response = context.value.response + assert response['Error']['Code'] == 'RequestLimitExceeded' + assert self.counter == 4 + + def test_unexpected_exception_does_not_retry(self): + self.counter = 0 + err_response = {'Error': {'Code': 'AuthFailure'}} + + @AWSRetry.backoff(tries=4, delay=0.1) + def raise_unexpected_error(): + self.counter += 1 + raise botocore.exceptions.ClientError(err_response, 'unexpected error') + + with pytest.raises(botocore.exceptions.ClientError) as context: + raise_unexpected_error() + response = context.value.response + assert response['Error']['Code'] == 'AuthFailure' + assert self.counter == 1 diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/test_elbv2.py b/ansible_collections/amazon/aws/tests/unit/module_utils/test_elbv2.py new file mode 100644 index 000000000..48c32c78e --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/test_elbv2.py @@ -0,0 +1,214 @@ +# +# (c) 2021 Red Hat Inc. 
+#
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.amazon.aws.plugins.module_utils import elbv2
+from ansible_collections.amazon.aws.tests.unit.compat.mock import MagicMock
+
+one_action = [
+    {
+        "ForwardConfig": {
+            "TargetGroupStickinessConfig": {"Enabled": False},
+            "TargetGroups": [
+                {
+                    "TargetGroupArn": "arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/my-tg-58045486/5b231e04f663ae21",
+                    "Weight": 1,
+                }
+            ],
+        },
+        "TargetGroupArn": "arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/my-tg-58045486/5b231e04f663ae21",
+        "Type": "forward",
+    }
+]
+
+one_action_two_tg = [
+    {
+        "ForwardConfig": {
+            "TargetGroupStickinessConfig": {"Enabled": False},
+            "TargetGroups": [
+                {
+                    "TargetGroupArn": "arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/my-tg-58045486/5b231e04f663ae21",
+                    "Weight": 1,
+                },
+                {
+                    "TargetGroupArn": "arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/my-tg-dadf7b62/be2f50b4041f11ed",
+                    "Weight": 1,
+                }
+            ],
+        },
+        "Type": "forward",
+    }
+]
+
+
+def test__sort_actions_one_entry():
+    assert elbv2._sort_actions(one_action) == one_action
+
+
+class TestElBV2Utils():
+
+    def setup_method(self):
+        self.connection = MagicMock(name="connection")
+        self.module = MagicMock(name="module")
+
+        self.module.params = dict()
+
+        self.conn_paginator = MagicMock(name="connection.paginator")
+        self.paginate = MagicMock(name="paginator.paginate")
+
+        self.connection.get_paginator.return_value = self.conn_paginator
+        self.conn_paginator.paginate.return_value = self.paginate
+
+        self.loadbalancer = {
+            "Type": "application",
+            "Scheme": "internet-facing",
+            "IpAddressType": "ipv4",
+            "VpcId": "vpc-3ac0fb5f",
+            "AvailabilityZones": [
+                {
+                    "ZoneName": "us-west-2a",
+                    "SubnetId": "subnet-8360a9e7"
+                },
+                {
+                    "ZoneName": "us-west-2b",
+                    "SubnetId": "subnet-b7d581c0"
+                }
+            ],
+            "CreatedTime": "2016-03-25T21:26:12.920Z",
+            "CanonicalHostedZoneId": "Z2P70J7EXAMPLE",
+            "DNSName": "my-load-balancer-424835706.us-west-2.elb.amazonaws.com",
+            "SecurityGroups": [
+                "sg-5943793c"
+            ],
+            "LoadBalancerName": "my-load-balancer",
+            "State": {
+                "Code": "active"
+            },
+            "LoadBalancerArn": "arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/my-load-balancer/50dc6c495c0c9188"
+        }
+        self.paginate.build_full_result.return_value = {
+            'LoadBalancers': [self.loadbalancer]
+        }
+
+        self.connection.describe_load_balancer_attributes.return_value = {
+            "Attributes": [
+                {
+                    "Value": "false",
+                    "Key": "access_logs.s3.enabled"
+                },
+                {
+                    "Value": "",
+                    "Key": "access_logs.s3.bucket"
+                },
+                {
+                    "Value": "",
+                    "Key": "access_logs.s3.prefix"
+                },
+                {
+                    "Value": "60",
+                    "Key": "idle_timeout.timeout_seconds"
+                },
+                {
+                    "Value": "false",
+                    "Key": "deletion_protection.enabled"
+                },
+                {
+                    "Value": "true",
+                    "Key": "routing.http2.enabled"
+                },
+                {
+                    "Value": "defensive",
+                    "Key": "routing.http.desync_mitigation_mode"
+                },
+                {
+                    "Value": "true",
+                    "Key": "routing.http.drop_invalid_header_fields.enabled"
+                },
+                {
+                    "Value": "true",
+                    "Key": "routing.http.x_amzn_tls_version_and_cipher_suite.enabled"
+                },
+                {
+                    "Value": "true",
+                    "Key": "routing.http.xff_client_port.enabled"
+                },
+                {
+                    "Value": "true",
+                    "Key": "waf.fail_open.enabled"
+                },
+            ]
+        }
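+        # The paginator wiring above reflects an assumption about how
+        # ElasticLoadBalancerV2 looks up the load balancer, roughly:
+        #   paginator = connection.get_paginator('describe_load_balancers')
+        #   result = paginator.paginate(...).build_full_result()
+        #   result['LoadBalancers'][0]  # -> self.loadbalancer
+        # so it is build_full_result(), not the connection itself, that hands
+        # the description to the code under test.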
"arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/my-load-balancer/50dc6c495c0c9188", + "Tags": [ + { + "Value": "ansible", + "Key": "project" + }, + { + "Value": "RedHat", + "Key": "company" + } + ] + } + ] + } + self.elbv2obj = elbv2.ElasticLoadBalancerV2(self.connection, self.module) + + # Test the simplest case - Read the ip address type + def test_get_elb_ip_address_type(self): + # Run module + return_value = self.elbv2obj.get_elb_ip_address_type() + # check that no method was called and this has been retrieved from elb attributes + self.connection.describe_load_balancer_attributes.assert_called_once() + self.connection.get_paginator.assert_called_once() + self.connection.describe_tags.assert_called_once() + self.conn_paginator.paginate.assert_called_once() + # assert we got the expected value + assert return_value == 'ipv4' + + # Test modify_ip_address_type idempotency + def test_modify_ip_address_type_idempotency(self): + # Run module + self.elbv2obj.modify_ip_address_type("ipv4") + # check that no method was called and this has been retrieved from elb attributes + self.connection.set_ip_address_type.assert_not_called() + # assert we got the expected value + assert self.elbv2obj.changed is False + + # Test modify_ip_address_type + def test_modify_ip_address_type_update(self): + # Run module + self.elbv2obj.modify_ip_address_type("dualstack") + # check that no method was called and this has been retrieved from elb attributes + self.connection.set_ip_address_type.assert_called_once() + # assert we got the expected value + assert self.elbv2obj.changed is True + + # Test get_elb_attributes + def test_get_elb_attributes(self): + # Build expected result + expected_elb_attributes = { + "access_logs_s3_bucket": "", + "access_logs_s3_enabled": "false", + "access_logs_s3_prefix": "", + "deletion_protection_enabled": "false", + "idle_timeout_timeout_seconds": "60", + "routing_http2_enabled": "true", + "routing_http_desync_mitigation_mode": "defensive", + "routing_http_drop_invalid_header_fields_enabled": "true", + "routing_http_x_amzn_tls_version_and_cipher_suite_enabled": "true", + "routing_http_xff_client_port_enabled": "true", + "waf_fail_open_enabled": "true" + } + # Run module + actual_elb_attributes = self.elbv2obj.get_elb_attributes() + # Assert we got the expected result + assert actual_elb_attributes == expected_elb_attributes diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/test_iam.py b/ansible_collections/amazon/aws/tests/unit/module_utils/test_iam.py new file mode 100644 index 000000000..4ce430262 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/test_iam.py @@ -0,0 +1,300 @@ +# +# (c) 2020 Red Hat Inc. 
+# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +try: + import botocore +except ImportError: + # Handled by HAS_BOTO3 + pass + +from ansible_collections.amazon.aws.tests.unit.compat.mock import MagicMock + +import ansible_collections.amazon.aws.plugins.module_utils.iam as utils_iam +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3 + +if not HAS_BOTO3: + pytestmark = pytest.mark.skip("test_iam.py requires the python modules 'boto3' and 'botocore'") + + +class TestIamUtils(): + + def _make_denied_exception(self, partition): + return botocore.exceptions.ClientError( + { + "Error": { + "Code": "AccessDenied", + "Message": "User: arn:" + partition + ":iam::123456789012:user/ExampleUser " + + "is not authorized to perform: iam:GetUser on resource: user ExampleUser" + }, + "ResponseMetadata": { + "RequestId": "01234567-89ab-cdef-0123-456789abcdef" + } + }, 'getUser') + + def _make_unexpected_exception(self): + return botocore.exceptions.ClientError( + { + "Error": { + "Code": "SomeThingWentWrong", + "Message": "Boom!" + }, + "ResponseMetadata": { + "RequestId": "01234567-89ab-cdef-0123-456789abcdef" + } + }, 'someCall') + + def _make_encoded_exception(self): + return botocore.exceptions.ClientError( + { + "Error": { + "Code": "AccessDenied", + "Message": "You are not authorized to perform this operation. Encoded authorization failure message: " + + "fEwXX6llx3cClm9J4pURgz1XPnJPrYexEbrJcLhFkwygMdOgx_-aEsj0LqRM6Kxt2HVI6prUhDwbJqBo9U2V7iRKZ" + + "T6ZdJvHH02cXmD0Jwl5vrTsf0PhBcWYlH5wl2qME7xTfdolEUr4CzumCiti7ETiO-RDdHqWlasBOW5bWsZ4GSpPdU" + + "06YAX0TfwVBs48uU5RpCHfz1uhSzez-3elbtp9CmTOHLt5pzJodiovccO55BQKYLPtmJcs6S9YLEEogmpI4Cb1D26" + + "fYahDh51jEmaohPnW5pb1nQe2yPEtuIhtRzNjhFCOOMwY5DBzNsymK-Gj6eJLm7FSGHee4AHLU_XmZMe_6bcLAiOx" + + "6Zdl65Kdd0hLcpwVxyZMi27HnYjAdqRlV3wuCW2PkhAW14qZQLfiuHZDEwnPe2PBGSlFcCmkQvJvX-YLoA7Uyc2wf" + + "NX5RJm38STwfiJSkQaNDhHKTWKiLOsgY4Gze6uZoG7zOcFXFRyaA4cbMmI76uyBO7j-9uQUCtBYqYto8x_9CUJcxI" + + "VC5SPG_C1mk-WoDMew01f0qy-bNaCgmJ9TOQGd08FyuT1SaMpCC0gX6mHuOnEgkFw3veBIowMpp9XcM-yc42fmIOp" + + "FOdvQO6uE9p55Qc-uXvsDTTvT3A7EeFU8a_YoAIt9UgNYM6VTvoprLz7dBI_P6C-bdPPZCY2amm-dJNVZelT6TbJB" + + "H_Vxh0fzeiSUBersy_QzB0moc-vPWgnB-IkgnYLV-4L3K0L2" + }, + "ResponseMetadata": { + "RequestId": "01234567-89ab-cdef-0123-456789abcdef" + } + }, 'someCall') + + def _make_botocore_exception(self): + return botocore.exceptions.EndpointConnectionError(endpoint_url='junk.endpoint') + + def setup_method(self): + self.sts_client = MagicMock() + self.iam_client = MagicMock() + self.module = MagicMock() + clients = {'sts': self.sts_client, 'iam': self.iam_client} + + def get_client(*args, **kwargs): + return clients[args[0]] + + self.module.client.side_effect = get_client + self.module.fail_json_aws.side_effect = SystemExit(1) + self.module.fail_json.side_effect = SystemExit(2) + + # ========== get_aws_account_id ============ + # This is just a minimal (compatibility) wrapper around get_aws_account_info + # Perform some basic testing and call it a day. 
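+    # For orientation, a minimal sketch (not the implementation under test) of
+    # how both values these tests expect can be read off a caller ARN:
+    #
+    #   arn = 'arn:aws-cn:iam::123456789012:user/ExampleUser'
+    #   arn.split(':')[1]   # -> 'aws-cn'        (partition)
+    #   arn.split(':')[4]   # -> '123456789012'  (account id)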
+
+    # Test the simplest case - We're permitted to call GetCallerIdentity
+    def test_get_aws_account_id__caller_success(self):
+        # Prepare
+        self.sts_client.get_caller_identity.side_effect = [{'UserId': 'AIDA12345EXAMPLE54321',
+                                                            'Account': '123456789012',
+                                                            'Arn': 'arn:aws:iam::123456789012:user/ExampleUser'}]
+        # Run module
+        return_value = utils_iam.get_aws_account_id(self.module)
+        # Check we only saw the calls we mocked out
+        self.module.client.assert_called_once()
+        self.sts_client.get_caller_identity.assert_called_once()
+        # Check we got the values back we expected.
+        assert return_value == '123456789012'
+
+    # Test the simplest case - We're permitted to call GetCallerIdentity
+    # (China partition)
+    def test_get_aws_account_id__caller_success_cn(self):
+        # Prepare
+        self.sts_client.get_caller_identity.side_effect = [{'UserId': 'AIDA12345EXAMPLE54321',
+                                                            'Account': '123456789012',
+                                                            'Arn': 'arn:aws-cn:iam::123456789012:user/ExampleUser'}]
+        # Run module
+        return_value = utils_iam.get_aws_account_id(self.module)
+        # Check we only saw the calls we mocked out
+        self.module.client.assert_called_once()
+        self.sts_client.get_caller_identity.assert_called_once()
+        # Check we got the values back we expected.
+        assert return_value == '123456789012'
+
+    # ========== get_aws_account_info ============
+    # Test the simplest case - We're permitted to call GetCallerIdentity
+    def test_get_aws_account_info__caller_success(self):
+        # Prepare
+        self.sts_client.get_caller_identity.side_effect = [{'UserId': 'AIDA12345EXAMPLE54321',
+                                                            'Account': '123456789012',
+                                                            'Arn': 'arn:aws:iam::123456789012:user/ExampleUser'}]
+        # Run module
+        return_value = utils_iam.get_aws_account_info(self.module)
+        # Check we only saw the calls we mocked out
+        self.module.client.assert_called_once()
+        self.sts_client.get_caller_identity.assert_called_once()
+        # Check we got the values back we expected.
+        assert return_value == ('123456789012', 'aws',)
+
+    # (China partition)
+    def test_get_aws_account_info__caller_success_cn(self):
+        # Prepare
+        self.sts_client.get_caller_identity.side_effect = [{'UserId': 'AIDA12345EXAMPLE54321',
+                                                            'Account': '123456789012',
+                                                            'Arn': 'arn:aws-cn:iam::123456789012:user/ExampleUser'}]
+        # Run module
+        return_value = utils_iam.get_aws_account_info(self.module)
+        # Check we only saw the calls we mocked out
+        self.module.client.assert_called_once()
+        self.sts_client.get_caller_identity.assert_called_once()
+        # Check we got the values back we expected.
+        assert return_value == ('123456789012', 'aws-cn',)
+
+    # (US-Gov partition)
+    def test_get_aws_account_info__caller_success_gov(self):
+        # Prepare
+        self.sts_client.get_caller_identity.side_effect = [{'UserId': 'AIDA12345EXAMPLE54321',
+                                                            'Account': '123456789012',
+                                                            'Arn': 'arn:aws-us-gov:iam::123456789012:user/ExampleUser'}]
+        # Run module
+        return_value = utils_iam.get_aws_account_info(self.module)
+        # Check we only saw the calls we mocked out
+        self.module.client.assert_called_once()
+        self.sts_client.get_caller_identity.assert_called_once()
+        # Check we got the values back we expected.
+        assert return_value == ('123456789012', 'aws-us-gov',)
+
+    # If sts:get_caller_identity fails (most likely something weird on the
+    # client side), then try a few extra options.
+    # Test response if STS fails and we need to fall back to GetUser
+    def test_get_aws_account_info__user_success(self):
+        # Prepare
+        self.sts_client.get_caller_identity.side_effect = [self._make_botocore_exception()]
+        self.iam_client.get_user.side_effect = [{"User": {"Path": "/", "UserName": "ExampleUser", "UserId": "AIDA12345EXAMPLE54321",
+                                                          "Arn": "arn:aws:iam::123456789012:user/ExampleUser", "CreateDate": "2020-09-08T14:04:32Z"}}]
+        # Run module
+        return_value = utils_iam.get_aws_account_info(self.module)
+        # Check we only saw the calls we mocked out
+        assert self.module.client.call_count == 2
+        self.sts_client.get_caller_identity.assert_called_once()
+        self.iam_client.get_user.assert_called_once()
+        # Check we got the values back we expected.
+        assert return_value == ('123456789012', 'aws',)
+
+    # (China partition)
+    def test_get_aws_account_info__user_success_cn(self):
+        # Prepare
+        self.sts_client.get_caller_identity.side_effect = [self._make_botocore_exception()]
+        self.iam_client.get_user.side_effect = [{"User": {"Path": "/", "UserName": "ExampleUser", "UserId": "AIDA12345EXAMPLE54321",
+                                                          "Arn": "arn:aws-cn:iam::123456789012:user/ExampleUser", "CreateDate": "2020-09-08T14:04:32Z"}}]
+        # Run module
+        return_value = utils_iam.get_aws_account_info(self.module)
+        # Check we only saw the calls we mocked out
+        assert self.module.client.call_count == 2
+        self.sts_client.get_caller_identity.assert_called_once()
+        self.iam_client.get_user.assert_called_once()
+        # Check we got the values back we expected.
+        assert return_value == ('123456789012', 'aws-cn',)
+
+    # (US-Gov partition)
+    def test_get_aws_account_info__user_success_gov(self):
+        # Prepare
+        self.sts_client.get_caller_identity.side_effect = [self._make_botocore_exception()]
+        self.iam_client.get_user.side_effect = [{"User": {"Path": "/", "UserName": "ExampleUser", "UserId": "AIDA12345EXAMPLE54321",
+                                                          "Arn": "arn:aws-us-gov:iam::123456789012:user/ExampleUser", "CreateDate": "2020-09-08T14:04:32Z"}}]
+        # Run module
+        return_value = utils_iam.get_aws_account_info(self.module)
+        # Check we only saw the calls we mocked out
+        assert self.module.client.call_count == 2
+        self.sts_client.get_caller_identity.assert_called_once()
+        self.iam_client.get_user.assert_called_once()
+        # Check we got the values back we expected.
+        assert return_value == ('123456789012', 'aws-us-gov',)
+
+    # Test response if STS and IAM fail and we need to fall back to the denial message
+    def test_get_aws_account_info__user_denied(self):
+        # Prepare
+        self.sts_client.get_caller_identity.side_effect = [self._make_botocore_exception()]
+        self.iam_client.get_user.side_effect = [self._make_denied_exception('aws')]
+        # Run module
+        return_value = utils_iam.get_aws_account_info(self.module)
+        # Check we only saw the calls we mocked out
+        assert self.module.client.call_count == 2
+        self.sts_client.get_caller_identity.assert_called_once()
+        self.iam_client.get_user.assert_called_once()
+        # Check we got the values back we expected.
+ assert return_value == ('123456789012', 'aws',) + + # (China partition) + def test_get_aws_account_info__user_denied_cn(self): + # Prepare + self.sts_client.get_caller_identity.side_effect = [self._make_botocore_exception()] + self.iam_client.get_user.side_effect = [self._make_denied_exception('aws-cn')] + # Run module + return_value = utils_iam.get_aws_account_info(self.module) + # Check we only saw the calls we mocked out + assert self.module.client.call_count == 2 + self.sts_client.get_caller_identity.assert_called_once() + self.iam_client.get_user.assert_called_once() + # Check we got the values back we expected. + assert return_value == ('123456789012', 'aws-cn',) + + # (US-Gov partition) + def test_get_aws_account_info__user_denied_gov(self): + # Prepare + self.sts_client.get_caller_identity.side_effect = [self._make_botocore_exception()] + self.iam_client.get_user.side_effect = [self._make_denied_exception('aws-us-gov')] + # Run module + return_value = utils_iam.get_aws_account_info(self.module) + # Check we only saw the calls we mocked out + assert self.module.client.call_count == 2 + self.sts_client.get_caller_identity.assert_called_once() + self.iam_client.get_user.assert_called_once() + # Check we got the values back we expected. + assert return_value == ('123456789012', 'aws-us-gov',) + + # Test that we fail gracefully if Boto throws exceptions at us... + def test_get_aws_account_info__boto_failures(self): + # Prepare + self.sts_client.get_caller_identity.side_effect = [self._make_botocore_exception()] + self.iam_client.get_user.side_effect = [self._make_botocore_exception()] + # Run module + with pytest.raises(SystemExit) as e: + utils_iam.get_aws_account_info(self.module) + # Check we only saw the calls we mocked out + assert self.module.client.call_count == 2 + self.sts_client.get_caller_identity.assert_called_once() + self.iam_client.get_user.assert_called_once() + # Check we got the values back we expected. + assert e.type == SystemExit + assert e.value.code == 1 # 1 == fail_json_aws + + def test_get_aws_account_info__client_failures(self): + # Prepare + self.sts_client.get_caller_identity.side_effect = [self._make_unexpected_exception()] + self.iam_client.get_user.side_effect = [self._make_unexpected_exception()] + # Run module + with pytest.raises(SystemExit) as e: + utils_iam.get_aws_account_info(self.module) + # Check we only saw the calls we mocked out + assert self.module.client.call_count == 2 + self.sts_client.get_caller_identity.assert_called_once() + self.iam_client.get_user.assert_called_once() + # Check we got the values back we expected. + assert e.type == SystemExit + assert e.value.code == 1 # 1 == fail_json_aws + + def test_get_aws_account_info__encoded_failures(self): + # Prepare + self.sts_client.get_caller_identity.side_effect = [self._make_encoded_exception()] + self.iam_client.get_user.side_effect = [self._make_encoded_exception()] + # Run module + with pytest.raises(SystemExit) as e: + utils_iam.get_aws_account_info(self.module) + # Check we only saw the calls we mocked out + assert self.module.client.call_count == 2 + self.sts_client.get_caller_identity.assert_called_once() + self.iam_client.get_user.assert_called_once() + # Check we got the values back we expected. 
+    assert e.type == SystemExit
+    assert e.value.code == 1  # 1 == fail_json (we couldn't parse the AccessDenied errors)
diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/test_rds.py b/ansible_collections/amazon/aws/tests/unit/module_utils/test_rds.py
new file mode 100644
index 000000000..9d96d44a8
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/module_utils/test_rds.py
@@ -0,0 +1,805 @@
+# (c) 2021 Red Hat Inc.
+#
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import sys
+import pytest
+
+if sys.version_info < (3, 7):
+    pytest.skip("contextlib.nullcontext was introduced in Python 3.7", allow_module_level=True)
+
+from contextlib import nullcontext
+
+try:
+    import botocore
+except ImportError:
+    # Handled by HAS_BOTO3
+    pass
+
+from ansible_collections.amazon.aws.tests.unit.compat.mock import MagicMock
+
+from ansible_collections.amazon.aws.plugins.module_utils import rds
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3
+
+if not HAS_BOTO3:
+    pytestmark = pytest.mark.skip("test_rds.py requires the python modules 'boto3' and 'botocore'")
+
+
+def expected(x):
+    return x, nullcontext()
+
+
+def error(*args, **kwargs):
+    return MagicMock(), pytest.raises(*args, **kwargs)
+
+
+def build_exception(
+    operation_name, code=None, message=None, http_status_code=None, error=True
+):
+    # Support skipping the test if botocore isn't installed
+    # (called by parametrize before skip is evaluated)
+    if not HAS_BOTO3:
+        return Exception('MissingBotoCore')
+    response = {}
+    if error or code or message:
+        response["Error"] = {}
+    if code:
+        response["Error"]["Code"] = code
+    if message:
+        response["Error"]["Message"] = message
+    if http_status_code:
+        response["ResponseMetadata"] = {"HTTPStatusCode": http_status_code}
+
+    return botocore.exceptions.ClientError(response, operation_name)
+
+
+@pytest.mark.parametrize("waiter_name", ["", "db_snapshot_available"])
+def test__wait_for_instance_snapshot_status(waiter_name):
+    rds.wait_for_instance_snapshot_status(MagicMock(), MagicMock(), "test", waiter_name)
+
+
+@pytest.mark.parametrize("waiter_name", ["", "db_cluster_snapshot_available"])
+def test__wait_for_cluster_snapshot_status(waiter_name):
+    rds.wait_for_cluster_snapshot_status(MagicMock(), MagicMock(), "test", waiter_name)
+
+
+@pytest.mark.parametrize(
+    "input, expected",
+    [
+        (
+            "db_snapshot_available",
+            "Failed to wait for DB snapshot test to be available",
+        ),
+        (
+            "db_snapshot_deleted",
+            "Failed to wait for DB snapshot test to be deleted",
+        ),
+    ],
+)
+def test__wait_for_instance_snapshot_status_failed(input, expected):
+    spec = {"get_waiter.side_effect": [botocore.exceptions.WaiterError(None, None, None)]}
+    client = MagicMock(**spec)
+    module = MagicMock()
+
+    rds.wait_for_instance_snapshot_status(client, module, "test", input)
+    module.fail_json_aws.assert_called_once()
+    assert module.fail_json_aws.call_args[1]["msg"] == expected
+
+
+@pytest.mark.parametrize(
+    "input, expected",
+    [
+        (
+            "db_cluster_snapshot_available",
+            "Failed to wait for DB cluster snapshot test to be available",
+        ),
+        (
+            "db_cluster_snapshot_deleted",
+            "Failed to wait for DB cluster snapshot test to be deleted",
+        ),
+    ],
+)
+def test__wait_for_cluster_snapshot_status_failed(input, expected):
+    spec = {"get_waiter.side_effect": [botocore.exceptions.WaiterError(None, None, None)]}
+    client = MagicMock(**spec)
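+    # The failure path is driven by get_waiter raising WaiterError straight
+    # away; WaiterError(None, None, None) is simply the cheapest valid call of
+    # its (name, reason, last_response) constructor, and the code under test
+    # presumably only cares that the exception type is raised.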
+    module = MagicMock()
+
+    rds.wait_for_cluster_snapshot_status(client, module, "test", input)
+    module.fail_json_aws.assert_called_once()
+    assert module.fail_json_aws.call_args[1]["msg"] == expected
+
+
+@pytest.mark.parametrize(
+    "method_name, params, expected, error",
+    [
+        (
+            "delete_db_cluster",
+            {
+                "new_db_cluster_identifier": "test",
+            },
+            *expected(
+                rds.Boto3ClientMethod(
+                    name="delete_db_cluster",
+                    waiter="cluster_deleted",
+                    operation_description="delete DB cluster",
+                    resource='cluster',
+                    retry_codes=['InvalidDBClusterState']
+                )
+            ),
+        ),
+        (
+            "create_db_cluster",
+            {
+                "new_db_cluster_identifier": "test",
+            },
+            *expected(
+                rds.Boto3ClientMethod(
+                    name="create_db_cluster",
+                    waiter="cluster_available",
+                    operation_description="create DB cluster",
+                    resource='cluster',
+                    retry_codes=['InvalidDBClusterState']
+                )
+            ),
+        ),
+        (
+            "restore_db_cluster_from_snapshot",
+            {
+                "new_db_cluster_identifier": "test",
+            },
+            *expected(
+                rds.Boto3ClientMethod(
+                    name="restore_db_cluster_from_snapshot",
+                    waiter="cluster_available",
+                    operation_description="restore DB cluster from snapshot",
+                    resource='cluster',
+                    retry_codes=['InvalidDBClusterSnapshotState']
+                )
+            ),
+        ),
+        (
+            "modify_db_cluster",
+            {
+                "new_db_cluster_identifier": "test",
+            },
+            *expected(
+                rds.Boto3ClientMethod(
+                    name="modify_db_cluster",
+                    waiter="cluster_available",
+                    operation_description="modify DB cluster",
+                    resource='cluster',
+                    retry_codes=['InvalidDBClusterState']
+                )
+            ),
+        ),
+        (
+            "list_tags_for_resource",
+            {
+                "new_db_cluster_identifier": "test",
+            },
+            *expected(
+                rds.Boto3ClientMethod(
+                    name="list_tags_for_resource",
+                    waiter="cluster_available",
+                    operation_description="list tags for resource",
+                    resource='cluster',
+                    retry_codes=['InvalidDBClusterState']
+                )
+            ),
+        ),
+        (
+            "fake_method",
+            {
+                "wait": False
+            },
+            *expected(
+                rds.Boto3ClientMethod(
+                    name="fake_method",
+                    waiter="",
+                    operation_description="fake method",
+                    resource='',
+                    retry_codes=[]
+                )
+            ),
+        ),
+        (
+            "fake_method",
+            {
+                "wait": True
+            },
+            *error(
+                NotImplementedError,
+                match="method fake_method hasn't been added to the list of accepted methods to use a waiter in module_utils/rds.py",
+            ),
+        ),
+    ],
+)
+def test__get_rds_method_attribute_cluster(method_name, params, expected, error):
+    module = MagicMock()
+    module.params = params
+    with error:
+        assert rds.get_rds_method_attribute(method_name, module) == expected
+
+
+@pytest.mark.parametrize(
+    "method_name, params, expected, error",
+    [
+        (
+            "delete_db_instance",
+            {
+                "new_db_instance_identifier": "test",
+            },
+            *expected(
+                rds.Boto3ClientMethod(
+                    name="delete_db_instance",
+                    waiter="db_instance_deleted",
+                    operation_description="delete DB instance",
+                    resource='instance',
+                    retry_codes=['InvalidDBInstanceState', 'InvalidDBSecurityGroupState']
+                )
+            ),
+        ),
+        (
+            "create_db_instance",
+            {
+                "new_db_instance_identifier": "test",
+            },
+            *expected(
+                rds.Boto3ClientMethod(
+                    name="create_db_instance",
+                    waiter="db_instance_available",
+                    operation_description="create DB instance",
+                    resource='instance',
+                    retry_codes=['InvalidDBInstanceState', 'InvalidDBSecurityGroupState']
+                )
+            ),
+        ),
+        (
+            "stop_db_instance",
+            {
+                "new_db_instance_identifier": "test",
+            },
+            *expected(
+                rds.Boto3ClientMethod(
+                    name="stop_db_instance",
+                    waiter="db_instance_stopped",
+                    operation_description="stop DB instance",
+                    resource='instance',
+                    retry_codes=['InvalidDBInstanceState', 'InvalidDBSecurityGroupState']
+                )
+            ),
+        ),
+        (
+            "promote_read_replica",
+            {
"new_db_instance_identifier": "test", + }, + *expected( + rds.Boto3ClientMethod( + name="promote_read_replica", + waiter="read_replica_promoted", + operation_description="promote read replica", + resource='instance', + retry_codes=['InvalidDBInstanceState', 'InvalidDBSecurityGroupState'] + ) + ), + ), + ( + "restore_db_instance_from_db_snapshot", + { + "new_db_instance_identifier": "test", + }, + *expected( + rds.Boto3ClientMethod( + name="restore_db_instance_from_db_snapshot", + waiter="db_instance_available", + operation_description="restore DB instance from DB snapshot", + resource='instance', + retry_codes=['InvalidDBSnapshotState'] + ) + ), + ), + ( + "modify_db_instance", + { + "new_db_instance_identifier": "test", + }, + *expected( + rds.Boto3ClientMethod( + name="modify_db_instance", + waiter="db_instance_available", + operation_description="modify DB instance", + resource='instance', + retry_codes=['InvalidDBInstanceState', 'InvalidDBSecurityGroupState'] + ) + ), + ), + ( + "add_role_to_db_instance", + { + "new_db_instance_identifier": "test", + }, + *expected( + rds.Boto3ClientMethod( + name="add_role_to_db_instance", + waiter="role_associated", + operation_description="add role to DB instance", + resource='instance', + retry_codes=['InvalidDBInstanceState', 'InvalidDBSecurityGroupState'] + ) + ), + ), + ( + "remove_role_from_db_instance", + { + "new_db_instance_identifier": "test", + }, + *expected( + rds.Boto3ClientMethod( + name="remove_role_from_db_instance", + waiter="role_disassociated", + operation_description="remove role from DB instance", + resource='instance', + retry_codes=['InvalidDBInstanceState', 'InvalidDBSecurityGroupState'] + ) + ), + ), + ( + "list_tags_for_resource", + { + "new_db_instance_identifier": "test", + }, + *expected( + rds.Boto3ClientMethod( + name="list_tags_for_resource", + waiter="db_instance_available", + operation_description="list tags for resource", + resource='instance', + retry_codes=['InvalidDBInstanceState', 'InvalidDBSecurityGroupState'] + ) + ), + ), + ( + "fake_method", + { + "wait": False + }, + *expected( + rds.Boto3ClientMethod( + name="fake_method", + waiter="", + operation_description="fake method", + resource='', + retry_codes=[] + ) + ), + ), + ( + "fake_method", + { + "wait": True + }, + *error( + NotImplementedError, + match="method fake_method hasn't been added to the list of accepted methods to use a waiter in module_utils/rds.py", + ), + ), + ], +) +def test__get_rds_method_attribute_instance(method_name, params, expected, error): + module = MagicMock() + module.params = params + with error: + assert rds.get_rds_method_attribute(method_name, module) == expected + + +@pytest.mark.parametrize( + "method_name, params, expected, error", + [ + ( + "delete_db_snapshot", + { + "db_snapshot_identifier": "test", + }, + *expected( + rds.Boto3ClientMethod( + name="delete_db_snapshot", + waiter="db_snapshot_deleted", + operation_description="delete DB snapshot", + resource='instance_snapshot', + retry_codes=['InvalidDBSnapshotState'] + ) + ), + ), + ( + "create_db_snapshot", + { + "db_snapshot_identifier": "test", + }, + *expected( + rds.Boto3ClientMethod( + name="create_db_snapshot", + waiter="db_snapshot_available", + operation_description="create DB snapshot", + resource='instance_snapshot', + retry_codes=['InvalidDBInstanceState'] + ) + ), + ), + ( + "copy_db_snapshot", + { + "source_db_snapshot_identifier": "test", + "db_snapshot_identifier": "test-copy" + }, + *expected( + rds.Boto3ClientMethod( + name="copy_db_snapshot", + 
waiter="db_snapshot_available", + operation_description="copy DB snapshot", + resource='instance_snapshot', + retry_codes=['InvalidDBSnapshotState'] + ) + ), + ), + ( + "list_tags_for_resource", + { + "db_snapshot_identifier": "test", + }, + *expected( + rds.Boto3ClientMethod( + name="list_tags_for_resource", + waiter="db_snapshot_available", + operation_description="list tags for resource", + resource='instance_snapshot', + retry_codes=['InvalidDBSnapshotState'] + ) + ), + ), + ( + "delete_db_cluster_snapshot", + { + "db_cluster_snapshot_identifier": "test", + }, + *expected( + rds.Boto3ClientMethod( + name="delete_db_cluster_snapshot", + waiter="db_cluster_snapshot_deleted", + operation_description="delete DB cluster snapshot", + resource='cluster_snapshot', + retry_codes=['InvalidDBClusterSnapshotState'] + ) + ), + ), + ( + "create_db_cluster_snapshot", + { + "db_cluster_snapshot_identifier": "test", + }, + *expected( + rds.Boto3ClientMethod( + name="create_db_cluster_snapshot", + waiter="db_cluster_snapshot_available", + operation_description="create DB cluster snapshot", + resource='cluster_snapshot', + retry_codes=['InvalidDBClusterState'] + ) + ), + ), + ( + "copy_db_cluster_snapshot", + { + "source_db_cluster_snapshot_identifier": "test", + "db_cluster_snapshot_identifier": "test-copy" + }, + *expected( + rds.Boto3ClientMethod( + name="copy_db_cluster_snapshot", + waiter="db_cluster_snapshot_available", + operation_description="copy DB cluster snapshot", + resource='cluster_snapshot', + retry_codes=['InvalidDBClusterSnapshotState'] + ) + ), + ), + ( + "list_tags_for_resource", + { + "db_cluster_snapshot_identifier": "test", + }, + *expected( + rds.Boto3ClientMethod( + name="list_tags_for_resource", + waiter="db_cluster_snapshot_available", + operation_description="list tags for resource", + resource='cluster_snapshot', + retry_codes=['InvalidDBClusterSnapshotState'] + ) + ), + ), + ( + "fake_method", + { + "wait": False + }, + *expected( + rds.Boto3ClientMethod( + name="fake_method", + waiter="", + operation_description="fake method", + resource='', + retry_codes=[] + ) + ), + ), + ( + "fake_method", + { + "wait": True + }, + *error( + NotImplementedError, + match="method fake_method hasn't been added to the list of accepted methods to use a waiter in module_utils/rds.py", + ), + ), + ], +) +def test__get_rds_method_attribute_snapshot(method_name, params, expected, error): + module = MagicMock() + module.params = params + with error: + assert rds.get_rds_method_attribute(method_name, module) == expected + + +@pytest.mark.parametrize( + "method_name, params, expected", + [ + ( + "create_db_snapshot", + { + "db_snapshot_identifier": "test" + }, + "test" + ), + ( + "create_db_snapshot", + { + "db_snapshot_identifier": "test", + "apply_immediately": True + }, + "test", + ), + ( + "create_db_instance", + { + "db_instance_identifier": "test", + "new_db_instance_identifier": "test_updated", + }, + "test", + ), + ( + "create_db_snapshot", + { + "db_snapshot_identifier": "test", + "apply_immediately": True + }, + "test", + ), + ( + "create_db_instance", + { + "db_instance_identifier": "test", + "new_db_instance_identifier": "test_updated", + "apply_immediately": True, + }, + "test_updated", + ), + ( + "create_db_cluster", + { + "db_cluster_identifier": "test", + "new_db_cluster_identifier": "test_updated", + }, + "test", + ), + ( + "create_db_snapshot", + { + "db_snapshot_identifier": "test", + "apply_immediately": True + }, + "test", + ), + ( + "create_db_cluster", + { + 
"db_cluster_identifier": "test", + "new_db_cluster_identifier": "test_updated", + "apply_immediately": True, + }, + "test_updated", + ), + ], +) +def test__get_final_identifier(method_name, params, expected): + module = MagicMock() + module.params = params + module.check_mode = False + + assert rds.get_final_identifier(method_name, module) == expected + + +@pytest.mark.parametrize( + "method_name, exception, expected", + [ + ( + "modify_db_instance", + build_exception( + "modify_db_instance", + code="InvalidParameterCombination", + message="No modifications were requested", + ), + False, + ), + ( + "promote_read_replica", + build_exception( + "promote_read_replica", + code="InvalidDBInstanceState", + message="DB Instance is not a read replica", + ), + False, + ), + ( + "promote_read_replica_db_cluster", + build_exception( + "promote_read_replica_db_cluster", + code="InvalidDBClusterStateFault", + message="DB Cluster that is not a read replica", + ), + False, + ), + ], +) +def test__handle_errors(method_name, exception, expected): + assert rds.handle_errors(MagicMock(), exception, method_name, {}) == expected + + +@pytest.mark.parametrize( + "method_name, exception, expected, error", + [ + ( + "modify_db_instance", + build_exception( + "modify_db_instance", + code="InvalidParameterCombination", + message="ModifyDbCluster API", + ), + *expected( + "It appears you are trying to modify attributes that are managed at the cluster level. Please see rds_cluster" + ), + ), + ( + "modify_db_instance", + build_exception("modify_db_instance", code="InvalidParameterCombination"), + *error( + NotImplementedError, + match="method modify_db_instance hasn't been added to the list of accepted methods to use a waiter in module_utils/rds.py", + ), + ), + ( + "promote_read_replica", + build_exception("promote_read_replica", code="InvalidDBInstanceState"), + *error( + NotImplementedError, + match="method promote_read_replica hasn't been added to the list of accepted methods to use a waiter in module_utils/rds.py", + ), + ), + ( + "promote_read_replica_db_cluster", + build_exception( + "promote_read_replica_db_cluster", code="InvalidDBClusterStateFault" + ), + *error( + NotImplementedError, + match="method promote_read_replica_db_cluster hasn't been added to the list of accepted methods to use a waiter in module_utils/rds.py", + ), + ), + ( + "create_db_cluster", + build_exception("create_db_cluster", code="InvalidParameterValue"), + *expected( + "DB engine fake_engine should be one of aurora, aurora-mysql, aurora-postgresql" + ), + ), + ], +) +def test__handle_errors_failed(method_name, exception, expected, error): + module = MagicMock() + + with error: + rds.handle_errors(module, exception, method_name, {"Engine": "fake_engine"}) + module.fail_json_aws.assert_called_once + module.fail_json_aws.call_args[1]["msg"] == expected + + +class TestRdsUtils(): + + # ======================================================== + # Setup some initial data that we can use within our tests + # ======================================================== + def setup_method(self): + self.target_role_list = [ + { + 'role_arn': 'role_won', + 'feature_name': 's3Export' + }, + { + 'role_arn': 'role_too', + 'feature_name': 'Lambda' + }, + { + 'role_arn': 'role_thrie', + 'feature_name': 's3Import' + } + ] + + # ======================================================== + # rds.compare_iam_roles + # ======================================================== + + def test_compare_iam_roles_equal(self): + existing_list = self.target_role_list + 
+        roles_to_add, roles_to_delete = rds.compare_iam_roles(existing_list, self.target_role_list, purge_roles=False)
+        assert [] == roles_to_add
+        assert [] == roles_to_delete
+        roles_to_add, roles_to_delete = rds.compare_iam_roles(existing_list, self.target_role_list, purge_roles=True)
+        assert [] == roles_to_add
+        assert [] == roles_to_delete
+
+    def test_compare_iam_roles_empty_arr_existing(self):
+        roles_to_add, roles_to_delete = rds.compare_iam_roles([], self.target_role_list, purge_roles=False)
+        assert self.target_role_list == roles_to_add
+        assert [] == roles_to_delete
+        roles_to_add, roles_to_delete = rds.compare_iam_roles([], self.target_role_list, purge_roles=True)
+        assert self.target_role_list == roles_to_add
+        assert [] == roles_to_delete
+
+    def test_compare_iam_roles_empty_arr_target(self):
+        existing_list = self.target_role_list
+        roles_to_add, roles_to_delete = rds.compare_iam_roles(existing_list, [], purge_roles=False)
+        assert [] == roles_to_add
+        assert [] == roles_to_delete
+        roles_to_add, roles_to_delete = rds.compare_iam_roles(existing_list, [], purge_roles=True)
+        assert [] == roles_to_add
+        assert self.target_role_list == roles_to_delete
+
+    def test_compare_iam_roles_different(self):
+        existing_list = [
+            {
+                'role_arn': 'role_wonn',
+                'feature_name': 's3Export'
+            }]
+        roles_to_add, roles_to_delete = rds.compare_iam_roles(existing_list, self.target_role_list, purge_roles=False)
+        assert self.target_role_list == roles_to_add
+        assert [] == roles_to_delete
+        roles_to_add, roles_to_delete = rds.compare_iam_roles(existing_list, self.target_role_list, purge_roles=True)
+        assert self.target_role_list == roles_to_add
+        assert existing_list == roles_to_delete
+
+        existing_list = self.target_role_list.copy()
+        self.target_role_list = [
+            {
+                'role_arn': 'role_wonn',
+                'feature_name': 's3Export'
+            }]
+        roles_to_add, roles_to_delete = rds.compare_iam_roles(existing_list, self.target_role_list, purge_roles=False)
+        assert self.target_role_list == roles_to_add
+        assert [] == roles_to_delete
+        roles_to_add, roles_to_delete = rds.compare_iam_roles(existing_list, self.target_role_list, purge_roles=True)
+        assert self.target_role_list == roles_to_add
+        assert existing_list == roles_to_delete
diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/test_s3.py b/ansible_collections/amazon/aws/tests/unit/module_utils/test_s3.py
new file mode 100644
index 000000000..42c8ecfd0
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/module_utils/test_s3.py
@@ -0,0 +1,86 @@
+#
+# (c) 2021 Red Hat Inc.
+#
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+from ansible_collections.amazon.aws.tests.unit.compat.mock import MagicMock
+from ansible_collections.amazon.aws.plugins.module_utils import s3
+from ansible.module_utils.basic import AnsibleModule
+
+import pytest
+
+
+class FakeAnsibleModule(AnsibleModule):
+    def __init__(self):
+        pass
+
+
+def test_calculate_etag_single_part(tmp_path_factory):
+    module = FakeAnsibleModule()
+    my_image = tmp_path_factory.mktemp("data") / "my.txt"
+    my_image.write_text("Hello World!")
+
+    etag = s3.calculate_etag(
+        module, str(my_image), etag="", s3=None, bucket=None, obj=None
+    )
+    assert etag == '"ed076287532e86365e841e92bfc50d8c"'
+
+
+def test_calculate_etag_multi_part(tmp_path_factory):
+    module = FakeAnsibleModule()
+    my_image = tmp_path_factory.mktemp("data") / "my.txt"
+    my_image.write_text("Hello World!" * 1000)
+
+    mocked_s3 = MagicMock()
+    mocked_s3.head_object.side_effect = [{"ContentLength": "1000"} for _i in range(12)]
+
+    etag = s3.calculate_etag(
+        module,
+        str(my_image),
+        etag='"f20e84ac3d0c33cea77b3f29e3323a09-12"',
+        s3=mocked_s3,
+        bucket="my-bucket",
+        obj="my-obj",
+    )
+    assert etag == '"f20e84ac3d0c33cea77b3f29e3323a09-12"'
+    mocked_s3.head_object.assert_called_with(
+        Bucket="my-bucket", Key="my-obj", PartNumber=12
+    )
+
+
+def test_validate_bucket_name():
+    module = MagicMock()
+
+    assert s3.validate_bucket_name(module, "docexamplebucket1") is True
+    assert not module.fail_json.called
+    assert s3.validate_bucket_name(module, "log-delivery-march-2020") is True
+    assert not module.fail_json.called
+    assert s3.validate_bucket_name(module, "my-hosted-content") is True
+    assert not module.fail_json.called
+
+    assert s3.validate_bucket_name(module, "docexamplewebsite.com") is True
+    assert not module.fail_json.called
+    assert s3.validate_bucket_name(module, "www.docexamplewebsite.com") is True
+    assert not module.fail_json.called
+    assert s3.validate_bucket_name(module, "my.example.s3.bucket") is True
+    assert not module.fail_json.called
+    assert s3.validate_bucket_name(module, "doc") is True
+    assert not module.fail_json.called
+
+    module.fail_json.reset_mock()
+    s3.validate_bucket_name(module, "doc_example_bucket")
+    assert module.fail_json.called
+
+    module.fail_json.reset_mock()
+    s3.validate_bucket_name(module, "DocExampleBucket")
+    assert module.fail_json.called
+
+    module.fail_json.reset_mock()
+    s3.validate_bucket_name(module, "doc-example-bucket-")
+    assert module.fail_json.called
+
+    module.fail_json.reset_mock()
+    s3.validate_bucket_name(module, "my")
+    assert module.fail_json.called
diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/test_tagging.py b/ansible_collections/amazon/aws/tests/unit/module_utils/test_tagging.py
new file mode 100644
index 000000000..04ec96eb0
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/module_utils/test_tagging.py
@@ -0,0 +1,203 @@
+# (c) 2017 Red Hat Inc.
+# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + + +class TestTagging(): + + # ======================================================== + # Setup some initial data that we can use within our tests + # ======================================================== + def setup_method(self): + + self.tag_example_boto3_list = [ + {'Key': 'lowerCamel', 'Value': 'lowerCamelValue'}, + {'Key': 'UpperCamel', 'Value': 'upperCamelValue'}, + {'Key': 'Normal case', 'Value': 'Normal Value'}, + {'Key': 'lower case', 'Value': 'lower case value'} + ] + + self.tag_example_dict = { + 'lowerCamel': 'lowerCamelValue', + 'UpperCamel': 'upperCamelValue', + 'Normal case': 'Normal Value', + 'lower case': 'lower case value' + } + + self.tag_minimal_boto3_list = [ + {'Key': 'mykey', 'Value': 'myvalue'}, + ] + + self.tag_minimal_dict = {'mykey': 'myvalue'} + + self.tag_aws_dict = {'aws:cloudformation:stack-name': 'ExampleStack'} + self.tag_aws_changed = {'aws:cloudformation:stack-name': 'AnotherStack'} + + # ======================================================== + # tagging.ansible_dict_to_boto3_tag_list + # ======================================================== + + def test_ansible_dict_to_boto3_tag_list(self): + converted_list = ansible_dict_to_boto3_tag_list(self.tag_example_dict) + sorted_converted_list = sorted(converted_list, key=lambda i: (i['Key'])) + sorted_list = sorted(self.tag_example_boto3_list, key=lambda i: (i['Key'])) + assert sorted_converted_list == sorted_list + + # ======================================================== + # tagging.boto3_tag_list_to_ansible_dict + # ======================================================== + + def test_boto3_tag_list_to_ansible_dict(self): + converted_dict = boto3_tag_list_to_ansible_dict(self.tag_example_boto3_list) + assert converted_dict == self.tag_example_dict + + def test_boto3_tag_list_to_ansible_dict_empty(self): + # AWS returns [] when there are no tags + assert boto3_tag_list_to_ansible_dict([]) == {} + # Minio returns [{}] when there are no tags + assert boto3_tag_list_to_ansible_dict([{}]) == {} + + # ======================================================== + # tagging.compare_aws_tags + # ======================================================== + + def test_compare_aws_tags_equal(self): + new_dict = dict(self.tag_example_dict) + keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict) + assert {} == keys_to_set + assert [] == keys_to_unset + keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict, purge_tags=False) + assert {} == keys_to_set + assert [] == keys_to_unset + keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict, purge_tags=True) + assert {} == keys_to_set + assert [] == keys_to_unset + + def test_compare_aws_tags_removed(self): + new_dict = dict(self.tag_example_dict) + del new_dict['lowerCamel'] + del new_dict['Normal case'] + keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict) + 
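+        # As the assertions below show, removed keys are only scheduled for
+        # unsetting when purging is allowed; the default call behaves like
+        # purge_tags=True, while purge_tags=False leaves absent keys alone.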
assert {} == keys_to_set + assert set(['lowerCamel', 'Normal case']) == set(keys_to_unset) + keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict, purge_tags=False) + assert {} == keys_to_set + assert [] == keys_to_unset + keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict, purge_tags=True) + assert {} == keys_to_set + assert set(['lowerCamel', 'Normal case']) == set(keys_to_unset) + + def test_compare_aws_tags_added(self): + new_dict = dict(self.tag_example_dict) + new_keys = {'add_me': 'lower case', 'Me too!': 'Contributing'} + new_dict.update(new_keys) + keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict) + assert new_keys == keys_to_set + assert [] == keys_to_unset + keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict, purge_tags=False) + assert new_keys == keys_to_set + assert [] == keys_to_unset + keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict, purge_tags=True) + assert new_keys == keys_to_set + assert [] == keys_to_unset + + def test_compare_aws_tags_changed(self): + new_dict = dict(self.tag_example_dict) + new_keys = {'UpperCamel': 'anotherCamelValue', 'Normal case': 'normal value'} + new_dict.update(new_keys) + keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict) + assert new_keys == keys_to_set + assert [] == keys_to_unset + keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict, purge_tags=False) + assert new_keys == keys_to_set + assert [] == keys_to_unset + keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict, purge_tags=True) + assert new_keys == keys_to_set + assert [] == keys_to_unset + + def test_compare_aws_tags_complex_update(self): + # Adds 'Me too!', Changes 'UpperCamel' and removes 'Normal case' + new_dict = dict(self.tag_example_dict) + new_keys = {'UpperCamel': 'anotherCamelValue', 'Me too!': 'Contributing'} + new_dict.update(new_keys) + del new_dict['Normal case'] + keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict) + assert new_keys == keys_to_set + assert ['Normal case'] == keys_to_unset + keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict, purge_tags=False) + assert new_keys == keys_to_set + assert [] == keys_to_unset + keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict, purge_tags=True) + assert new_keys == keys_to_set + assert ['Normal case'] == keys_to_unset + + def test_compare_aws_tags_aws(self): + starting_tags = dict(self.tag_aws_dict) + desired_tags = dict(self.tag_minimal_dict) + tags_to_set, tags_to_unset = compare_aws_tags(starting_tags, desired_tags, purge_tags=True) + assert desired_tags == tags_to_set + assert [] == tags_to_unset + # If someone explicitly passes a changed 'aws:' key the APIs will probably + # throw an error, but this is their responsibility. 
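+        # (The reserved 'aws:' namespace can normally only be written by AWS
+        # services themselves, which is presumably why the key above was never
+        # scheduled for removal even with purge_tags=True.)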
+        desired_tags.update(self.tag_aws_changed)
+        tags_to_set, tags_to_unset = compare_aws_tags(starting_tags, desired_tags, purge_tags=True)
+        assert desired_tags == tags_to_set
+        assert [] == tags_to_unset
+
+    def test_compare_aws_tags_aws_complex(self):
+        old_dict = dict(self.tag_example_dict)
+        old_dict.update(self.tag_aws_dict)
+        # Adds 'Me too!', Changes 'UpperCamel' and removes 'Normal case'
+        new_dict = dict(self.tag_example_dict)
+        new_keys = {'UpperCamel': 'anotherCamelValue', 'Me too!': 'Contributing'}
+        new_dict.update(new_keys)
+        del new_dict['Normal case']
+        keys_to_set, keys_to_unset = compare_aws_tags(old_dict, new_dict)
+        assert new_keys == keys_to_set
+        assert ['Normal case'] == keys_to_unset
+        keys_to_set, keys_to_unset = compare_aws_tags(old_dict, new_dict, purge_tags=False)
+        assert new_keys == keys_to_set
+        assert [] == keys_to_unset
+        keys_to_set, keys_to_unset = compare_aws_tags(old_dict, new_dict, purge_tags=True)
+        assert new_keys == keys_to_set
+        assert ['Normal case'] == keys_to_unset
+
+    # ========================================================
+    #   tagging.boto3_tag_specifications
+    # ========================================================
+
+    # Builds upon ansible_dict_to_boto3_tag_list: if a minimal tag dictionary
+    # behaves as expected, larger dictionaries will too.
+    def test_boto3_tag_specifications_no_type(self):
+        tag_specification = boto3_tag_specifications(self.tag_minimal_dict)
+        expected_specification = [{'Tags': self.tag_minimal_boto3_list}]
+        assert tag_specification == expected_specification
+
+    def test_boto3_tag_specifications_string_type(self):
+        tag_specification = boto3_tag_specifications(self.tag_minimal_dict, 'instance')
+        expected_specification = [{'ResourceType': 'instance', 'Tags': self.tag_minimal_boto3_list}]
+        assert tag_specification == expected_specification
+
+    def test_boto3_tag_specifications_single_type(self):
+        tag_specification = boto3_tag_specifications(self.tag_minimal_dict, ['instance'])
+        expected_specification = [{'ResourceType': 'instance', 'Tags': self.tag_minimal_boto3_list}]
+        assert tag_specification == expected_specification
+
+    def test_boto3_tag_specifications_multiple_types(self):
+        tag_specification = boto3_tag_specifications(self.tag_minimal_dict, ['instance', 'volume'])
+        expected_specification = [
+            {'ResourceType': 'instance', 'Tags': self.tag_minimal_boto3_list},
+            {'ResourceType': 'volume', 'Tags': self.tag_minimal_boto3_list},
+        ]
+        sorted_tag_spec = sorted(tag_specification, key=lambda i: (i['ResourceType']))
+        sorted_expected = sorted(expected_specification, key=lambda i: (i['ResourceType']))
+        assert sorted_tag_spec == sorted_expected
diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/test_tower.py b/ansible_collections/amazon/aws/tests/unit/module_utils/test_tower.py
new file mode 100644
index 000000000..9e1d90213
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/module_utils/test_tower.py
@@ -0,0 +1,40 @@
+# (c) 2022 Red Hat Inc.
+# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +# import pytest + +import ansible_collections.amazon.aws.plugins.module_utils.tower as utils_tower + +WINDOWS_DOWNLOAD = "Invoke-Expression ((New-Object System.Net.Webclient).DownloadString(" \ + "'https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1'))" +EXAMPLE_PASSWORD = 'MY_EXAMPLE_PASSWORD' +WINDOWS_INVOKE = "$admin.PSBase.Invoke('SetPassword', 'MY_EXAMPLE_PASSWORD'" + +EXAMPLE_TOWER = "tower.example.com" +EXAMPLE_TEMPLATE = 'My Template' +EXAMPLE_KEY = '123EXAMPLE123' +LINUX_TRIGGER_V1 = 'https://tower.example.com/api/v1/job_templates/My%20Template/callback/' +LINUX_TRIGGER_V2 = 'https://tower.example.com/api/v2/job_templates/My%20Template/callback/' + + +def test_windows_callback_no_password(): + user_data = utils_tower._windows_callback_script() + assert WINDOWS_DOWNLOAD in user_data + assert 'SetPassword' not in user_data + + +def test_windows_callback_password(): + user_data = utils_tower._windows_callback_script(EXAMPLE_PASSWORD) + assert WINDOWS_DOWNLOAD in user_data + assert WINDOWS_INVOKE in user_data + + +def test_linux_callback_with_name(): + user_data = utils_tower._linux_callback_script(EXAMPLE_TOWER, EXAMPLE_TEMPLATE, EXAMPLE_KEY) + assert LINUX_TRIGGER_V1 in user_data + assert LINUX_TRIGGER_V2 in user_data diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/transformation/test_ansible_dict_to_boto3_filter_list.py b/ansible_collections/amazon/aws/tests/unit/module_utils/transformation/test_ansible_dict_to_boto3_filter_list.py new file mode 100644 index 000000000..23c82b173 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/transformation/test_ansible_dict_to_boto3_filter_list.py @@ -0,0 +1,73 @@ +# (c) 2017 Red Hat Inc. 
+# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list + + +class TestAnsibleDictToBoto3FilterList(): + + # ======================================================== + # ec2.ansible_dict_to_boto3_filter_list + # ======================================================== + + def test_ansible_dict_with_string_to_boto3_filter_list(self): + filters = {'some-aws-id': 'i-01234567'} + filter_list_string = [ + { + 'Name': 'some-aws-id', + 'Values': [ + 'i-01234567', + ] + } + ] + + converted_filters_list = ansible_dict_to_boto3_filter_list(filters) + assert converted_filters_list == filter_list_string + + def test_ansible_dict_with_boolean_to_boto3_filter_list(self): + filters = {'enabled': True} + filter_list_boolean = [ + { + 'Name': 'enabled', + 'Values': [ + 'true', + ] + } + ] + + converted_filters_bool = ansible_dict_to_boto3_filter_list(filters) + assert converted_filters_bool == filter_list_boolean + + def test_ansible_dict_with_integer_to_boto3_filter_list(self): + filters = {'version': 1} + filter_list_integer = [ + { + 'Name': 'version', + 'Values': [ + '1', + ] + } + ] + + converted_filters_int = ansible_dict_to_boto3_filter_list(filters) + assert converted_filters_int == filter_list_integer + + def test_ansible_dict_with_list_to_boto3_filter_list(self): + filters = {'version': ['1', '2', '3']} + filter_list_integer = [ + { + 'Name': 'version', + 'Values': [ + '1', '2', '3' + ] + } + ] + + converted_filters_int = ansible_dict_to_boto3_filter_list(filters) + assert converted_filters_int == filter_list_integer diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/transformation/test_map_complex_type.py b/ansible_collections/amazon/aws/tests/unit/module_utils/transformation/test_map_complex_type.py new file mode 100644 index 000000000..2300e2351 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/transformation/test_map_complex_type.py @@ -0,0 +1,100 @@ +# (c) 2017 Red Hat Inc. 
+# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible_collections.amazon.aws.plugins.module_utils.transformation import map_complex_type + +from ansible_collections.amazon.aws.tests.unit.compat.mock import sentinel + + +def test_map_complex_type_over_dict(): + type_map = {'minimum_healthy_percent': 'int', 'maximum_percent': 'int'} + complex_type_dict = {'minimum_healthy_percent': "75", 'maximum_percent': "150"} + complex_type_expected = {'minimum_healthy_percent': 75, 'maximum_percent': 150} + + complex_type_mapped = map_complex_type(complex_type_dict, type_map) + + assert complex_type_mapped == complex_type_expected + + +def test_map_complex_type_empty(): + type_map = {'minimum_healthy_percent': 'int', 'maximum_percent': 'int'} + assert map_complex_type({}, type_map) == {} + assert map_complex_type([], type_map) == [] + assert map_complex_type(None, type_map) is None + + +def test_map_complex_type_no_type(): + type_map = {'some_entry': 'int'} + complex_dict = {'another_entry': sentinel.UNSPECIFIED_MAPPING} + mapped_dict = map_complex_type(complex_dict, type_map) + assert mapped_dict == complex_dict + # we should have the original sentinel object, even if it's a new dictionary + assert mapped_dict['another_entry'] is sentinel.UNSPECIFIED_MAPPING + + +def test_map_complex_type_list(): + type_map = {'some_entry': 'int'} + complex_dict = {'some_entry': ["1", "2", "3"]} + expected_dict = {'some_entry': [1, 2, 3]} + mapped_dict = map_complex_type(complex_dict, type_map) + assert mapped_dict == expected_dict + + +def test_map_complex_type_list_type(): + type_map = {'some_entry': ['int']} + complex_dict = {'some_entry': ["1", "2", "3"]} + expected_dict = {'some_entry': [1, 2, 3]} + mapped_dict = map_complex_type(complex_dict, type_map) + assert mapped_dict == expected_dict + + type_map = {'some_entry': ['int']} + complex_dict = {'some_entry': "1"} + expected_dict = {'some_entry': 1} + mapped_dict = map_complex_type(complex_dict, type_map) + assert mapped_dict == expected_dict + + +def test_map_complex_type_complex(): + type_map = { + 'my_integer': 'int', + 'my_bool': 'bool', + 'my_string': 'str', + 'my_typelist_of_int': ['int'], + 'my_maplist_of_int': 'int', + 'my_unused': 'bool', + } + complex_dict = { + 'my_integer': '-24', + 'my_bool': 'true', + 'my_string': 43, + 'my_typelist_of_int': '5', + 'my_maplist_of_int': ['-26', '47'], + 'my_unconverted': sentinel.UNSPECIFIED_MAPPING, + } + expected_dict = { + 'my_integer': -24, + 'my_bool': True, + 'my_string': '43', + 'my_typelist_of_int': 5, + 'my_maplist_of_int': [-26, 47], + 'my_unconverted': sentinel.UNSPECIFIED_MAPPING, + } + + mapped_dict = map_complex_type(complex_dict, type_map) + + assert mapped_dict == expected_dict + assert mapped_dict['my_unconverted'] is sentinel.UNSPECIFIED_MAPPING + assert mapped_dict['my_bool'] is True + + +def test_map_complex_type_nested_list(): + type_map = {'my_integer': 'int'} + complex_dict = [{'my_integer': '5'}, {'my_integer': '-24'}] + expected_dict = [{'my_integer': 5}, {'my_integer': -24}] + mapped_dict = map_complex_type(complex_dict, type_map) + assert mapped_dict == expected_dict diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/transformation/test_scrub_none_parameters.py b/ansible_collections/amazon/aws/tests/unit/module_utils/transformation/test_scrub_none_parameters.py new file mode 100644 index 
000000000..82fd41ed3 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/transformation/test_scrub_none_parameters.py @@ -0,0 +1,88 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters + +scrub_none_test_data = [ + (dict(), # Input + dict(), # Output with descend_into_lists=False + dict(), # Output with descend_into_lists=True + ), + (dict(param1=None, param2=None), + dict(), + dict(), + ), + (dict(param1='something'), + dict(param1='something'), + dict(param1='something'), + ), + (dict(param1=False), + dict(param1=False), + dict(param1=False), + ), + (dict(param1=None, param2=[]), + dict(param2=[]), + dict(param2=[]), + ), + (dict(param1=None, param2=["list_value"]), + dict(param2=["list_value"]), + dict(param2=["list_value"]), + ), + (dict(param1='something', param2='something_else'), + dict(param1='something', param2='something_else'), + dict(param1='something', param2='something_else'), + ), + (dict(param1='something', param2=dict()), + dict(param1='something', param2=dict()), + dict(param1='something', param2=dict()), + ), + (dict(param1='something', param2=None), + dict(param1='something'), + dict(param1='something'), + ), + (dict(param1='something', param2=None, param3=None), + dict(param1='something'), + dict(param1='something'), + ), + (dict(param1='something', param2=None, param3=None, param4='something_else'), + dict(param1='something', param4='something_else'), + dict(param1='something', param4='something_else'), + ), + (dict(param1=dict(sub_param1='something', sub_param2=dict(sub_sub_param1='another_thing')), param2=None, param3=None, param4='something_else'), + dict(param1=dict(sub_param1='something', sub_param2=dict(sub_sub_param1='another_thing')), param4='something_else'), + dict(param1=dict(sub_param1='something', sub_param2=dict(sub_sub_param1='another_thing')), param4='something_else'), + ), + (dict(param1=dict(sub_param1='something', sub_param2=dict()), param2=None, param3=None, param4='something_else'), + dict(param1=dict(sub_param1='something', sub_param2=dict()), param4='something_else'), + dict(param1=dict(sub_param1='something', sub_param2=dict()), param4='something_else'), + ), + (dict(param1=dict(sub_param1='something', sub_param2=False), param2=None, param3=None, param4='something_else'), + dict(param1=dict(sub_param1='something', sub_param2=False), param4='something_else'), + dict(param1=dict(sub_param1='something', sub_param2=False), param4='something_else'), + ), + (dict(param1=[dict(sub_param1='my_dict_nested_in_a_list_1', sub_param2='my_dict_nested_in_a_list_2')], param2=[]), + dict(param1=[dict(sub_param1='my_dict_nested_in_a_list_1', sub_param2='my_dict_nested_in_a_list_2')], param2=[]), + dict(param1=[dict(sub_param1='my_dict_nested_in_a_list_1', sub_param2='my_dict_nested_in_a_list_2')], param2=[]), + ), + (dict(param1=[dict(sub_param1='my_dict_nested_in_a_list_1', sub_param2=None)], param2=[]), + dict(param1=[dict(sub_param1='my_dict_nested_in_a_list_1', sub_param2=None)], param2=[]), + dict(param1=[dict(sub_param1='my_dict_nested_in_a_list_1')], param2=[]), + ), + (dict(param1=[dict(sub_param1=[dict(sub_sub_param1=None)], sub_param2=None)], param2=[]), + dict(param1=[dict(sub_param1=[dict(sub_sub_param1=None)], sub_param2=None)], param2=[]), + dict(param1=[dict(sub_param1=[dict()])], param2=[]), + ), + (dict(param1=[dict(sub_param1=[dict(sub_sub_param1=None)], 
sub_param2=None)], param2=None),
+     dict(param1=[dict(sub_param1=[dict(sub_sub_param1=None)], sub_param2=None)]),
+     dict(param1=[dict(sub_param1=[dict()])]),
+     ),
+]
+
+
+@pytest.mark.parametrize("input_params, output_params_no_descend, output_params_descend", scrub_none_test_data)
+def test_scrub_none_parameters(input_params, output_params_no_descend, output_params_descend):
+    assert scrub_none_parameters(input_params) == output_params_descend
+    assert scrub_none_parameters(input_params, descend_into_lists=False) == output_params_no_descend
+    assert scrub_none_parameters(input_params, descend_into_lists=True) == output_params_descend
diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/inventory/test_aws_ec2.py b/ansible_collections/amazon/aws/tests/unit/plugins/inventory/test_aws_ec2.py
new file mode 100644
index 000000000..5386fe6c7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/plugins/inventory/test_aws_ec2.py
@@ -0,0 +1,514 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2017 Sloane Hertel
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+import datetime
+from unittest.mock import Mock, MagicMock
+
+from ansible.errors import AnsibleError
+from ansible.parsing.dataloader import DataLoader
+from ansible_collections.amazon.aws.plugins.inventory.aws_ec2 import InventoryModule, instance_data_filter_to_boto_attr
+
+
+instances = {
+    'Instances': [
+        {'Monitoring': {'State': 'disabled'},
+         'PublicDnsName': 'ec2-12-345-67-890.compute-1.amazonaws.com',
+         'State': {'Code': 16, 'Name': 'running'},
+         'EbsOptimized': False,
+         'LaunchTime': datetime.datetime(2017, 10, 31, 12, 59, 25),
+         'PublicIpAddress': '12.345.67.890',
+         'PrivateIpAddress': '098.76.54.321',
+         'ProductCodes': [],
+         'VpcId': 'vpc-12345678',
+         'StateTransitionReason': '',
+         'InstanceId': 'i-00000000000000000',
+         'EnaSupport': True,
+         'ImageId': 'ami-12345678',
+         'PrivateDnsName': 'ip-098-76-54-321.ec2.internal',
+         'KeyName': 'testkey',
+         'SecurityGroups': [{'GroupName': 'default', 'GroupId': 'sg-12345678'}],
+         'ClientToken': '',
+         'SubnetId': 'subnet-12345678',
+         'InstanceType': 't2.micro',
+         'NetworkInterfaces': [
+             {'Status': 'in-use',
+              'MacAddress': '12:a0:50:42:3d:a4',
+              'SourceDestCheck': True,
+              'VpcId': 'vpc-12345678',
+              'Description': '',
+              'NetworkInterfaceId': 'eni-12345678',
+              'PrivateIpAddresses': [
+                  {'PrivateDnsName': 'ip-098-76-54-321.ec2.internal',
+                   'PrivateIpAddress': '098.76.54.321',
+                   'Primary': True,
+                   'Association':
+                       {'PublicIp': '12.345.67.890',
+                        'PublicDnsName': 'ec2-12-345-67-890.compute-1.amazonaws.com',
+                        'IpOwnerId': 'amazon'}}],
+              'PrivateDnsName': 'ip-098-76-54-321.ec2.internal',
+              'Attachment':
+                  {'Status': 'attached',
+                   'DeviceIndex': 0,
+                   'DeleteOnTermination': True,
+                   'AttachmentId': 'eni-attach-12345678',
+                   'AttachTime': datetime.datetime(2017, 10, 31, 12, 59, 25)},
+              'Groups': [
+                  {'GroupName': 'default',
+                   'GroupId': 'sg-12345678'}],
+              'Ipv6Addresses': [],
+              'OwnerId': '123456789012',
+              'PrivateIpAddress': '098.76.54.321',
+              'SubnetId': 'subnet-12345678',
+              'Association':
+                  {'PublicIp': '12.345.67.890',
+                   'PublicDnsName': 'ec2-12-345-67-890.compute-1.amazonaws.com',
+                   'IpOwnerId': 'amazon'}}],
+         'SourceDestCheck': True,
+         'Placement':
+             {'Tenancy': 'default',
+              'GroupName': '',
+              'AvailabilityZone': 'us-east-1c'},
+         'Hypervisor': 'xen',
+         'BlockDeviceMappings': [
+             {'DeviceName': '/dev/xvda',
+              'Ebs':
+                  {'Status': 'attached',
+                   'DeleteOnTermination': True,
+                   'VolumeId': 'vol-01234567890000000',
+                   'AttachTime': datetime.datetime(2017, 10, 31, 12, 59, 26)}}],
+         'Architecture': 'x86_64',
+         'RootDeviceType': 'ebs',
+         'RootDeviceName': '/dev/xvda',
+         'VirtualizationType': 'hvm',
+         'Tags': [{'Value': 'test', 'Key': 'ansible'}, {'Value': 'aws_ec2', 'Key': 'Name'}],
+         'AmiLaunchIndex': 0}],
+    'ReservationId': 'r-01234567890000000',
+    'Groups': [],
+    'OwnerId': '123456789012'
+}
+
+
+@pytest.fixture()
+def inventory():
+    inventory = InventoryModule()
+    inventory._options = {
+        "aws_profile": "first_precedence",
+        "aws_access_key": "test_access_key",
+        "aws_secret_key": "test_secret_key",
+        "aws_security_token": "test_security_token",
+        "iam_role_arn": None,
+        "use_contrib_script_compatible_ec2_tag_keys": False,
+        "hostvars_prefix": "",
+        "hostvars_suffix": "",
+        "strict": True,
+        "compose": {},
+        "groups": {},
+        "keyed_groups": [],
+        "regions": ["us-east-1"],
+        "filters": [],
+        "include_filters": [],
+        "exclude_filters": [],
+        "hostnames": [],
+        "strict_permissions": False,
+        "allow_duplicated_hosts": False,
+        "cache": False,
+        "include_extra_api_calls": False,
+        "use_contrib_script_compatible_sanitization": False,
+    }
+    inventory.inventory = MagicMock()
+    return inventory
+
+
+def test_compile_values(inventory):
+    found_value = instances['Instances'][0]
+    chain_of_keys = instance_data_filter_to_boto_attr['instance.group-id']
+    for attr in chain_of_keys:
+        found_value = inventory._compile_values(found_value, attr)
+    assert found_value == "sg-12345678"
+
+
+def test_get_boto_attr_chain(inventory):
+    instance = instances['Instances'][0]
+    assert inventory._get_boto_attr_chain('network-interface.addresses.private-ip-address', instance) == "098.76.54.321"
+
+
+def test_boto3_conn(inventory):
+    inventory._options = {"aws_profile": "first_precedence",
+                          "aws_access_key": "test_access_key",
+                          "aws_secret_key": "test_secret_key",
+                          "aws_security_token": "test_security_token",
+                          "iam_role_arn": None}
+    loader = DataLoader()
+    inventory._set_credentials(loader)
+    with pytest.raises(AnsibleError) as error_message:
+        for _connection, _region in inventory._boto3_conn(regions=['us-east-1']):
+            pass
+    assert "Insufficient credentials found" in str(error_message.value)
+
+
+def test_get_all_hostnames_default(inventory):
+    instance = instances['Instances'][0]
+    assert inventory.get_all_hostnames(instance, hostnames=None) == ["ec2-12-345-67-890.compute-1.amazonaws.com", "ip-098-76-54-321.ec2.internal"]
+
+
+def test_get_all_hostnames(inventory):
+    hostnames = ['ip-address', 'dns-name']
+    instance = instances['Instances'][0]
+    assert inventory.get_all_hostnames(instance, hostnames) == ["12.345.67.890", "ec2-12-345-67-890.compute-1.amazonaws.com"]
+
+
+def test_get_all_hostnames_dict(inventory):
+    hostnames = [{'name': 'private-ip-address', 'separator': '_', 'prefix': 'tag:Name'}]
+    instance = instances['Instances'][0]
+    assert inventory.get_all_hostnames(instance, hostnames) == ["aws_ec2_098.76.54.321"]
["aws_ec2_098.76.54.321"] + + +def testget_all_hostnames_with_2_tags(inventory): + hostnames = ['tag:ansible', 'tag:Name'] + instance = instances['Instances'][0] + assert inventory.get_all_hostnames(instance, hostnames) == ["test", "aws_ec2"] + + +def test_get_preferred_hostname_default(inventory): + instance = instances['Instances'][0] + assert inventory._get_preferred_hostname(instance, hostnames=None) == "ec2-12-345-67-890.compute-1.amazonaws.com" + + +def test_get_preferred_hostname(inventory): + hostnames = ['ip-address', 'dns-name'] + instance = instances['Instances'][0] + assert inventory._get_preferred_hostname(instance, hostnames) == "12.345.67.890" + + +def test_get_preferred_hostname_dict(inventory): + hostnames = [{'name': 'private-ip-address', 'separator': '_', 'prefix': 'tag:Name'}] + instance = instances['Instances'][0] + assert inventory._get_preferred_hostname(instance, hostnames) == "aws_ec2_098.76.54.321" + + +def test_get_preferred_hostname_with_2_tags(inventory): + hostnames = ['tag:ansible', 'tag:Name'] + instance = instances['Instances'][0] + assert inventory._get_preferred_hostname(instance, hostnames) == "test" + + +def test_set_credentials(inventory): + inventory._options = {'aws_access_key': 'test_access_key', + 'aws_secret_key': 'test_secret_key', + 'aws_security_token': 'test_security_token', + 'aws_profile': 'test_profile', + 'iam_role_arn': 'arn:aws:iam::123456789012:role/test-role'} + loader = DataLoader() + inventory._set_credentials(loader) + + assert inventory.boto_profile == "test_profile" + assert inventory.aws_access_key_id == "test_access_key" + assert inventory.aws_secret_access_key == "test_secret_key" + assert inventory.aws_security_token == "test_security_token" + assert inventory.iam_role_arn == "arn:aws:iam::123456789012:role/test-role" + + +def test_insufficient_credentials(inventory): + inventory._options = { + 'aws_access_key': None, + 'aws_secret_key': None, + 'aws_security_token': None, + 'aws_profile': None, + 'iam_role_arn': None + } + with pytest.raises(AnsibleError) as error_message: + loader = DataLoader() + inventory._set_credentials(loader) + assert "Insufficient credentials found" in error_message + + +def test_verify_file_bad_config(inventory): + assert inventory.verify_file('not_aws_config.yml') is False + + +def test_include_filters_with_no_filter(inventory): + inventory._options = { + 'filters': {}, + 'include_filters': [], + } + print(inventory.build_include_filters()) + assert inventory.build_include_filters() == [{}] + + +def test_include_filters_with_include_filters_only(inventory): + inventory._options = { + 'filters': {}, + 'include_filters': [{"foo": "bar"}], + } + assert inventory.build_include_filters() == [{"foo": "bar"}] + + +def test_include_filters_with_filter_and_include_filters(inventory): + inventory._options = { + 'filters': {"from_filter": 1}, + 'include_filters': [{"from_include_filter": "bar"}], + } + print(inventory.build_include_filters()) + assert inventory.build_include_filters() == [ + {"from_filter": 1}, + {"from_include_filter": "bar"}] + + +def test_add_host_empty_hostnames(inventory): + hosts = [ + { + "Placement": { + "AvailabilityZone": "us-east-1a", + }, + "PublicDnsName": "ip-10-85-0-4.ec2.internal" + }, + ] + inventory._add_hosts(hosts, "aws_ec2", []) + inventory.inventory.add_host.assert_called_with("ip-10-85-0-4.ec2.internal", group="aws_ec2") + + +def test_add_host_with_hostnames_no_criteria(inventory): + hosts = [{}] + + inventory._add_hosts( + hosts, "aws_ec2", hostnames=["tag:Name", 
"private-dns-name", "dns-name"] + ) + assert inventory.inventory.add_host.call_count == 0 + + +def test_add_host_with_hostnames_and_one_criteria(inventory): + hosts = [ + { + "Placement": { + "AvailabilityZone": "us-east-1a", + }, + "PublicDnsName": "sample-host", + } + ] + + inventory._add_hosts( + hosts, "aws_ec2", hostnames=["tag:Name", "private-dns-name", "dns-name"] + ) + assert inventory.inventory.add_host.call_count == 1 + inventory.inventory.add_host.assert_called_with("sample-host", group="aws_ec2") + + +def test_add_host_with_hostnames_and_two_matching_criteria(inventory): + hosts = [ + { + "Placement": { + "AvailabilityZone": "us-east-1a", + }, + "PublicDnsName": "name-from-PublicDnsName", + "Tags": [{"Value": "name-from-tag-Name", "Key": "Name"}], + } + ] + + inventory._add_hosts( + hosts, "aws_ec2", hostnames=["tag:Name", "private-dns-name", "dns-name"] + ) + assert inventory.inventory.add_host.call_count == 1 + inventory.inventory.add_host.assert_called_with( + "name-from-tag-Name", group="aws_ec2" + ) + + +def test_add_host_with_hostnames_and_two_matching_criteria_and_allow_duplicated_hosts( + inventory, +): + hosts = [ + { + "Placement": { + "AvailabilityZone": "us-east-1a", + }, + "PublicDnsName": "name-from-PublicDnsName", + "Tags": [{"Value": "name-from-tag-Name", "Key": "Name"}], + } + ] + + inventory._add_hosts( + hosts, + "aws_ec2", + hostnames=["tag:Name", "private-dns-name", "dns-name"], + allow_duplicated_hosts=True, + ) + assert inventory.inventory.add_host.call_count == 2 + inventory.inventory.add_host.assert_any_call( + "name-from-PublicDnsName", group="aws_ec2" + ) + inventory.inventory.add_host.assert_any_call("name-from-tag-Name", group="aws_ec2") + + +def test_sanitize_hostname(inventory): + assert inventory._sanitize_hostname(1) == "1" + assert inventory._sanitize_hostname("a:b") == "a_b" + assert inventory._sanitize_hostname("a:/b") == "a__b" + assert inventory._sanitize_hostname("example") == "example" + + +def test_sanitize_hostname_legacy(inventory): + inventory._sanitize_group_name = ( + inventory._legacy_script_compatible_group_sanitization + ) + assert inventory._sanitize_hostname("a:/b") == "a__b" + + +@pytest.mark.parametrize( + "hostvars_prefix,hostvars_suffix,use_contrib_script_compatible_ec2_tag_keys,expectation", + [ + ( + None, + None, + False, + { + "my_var": 1, + "placement": {"availability_zone": "us-east-1a", "region": "us-east-1"}, + "tags": {"Name": "my-name"}, + }, + ), + ( + "pre", + "post", + False, + { + "premy_varpost": 1, + "preplacementpost": { + "availability_zone": "us-east-1a", + "region": "us-east-1", + }, + "pretagspost": {"Name": "my-name"}, + }, + ), + ( + None, + None, + True, + { + "my_var": 1, + "ec2_tag_Name": "my-name", + "placement": {"availability_zone": "us-east-1a", "region": "us-east-1"}, + "tags": {"Name": "my-name"}, + }, + ), + ], +) +def test_prepare_host_vars( + inventory, + hostvars_prefix, + hostvars_suffix, + use_contrib_script_compatible_ec2_tag_keys, + expectation, +): + original_host_vars = { + "my_var": 1, + "placement": {"availability_zone": "us-east-1a"}, + "Tags": [{"Key": "Name", "Value": "my-name"}], + } + assert ( + inventory.prepare_host_vars( + original_host_vars, + hostvars_prefix, + hostvars_suffix, + use_contrib_script_compatible_ec2_tag_keys, + ) + == expectation + ) + + +def test_iter_entry(inventory): + hosts = [ + { + "Placement": { + "AvailabilityZone": "us-east-1a", + }, + "PublicDnsName": "first-host://", + }, + { + "Placement": { + "AvailabilityZone": "us-east-1a", + }, + 
"PublicDnsName": "second-host", + "Tags": [{"Key": "Name", "Value": "my-name"}], + }, + ] + + entries = list(inventory.iter_entry(hosts, hostnames=[])) + assert len(entries) == 2 + assert entries[0][0] == "first_host___" + assert entries[1][0] == "second-host" + assert entries[1][1]["tags"]["Name"] == "my-name" + + entries = list( + inventory.iter_entry( + hosts, + hostnames=[], + hostvars_prefix="a_", + hostvars_suffix="_b", + use_contrib_script_compatible_ec2_tag_keys=True, + ) + ) + assert len(entries) == 2 + assert entries[0][0] == "first_host___" + assert entries[1][1]["a_tags_b"]["Name"] == "my-name" + + +def test_query_empty(inventory): + result = inventory._query("us-east-1", [], [], strict_permissions=True) + assert result == {"aws_ec2": []} + + +instance_foobar = {"InstanceId": "foobar"} +instance_barfoo = {"InstanceId": "barfoo"} + + +def test_query_empty_include_only(inventory): + inventory._get_instances_by_region = Mock(side_effect=[[instance_foobar]]) + result = inventory._query("us-east-1", [{"tag:Name": ["foobar"]}], [], strict_permissions=True) + assert result == {"aws_ec2": [instance_foobar]} + + +def test_query_empty_include_ordered(inventory): + inventory._get_instances_by_region = Mock(side_effect=[[instance_foobar], [instance_barfoo]]) + result = inventory._query("us-east-1", [{"tag:Name": ["foobar"]}, {"tag:Name": ["barfoo"]}], [], strict_permissions=True) + assert result == {"aws_ec2": [instance_barfoo, instance_foobar]} + inventory._get_instances_by_region.assert_called_with('us-east-1', [{'Name': 'tag:Name', 'Values': ['barfoo']}], True) + + +def test_query_empty_include_exclude(inventory): + inventory._get_instances_by_region = Mock(side_effect=[[instance_foobar], [instance_foobar]]) + result = inventory._query("us-east-1", [{"tag:Name": ["foobar"]}], [{"tag:Name": ["foobar"]}], strict_permissions=True) + assert result == {"aws_ec2": []} + + +def test_include_extra_api_calls_deprecated(inventory): + inventory.display.deprecate = Mock() + inventory._read_config_data = Mock() + inventory._set_credentials = Mock() + inventory._query = Mock(return_value=[]) + + inventory.parse(inventory=[], loader=None, path=None) + assert inventory.display.deprecate.call_count == 0 + + inventory._options["include_extra_api_calls"] = True + inventory.parse(inventory=[], loader=None, path=None) + assert inventory.display.deprecate.call_count == 1 diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/conftest.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/conftest.py new file mode 100644 index 000000000..a7d1e0475 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/conftest.py @@ -0,0 +1,31 @@ +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + +import pytest + +from ansible.module_utils.six import string_types +from ansible.module_utils._text import to_bytes +from ansible.module_utils.common._collections_compat import MutableMapping + + +@pytest.fixture +def patch_ansible_module(request, mocker): + if isinstance(request.param, string_types): + args = request.param + elif isinstance(request.param, MutableMapping): + if 'ANSIBLE_MODULE_ARGS' not in request.param: + request.param = {'ANSIBLE_MODULE_ARGS': request.param} + if '_ansible_remote_tmp' not in request.param['ANSIBLE_MODULE_ARGS']: + 
request.param['ANSIBLE_MODULE_ARGS']['_ansible_remote_tmp'] = '/tmp'
+        if '_ansible_keep_remote_files' not in request.param['ANSIBLE_MODULE_ARGS']:
+            request.param['ANSIBLE_MODULE_ARGS']['_ansible_keep_remote_files'] = False
+        args = json.dumps(request.param)
+    else:
+        raise Exception('Malformed data to the patch_ansible_module pytest fixture')
+
+    mocker.patch('ansible.module_utils.basic._ANSIBLE_ARGS', to_bytes(args))
diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_instance/test_build_run_instance_spec.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_instance/test_build_run_instance_spec.py
new file mode 100644
index 000000000..e889b676a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_instance/test_build_run_instance_spec.py
@@ -0,0 +1,126 @@
+# (c) 2022 Red Hat Inc.
+#
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.amazon.aws.tests.unit.compat.mock import sentinel
+import ansible_collections.amazon.aws.plugins.modules.ec2_instance as ec2_instance_module
+
+
+@pytest.fixture
+def params_object():
+    params = {
+        'iam_instance_profile': None,
+        'exact_count': None,
+        'count': None,
+        'launch_template': None,
+        'instance_type': None,
+    }
+    return params
+
+
+@pytest.fixture
+def ec2_instance(monkeypatch):
+    # Monkeypatch the various spec-builder helpers in the ec2_instance module; they are
+    # tested separately.  Here we only care that build_run_instance_spec() passes their
+    # results into the right place in the instance spec.
+    monkeypatch.setattr(ec2_instance_module, 'build_top_level_options', lambda params: {'TOP_LEVEL_OPTIONS': sentinel.TOP_LEVEL})
+    monkeypatch.setattr(ec2_instance_module, 'build_network_spec', lambda params: sentinel.NETWORK_SPEC)
+    monkeypatch.setattr(ec2_instance_module, 'build_volume_spec', lambda params: sentinel.VOLUME_SPEC)
+    monkeypatch.setattr(ec2_instance_module, 'build_instance_tags', lambda params: sentinel.TAG_SPEC)
+    monkeypatch.setattr(ec2_instance_module, 'determine_iam_role', lambda params: sentinel.IAM_PROFILE_ARN)
+    return ec2_instance_module
+
+
+def _assert_defaults(instance_spec, to_skip=None):
+    if not to_skip:
+        to_skip = []
+
+    assert isinstance(instance_spec, dict)
+
+    if 'TagSpecifications' not in to_skip:
+        assert 'TagSpecifications' in instance_spec
+        assert instance_spec['TagSpecifications'] is sentinel.TAG_SPEC
+
+    if 'NetworkInterfaces' not in to_skip:
+        assert 'NetworkInterfaces' in instance_spec
+        assert instance_spec['NetworkInterfaces'] is sentinel.NETWORK_SPEC
+
+    if 'BlockDeviceMappings' not in to_skip:
+        assert 'BlockDeviceMappings' in instance_spec
+        assert instance_spec['BlockDeviceMappings'] is sentinel.VOLUME_SPEC
+
+    if 'IamInstanceProfile' not in to_skip:
+        # By default, this shouldn't be returned
+        assert 'IamInstanceProfile' not in instance_spec
+
+    if 'MinCount' not in to_skip:
+        assert 'MinCount' in instance_spec
+        assert instance_spec['MinCount'] == 1
+
+    if 'MaxCount' not in to_skip:
+        assert 'MaxCount' in instance_spec
+        assert instance_spec['MaxCount'] == 1
+
+    if 'TOP_LEVEL_OPTIONS' not in to_skip:
+        assert 'TOP_LEVEL_OPTIONS' in instance_spec
+        assert instance_spec['TOP_LEVEL_OPTIONS'] is sentinel.TOP_LEVEL
+
+
+def test_build_run_instance_spec_defaults(params_object, ec2_instance):
+    instance_spec = ec2_instance.build_run_instance_spec(params_object)
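+    # With every module option unset, only the monkeypatched defaults should appear
+    # in the spec; _assert_defaults() below checks each sentinel landed under the
+    # expected RunInstances key.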
_assert_defaults(instance_spec) + + +def test_build_run_instance_spec_tagging(params_object, ec2_instance, monkeypatch): + # build_instance_tags can return None, RunInstance doesn't like this + monkeypatch.setattr(ec2_instance_module, 'build_instance_tags', lambda params: None) + instance_spec = ec2_instance.build_run_instance_spec(params_object) + _assert_defaults(instance_spec, ['TagSpecifications']) + assert 'TagSpecifications' not in instance_spec + + # if someone *explicitly* passes {} (rather than not setting it), then [] can be returned + monkeypatch.setattr(ec2_instance_module, 'build_instance_tags', lambda params: []) + instance_spec = ec2_instance.build_run_instance_spec(params_object) + _assert_defaults(instance_spec, ['TagSpecifications']) + assert 'TagSpecifications' in instance_spec + assert instance_spec['TagSpecifications'] == [] + + +def test_build_run_instance_spec_instance_profile(params_object, ec2_instance): + params_object['iam_instance_profile'] = sentinel.INSTANCE_PROFILE_NAME + instance_spec = ec2_instance.build_run_instance_spec(params_object) + _assert_defaults(instance_spec, ['IamInstanceProfile']) + assert 'IamInstanceProfile' in instance_spec + assert instance_spec['IamInstanceProfile'] == {'Arn': sentinel.IAM_PROFILE_ARN} + + +def test_build_run_instance_spec_count(params_object, ec2_instance): + # When someone passes 'count', that number of instances will be *launched* + params_object['count'] = sentinel.COUNT + instance_spec = ec2_instance.build_run_instance_spec(params_object) + _assert_defaults(instance_spec, ['MaxCount', 'MinCount']) + assert 'MaxCount' in instance_spec + assert 'MinCount' in instance_spec + assert instance_spec['MaxCount'] == sentinel.COUNT + assert instance_spec['MinCount'] == sentinel.COUNT + + +def test_build_run_instance_spec_exact_count(params_object, ec2_instance): + # The "exact_count" logic relies on enforce_count doing the math to figure out how many + # instances to start/stop. The enforce_count call is responsible for ensuring that 'to_launch' + # is set and is a positive integer. + params_object['exact_count'] = sentinel.EXACT_COUNT + params_object['to_launch'] = sentinel.TO_LAUNCH + instance_spec = ec2_instance.build_run_instance_spec(params_object) + + _assert_defaults(instance_spec, ['MaxCount', 'MinCount']) + assert 'MaxCount' in instance_spec + assert 'MinCount' in instance_spec + assert instance_spec['MaxCount'] == sentinel.TO_LAUNCH + assert instance_spec['MinCount'] == sentinel.TO_LAUNCH diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_instance/test_determine_iam_role.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_instance/test_determine_iam_role.py new file mode 100644 index 000000000..cdde74c97 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_instance/test_determine_iam_role.py @@ -0,0 +1,102 @@ +# (c) 2022 Red Hat Inc. 
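+# Unit tests for the ec2_instance module's determine_iam_role() helper.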
+#
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+import sys
+
+from ansible_collections.amazon.aws.tests.unit.compat.mock import MagicMock
+from ansible_collections.amazon.aws.tests.unit.compat.mock import sentinel
+import ansible_collections.amazon.aws.plugins.modules.ec2_instance as ec2_instance_module
+import ansible_collections.amazon.aws.plugins.module_utils.arn as utils_arn
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3
+
+try:
+    import botocore
+except ImportError:
+    pass
+
+pytestmark = pytest.mark.skipif(not HAS_BOTO3, reason="test_determine_iam_role.py requires the python modules 'boto3' and 'botocore'")
+
+
+def _client_error(code='GenericError'):
+    return botocore.exceptions.ClientError(
+        {'Error': {'Code': code, 'Message': 'Something went wrong'},
+         'ResponseMetadata': {'RequestId': '01234567-89ab-cdef-0123-456789abcdef'}},
+        'some_called_method')
+
+
+@pytest.fixture
+def params_object():
+    params = {
+        'instance_role': None,
+        'exact_count': None,
+        'count': None,
+        'launch_template': None,
+        'instance_type': None,
+    }
+    return params
+
+
+class FailJsonException(Exception):
+    def __init__(self):
+        pass
+
+
+@pytest.fixture
+def ec2_instance(monkeypatch):
+    monkeypatch.setattr(ec2_instance_module, 'parse_aws_arn', lambda arn: None)
+    monkeypatch.setattr(ec2_instance_module, 'module', MagicMock())
+    ec2_instance_module.module.fail_json.side_effect = FailJsonException()
+    ec2_instance_module.module.fail_json_aws.side_effect = FailJsonException()
+    return ec2_instance_module
+
+
+def test_determine_iam_role_arn(params_object, ec2_instance, monkeypatch):
+    # Revert the default monkey patch to make it simple to try passing a valid ARN
+    monkeypatch.setattr(ec2_instance, 'parse_aws_arn', utils_arn.parse_aws_arn)
+
+    # Simplest example, someone passes a valid instance profile ARN
+    arn = ec2_instance.determine_iam_role('arn:aws:iam::123456789012:instance-profile/myprofile')
+    assert arn == 'arn:aws:iam::123456789012:instance-profile/myprofile'
+
+
+def test_determine_iam_role_name(params_object, ec2_instance):
+    profile_description = {'InstanceProfile': {'Arn': sentinel.IAM_PROFILE_ARN}}
+    iam_client = MagicMock(**{"get_instance_profile.return_value": profile_description})
+    ec2_instance_module.module.client.return_value = iam_client
+
+    arn = ec2_instance.determine_iam_role(sentinel.IAM_PROFILE_NAME)
+    assert arn == sentinel.IAM_PROFILE_ARN
+
+
+def test_determine_iam_role_missing(params_object, ec2_instance):
+    missing_exception = _client_error('NoSuchEntity')
+    iam_client = MagicMock(**{"get_instance_profile.side_effect": missing_exception})
+    ec2_instance_module.module.client.return_value = iam_client
+
+    with pytest.raises(FailJsonException) as exception:
+        arn = ec2_instance.determine_iam_role(sentinel.IAM_PROFILE_NAME)
+
+    assert ec2_instance_module.module.fail_json_aws.call_count == 1
+    assert ec2_instance_module.module.fail_json_aws.call_args.args[0] is missing_exception
+    assert 'Could not find' in ec2_instance_module.module.fail_json_aws.call_args.kwargs['msg']
+
+
+@pytest.mark.skipif(sys.version_info < (3, 8), reason='call_args behaviour changed in Python 3.8')
+def test_determine_iam_role_failure(params_object, ec2_instance):
+    missing_exception = _client_error()
+    iam_client = MagicMock(**{"get_instance_profile.side_effect": missing_exception})
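+    # Any ClientError other than NoSuchEntity should surface through fail_json_aws
+    # with the generic 'error occurred while searching' message (asserted below).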
ec2_instance_module.module.client.return_value = iam_client + + with pytest.raises(FailJsonException) as exception: + arn = ec2_instance.determine_iam_role(sentinel.IAM_PROFILE_NAME) + + assert ec2_instance_module.module.fail_json_aws.call_count == 1 + assert ec2_instance_module.module.fail_json_aws.call_args.args[0] is missing_exception + assert 'An error occurred while searching' in ec2_instance_module.module.fail_json_aws.call_args.kwargs['msg'] + assert 'Please try supplying the full ARN' in ec2_instance_module.module.fail_json_aws.call_args.kwargs['msg'] diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/a.pem b/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/a.pem new file mode 100644 index 000000000..4412f3258 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/a.pem @@ -0,0 +1,31 @@ +-----BEGIN CERTIFICATE----- +MIIFVTCCBD2gAwIBAgISAx4pnfwvGxYrrQhr/UXiN7HCMA0GCSqGSIb3DQEBCwUA +MEoxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MSMwIQYDVQQD +ExpMZXQncyBFbmNyeXB0IEF1dGhvcml0eSBYMzAeFw0xOTA3MjUwMDI4NTdaFw0x +OTEwMjMwMDI4NTdaMBoxGDAWBgNVBAMTD2NyeXB0b2dyYXBoeS5pbzCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBAKJDpCL99DVo83587MrVp6gunmKRoUfY +vcgk5u2v0tB9OmZkcIY37z6AunHWr18Yj55zHmm6G8Nf35hmu3ql2A26WThCbmOe +WXbxhgarkningZI9opUWnI2dIllguVIsq99GzhpNnDdCb26s5+SRhJI4cr4hYaKC +XGDKooKWyXUX09SJTq7nW/1+pq3y9ZMvldRKjJALeAdwnC7kmUB6pK7q8J2VlpfQ +wqGu6q/WHVdgnhWARw3GEFJWDn9wkxBAF08CpzhVaEj+iK+Ut/1HBgNYwqI47h7S +q+qv0G2qklRVUtEM0zYRsp+y/6vivdbFLlPw8VaerbpJN3gLtpVNcGECAwEAAaOC +AmMwggJfMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYB +BQUHAwIwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQUjbe0bE1aZ8HiqtwqUfCe15bF +V8UwHwYDVR0jBBgwFoAUqEpqYwR93brm0Tm3pkVl7/Oo7KEwbwYIKwYBBQUHAQEE +YzBhMC4GCCsGAQUFBzABhiJodHRwOi8vb2NzcC5pbnQteDMubGV0c2VuY3J5cHQu +b3JnMC8GCCsGAQUFBzAChiNodHRwOi8vY2VydC5pbnQteDMubGV0c2VuY3J5cHQu +b3JnLzAaBgNVHREEEzARgg9jcnlwdG9ncmFwaHkuaW8wTAYDVR0gBEUwQzAIBgZn +gQwBAgEwNwYLKwYBBAGC3xMBAQEwKDAmBggrBgEFBQcCARYaaHR0cDovL2Nwcy5s +ZXRzZW5jcnlwdC5vcmcwggEDBgorBgEEAdZ5AgQCBIH0BIHxAO8AdgB0ftqDMa0z +EJEhnM4lT0Jwwr/9XkIgCMY3NXnmEHvMVgAAAWwmvtnXAAAEAwBHMEUCIFXHYX/E +xtbYCvjjQ3dN0HOLW1d8+aduktmax4mu3KszAiEAvTpxuSVVXJnVGA4tU2GOnI60 +sqTh/IK6hvrFN1k1HBUAdQApPFGWVMg5ZbqqUPxYB9S3b79Yeily3KTDDPTlRUf0 +eAAAAWwmvtm9AAAEAwBGMEQCIDn7sgzD+7JzR+XTvjKf7VyLWwX37O8uwCfCTKo7 ++tEhAiB05bHiICU5wkfRBrwcvqXf4bPF7NT5LVlRQYzJ/hbpvzANBgkqhkiG9w0B +AQsFAAOCAQEAcMU8E6D+5WC07QSeTppRTboC++7YgQg5NiSWm7OE2FlyiRZXnu0Y +uBoaqAkZIqj7dom9wy1c1UauxOfM9lUZKhYnDTBu9tIhBAvCS0J0avv1j1KQygQ1 +qV+urJsunUwqV/vPWo1GfWophvyXVN6MAycv34ZXZvAjtG7oDcoQVLLvK1SIo2vu +4/dNkOQzaeZez8q6Ij9762TbBWaK5C789VMdUWZCADWoToPIK533cWbDEp4IhBU/ +K73d7lGGl7S59SjT2V/XE6eJS9Zlj0M+A8pf/8tjM/ImHAjlOHB02sM/VfZ7HAuZ +61TPxohL+e+X1FYeqIXYGXJmCEuB8WEmBg== +-----END CERTIFICATE----- diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/b.pem b/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/b.pem new file mode 100644 index 000000000..2be4bca53 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/b.pem @@ -0,0 +1,47 @@ +-----BEGIN CERTIFICATE----- +MIIIUjCCB/egAwIBAgIRALiJR3zQjp0MevT/Hk89sfAwCgYIKoZIzj0EAwIwgZIx +CzAJBgNVBAYTAkdCMRswGQYDVQQIExJHcmVhdGVyIE1hbmNoZXN0ZXIxEDAOBgNV +BAcTB1NhbGZvcmQxGjAYBgNVBAoTEUNPTU9ETyBDQSBMaW1pdGVkMTgwNgYDVQQD +Ey9DT01PRE8gRUNDIERvbWFpbiBWYWxpZGF0aW9uIFNlY3VyZSBTZXJ2ZXIgQ0Eg +MjAeFw0xOTA3MzEwMDAwMDBaFw0yMDAyMDYyMzU5NTlaMGwxITAfBgNVBAsTGERv 
+bWFpbiBDb250cm9sIFZhbGlkYXRlZDEhMB8GA1UECxMYUG9zaXRpdmVTU0wgTXVs +dGktRG9tYWluMSQwIgYDVQQDExtzc2wzODczMzcuY2xvdWRmbGFyZXNzbC5jb20w +WTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAARPFdjdnBIJRPnHCPsCBJ/MmPytXnZX +KV6lD2bbG5EVNuUQln4Na8heCY+sfpV+SPuuiNzZxgDA46GvyzdRYFhxo4IGUTCC +Bk0wHwYDVR0jBBgwFoAUQAlhZ/C8g3FP3hIILG/U1Ct2PZYwHQYDVR0OBBYEFGLh +bHk1KAYIRfVwXA3L+yDf0CxjMA4GA1UdDwEB/wQEAwIHgDAMBgNVHRMBAf8EAjAA +MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjBPBgNVHSAESDBGMDoGCysG +AQQBsjEBAgIHMCswKQYIKwYBBQUHAgEWHWh0dHBzOi8vc2VjdXJlLmNvbW9kby5j +b20vQ1BTMAgGBmeBDAECATBWBgNVHR8ETzBNMEugSaBHhkVodHRwOi8vY3JsLmNv +bW9kb2NhNC5jb20vQ09NT0RPRUNDRG9tYWluVmFsaWRhdGlvblNlY3VyZVNlcnZl +ckNBMi5jcmwwgYgGCCsGAQUFBwEBBHwwejBRBggrBgEFBQcwAoZFaHR0cDovL2Ny +dC5jb21vZG9jYTQuY29tL0NPTU9ET0VDQ0RvbWFpblZhbGlkYXRpb25TZWN1cmVT +ZXJ2ZXJDQTIuY3J0MCUGCCsGAQUFBzABhhlodHRwOi8vb2NzcC5jb21vZG9jYTQu +Y29tMIIDkAYDVR0RBIIDhzCCA4OCG3NzbDM4NzMzNy5jbG91ZGZsYXJlc3NsLmNv +bYIMKi5hanJ0Y3QuY29tghMqLmFrcmVwYnVyY3UuZ2VuLnRyghUqLmFuZHJlYXNr +YW5lbGxvcy5jb22CDSouYW5zaWJsZS5jb22CGSouYXJ0b2Z0b3VjaC1raW5nd29v +ZC5jb22CFyouYm91bGRlcnN3YXRlcmhvbGUuY29tghcqLmJyb2Nrc3RlY2hzdXBw +b3J0LmNvbYIQKi5idXJjbGFyLndlYi50coIcKi5ob3Blc29uZ2ZyZW5jaGJ1bGxk +b2dzLm5ldIIMKi5odXJyZW0uY29tghAqLmh5dmVsaWNvbnMuY29tghAqLmthcm1h +Zml0LmNvLnVrghUqLmxvd3J5c3lzdGVtc2luYy5jb22CDioubWFuaWNydW4uY29t +ghUqLm11dHVvZmluYW5jaWVyYS5jb22CDyoucGlsZ3JpbWFnZS5waIINKi5wa2dh +bWVzLm9yZ4IbKi5ybHBjb25zdWx0aW5nc2VydmljZXMuY29tghYqLnJ1eWF0YWJp +cmxlcmkuZ2VuLnRyghQqLnJ5YW5hcHBoeXNpY3NjLmNvbYIVKi53ZWFyaXRiYWNr +d2FyZHMub3Jngg8qLnlldGlzbmFjay5jb22CCmFqcnRjdC5jb22CEWFrcmVwYnVy +Y3UuZ2VuLnRyghNhbmRyZWFza2FuZWxsb3MuY29tggthbnNpYmxlLmNvbYIXYXJ0 +b2Z0b3VjaC1raW5nd29vZC5jb22CFWJvdWxkZXJzd2F0ZXJob2xlLmNvbYIVYnJv +Y2tzdGVjaHN1cHBvcnQuY29tgg5idXJjbGFyLndlYi50coIaaG9wZXNvbmdmcmVu +Y2hidWxsZG9ncy5uZXSCCmh1cnJlbS5jb22CDmh5dmVsaWNvbnMuY29tgg5rYXJt +YWZpdC5jby51a4ITbG93cnlzeXN0ZW1zaW5jLmNvbYIMbWFuaWNydW4uY29tghNt +dXR1b2ZpbmFuY2llcmEuY29tgg1waWxncmltYWdlLnBoggtwa2dhbWVzLm9yZ4IZ +cmxwY29uc3VsdGluZ3NlcnZpY2VzLmNvbYIUcnV5YXRhYmlybGVyaS5nZW4udHKC +EnJ5YW5hcHBoeXNpY3NjLmNvbYITd2Vhcml0YmFja3dhcmRzLm9yZ4INeWV0aXNu +YWNrLmNvbTCCAQQGCisGAQQB1nkCBAIEgfUEgfIA8AB2ALIeBcyLos2KIE6HZvkr +uYolIGdr2vpw57JJUy3vi5BeAAABbEVw8SgAAAQDAEcwRQIgE2YeTfb/d4BBUwpZ +ihWXSR+vRyNNUg8GlOak2MFMHv0CIQCLBvtU401m5/Psg9KirQZs321BSxgUKgSQ +m9M691d3eQB2AF6nc/nfVsDntTZIfdBJ4DJ6kZoMhKESEoQYdZaBcUVYAAABbEVw +8VgAAAQDAEcwRQIgGYsGfr3/mekjzMS9+ALAjx1ryfIfhXB/+UghTcw4Y8ICIQDS +K2L18WX3+Oh4TjJhjh5tV1iYyZVYivcwwbr7mtmOqjAKBggqhkjOPQQDAgNJADBG +AiEAjNt7LF78GV7snky9jwFcBsLH55ndzduvsrkJ7Ne1SgYCIQDsMJsTr9VP6kar +4Kv8V9zNBmpGrGNuE7A1GixBvzNaHA== +-----END CERTIFICATE----- diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.0.cert b/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.0.cert new file mode 100644 index 000000000..6997766ac --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.0.cert @@ -0,0 +1,121 @@ +subject=/C=AU/ST=Victoria/L=Melbourne/O=Telstra Corporation Limited/OU=Telstra Energy/CN=dev.energy.inside.telstra.com +issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Global SSL ICA G3 +-----BEGIN CERTIFICATE----- +MIIIHTCCBgWgAwIBAgIUCqrrzSfjzaoyB3DOxst2kMxFp/MwDQYJKoZIhvcNAQEL +BQAwTTELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxIzAh +BgNVBAMTGlF1b1ZhZGlzIEdsb2JhbCBTU0wgSUNBIEczMB4XDTE5MDgyMTIyMjIy +OFoXDTIxMDgyMTIyMzIwMFowgZsxCzAJBgNVBAYTAkFVMREwDwYDVQQIDAhWaWN0 +b3JpYTESMBAGA1UEBwwJTWVsYm91cm5lMSQwIgYDVQQKDBtUZWxzdHJhIENvcnBv 
+cmF0aW9uIExpbWl0ZWQxFzAVBgNVBAsMDlRlbHN0cmEgRW5lcmd5MSYwJAYDVQQD +DB1kZXYuZW5lcmd5Lmluc2lkZS50ZWxzdHJhLmNvbTCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAMPAPH2y206qios2NMzlCNJv1mrwC1/8tH2HOqJGiYZB +O7QOBRSvJsV++IozCB8ap99e8B64OOAQPOyykrdXd2axhftmMb1SFMF56eukHSuz +KhKWRUgHs0UFRU51lDcBcOvphwJ+5SOgqrqKFFFBgJ0ZpcP54JpFwKIdh3ac10x2 +mBaW5ccqdv5X9oEMu1D/yivBmy34tsbLYyfttCjP76iVT7UVYHjHWynnIhsEyMsU +gdM90NzrTlrvTSi/EcCD1W3+8b0f+G1TI5rhHbKwR0n/mv5QLFm7EABoYPhxS8bX +B+9tE67yb0RyWbgvUiHySRynQLNMRpRx8Y9bA8uC8n8CAwEAAaOCA6QwggOgMAkG +A1UdEwQCMAAwHwYDVR0jBBgwFoAUsxKJtalLNbwVAPCA6dh4h/ETfHYwcwYIKwYB +BQUHAQEEZzBlMDcGCCsGAQUFBzAChitodHRwOi8vdHJ1c3QucXVvdmFkaXNnbG9i +YWwuY29tL3F2c3NsZzMuY3J0MCoGCCsGAQUFBzABhh5odHRwOi8vb2NzcC5xdW92 +YWRpc2dsb2JhbC5jb20wgZ8GA1UdEQSBlzCBlIIdZGV2LmVuZXJneS5pbnNpZGUu +dGVsc3RyYS5jb22CJXJlcG9ydHMuZGV2LmVuZXJneS5pbnNpZGUudGVsc3RyYS5j +b22CJ2dyZWVuc3luYy5kZXYuZW5lcmd5Lmluc2lkZS50ZWxzdHJhLmNvbYIjbmdv +c3MuZGV2LmVuZXJneS5pbnNpZGUudGVsc3RyYS5jb20wUQYDVR0gBEowSDBGBgwr +BgEEAb5YAAJkAQEwNjA0BggrBgEFBQcCARYoaHR0cDovL3d3dy5xdW92YWRpc2ds +b2JhbC5jb20vcmVwb3NpdG9yeTAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUH +AwEwOgYDVR0fBDMwMTAvoC2gK4YpaHR0cDovL2NybC5xdW92YWRpc2dsb2JhbC5j +b20vcXZzc2xnMy5jcmwwHQYDVR0OBBYEFEoJQRpPC/V5ZK3mMkszZE2v6vh+MA4G +A1UdDwEB/wQEAwIFoDCCAXwGCisGAQQB1nkCBAIEggFsBIIBaAFmAHUAVhQGmi/X +wuzT9eG9RLI+x0Z2ubyZEVzA75SYVdaJ0N0AAAFstk9Y+gAABAMARjBEAiBFMZa6 +O9iXVjy2kqQa54vgNFdU7shgFJJhm//fSAQZUAIgBIL/yPdh+XiuQS2xPhCzNYkh +bxf7BbN4qUISESgiZpsAdgBvU3asMfAxGdiZAKRRFf93FRwR2QLBACkGjbIImjfZ +EwAAAWy2T1nKAAAEAwBHMEUCIG0tp63jLsDsfCTDlcvV5ItjRkbUJBnkxlPdP2PH +88sTAiEApgaPofVdn2hdI12iDDex72ta+9wpwQ1MxoaJn2nt+qEAdQDuS723dc5g +uuFCaR+r4Z5mow9+X7By2IMAxHuJeqj9ywAAAWy2T1iJAAAEAwBGMEQCIE/mzEFp +CJUc71jvwJa4Px86R3ZYK4mHmUlQAUZqd0ZkAiBdEmT8xxTuleSUlYHEkKCK/FZX +L+vsYJpPrA9TsO5IsTANBgkqhkiG9w0BAQsFAAOCAgEApE9WLz3S8tqA9Dk3r9LF +rJy8km9cBt1O9SQZwFsduGKGdF3Fd+/Y0V7UrFDzrX+NIzqcmgBHKxaIXorMBF70 +ajMaaROP2ymkpEXnruEwoR47fbW+JRAWDRm2xnouQveQX9ZcgCLbBvAWBqpndQj2 +DGmLJhNz5GlFBjh3PQZlU1w8hU7TrDxa7M1GMtVnk8X+o3l/MX9iPeEs+PiC4dHD +hpj84RY1VQJz8+10rql47SB5YgbwcqaizTG4ax/OAv1JHNWtfAodIMX8Y8X00zoz +A20LQv880jCCNANVNbrXJ3h4X3xwW/C1X9vYk0shymZJbT5u17JbPD1cy39bA7kT +F4L7scdQRxvcqazYN4/IdgvgMji9OltiYufP88Ti8KB2tcl2accpiC5St/zllGD1 +hqEeYLMzjyvUKR/1uvURQQtc0DPvBRmvkB+aI4g+sLkTTFWj5bsA1vKU8SDCyMuB +RQV11DId5+RNNCmWnskORUZJQssvY49pnfCxCES2nt3l/XzTzVtLYmd6G9uAqVac +e2ibnmDrFVlmlyRsCiMfZl5/OTJzt7Cj3az59m5Syfw/lnS9YP82t/r/ufuKkO5Q +q5a9aI8DuNNmAjR4lpIJNqIpX/y+dG2aGmx4XTc31MR9szWtiTgOHe0MkMupOAL0 +qkHrBgwo1zjuTMf3QOg6Z5Q= +-----END CERTIFICATE----- + +subject=/C=BM/O=QuoVadis Limited/CN=QuoVadis Global SSL ICA G3 +issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3 +-----BEGIN CERTIFICATE----- +MIIGFzCCA/+gAwIBAgIUftbnnMmtgcTIGT75XUQodw40ExcwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjExMDYxNDUwMThaFw0y +MjExMDYxNDUwMThaME0xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMSMwIQYDVQQDExpRdW9WYWRpcyBHbG9iYWwgU1NMIElDQSBHMzCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANf8Od17be6c6lTGJDhEXpmkTs4y +Q39Rr5VJyBeWCg06nSS71s6xF3sZvKcV0MbXlXCYM2ZX7cNTbJ81gs7uDsKFp+vK +EymiKyEiI2SImOtECNnSg+RVR4np/xz/UlC0yFUisH75cZsJ8T1pkGMfiEouR0EM +7O0uFgoboRfUP582TTWy0F7ynSA6YfGKnKj0OFwZJmGHVkLs1VevWjhj3R1fsPan +H05P5moePFnpQdj1FofoSxUHZ0c7VB+sUimboHm/uHNY1LOsk77qiSuVC5/yrdg3 +2EEfP/mxJYT4r/5UiD7VahySzeZHzZ2OibQm2AfgfMN3l57lCM3/WPQBhMAPS1jz +kE+7MjajM2f0aZctimW4Hasrj8AQnfAdHqZehbhtXaAlffNEzCdpNK584oCTVR7N 
+UR9iZFx83ruTqpo+GcLP/iSYqhM4g7fy45sNhU+IS+ca03zbxTl3TTlkofXunI5B +xxE30eGSQpDZ5+iUJcEOAuVKrlYocFbB3KF45hwcbzPWQ1DcO2jFAapOtQzeS+MZ +yZzT2YseJ8hQHKu8YrXZWwKaNfyl8kFkHUBDICowNEoZvBwRCQp8sgqL6YRZy0uD +JGxmnC2e0BVKSjcIvmq/CRWH7yiTk9eWm73xrsg9iIyD/kwJEnLyIk8tR5V8p/hc +1H2AjDrZH12PsZ45AgMBAAGjgfMwgfAwEgYDVR0TAQH/BAgwBgEB/wIBATARBgNV +HSAECjAIMAYGBFUdIAAwOgYIKwYBBQUHAQEELjAsMCoGCCsGAQUFBzABhh5odHRw +Oi8vb2NzcC5xdW92YWRpc2dsb2JhbC5jb20wDgYDVR0PAQH/BAQDAgEGMB8GA1Ud +IwQYMBaAFO3nb3Zav2DsSVvGpXe7chZxm8Q9MDsGA1UdHwQ0MDIwMKAuoCyGKmh0 +dHA6Ly9jcmwucXVvdmFkaXNnbG9iYWwuY29tL3F2cmNhMmczLmNybDAdBgNVHQ4E +FgQUsxKJtalLNbwVAPCA6dh4h/ETfHYwDQYJKoZIhvcNAQELBQADggIBAFGm1Fqp +RMiKr7a6h707M+km36PVXZnX1NZocCn36MrfRvphotbOCDm+GmRkar9ZMGhc8c/A +Vn7JSCjwF9jNOFIOUyNLq0w4luk+Pt2YFDbgF8IDdx53xIo8Gv05e9xpTvQYaIto +qeHbQjGXfSGc91olfX6JUwZlxxbhdJH+rxTFAg0jcbqToJoScWTfXSr1QRcNbSTs +Y4CPG6oULsnhVvrzgldGSK+DxFi2OKcDsOKkV7W4IGg8Do2L/M588AfBnV8ERzpl +qgMBBQxC2+0N6RdFHbmZt0HQE/NIg1s0xcjGx1XW3YTOfje31rmAXKHOehm4Bu48 +gr8gePq5cdQ2W9tA0Dnytb9wzH2SyPPIXRI7yNxaX9H8wYeDeeiKSSmQtfh1v5cV +7RXvm8F6hLJkkco/HOW3dAUwZFcKsUH+1eUJKLN18eDGwB8yGawjHvOKqcfg5Lf/ +TvC7hgcx7pDYaCCaqHaekgUwXbB2Enzqr1fdwoU1c01W5YuQAtAx5wk1bf34Yq/J +ph7wNXGvo88N0/EfP9AdVGmJzy7VuRXeVAOyjKAIeADMlwpjBRhcbs9m3dkqvoMb +SXKJxv/hFmNgEOvOlaFsXX1dbKg1v+C1AzKAFdiuAIa62JzASiEhigqNSdqdTsOh +8W8hdONuKKpe9zKedhBFAvuxhDgKmnySglYc +-----END CERTIFICATE----- + +subject=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3 +issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3 +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00 +MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf +qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMW +n4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym +c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+ +O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1 +o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0j +IaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq +IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz +8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh +vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l +7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG +cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD +ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66 +AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC +roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0Ga +W/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n +lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE ++V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV +csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtd +dbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg +KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeM +HVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4 +WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M +-----END CERTIFICATE----- + diff --git 
a/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.1.cert b/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.1.cert new file mode 100644 index 000000000..51f64f08d --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.1.cert @@ -0,0 +1,69 @@ +subject=/C=AU/ST=Victoria/L=Melbourne/O=Telstra Corporation Limited/OU=Telstra Energy/CN=dev.energy.inside.telstra.com +issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Global SSL ICA G3 +-----BEGIN CERTIFICATE----- +MIIIHTCCBgWgAwIBAgIUCqrrzSfjzaoyB3DOxst2kMxFp/MwDQYJKoZIhvcNAQELBQAwTTELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxIzAh +BgNVBAMTGlF1b1ZhZGlzIEdsb2JhbCBTU0wgSUNBIEczMB4XDTE5MDgyMTIyMjIyOFoXDTIxMDgyMTIyMzIwMFowgZsxCzAJBgNVBAYTAkFVMREwDwYDVQQIDAhWaWN0 +b3JpYTESMBAGA1UEBwwJTWVsYm91cm5lMSQwIgYDVQQKDBtUZWxzdHJhIENvcnBvcmF0aW9uIExpbWl0ZWQxFzAVBgNVBAsMDlRlbHN0cmEgRW5lcmd5MSYwJAYDVQQD +DB1kZXYuZW5lcmd5Lmluc2lkZS50ZWxzdHJhLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMPAPH2y206qios2NMzlCNJv1mrwC1/8tH2HOqJGiYZB +O7QOBRSvJsV++IozCB8ap99e8B64OOAQPOyykrdXd2axhftmMb1SFMF56eukHSuzKhKWRUgHs0UFRU51lDcBcOvphwJ+5SOgqrqKFFFBgJ0ZpcP54JpFwKIdh3ac10x2 +mBaW5ccqdv5X9oEMu1D/yivBmy34tsbLYyfttCjP76iVT7UVYHjHWynnIhsEyMsUgdM90NzrTlrvTSi/EcCD1W3+8b0f+G1TI5rhHbKwR0n/mv5QLFm7EABoYPhxS8bX +B+9tE67yb0RyWbgvUiHySRynQLNMRpRx8Y9bA8uC8n8CAwEAAaOCA6QwggOgMAkGA1UdEwQCMAAwHwYDVR0jBBgwFoAUsxKJtalLNbwVAPCA6dh4h/ETfHYwcwYIKwYB +BQUHAQEEZzBlMDcGCCsGAQUFBzAChitodHRwOi8vdHJ1c3QucXVvdmFkaXNnbG9iYWwuY29tL3F2c3NsZzMuY3J0MCoGCCsGAQUFBzABhh5odHRwOi8vb2NzcC5xdW92 +YWRpc2dsb2JhbC5jb20wgZ8GA1UdEQSBlzCBlIIdZGV2LmVuZXJneS5pbnNpZGUudGVsc3RyYS5jb22CJXJlcG9ydHMuZGV2LmVuZXJneS5pbnNpZGUudGVsc3RyYS5j +b22CJ2dyZWVuc3luYy5kZXYuZW5lcmd5Lmluc2lkZS50ZWxzdHJhLmNvbYIjbmdvc3MuZGV2LmVuZXJneS5pbnNpZGUudGVsc3RyYS5jb20wUQYDVR0gBEowSDBGBgwr +BgEEAb5YAAJkAQEwNjA0BggrBgEFBQcCARYoaHR0cDovL3d3dy5xdW92YWRpc2dsb2JhbC5jb20vcmVwb3NpdG9yeTAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUH +AwEwOgYDVR0fBDMwMTAvoC2gK4YpaHR0cDovL2NybC5xdW92YWRpc2dsb2JhbC5jb20vcXZzc2xnMy5jcmwwHQYDVR0OBBYEFEoJQRpPC/V5ZK3mMkszZE2v6vh+MA4G +A1UdDwEB/wQEAwIFoDCCAXwGCisGAQQB1nkCBAIEggFsBIIBaAFmAHUAVhQGmi/XwuzT9eG9RLI+x0Z2ubyZEVzA75SYVdaJ0N0AAAFstk9Y+gAABAMARjBEAiBFMZa6 +O9iXVjy2kqQa54vgNFdU7shgFJJhm//fSAQZUAIgBIL/yPdh+XiuQS2xPhCzNYkhbxf7BbN4qUISESgiZpsAdgBvU3asMfAxGdiZAKRRFf93FRwR2QLBACkGjbIImjfZ +EwAAAWy2T1nKAAAEAwBHMEUCIG0tp63jLsDsfCTDlcvV5ItjRkbUJBnkxlPdP2PH88sTAiEApgaPofVdn2hdI12iDDex72ta+9wpwQ1MxoaJn2nt+qEAdQDuS723dc5g +uuFCaR+r4Z5mow9+X7By2IMAxHuJeqj9ywAAAWy2T1iJAAAEAwBGMEQCIE/mzEFpCJUc71jvwJa4Px86R3ZYK4mHmUlQAUZqd0ZkAiBdEmT8xxTuleSUlYHEkKCK/FZX +L+vsYJpPrA9TsO5IsTANBgkqhkiG9w0BAQsFAAOCAgEApE9WLz3S8tqA9Dk3r9LFrJy8km9cBt1O9SQZwFsduGKGdF3Fd+/Y0V7UrFDzrX+NIzqcmgBHKxaIXorMBF70 +ajMaaROP2ymkpEXnruEwoR47fbW+JRAWDRm2xnouQveQX9ZcgCLbBvAWBqpndQj2DGmLJhNz5GlFBjh3PQZlU1w8hU7TrDxa7M1GMtVnk8X+o3l/MX9iPeEs+PiC4dHD +hpj84RY1VQJz8+10rql47SB5YgbwcqaizTG4ax/OAv1JHNWtfAodIMX8Y8X00zozA20LQv880jCCNANVNbrXJ3h4X3xwW/C1X9vYk0shymZJbT5u17JbPD1cy39bA7kT +F4L7scdQRxvcqazYN4/IdgvgMji9OltiYufP88Ti8KB2tcl2accpiC5St/zllGD1hqEeYLMzjyvUKR/1uvURQQtc0DPvBRmvkB+aI4g+sLkTTFWj5bsA1vKU8SDCyMuB +RQV11DId5+RNNCmWnskORUZJQssvY49pnfCxCES2nt3l/XzTzVtLYmd6G9uAqVace2ibnmDrFVlmlyRsCiMfZl5/OTJzt7Cj3az59m5Syfw/lnS9YP82t/r/ufuKkO5Q +q5a9aI8DuNNmAjR4lpIJNqIpX/y+dG2aGmx4XTc31MR9szWtiTgOHe0MkMupOAL0qkHrBgwo1zjuTMf3QOg6Z5Q= +-----END CERTIFICATE----- + +subject=/C=BM/O=QuoVadis Limited/CN=QuoVadis Global SSL ICA G3 +issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3 +-----BEGIN 
CERTIFICATE----- +MIIGFzCCA/+gAwIBAgIUftbnnMmtgcTIGT75XUQodw40ExcwDQYJKoZIhvcNAQELBQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjExMDYxNDUwMThaFw0yMjExMDYxNDUwMThaME0xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMSMwIQYDVQQDExpRdW9WYWRpcyBHbG9iYWwgU1NMIElDQSBHMzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANf8Od17be6c6lTGJDhEXpmkTs4y +Q39Rr5VJyBeWCg06nSS71s6xF3sZvKcV0MbXlXCYM2ZX7cNTbJ81gs7uDsKFp+vKEymiKyEiI2SImOtECNnSg+RVR4np/xz/UlC0yFUisH75cZsJ8T1pkGMfiEouR0EM +7O0uFgoboRfUP582TTWy0F7ynSA6YfGKnKj0OFwZJmGHVkLs1VevWjhj3R1fsPanH05P5moePFnpQdj1FofoSxUHZ0c7VB+sUimboHm/uHNY1LOsk77qiSuVC5/yrdg3 +2EEfP/mxJYT4r/5UiD7VahySzeZHzZ2OibQm2AfgfMN3l57lCM3/WPQBhMAPS1jzkE+7MjajM2f0aZctimW4Hasrj8AQnfAdHqZehbhtXaAlffNEzCdpNK584oCTVR7N +UR9iZFx83ruTqpo+GcLP/iSYqhM4g7fy45sNhU+IS+ca03zbxTl3TTlkofXunI5BxxE30eGSQpDZ5+iUJcEOAuVKrlYocFbB3KF45hwcbzPWQ1DcO2jFAapOtQzeS+MZ +yZzT2YseJ8hQHKu8YrXZWwKaNfyl8kFkHUBDICowNEoZvBwRCQp8sgqL6YRZy0uDJGxmnC2e0BVKSjcIvmq/CRWH7yiTk9eWm73xrsg9iIyD/kwJEnLyIk8tR5V8p/hc +1H2AjDrZH12PsZ45AgMBAAGjgfMwgfAwEgYDVR0TAQH/BAgwBgEB/wIBATARBgNVHSAECjAIMAYGBFUdIAAwOgYIKwYBBQUHAQEELjAsMCoGCCsGAQUFBzABhh5odHRw +Oi8vb2NzcC5xdW92YWRpc2dsb2JhbC5jb20wDgYDVR0PAQH/BAQDAgEGMB8GA1UdIwQYMBaAFO3nb3Zav2DsSVvGpXe7chZxm8Q9MDsGA1UdHwQ0MDIwMKAuoCyGKmh0 +dHA6Ly9jcmwucXVvdmFkaXNnbG9iYWwuY29tL3F2cmNhMmczLmNybDAdBgNVHQ4EFgQUsxKJtalLNbwVAPCA6dh4h/ETfHYwDQYJKoZIhvcNAQELBQADggIBAFGm1Fqp +RMiKr7a6h707M+km36PVXZnX1NZocCn36MrfRvphotbOCDm+GmRkar9ZMGhc8c/AVn7JSCjwF9jNOFIOUyNLq0w4luk+Pt2YFDbgF8IDdx53xIo8Gv05e9xpTvQYaIto +qeHbQjGXfSGc91olfX6JUwZlxxbhdJH+rxTFAg0jcbqToJoScWTfXSr1QRcNbSTsY4CPG6oULsnhVvrzgldGSK+DxFi2OKcDsOKkV7W4IGg8Do2L/M588AfBnV8ERzpl +qgMBBQxC2+0N6RdFHbmZt0HQE/NIg1s0xcjGx1XW3YTOfje31rmAXKHOehm4Bu48gr8gePq5cdQ2W9tA0Dnytb9wzH2SyPPIXRI7yNxaX9H8wYeDeeiKSSmQtfh1v5cV +7RXvm8F6hLJkkco/HOW3dAUwZFcKsUH+1eUJKLN18eDGwB8yGawjHvOKqcfg5Lf/TvC7hgcx7pDYaCCaqHaekgUwXbB2Enzqr1fdwoU1c01W5YuQAtAx5wk1bf34Yq/J +ph7wNXGvo88N0/EfP9AdVGmJzy7VuRXeVAOyjKAIeADMlwpjBRhcbs9m3dkqvoMbSXKJxv/hFmNgEOvOlaFsXX1dbKg1v+C1AzKAFdiuAIa62JzASiEhigqNSdqdTsOh +8W8hdONuKKpe9zKedhBFAvuxhDgKmnySglYc +-----END CERTIFICATE----- + +subject=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3 +issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3 +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQELBQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf +qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMWn4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym +c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1 +o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0jIaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq +IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh +vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG +cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD 
+ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC +roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0GaW/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n +lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE+V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV +csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtddbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg +KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeMHVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4 +WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M +-----END CERTIFICATE----- + diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.2.cert b/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.2.cert new file mode 100644 index 000000000..ce2992411 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.2.cert @@ -0,0 +1,113 @@ +-----BEGIN CERTIFICATE----- +MIIIHTCCBgWgAwIBAgIUCqrrzSfjzaoyB3DOxst2kMxFp/MwDQYJKoZIhvcNAQEL +BQAwTTELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxIzAh +BgNVBAMTGlF1b1ZhZGlzIEdsb2JhbCBTU0wgSUNBIEczMB4XDTE5MDgyMTIyMjIy +OFoXDTIxMDgyMTIyMzIwMFowgZsxCzAJBgNVBAYTAkFVMREwDwYDVQQIDAhWaWN0 +b3JpYTESMBAGA1UEBwwJTWVsYm91cm5lMSQwIgYDVQQKDBtUZWxzdHJhIENvcnBv +cmF0aW9uIExpbWl0ZWQxFzAVBgNVBAsMDlRlbHN0cmEgRW5lcmd5MSYwJAYDVQQD +DB1kZXYuZW5lcmd5Lmluc2lkZS50ZWxzdHJhLmNvbTCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAMPAPH2y206qios2NMzlCNJv1mrwC1/8tH2HOqJGiYZB +O7QOBRSvJsV++IozCB8ap99e8B64OOAQPOyykrdXd2axhftmMb1SFMF56eukHSuz +KhKWRUgHs0UFRU51lDcBcOvphwJ+5SOgqrqKFFFBgJ0ZpcP54JpFwKIdh3ac10x2 +mBaW5ccqdv5X9oEMu1D/yivBmy34tsbLYyfttCjP76iVT7UVYHjHWynnIhsEyMsU +gdM90NzrTlrvTSi/EcCD1W3+8b0f+G1TI5rhHbKwR0n/mv5QLFm7EABoYPhxS8bX +B+9tE67yb0RyWbgvUiHySRynQLNMRpRx8Y9bA8uC8n8CAwEAAaOCA6QwggOgMAkG +A1UdEwQCMAAwHwYDVR0jBBgwFoAUsxKJtalLNbwVAPCA6dh4h/ETfHYwcwYIKwYB +BQUHAQEEZzBlMDcGCCsGAQUFBzAChitodHRwOi8vdHJ1c3QucXVvdmFkaXNnbG9i +YWwuY29tL3F2c3NsZzMuY3J0MCoGCCsGAQUFBzABhh5odHRwOi8vb2NzcC5xdW92 +YWRpc2dsb2JhbC5jb20wgZ8GA1UdEQSBlzCBlIIdZGV2LmVuZXJneS5pbnNpZGUu +dGVsc3RyYS5jb22CJXJlcG9ydHMuZGV2LmVuZXJneS5pbnNpZGUudGVsc3RyYS5j +b22CJ2dyZWVuc3luYy5kZXYuZW5lcmd5Lmluc2lkZS50ZWxzdHJhLmNvbYIjbmdv +c3MuZGV2LmVuZXJneS5pbnNpZGUudGVsc3RyYS5jb20wUQYDVR0gBEowSDBGBgwr +BgEEAb5YAAJkAQEwNjA0BggrBgEFBQcCARYoaHR0cDovL3d3dy5xdW92YWRpc2ds +b2JhbC5jb20vcmVwb3NpdG9yeTAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUH +AwEwOgYDVR0fBDMwMTAvoC2gK4YpaHR0cDovL2NybC5xdW92YWRpc2dsb2JhbC5j +b20vcXZzc2xnMy5jcmwwHQYDVR0OBBYEFEoJQRpPC/V5ZK3mMkszZE2v6vh+MA4G +A1UdDwEB/wQEAwIFoDCCAXwGCisGAQQB1nkCBAIEggFsBIIBaAFmAHUAVhQGmi/X +wuzT9eG9RLI+x0Z2ubyZEVzA75SYVdaJ0N0AAAFstk9Y+gAABAMARjBEAiBFMZa6 +O9iXVjy2kqQa54vgNFdU7shgFJJhm//fSAQZUAIgBIL/yPdh+XiuQS2xPhCzNYkh +bxf7BbN4qUISESgiZpsAdgBvU3asMfAxGdiZAKRRFf93FRwR2QLBACkGjbIImjfZ +EwAAAWy2T1nKAAAEAwBHMEUCIG0tp63jLsDsfCTDlcvV5ItjRkbUJBnkxlPdP2PH +88sTAiEApgaPofVdn2hdI12iDDex72ta+9wpwQ1MxoaJn2nt+qEAdQDuS723dc5g +uuFCaR+r4Z5mow9+X7By2IMAxHuJeqj9ywAAAWy2T1iJAAAEAwBGMEQCIE/mzEFp +CJUc71jvwJa4Px86R3ZYK4mHmUlQAUZqd0ZkAiBdEmT8xxTuleSUlYHEkKCK/FZX +L+vsYJpPrA9TsO5IsTANBgkqhkiG9w0BAQsFAAOCAgEApE9WLz3S8tqA9Dk3r9LF +rJy8km9cBt1O9SQZwFsduGKGdF3Fd+/Y0V7UrFDzrX+NIzqcmgBHKxaIXorMBF70 +ajMaaROP2ymkpEXnruEwoR47fbW+JRAWDRm2xnouQveQX9ZcgCLbBvAWBqpndQj2 +DGmLJhNz5GlFBjh3PQZlU1w8hU7TrDxa7M1GMtVnk8X+o3l/MX9iPeEs+PiC4dHD 
+hpj84RY1VQJz8+10rql47SB5YgbwcqaizTG4ax/OAv1JHNWtfAodIMX8Y8X00zoz +A20LQv880jCCNANVNbrXJ3h4X3xwW/C1X9vYk0shymZJbT5u17JbPD1cy39bA7kT +F4L7scdQRxvcqazYN4/IdgvgMji9OltiYufP88Ti8KB2tcl2accpiC5St/zllGD1 +hqEeYLMzjyvUKR/1uvURQQtc0DPvBRmvkB+aI4g+sLkTTFWj5bsA1vKU8SDCyMuB +RQV11DId5+RNNCmWnskORUZJQssvY49pnfCxCES2nt3l/XzTzVtLYmd6G9uAqVac +e2ibnmDrFVlmlyRsCiMfZl5/OTJzt7Cj3az59m5Syfw/lnS9YP82t/r/ufuKkO5Q +q5a9aI8DuNNmAjR4lpIJNqIpX/y+dG2aGmx4XTc31MR9szWtiTgOHe0MkMupOAL0 +qkHrBgwo1zjuTMf3QOg6Z5Q= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIGFzCCA/+gAwIBAgIUftbnnMmtgcTIGT75XUQodw40ExcwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjExMDYxNDUwMThaFw0y +MjExMDYxNDUwMThaME0xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMSMwIQYDVQQDExpRdW9WYWRpcyBHbG9iYWwgU1NMIElDQSBHMzCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANf8Od17be6c6lTGJDhEXpmkTs4y +Q39Rr5VJyBeWCg06nSS71s6xF3sZvKcV0MbXlXCYM2ZX7cNTbJ81gs7uDsKFp+vK +EymiKyEiI2SImOtECNnSg+RVR4np/xz/UlC0yFUisH75cZsJ8T1pkGMfiEouR0EM +7O0uFgoboRfUP582TTWy0F7ynSA6YfGKnKj0OFwZJmGHVkLs1VevWjhj3R1fsPan +H05P5moePFnpQdj1FofoSxUHZ0c7VB+sUimboHm/uHNY1LOsk77qiSuVC5/yrdg3 +2EEfP/mxJYT4r/5UiD7VahySzeZHzZ2OibQm2AfgfMN3l57lCM3/WPQBhMAPS1jz +kE+7MjajM2f0aZctimW4Hasrj8AQnfAdHqZehbhtXaAlffNEzCdpNK584oCTVR7N +UR9iZFx83ruTqpo+GcLP/iSYqhM4g7fy45sNhU+IS+ca03zbxTl3TTlkofXunI5B +xxE30eGSQpDZ5+iUJcEOAuVKrlYocFbB3KF45hwcbzPWQ1DcO2jFAapOtQzeS+MZ +yZzT2YseJ8hQHKu8YrXZWwKaNfyl8kFkHUBDICowNEoZvBwRCQp8sgqL6YRZy0uD +JGxmnC2e0BVKSjcIvmq/CRWH7yiTk9eWm73xrsg9iIyD/kwJEnLyIk8tR5V8p/hc +1H2AjDrZH12PsZ45AgMBAAGjgfMwgfAwEgYDVR0TAQH/BAgwBgEB/wIBATARBgNV +HSAECjAIMAYGBFUdIAAwOgYIKwYBBQUHAQEELjAsMCoGCCsGAQUFBzABhh5odHRw +Oi8vb2NzcC5xdW92YWRpc2dsb2JhbC5jb20wDgYDVR0PAQH/BAQDAgEGMB8GA1Ud +IwQYMBaAFO3nb3Zav2DsSVvGpXe7chZxm8Q9MDsGA1UdHwQ0MDIwMKAuoCyGKmh0 +dHA6Ly9jcmwucXVvdmFkaXNnbG9iYWwuY29tL3F2cmNhMmczLmNybDAdBgNVHQ4E +FgQUsxKJtalLNbwVAPCA6dh4h/ETfHYwDQYJKoZIhvcNAQELBQADggIBAFGm1Fqp +RMiKr7a6h707M+km36PVXZnX1NZocCn36MrfRvphotbOCDm+GmRkar9ZMGhc8c/A +Vn7JSCjwF9jNOFIOUyNLq0w4luk+Pt2YFDbgF8IDdx53xIo8Gv05e9xpTvQYaIto +qeHbQjGXfSGc91olfX6JUwZlxxbhdJH+rxTFAg0jcbqToJoScWTfXSr1QRcNbSTs +Y4CPG6oULsnhVvrzgldGSK+DxFi2OKcDsOKkV7W4IGg8Do2L/M588AfBnV8ERzpl +qgMBBQxC2+0N6RdFHbmZt0HQE/NIg1s0xcjGx1XW3YTOfje31rmAXKHOehm4Bu48 +gr8gePq5cdQ2W9tA0Dnytb9wzH2SyPPIXRI7yNxaX9H8wYeDeeiKSSmQtfh1v5cV +7RXvm8F6hLJkkco/HOW3dAUwZFcKsUH+1eUJKLN18eDGwB8yGawjHvOKqcfg5Lf/ +TvC7hgcx7pDYaCCaqHaekgUwXbB2Enzqr1fdwoU1c01W5YuQAtAx5wk1bf34Yq/J +ph7wNXGvo88N0/EfP9AdVGmJzy7VuRXeVAOyjKAIeADMlwpjBRhcbs9m3dkqvoMb +SXKJxv/hFmNgEOvOlaFsXX1dbKg1v+C1AzKAFdiuAIa62JzASiEhigqNSdqdTsOh +8W8hdONuKKpe9zKedhBFAvuxhDgKmnySglYc +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00 +MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf +qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMW +n4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym +c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+ +O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1 +o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0j +IaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq 
+IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz +8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh +vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l +7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG +cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD +ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66 +AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC +roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0Ga +W/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n +lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE ++V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV +csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtd +dbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg +KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeM +HVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4 +WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M +-----END CERTIFICATE----- + diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.3.cert b/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.3.cert new file mode 100644 index 000000000..0c947b17b --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.3.cert @@ -0,0 +1,124 @@ +subject=/C=AU/ST=Victoria/L=Melbourne/O=Telstra Corporation Limited/OU=Telstra Energy/CN=dev.energy.inside.telstra.com +issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Global SSL ICA G3 +-----BEGIN CERTIFICATE----- +MIIIHTCCBgWgAwIBAgIUCqrrzSfjzaoyB3DOxst2kMxFp/MwDQYJKoZIhvcNAQEL +BQAwTTELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxIzAh +BgNVBAMTGlF1b1ZhZGlzIEdsb2JhbCBTU0wgSUNBIEczMB4XDTE5MDgyMTIyMjIy +OFoXDTIxMDgyMTIyMzIwMFowgZsxCzAJBgNVBAYTAkFVMREwDwYDVQQIDAhWaWN0 +b3JpYTESMBAGA1UEBwwJTWVsYm91cm5lMSQwIgYDVQQKDBtUZWxzdHJhIENvcnBv +cmF0aW9uIExpbWl0ZWQxFzAVBgNVBAsMDlRlbHN0cmEgRW5lcmd5MSYwJAYDVQQD +DB1kZXYuZW5lcmd5Lmluc2lkZS50ZWxzdHJhLmNvbTCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAMPAPH2y206qios2NMzlCNJv1mrwC1/8tH2HOqJGiYZB +O7QOBRSvJsV++IozCB8ap99e8B64OOAQPOyykrdXd2axhftmMb1SFMF56eukHSuz +KhKWRUgHs0UFRU51lDcBcOvphwJ+5SOgqrqKFFFBgJ0ZpcP54JpFwKIdh3ac10x2 +mBaW5ccqdv5X9oEMu1D/yivBmy34tsbLYyfttCjP76iVT7UVYHjHWynnIhsEyMsU +gdM90NzrTlrvTSi/EcCD1W3+8b0f+G1TI5rhHbKwR0n/mv5QLFm7EABoYPhxS8bX +B+9tE67yb0RyWbgvUiHySRynQLNMRpRx8Y9bA8uC8n8CAwEAAaOCA6QwggOgMAkG +A1UdEwQCMAAwHwYDVR0jBBgwFoAUsxKJtalLNbwVAPCA6dh4h/ETfHYwcwYIKwYB +BQUHAQEEZzBlMDcGCCsGAQUFBzAChitodHRwOi8vdHJ1c3QucXVvdmFkaXNnbG9i +YWwuY29tL3F2c3NsZzMuY3J0MCoGCCsGAQUFBzABhh5odHRwOi8vb2NzcC5xdW92 +YWRpc2dsb2JhbC5jb20wgZ8GA1UdEQSBlzCBlIIdZGV2LmVuZXJneS5pbnNpZGUu +dGVsc3RyYS5jb22CJXJlcG9ydHMuZGV2LmVuZXJneS5pbnNpZGUudGVsc3RyYS5j +b22CJ2dyZWVuc3luYy5kZXYuZW5lcmd5Lmluc2lkZS50ZWxzdHJhLmNvbYIjbmdv +c3MuZGV2LmVuZXJneS5pbnNpZGUudGVsc3RyYS5jb20wUQYDVR0gBEowSDBGBgwr +BgEEAb5YAAJkAQEwNjA0BggrBgEFBQcCARYoaHR0cDovL3d3dy5xdW92YWRpc2ds +b2JhbC5jb20vcmVwb3NpdG9yeTAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUH +AwEwOgYDVR0fBDMwMTAvoC2gK4YpaHR0cDovL2NybC5xdW92YWRpc2dsb2JhbC5j +b20vcXZzc2xnMy5jcmwwHQYDVR0OBBYEFEoJQRpPC/V5ZK3mMkszZE2v6vh+MA4G +A1UdDwEB/wQEAwIFoDCCAXwGCisGAQQB1nkCBAIEggFsBIIBaAFmAHUAVhQGmi/X +wuzT9eG9RLI+x0Z2ubyZEVzA75SYVdaJ0N0AAAFstk9Y+gAABAMARjBEAiBFMZa6 +O9iXVjy2kqQa54vgNFdU7shgFJJhm//fSAQZUAIgBIL/yPdh+XiuQS2xPhCzNYkh 
+bxf7BbN4qUISESgiZpsAdgBvU3asMfAxGdiZAKRRFf93FRwR2QLBACkGjbIImjfZ +EwAAAWy2T1nKAAAEAwBHMEUCIG0tp63jLsDsfCTDlcvV5ItjRkbUJBnkxlPdP2PH +88sTAiEApgaPofVdn2hdI12iDDex72ta+9wpwQ1MxoaJn2nt+qEAdQDuS723dc5g +uuFCaR+r4Z5mow9+X7By2IMAxHuJeqj9ywAAAWy2T1iJAAAEAwBGMEQCIE/mzEFp +CJUc71jvwJa4Px86R3ZYK4mHmUlQAUZqd0ZkAiBdEmT8xxTuleSUlYHEkKCK/FZX +L+vsYJpPrA9TsO5IsTANBgkqhkiG9w0BAQsFAAOCAgEApE9WLz3S8tqA9Dk3r9LF +rJy8km9cBt1O9SQZwFsduGKGdF3Fd+/Y0V7UrFDzrX+NIzqcmgBHKxaIXorMBF70 +ajMaaROP2ymkpEXnruEwoR47fbW+JRAWDRm2xnouQveQX9ZcgCLbBvAWBqpndQj2 +DGmLJhNz5GlFBjh3PQZlU1w8hU7TrDxa7M1GMtVnk8X+o3l/MX9iPeEs+PiC4dHD +hpj84RY1VQJz8+10rql47SB5YgbwcqaizTG4ax/OAv1JHNWtfAodIMX8Y8X00zoz +A20LQv880jCCNANVNbrXJ3h4X3xwW/C1X9vYk0shymZJbT5u17JbPD1cy39bA7kT +F4L7scdQRxvcqazYN4/IdgvgMji9OltiYufP88Ti8KB2tcl2accpiC5St/zllGD1 +hqEeYLMzjyvUKR/1uvURQQtc0DPvBRmvkB+aI4g+sLkTTFWj5bsA1vKU8SDCyMuB +RQV11DId5+RNNCmWnskORUZJQssvY49pnfCxCES2nt3l/XzTzVtLYmd6G9uAqVac +e2ibnmDrFVlmlyRsCiMfZl5/OTJzt7Cj3az59m5Syfw/lnS9YP82t/r/ufuKkO5Q +q5a9aI8DuNNmAjR4lpIJNqIpX/y+dG2aGmx4XTc31MR9szWtiTgOHe0MkMupOAL0 +qkHrBgwo1zjuTMf3QOg6Z5Q= +-----END CERTIFICATE----- + + +subject=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3 +issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3 +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00 +MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf +qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMW +n4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym +c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+ +O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1 +o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0j +IaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq +IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz +8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh +vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l +7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG +cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD +ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66 +AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC +roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0Ga +W/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n +lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE ++V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV +csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtd +dbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg +KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeM +HVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4 +WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M +-----END CERTIFICATE----- + + + + +subject=/C=BM/O=QuoVadis Limited/CN=QuoVadis Global SSL ICA G3 +issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3 +-----BEGIN CERTIFICATE----- +MIIGFzCCA/+gAwIBAgIUftbnnMmtgcTIGT75XUQodw40ExcwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjExMDYxNDUwMThaFw0y 
+MjExMDYxNDUwMThaME0xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMSMwIQYDVQQDExpRdW9WYWRpcyBHbG9iYWwgU1NMIElDQSBHMzCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANf8Od17be6c6lTGJDhEXpmkTs4y +Q39Rr5VJyBeWCg06nSS71s6xF3sZvKcV0MbXlXCYM2ZX7cNTbJ81gs7uDsKFp+vK +EymiKyEiI2SImOtECNnSg+RVR4np/xz/UlC0yFUisH75cZsJ8T1pkGMfiEouR0EM +7O0uFgoboRfUP582TTWy0F7ynSA6YfGKnKj0OFwZJmGHVkLs1VevWjhj3R1fsPan +H05P5moePFnpQdj1FofoSxUHZ0c7VB+sUimboHm/uHNY1LOsk77qiSuVC5/yrdg3 +2EEfP/mxJYT4r/5UiD7VahySzeZHzZ2OibQm2AfgfMN3l57lCM3/WPQBhMAPS1jz +kE+7MjajM2f0aZctimW4Hasrj8AQnfAdHqZehbhtXaAlffNEzCdpNK584oCTVR7N +UR9iZFx83ruTqpo+GcLP/iSYqhM4g7fy45sNhU+IS+ca03zbxTl3TTlkofXunI5B +xxE30eGSQpDZ5+iUJcEOAuVKrlYocFbB3KF45hwcbzPWQ1DcO2jFAapOtQzeS+MZ +yZzT2YseJ8hQHKu8YrXZWwKaNfyl8kFkHUBDICowNEoZvBwRCQp8sgqL6YRZy0uD +JGxmnC2e0BVKSjcIvmq/CRWH7yiTk9eWm73xrsg9iIyD/kwJEnLyIk8tR5V8p/hc +1H2AjDrZH12PsZ45AgMBAAGjgfMwgfAwEgYDVR0TAQH/BAgwBgEB/wIBATARBgNV +HSAECjAIMAYGBFUdIAAwOgYIKwYBBQUHAQEELjAsMCoGCCsGAQUFBzABhh5odHRw +Oi8vb2NzcC5xdW92YWRpc2dsb2JhbC5jb20wDgYDVR0PAQH/BAQDAgEGMB8GA1Ud +IwQYMBaAFO3nb3Zav2DsSVvGpXe7chZxm8Q9MDsGA1UdHwQ0MDIwMKAuoCyGKmh0 +dHA6Ly9jcmwucXVvdmFkaXNnbG9iYWwuY29tL3F2cmNhMmczLmNybDAdBgNVHQ4E +FgQUsxKJtalLNbwVAPCA6dh4h/ETfHYwDQYJKoZIhvcNAQELBQADggIBAFGm1Fqp +RMiKr7a6h707M+km36PVXZnX1NZocCn36MrfRvphotbOCDm+GmRkar9ZMGhc8c/A +Vn7JSCjwF9jNOFIOUyNLq0w4luk+Pt2YFDbgF8IDdx53xIo8Gv05e9xpTvQYaIto +qeHbQjGXfSGc91olfX6JUwZlxxbhdJH+rxTFAg0jcbqToJoScWTfXSr1QRcNbSTs +Y4CPG6oULsnhVvrzgldGSK+DxFi2OKcDsOKkV7W4IGg8Do2L/M588AfBnV8ERzpl +qgMBBQxC2+0N6RdFHbmZt0HQE/NIg1s0xcjGx1XW3YTOfje31rmAXKHOehm4Bu48 +gr8gePq5cdQ2W9tA0Dnytb9wzH2SyPPIXRI7yNxaX9H8wYeDeeiKSSmQtfh1v5cV +7RXvm8F6hLJkkco/HOW3dAUwZFcKsUH+1eUJKLN18eDGwB8yGawjHvOKqcfg5Lf/ +TvC7hgcx7pDYaCCaqHaekgUwXbB2Enzqr1fdwoU1c01W5YuQAtAx5wk1bf34Yq/J +ph7wNXGvo88N0/EfP9AdVGmJzy7VuRXeVAOyjKAIeADMlwpjBRhcbs9m3dkqvoMb +SXKJxv/hFmNgEOvOlaFsXX1dbKg1v+C1AzKAFdiuAIa62JzASiEhigqNSdqdTsOh +8W8hdONuKKpe9zKedhBFAvuxhDgKmnySglYc +-----END CERTIFICATE----- diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.4.cert b/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.4.cert new file mode 100644 index 000000000..adbb8edca --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.4.cert @@ -0,0 +1,86 @@ +subject=/C=AU/ST=Victoria/L=Melbourne/O=Telstra Corporation Limited/OU=Telstra Energy/CN=dev.energy.inside.telstra.com +issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Global SSL ICA G3 +-----BEGIN CERTIFICATE----- +MIIIHTCCBgWgAwIBAgIUCqrrzSfjzaoyB3DOxst2kMxFp/MwDQYJKoZIhvcNAQEL +BQAwTTELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxIzAh +BgNVBAMTGlF1b1ZhZGlzIEdsb2JhbCBTU0wgSUNBIEczMB4XDTE5MDgyMTIyMjIy +OFoXDTIxMDgyMTIyMzIwMFowgZsxCzAJBgNVBAYTAkFVMREwDwYDVQQIDAhWaWN0 +b3JpYTESMBAGA1UEBwwJTWVsYm91cm5lMSQwIgYDVQQKDBtUZWxzdHJhIENvcnBv +cmF0aW9uIExpbWl0ZWQxFzAVBgNVBAsMDlRlbHN0cmEgRW5lcmd5MSYwJAYDVQQD +DB1kZXYuZW5lcmd5Lmluc2lkZS50ZWxzdHJhLmNvbTCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAMPAPH2y206qios2NMzlCNJv1mrwC1/8tH2HOqJGiYZB +O7QOBRSvJsV++IozCB8ap99e8B64OOAQPOyykrdXd2axhftmMb1SFMF56eukHSuz +KhKWRUgHs0UFRU51lDcBcOvphwJ+5SOgqrqKFFFBgJ0ZpcP54JpFwKIdh3ac10x2 +mBaW5ccqdv5X9oEMu1D/yivBmy34tsbLYyfttCjP76iVT7UVYHjHWynnIhsEyMsU +gdM90NzrTlrvTSi/EcCD1W3+8b0f+G1TI5rhHbKwR0n/mv5QLFm7EABoYPhxS8bX +B+9tE67yb0RyWbgvUiHySRynQLNMRpRx8Y9bA8uC8n8CAwEAAaOCA6QwggOgMAkG +A1UdEwQCMAAwHwYDVR0jBBgwFoAUsxKJtalLNbwVAPCA6dh4h/ETfHYwcwYIKwYB +BQUHAQEEZzBlMDcGCCsGAQUFBzAChitodHRwOi8vdHJ1c3QucXVvdmFkaXNnbG9i 
+YWwuY29tL3F2c3NsZzMuY3J0MCoGCCsGAQUFBzABhh5odHRwOi8vb2NzcC5xdW92 +YWRpc2dsb2JhbC5jb20wgZ8GA1UdEQSBlzCBlIIdZGV2LmVuZXJneS5pbnNpZGUu +dGVsc3RyYS5jb22CJXJlcG9ydHMuZGV2LmVuZXJneS5pbnNpZGUudGVsc3RyYS5j +b22CJ2dyZWVuc3luYy5kZXYuZW5lcmd5Lmluc2lkZS50ZWxzdHJhLmNvbYIjbmdv +c3MuZGV2LmVuZXJneS5pbnNpZGUudGVsc3RyYS5jb20wUQYDVR0gBEowSDBGBgwr +BgEEAb5YAAJkAQEwNjA0BggrBgEFBQcCARYoaHR0cDovL3d3dy5xdW92YWRpc2ds +b2JhbC5jb20vcmVwb3NpdG9yeTAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUH +AwEwOgYDVR0fBDMwMTAvoC2gK4YpaHR0cDovL2NybC5xdW92YWRpc2dsb2JhbC5j +b20vcXZzc2xnMy5jcmwwHQYDVR0OBBYEFEoJQRpPC/V5ZK3mMkszZE2v6vh+MA4G +A1UdDwEB/wQEAwIFoDCCAXwGCisGAQQB1nkCBAIEggFsBIIBaAFmAHUAVhQGmi/X +wuzT9eG9RLI+x0Z2ubyZEVzA75SYVdaJ0N0AAAFstk9Y+gAABAMARjBEAiBFMZa6 +O9iXVjy2kqQa54vgNFdU7shgFJJhm//fSAQZUAIgBIL/yPdh+XiuQS2xPhCzNYkh +bxf7BbN4qUISESgiZpsAdgBvU3asMfAxGdiZAKRRFf93FRwR2QLBACkGjbIImjfZ +EwAAAWy2T1nKAAAEAwBHMEUCIG0tp63jLsDsfCTDlcvV5ItjRkbUJBnkxlPdP2PH +88sTAiEApgaPofVdn2hdI12iDDex72ta+9wpwQ1MxoaJn2nt+qEAdQDuS723dc5g +uuFCaR+r4Z5mow9+X7By2IMAxHuJeqj9ywAAAWy2T1iJAAAEAwBGMEQCIE/mzEFp +CJUc71jvwJa4Px86R3ZYK4mHmUlQAUZqd0ZkAiBdEmT8xxTuleSUlYHEkKCK/FZX +L+vsYJpPrA9TsO5IsTANBgkqhkiG9w0BAQsFAAOCAgEApE9WLz3S8tqA9Dk3r9LF +rJy8km9cBt1O9SQZwFsduGKGdF3Fd+/Y0V7UrFDzrX+NIzqcmgBHKxaIXorMBF70 +ajMaaROP2ymkpEXnruEwoR47fbW+JRAWDRm2xnouQveQX9ZcgCLbBvAWBqpndQj2 +DGmLJhNz5GlFBjh3PQZlU1w8hU7TrDxa7M1GMtVnk8X+o3l/MX9iPeEs+PiC4dHD +hpj84RY1VQJz8+10rql47SB5YgbwcqaizTG4ax/OAv1JHNWtfAodIMX8Y8X00zoz +A20LQv880jCCNANVNbrXJ3h4X3xwW/C1X9vYk0shymZJbT5u17JbPD1cy39bA7kT +F4L7scdQRxvcqazYN4/IdgvgMji9OltiYufP88Ti8KB2tcl2accpiC5St/zllGD1 +hqEeYLMzjyvUKR/1uvURQQtc0DPvBRmvkB+aI4g+sLkTTFWj5bsA1vKU8SDCyMuB +RQV11DId5+RNNCmWnskORUZJQssvY49pnfCxCES2nt3l/XzTzVtLYmd6G9uAqVac +e2ibnmDrFVlmlyRsCiMfZl5/OTJzt7Cj3az59m5Syfw/lnS9YP82t/r/ufuKkO5Q +q5a9aI8DuNNmAjR4lpIJNqIpX/y+dG2aGmx4XTc31MR9szWtiTgOHe0MkMupOAL0 +qkHrBgwo1zjuTMf3QOg6Z5Q= +-----END CERTIFICATE----- + +subject=/C=BM/O=QuoVadis Limited/CN=QuoVadis Global SSL ICA G3 +issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3 +-----BEGIN CERTIFICATE----- +MIIGFzCCA/+gAwIBAgIUftbnnMmtgcTIGT75XUQodw40ExcwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjExMDYxNDUwMThaFw0y +MjExMDYxNDUwMThaME0xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMSMwIQYDVQQDExpRdW9WYWRpcyBHbG9iYWwgU1NMIElDQSBHMzCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANf8Od17be6c6lTGJDhEXpmkTs4y +Q39Rr5VJyBeWCg06nSS71s6xF3sZvKcV0MbXlXCYM2ZX7cNTbJ81gs7uDsKFp+vK +EymiKyEiI2SImOtECNnSg+RVR4np/xz/UlC0yFUisH75cZsJ8T1pkGMfiEouR0EM +7O0uFgoboRfUP582TTWy0F7ynSA6YfGKnKj0OFwZJmGHVkLs1VevWjhj3R1fsPan +H05P5moePFnpQdj1FofoSxUHZ0c7VB+sUimboHm/uHNY1LOsk77qiSuVC5/yrdg3 +2EEfP/mxJYT4r/5UiD7VahySzeZHzZ2OibQm2AfgfMN3l57lCM3/WPQBhMAPS1jz +kE+7MjajM2f0aZctimW4Hasrj8AQnfAdHqZehbhtXaAlffNEzCdpNK584oCTVR7N +UR9iZFx83ruTqpo+GcLP/iSYqhM4g7fy45sNhU+IS+ca03zbxTl3TTlkofXunI5B +xxE30eGSQpDZ5+iUJcEOAuVKrlYocFbB3KF45hwcbzPWQ1DcO2jFAapOtQzeS+MZ +yZzT2YseJ8hQHKu8YrXZWwKaNfyl8kFkHUBDICowNEoZvBwRCQp8sgqL6YRZy0uD +JGxmnC2e0BVKSjcIvmq/CRWH7yiTk9eWm73xrsg9iIyD/kwJEnLyIk8tR5V8p/hc +1H2AjDrZH12PsZ45AgMBAAGjgfMwgfAwEgYDVR0TAQH/BAgwBgEB/wIBATARBgNV +HSAECjAIMAYGBFUdIAAwOgYIKwYBBQUHAQEELjAsMCoGCCsGAQUFBzABhh5odHRw +Oi8vb2NzcC5xdW92YWRpc2dsb2JhbC5jb20wDgYDVR0PAQH/BAQDAgEGMB8GA1Ud +IwQYMBaAFO3nb3Zav2DsSVvGpXe7chZxm8Q9MDsGA1UdHwQ0MDIwMKAuoCyGKmh0 +dHA6Ly9jcmwucXVvdmFkaXNnbG9iYWwuY29tL3F2cmNhMmczLmNybDAdBgNVHQ4E +FgQUsxKJtalLNbwVAPCA6dh4h/ETfHYwDQYJKoZIhvcNAQELBQADggIBAFGm1Fqp 
+RMiKr7a6h707M+km36PVXZnX1NZocCn36MrfRvphotbOCDm+GmRkar9ZMGhc8c/A +Vn7JSCjwF9jNOFIOUyNLq0w4luk+Pt2YFDbgF8IDdx53xIo8Gv05e9xpTvQYaIto +qeHbQjGXfSGc91olfX6JUwZlxxbhdJH+rxTFAg0jcbqToJoScWTfXSr1QRcNbSTs +Y4CPG6oULsnhVvrzgldGSK+DxFi2OKcDsOKkV7W4IGg8Do2L/M588AfBnV8ERzpl +qgMBBQxC2+0N6RdFHbmZt0HQE/NIg1s0xcjGx1XW3YTOfje31rmAXKHOehm4Bu48 +gr8gePq5cdQ2W9tA0Dnytb9wzH2SyPPIXRI7yNxaX9H8wYeDeeiKSSmQtfh1v5cV +7RXvm8F6hLJkkco/HOW3dAUwZFcKsUH+1eUJKLN18eDGwB8yGawjHvOKqcfg5Lf/ +TvC7hgcx7pDYaCCaqHaekgUwXbB2Enzqr1fdwoU1c01W5YuQAtAx5wk1bf34Yq/J +ph7wNXGvo88N0/EfP9AdVGmJzy7VuRXeVAOyjKAIeADMlwpjBRhcbs9m3dkqvoMb +SXKJxv/hFmNgEOvOlaFsXX1dbKg1v+C1AzKAFdiuAIa62JzASiEhigqNSdqdTsOh +8W8hdONuKKpe9zKedhBFAvuxhDgKmnySglYc +-----END CERTIFICATE----- diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-4.cert b/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-4.cert new file mode 100644 index 000000000..2b82edf6c --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-4.cert @@ -0,0 +1,121 @@ +subject=/C=AU/ST=Victoria/L=Melbourne/O=Telstra Corporation Limited/OU=Telstra Energy/CN=prod.energy.inside.telstra.com +issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Global SSL ICA G3 +-----BEGIN CERTIFICATE----- +MIIIJDCCBgygAwIBAgIUP9S/56XvOFzWk1vp1+7JJT17brEwDQYJKoZIhvcNAQEL +BQAwTTELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxIzAh +BgNVBAMTGlF1b1ZhZGlzIEdsb2JhbCBTU0wgSUNBIEczMB4XDTE5MDgyNzAzMTU1 +NFoXDTIxMDgyNzAzMjUwMFowgZwxCzAJBgNVBAYTAkFVMREwDwYDVQQIDAhWaWN0 +b3JpYTESMBAGA1UEBwwJTWVsYm91cm5lMSQwIgYDVQQKDBtUZWxzdHJhIENvcnBv +cmF0aW9uIExpbWl0ZWQxFzAVBgNVBAsMDlRlbHN0cmEgRW5lcmd5MScwJQYDVQQD +DB5wcm9kLmVuZXJneS5pbnNpZGUudGVsc3RyYS5jb20wggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQCrRouNZFOZwM1qyAU6v6ag9fzSx3y8zz36nR8HuqbA +/wqrbMmnpofwdx/9u1bilsHfJzIODv0hm7aGk+neTK3DIapiII3m0HKW0v+GLsl7 +JkDuc2o3XlakcXlA45qDKCZXbXZtY4/kdxKG0OSUZi7oQqohhYl/c/ojrTiey+4G +KhEVqWwOuQ1OC1DRw4qMH54d0koFxxSLPJ8JiiztLlK/e9n8BoJikj5fBqWy5R1F +bGXCdzjcfmPV6iSOzJShpUgj4ga91mO6j3S6LLfK5ibbTlY+pmUxUT+m9nKMon3h +mFptTYo9t9vUF/a/owjRxNLg01fJLNjYn8QV2vQvODGfAgMBAAGjggOqMIIDpjAJ +BgNVHRMEAjAAMB8GA1UdIwQYMBaAFLMSibWpSzW8FQDwgOnYeIfxE3x2MHMGCCsG +AQUFBwEBBGcwZTA3BggrBgEFBQcwAoYraHR0cDovL3RydXN0LnF1b3ZhZGlzZ2xv +YmFsLmNvbS9xdnNzbGczLmNydDAqBggrBgEFBQcwAYYeaHR0cDovL29jc3AucXVv +dmFkaXNnbG9iYWwuY29tMIGjBgNVHREEgZswgZiCHnByb2QuZW5lcmd5Lmluc2lk +ZS50ZWxzdHJhLmNvbYImcmVwb3J0cy5wcm9kLmVuZXJneS5pbnNpZGUudGVsc3Ry +YS5jb22CKGdyZWVuc3luYy5wcm9kLmVuZXJneS5pbnNpZGUudGVsc3RyYS5jb22C +JG5nb3NzLnByb2QuZW5lcmd5Lmluc2lkZS50ZWxzdHJhLmNvbTBRBgNVHSAESjBI +MEYGDCsGAQQBvlgAAmQBATA2MDQGCCsGAQUFBwIBFihodHRwOi8vd3d3LnF1b3Zh +ZGlzZ2xvYmFsLmNvbS9yZXBvc2l0b3J5MB0GA1UdJQQWMBQGCCsGAQUFBwMCBggr +BgEFBQcDATA6BgNVHR8EMzAxMC+gLaArhilodHRwOi8vY3JsLnF1b3ZhZGlzZ2xv +YmFsLmNvbS9xdnNzbGczLmNybDAdBgNVHQ4EFgQUoIME5TykVAI8VF5g0zeh0xdv +i3owDgYDVR0PAQH/BAQDAgWgMIIBfgYKKwYBBAHWeQIEAgSCAW4EggFqAWgAdgBW +FAaaL9fC7NP14b1Esj7HRna5vJkRXMDvlJhV1onQ3QAAAWzRG8r0AAAEAwBHMEUC +IQDShuQyYMiy7KKxWOzffolVIcPRgWD7ClNEbIcUATHKyQIgXnTZBXcpcbXBQXLs +tFuvY36TbKIYc2ql2nmdydGQ9wcAdgCkuQmQtBhYFIe7E6LMZ3AKPDWYBPkb37jj +d80OyA3cEAAAAWzRG8sAAAAEAwBHMEUCIGsLEoA9S7pNE3VoNZHxl2IAdeP3Dy2Q +Mk0rM46hp6CRAiEA08rOjswSdcn7qgDEoiyvlcrOTIFJAEcMlxSY65yLVUwAdgBV +gdTCFpA2AUrqC5tXPFPwwOQ4eHAlCBcvo6odBxPTDAAAAWzRG8q7AAAEAwBHMEUC +IAkVCcTFG8MBDI58JKIhMlPbzkdrKnYY3Kp9KqWuTAvMAiEAipeI7RCLBk8+T/p+ +gY7+vtFZxKDthcJMUpZz7qmica0wDQYJKoZIhvcNAQELBQADggIBAESe0U1qArxL +F2uk65q6x6HBcZuSocpceokzcUBv07Kxs6UJU9ybTbl8VYPuC+OUdpvut1kOJCJm 
+1TRrr5KMh+9as42xkbKRZnh5TQt7aHmVcLHLfA4x0UrELfNX3fVTDxwDAPAhE5oM +0w+d1foLakh7dXKKSxobEI3KRwFp19iuZeIqwI8XMWMr9ajhTC0T7D2QvKotpNBS +sNDHiIE3IXoa9o7UiOG8IfW0wAt7CEygv0F7ctHRTcQSP/SJIGYOUZ7uotULVL5i +elG31Y83Jx3sPNCy4IZfCip6Gw7MgsN2CZGApqi49edSqDWyRIfmCeXtMc7XI7Md +kqqWxbqGGTdYJCucoGqahqRR+BI9anEqTD9T5Gy0TpCi2pgp1i7czza71nfz0PcN +R0pw/1lqb9AqmJ2XELpBpo82B9XGple9thpincai7jPk3ezY5eEvDTmkHRlUFCp8 +8M66Ga19hZTgnHPWDKZYZzuZ7Lcl2WbapFOYYHJggSpBRy4GkH6eTSkUB9G9k8vU +gbvtS7sR5ggecbCBu0M4TWYmnUojR8UXtr0oOTlXysTHVGs5Tx9ChhOLyUqhX8tM +1zSDT8JJvbbw4RqpGzBKTNaO5nxRLgKVQOQdM8f1kjMr9/U58Lc4UiaTkJM14VfK +8GfV8+K/vRCBtME53ILvm1l18jtakG3c +-----END CERTIFICATE----- + +subject=/C=BM/O=QuoVadis Limited/CN=QuoVadis Global SSL ICA G3 +issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3 +-----BEGIN CERTIFICATE----- +MIIGFzCCA/+gAwIBAgIUftbnnMmtgcTIGT75XUQodw40ExcwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjExMDYxNDUwMThaFw0y +MjExMDYxNDUwMThaME0xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMSMwIQYDVQQDExpRdW9WYWRpcyBHbG9iYWwgU1NMIElDQSBHMzCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANf8Od17be6c6lTGJDhEXpmkTs4y +Q39Rr5VJyBeWCg06nSS71s6xF3sZvKcV0MbXlXCYM2ZX7cNTbJ81gs7uDsKFp+vK +EymiKyEiI2SImOtECNnSg+RVR4np/xz/UlC0yFUisH75cZsJ8T1pkGMfiEouR0EM +7O0uFgoboRfUP582TTWy0F7ynSA6YfGKnKj0OFwZJmGHVkLs1VevWjhj3R1fsPan +H05P5moePFnpQdj1FofoSxUHZ0c7VB+sUimboHm/uHNY1LOsk77qiSuVC5/yrdg3 +2EEfP/mxJYT4r/5UiD7VahySzeZHzZ2OibQm2AfgfMN3l57lCM3/WPQBhMAPS1jz +kE+7MjajM2f0aZctimW4Hasrj8AQnfAdHqZehbhtXaAlffNEzCdpNK584oCTVR7N +UR9iZFx83ruTqpo+GcLP/iSYqhM4g7fy45sNhU+IS+ca03zbxTl3TTlkofXunI5B +xxE30eGSQpDZ5+iUJcEOAuVKrlYocFbB3KF45hwcbzPWQ1DcO2jFAapOtQzeS+MZ +yZzT2YseJ8hQHKu8YrXZWwKaNfyl8kFkHUBDICowNEoZvBwRCQp8sgqL6YRZy0uD +JGxmnC2e0BVKSjcIvmq/CRWH7yiTk9eWm73xrsg9iIyD/kwJEnLyIk8tR5V8p/hc +1H2AjDrZH12PsZ45AgMBAAGjgfMwgfAwEgYDVR0TAQH/BAgwBgEB/wIBATARBgNV +HSAECjAIMAYGBFUdIAAwOgYIKwYBBQUHAQEELjAsMCoGCCsGAQUFBzABhh5odHRw +Oi8vb2NzcC5xdW92YWRpc2dsb2JhbC5jb20wDgYDVR0PAQH/BAQDAgEGMB8GA1Ud +IwQYMBaAFO3nb3Zav2DsSVvGpXe7chZxm8Q9MDsGA1UdHwQ0MDIwMKAuoCyGKmh0 +dHA6Ly9jcmwucXVvdmFkaXNnbG9iYWwuY29tL3F2cmNhMmczLmNybDAdBgNVHQ4E +FgQUsxKJtalLNbwVAPCA6dh4h/ETfHYwDQYJKoZIhvcNAQELBQADggIBAFGm1Fqp +RMiKr7a6h707M+km36PVXZnX1NZocCn36MrfRvphotbOCDm+GmRkar9ZMGhc8c/A +Vn7JSCjwF9jNOFIOUyNLq0w4luk+Pt2YFDbgF8IDdx53xIo8Gv05e9xpTvQYaIto +qeHbQjGXfSGc91olfX6JUwZlxxbhdJH+rxTFAg0jcbqToJoScWTfXSr1QRcNbSTs +Y4CPG6oULsnhVvrzgldGSK+DxFi2OKcDsOKkV7W4IGg8Do2L/M588AfBnV8ERzpl +qgMBBQxC2+0N6RdFHbmZt0HQE/NIg1s0xcjGx1XW3YTOfje31rmAXKHOehm4Bu48 +gr8gePq5cdQ2W9tA0Dnytb9wzH2SyPPIXRI7yNxaX9H8wYeDeeiKSSmQtfh1v5cV +7RXvm8F6hLJkkco/HOW3dAUwZFcKsUH+1eUJKLN18eDGwB8yGawjHvOKqcfg5Lf/ +TvC7hgcx7pDYaCCaqHaekgUwXbB2Enzqr1fdwoU1c01W5YuQAtAx5wk1bf34Yq/J +ph7wNXGvo88N0/EfP9AdVGmJzy7VuRXeVAOyjKAIeADMlwpjBRhcbs9m3dkqvoMb +SXKJxv/hFmNgEOvOlaFsXX1dbKg1v+C1AzKAFdiuAIa62JzASiEhigqNSdqdTsOh +8W8hdONuKKpe9zKedhBFAvuxhDgKmnySglYc +-----END CERTIFICATE----- + +subject=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3 +issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3 +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00 +MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf 
+qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMW +n4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym +c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+ +O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1 +o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0j +IaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq +IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz +8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh +vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l +7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG +cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD +ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66 +AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC +roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0Ga +W/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n +lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE ++V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV +csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtd +dbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg +KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeM +HVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4 +WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M +-----END CERTIFICATE----- + diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/simple-chain-a.cert b/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/simple-chain-a.cert new file mode 100644 index 000000000..1d9bbe213 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/simple-chain-a.cert @@ -0,0 +1,18 @@ +subject=/C=AU/ST=Victoria/L=Melbourne/O=Telstra Corporation Limited/OU=Telstra Energy/CN=dev.energy.inside.telstra.com +issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Global SSL ICA G3 +-----BEGIN CERTIFICATE----- +aaa +-----END CERTIFICATE----- + +subject=/C=BM/O=QuoVadis Limited/CN=QuoVadis Global SSL ICA G3 +issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3 +-----BEGIN CERTIFICATE----- +bbb +-----END CERTIFICATE----- + +subject=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3 +issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3 +-----BEGIN CERTIFICATE----- +ccc +-----END CERTIFICATE----- + diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/simple-chain-b.cert b/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/simple-chain-b.cert new file mode 100644 index 000000000..1d9bbe213 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/simple-chain-b.cert @@ -0,0 +1,18 @@ +subject=/C=AU/ST=Victoria/L=Melbourne/O=Telstra Corporation Limited/OU=Telstra Energy/CN=dev.energy.inside.telstra.com +issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Global SSL ICA G3 +-----BEGIN CERTIFICATE----- +aaa +-----END CERTIFICATE----- + +subject=/C=BM/O=QuoVadis Limited/CN=QuoVadis Global SSL ICA G3 +issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3 +-----BEGIN CERTIFICATE----- +bbb +-----END CERTIFICATE----- + +subject=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3 +issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3 +-----BEGIN CERTIFICATE----- +ccc +-----END CERTIFICATE----- + diff --git 
a/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/thezip.zip b/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/thezip.zip new file mode 100644 index 000000000..6eaefdd5e Binary files /dev/null and b/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/thezip.zip differ diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/.gitkeep b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.CreateStack_1.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.CreateStack_1.json new file mode 100644 index 000000000..36f1489ba --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.CreateStack_1.json @@ -0,0 +1,17 @@ +{ + "status_code": 200, + "data": { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "ResponseMetadata": { + "RetryAttempts": 0, + "HTTPStatusCode": 200, + "RequestId": "03fbfc36-b5d0-11e7-ae09-550cfe4b2358", + "HTTPHeaders": { + "x-amzn-requestid": "03fbfc36-b5d0-11e7-ae09-550cfe4b2358", + "date": "Fri, 20 Oct 2017 19:51:07 GMT", + "content-length": "393", + "content-type": "text/xml" + } + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DeleteStack_1.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DeleteStack_1.json new file mode 100644 index 000000000..d526155a5 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DeleteStack_1.json @@ -0,0 +1,16 @@ +{ + "status_code": 200, + "data": { + "ResponseMetadata": { + "RetryAttempts": 0, + "HTTPStatusCode": 200, + "RequestId": "170d1e02-b5d0-11e7-ae09-550cfe4b2358", + "HTTPHeaders": { + "x-amzn-requestid": "170d1e02-b5d0-11e7-ae09-550cfe4b2358", + "date": "Fri, 20 Oct 2017 19:51:39 GMT", + "content-length": "212", + "content-type": "text/xml" + } + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_1.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_1.json new file mode 100644 index 000000000..3758c77b7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_1.json @@ -0,0 +1,38 @@ +{ + "status_code": 200, + "data": { + "StackEvents": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "04032730-b5d0-11e7-86b8-503ac93168c5", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::CloudFormation::Stack", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 8, + "microsecond": 324000, + "year": 2017, + "day": 20, + "minute": 51 + }, + 
"ResourceStatusReason": "User Initiated", + "StackName": "ansible-test-basic-yaml", + "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "LogicalResourceId": "ansible-test-basic-yaml" + } + ], + "ResponseMetadata": { + "RetryAttempts": 0, + "HTTPStatusCode": 200, + "RequestId": "043d4a05-b5d0-11e7-ae09-550cfe4b2358", + "HTTPHeaders": { + "x-amzn-requestid": "043d4a05-b5d0-11e7-ae09-550cfe4b2358", + "date": "Fri, 20 Oct 2017 19:51:08 GMT", + "content-length": "1183", + "content-type": "text/xml" + } + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_2.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_2.json new file mode 100644 index 000000000..2c5a7655e --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_2.json @@ -0,0 +1,80 @@ +{ + "status_code": 200, + "data": { + "StackEvents": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:12.754Z", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::S3::Bucket", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 12, + "microsecond": 754000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "ResourceStatusReason": "Resource creation Initiated", + "StackName": "ansible-test-basic-yaml", + "ResourceProperties": "{}\n", + "PhysicalResourceId": "ansible-test-basic-yaml-mybucket-13m2y4v8bptj4", + "LogicalResourceId": "MyBucket" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:11.159Z", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::S3::Bucket", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 11, + "microsecond": 159000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "StackName": "ansible-test-basic-yaml", + "ResourceProperties": "{}\n", + "PhysicalResourceId": "", + "LogicalResourceId": "MyBucket" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "04032730-b5d0-11e7-86b8-503ac93168c5", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::CloudFormation::Stack", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 8, + "microsecond": 324000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "ResourceStatusReason": "User Initiated", + "StackName": "ansible-test-basic-yaml", + "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "LogicalResourceId": "ansible-test-basic-yaml" + } + ], + "ResponseMetadata": { + "RetryAttempts": 0, + "HTTPStatusCode": 200, + "RequestId": "075d9d71-b5d0-11e7-ae09-550cfe4b2358", + "HTTPHeaders": { + "x-amzn-requestid": "075d9d71-b5d0-11e7-ae09-550cfe4b2358", + "vary": "Accept-Encoding", + "content-length": "2730", + "content-type": "text/xml", + "date": "Fri, 20 Oct 2017 
19:51:13 GMT" + } + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_3.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_3.json new file mode 100644 index 000000000..cf2c24502 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_3.json @@ -0,0 +1,80 @@ +{ + "status_code": 200, + "data": { + "StackEvents": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:12.754Z", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::S3::Bucket", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 12, + "microsecond": 754000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "ResourceStatusReason": "Resource creation Initiated", + "StackName": "ansible-test-basic-yaml", + "ResourceProperties": "{}\n", + "PhysicalResourceId": "ansible-test-basic-yaml-mybucket-13m2y4v8bptj4", + "LogicalResourceId": "MyBucket" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:11.159Z", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::S3::Bucket", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 11, + "microsecond": 159000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "StackName": "ansible-test-basic-yaml", + "ResourceProperties": "{}\n", + "PhysicalResourceId": "", + "LogicalResourceId": "MyBucket" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "04032730-b5d0-11e7-86b8-503ac93168c5", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::CloudFormation::Stack", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 8, + "microsecond": 324000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "ResourceStatusReason": "User Initiated", + "StackName": "ansible-test-basic-yaml", + "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "LogicalResourceId": "ansible-test-basic-yaml" + } + ], + "ResponseMetadata": { + "RetryAttempts": 0, + "HTTPStatusCode": 200, + "RequestId": "0a7eb31b-b5d0-11e7-ae09-550cfe4b2358", + "HTTPHeaders": { + "x-amzn-requestid": "0a7eb31b-b5d0-11e7-ae09-550cfe4b2358", + "vary": "Accept-Encoding", + "content-length": "2730", + "content-type": "text/xml", + "date": "Fri, 20 Oct 2017 19:51:19 GMT" + } + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_4.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_4.json new file mode 100644 index 000000000..32ee9c1c5 --- /dev/null +++ 
b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_4.json @@ -0,0 +1,80 @@ +{ + "status_code": 200, + "data": { + "StackEvents": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:12.754Z", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::S3::Bucket", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 12, + "microsecond": 754000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "ResourceStatusReason": "Resource creation Initiated", + "StackName": "ansible-test-basic-yaml", + "ResourceProperties": "{}\n", + "PhysicalResourceId": "ansible-test-basic-yaml-mybucket-13m2y4v8bptj4", + "LogicalResourceId": "MyBucket" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:11.159Z", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::S3::Bucket", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 11, + "microsecond": 159000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "StackName": "ansible-test-basic-yaml", + "ResourceProperties": "{}\n", + "PhysicalResourceId": "", + "LogicalResourceId": "MyBucket" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "04032730-b5d0-11e7-86b8-503ac93168c5", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::CloudFormation::Stack", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 8, + "microsecond": 324000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "ResourceStatusReason": "User Initiated", + "StackName": "ansible-test-basic-yaml", + "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "LogicalResourceId": "ansible-test-basic-yaml" + } + ], + "ResponseMetadata": { + "RetryAttempts": 0, + "HTTPStatusCode": 200, + "RequestId": "0d9e1c06-b5d0-11e7-ae09-550cfe4b2358", + "HTTPHeaders": { + "x-amzn-requestid": "0d9e1c06-b5d0-11e7-ae09-550cfe4b2358", + "vary": "Accept-Encoding", + "content-length": "2730", + "content-type": "text/xml", + "date": "Fri, 20 Oct 2017 19:51:24 GMT" + } + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_5.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_5.json new file mode 100644 index 000000000..b547cd4d8 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_5.json @@ -0,0 +1,80 @@ +{ + "status_code": 200, + "data": { + "StackEvents": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:12.754Z", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::S3::Bucket", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, 
+ "second": 12, + "microsecond": 754000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "ResourceStatusReason": "Resource creation Initiated", + "StackName": "ansible-test-basic-yaml", + "ResourceProperties": "{}\n", + "PhysicalResourceId": "ansible-test-basic-yaml-mybucket-13m2y4v8bptj4", + "LogicalResourceId": "MyBucket" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:11.159Z", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::S3::Bucket", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 11, + "microsecond": 159000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "StackName": "ansible-test-basic-yaml", + "ResourceProperties": "{}\n", + "PhysicalResourceId": "", + "LogicalResourceId": "MyBucket" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "04032730-b5d0-11e7-86b8-503ac93168c5", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::CloudFormation::Stack", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 8, + "microsecond": 324000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "ResourceStatusReason": "User Initiated", + "StackName": "ansible-test-basic-yaml", + "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "LogicalResourceId": "ansible-test-basic-yaml" + } + ], + "ResponseMetadata": { + "RetryAttempts": 0, + "HTTPStatusCode": 200, + "RequestId": "10bd84ca-b5d0-11e7-ae09-550cfe4b2358", + "HTTPHeaders": { + "x-amzn-requestid": "10bd84ca-b5d0-11e7-ae09-550cfe4b2358", + "vary": "Accept-Encoding", + "content-length": "2730", + "content-type": "text/xml", + "date": "Fri, 20 Oct 2017 19:51:29 GMT" + } + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_6.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_6.json new file mode 100644 index 000000000..15bd043ab --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_6.json @@ -0,0 +1,100 @@ +{ + "status_code": 200, + "data": { + "StackEvents": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "MyBucket-CREATE_COMPLETE-2017-10-20T19:51:33.200Z", + "ResourceStatus": "CREATE_COMPLETE", + "ResourceType": "AWS::S3::Bucket", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 33, + "microsecond": 200000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "StackName": "ansible-test-basic-yaml", + "ResourceProperties": "{}\n", + "PhysicalResourceId": "ansible-test-basic-yaml-mybucket-13m2y4v8bptj4", + "LogicalResourceId": "MyBucket" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:12.754Z", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::S3::Bucket", + "Timestamp": { + 
"hour": 19, + "__class__": "datetime", + "month": 10, + "second": 12, + "microsecond": 754000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "ResourceStatusReason": "Resource creation Initiated", + "StackName": "ansible-test-basic-yaml", + "ResourceProperties": "{}\n", + "PhysicalResourceId": "ansible-test-basic-yaml-mybucket-13m2y4v8bptj4", + "LogicalResourceId": "MyBucket" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:11.159Z", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::S3::Bucket", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 11, + "microsecond": 159000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "StackName": "ansible-test-basic-yaml", + "ResourceProperties": "{}\n", + "PhysicalResourceId": "", + "LogicalResourceId": "MyBucket" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "04032730-b5d0-11e7-86b8-503ac93168c5", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::CloudFormation::Stack", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 8, + "microsecond": 324000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "ResourceStatusReason": "User Initiated", + "StackName": "ansible-test-basic-yaml", + "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "LogicalResourceId": "ansible-test-basic-yaml" + } + ], + "ResponseMetadata": { + "RetryAttempts": 0, + "HTTPStatusCode": 200, + "RequestId": "13dbb3fd-b5d0-11e7-ae09-550cfe4b2358", + "HTTPHeaders": { + "x-amzn-requestid": "13dbb3fd-b5d0-11e7-ae09-550cfe4b2358", + "vary": "Accept-Encoding", + "content-length": "3490", + "content-type": "text/xml", + "date": "Fri, 20 Oct 2017 19:51:34 GMT" + } + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_7.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_7.json new file mode 100644 index 000000000..87db7c59e --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_7.json @@ -0,0 +1,119 @@ +{ + "status_code": 200, + "data": { + "StackEvents": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "140d7220-b5d0-11e7-933f-50a686be7356", + "ResourceStatus": "CREATE_COMPLETE", + "ResourceType": "AWS::CloudFormation::Stack", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 35, + "microsecond": 121000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "StackName": "ansible-test-basic-yaml", + "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "LogicalResourceId": "ansible-test-basic-yaml" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "MyBucket-CREATE_COMPLETE-2017-10-20T19:51:33.200Z", + 
"ResourceStatus": "CREATE_COMPLETE", + "ResourceType": "AWS::S3::Bucket", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 33, + "microsecond": 200000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "StackName": "ansible-test-basic-yaml", + "ResourceProperties": "{}\n", + "PhysicalResourceId": "ansible-test-basic-yaml-mybucket-13m2y4v8bptj4", + "LogicalResourceId": "MyBucket" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:12.754Z", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::S3::Bucket", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 12, + "microsecond": 754000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "ResourceStatusReason": "Resource creation Initiated", + "StackName": "ansible-test-basic-yaml", + "ResourceProperties": "{}\n", + "PhysicalResourceId": "ansible-test-basic-yaml-mybucket-13m2y4v8bptj4", + "LogicalResourceId": "MyBucket" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:11.159Z", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::S3::Bucket", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 11, + "microsecond": 159000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "StackName": "ansible-test-basic-yaml", + "ResourceProperties": "{}\n", + "PhysicalResourceId": "", + "LogicalResourceId": "MyBucket" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "04032730-b5d0-11e7-86b8-503ac93168c5", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::CloudFormation::Stack", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 8, + "microsecond": 324000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "ResourceStatusReason": "User Initiated", + "StackName": "ansible-test-basic-yaml", + "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "LogicalResourceId": "ansible-test-basic-yaml" + } + ], + "ResponseMetadata": { + "RetryAttempts": 0, + "HTTPStatusCode": 200, + "RequestId": "16faf590-b5d0-11e7-ae09-550cfe4b2358", + "HTTPHeaders": { + "x-amzn-requestid": "16faf590-b5d0-11e7-ae09-550cfe4b2358", + "vary": "Accept-Encoding", + "content-length": "4276", + "content-type": "text/xml", + "date": "Fri, 20 Oct 2017 19:51:39 GMT" + } + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_1.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_1.json new file mode 100644 index 000000000..7acdb3acf --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_1.json @@ -0,0 +1,40 @@ +{ + "status_code": 200, + "data": { + "Stacks": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EnableTerminationProtection": false, 
+ "Description": "Basic template that creates an S3 bucket", + "Tags": [], + "StackStatusReason": "User Initiated", + "CreationTime": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 8, + "microsecond": 324000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "StackName": "ansible-test-basic-yaml", + "NotificationARNs": [], + "StackStatus": "CREATE_IN_PROGRESS", + "DisableRollback": false, + "RollbackConfiguration": {} + } + ], + "ResponseMetadata": { + "RetryAttempts": 0, + "HTTPStatusCode": 200, + "RequestId": "042974db-b5d0-11e7-ae09-550cfe4b2358", + "HTTPHeaders": { + "x-amzn-requestid": "042974db-b5d0-11e7-ae09-550cfe4b2358", + "date": "Fri, 20 Oct 2017 19:51:08 GMT", + "content-length": "975", + "content-type": "text/xml" + } + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_2.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_2.json new file mode 100644 index 000000000..0ed674b20 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_2.json @@ -0,0 +1,39 @@ +{ + "status_code": 200, + "data": { + "Stacks": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "Description": "Basic template that creates an S3 bucket", + "Tags": [], + "EnableTerminationProtection": false, + "CreationTime": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 8, + "microsecond": 324000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "StackName": "ansible-test-basic-yaml", + "NotificationARNs": [], + "StackStatus": "CREATE_IN_PROGRESS", + "DisableRollback": false, + "RollbackConfiguration": {} + } + ], + "ResponseMetadata": { + "RetryAttempts": 0, + "HTTPStatusCode": 200, + "RequestId": "074b26dc-b5d0-11e7-ae09-550cfe4b2358", + "HTTPHeaders": { + "x-amzn-requestid": "074b26dc-b5d0-11e7-ae09-550cfe4b2358", + "date": "Fri, 20 Oct 2017 19:51:13 GMT", + "content-length": "913", + "content-type": "text/xml" + } + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_3.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_3.json new file mode 100644 index 000000000..633c5e159 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_3.json @@ -0,0 +1,39 @@ +{ + "status_code": 200, + "data": { + "Stacks": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "Description": "Basic template that creates an S3 bucket", + "Tags": [], + "EnableTerminationProtection": false, + "CreationTime": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 8, + "microsecond": 324000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "StackName": "ansible-test-basic-yaml", + "NotificationARNs": [], + "StackStatus": "CREATE_IN_PROGRESS", + "DisableRollback": false, + "RollbackConfiguration": {} + } + ], + "ResponseMetadata": { + "RetryAttempts": 0, + 
"HTTPStatusCode": 200, + "RequestId": "0a6cb1b3-b5d0-11e7-ae09-550cfe4b2358", + "HTTPHeaders": { + "x-amzn-requestid": "0a6cb1b3-b5d0-11e7-ae09-550cfe4b2358", + "date": "Fri, 20 Oct 2017 19:51:18 GMT", + "content-length": "913", + "content-type": "text/xml" + } + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_4.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_4.json new file mode 100644 index 000000000..e5ca69dda --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_4.json @@ -0,0 +1,39 @@ +{ + "status_code": 200, + "data": { + "Stacks": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "Description": "Basic template that creates an S3 bucket", + "Tags": [], + "EnableTerminationProtection": false, + "CreationTime": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 8, + "microsecond": 324000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "StackName": "ansible-test-basic-yaml", + "NotificationARNs": [], + "StackStatus": "CREATE_IN_PROGRESS", + "DisableRollback": false, + "RollbackConfiguration": {} + } + ], + "ResponseMetadata": { + "RetryAttempts": 0, + "HTTPStatusCode": 200, + "RequestId": "0d8cddf1-b5d0-11e7-ae09-550cfe4b2358", + "HTTPHeaders": { + "x-amzn-requestid": "0d8cddf1-b5d0-11e7-ae09-550cfe4b2358", + "date": "Fri, 20 Oct 2017 19:51:23 GMT", + "content-length": "913", + "content-type": "text/xml" + } + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_5.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_5.json new file mode 100644 index 000000000..31a3057cd --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_5.json @@ -0,0 +1,39 @@ +{ + "status_code": 200, + "data": { + "Stacks": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "Description": "Basic template that creates an S3 bucket", + "Tags": [], + "EnableTerminationProtection": false, + "CreationTime": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 8, + "microsecond": 324000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "StackName": "ansible-test-basic-yaml", + "NotificationARNs": [], + "StackStatus": "CREATE_IN_PROGRESS", + "DisableRollback": false, + "RollbackConfiguration": {} + } + ], + "ResponseMetadata": { + "RetryAttempts": 0, + "HTTPStatusCode": 200, + "RequestId": "10ac94d5-b5d0-11e7-ae09-550cfe4b2358", + "HTTPHeaders": { + "x-amzn-requestid": "10ac94d5-b5d0-11e7-ae09-550cfe4b2358", + "date": "Fri, 20 Oct 2017 19:51:28 GMT", + "content-length": "913", + "content-type": "text/xml" + } + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_6.json 
b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_6.json new file mode 100644 index 000000000..90ca7467c --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_6.json @@ -0,0 +1,39 @@ +{ + "status_code": 200, + "data": { + "Stacks": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "Description": "Basic template that creates an S3 bucket", + "Tags": [], + "EnableTerminationProtection": false, + "CreationTime": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 8, + "microsecond": 324000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "StackName": "ansible-test-basic-yaml", + "NotificationARNs": [], + "StackStatus": "CREATE_IN_PROGRESS", + "DisableRollback": false, + "RollbackConfiguration": {} + } + ], + "ResponseMetadata": { + "RetryAttempts": 0, + "HTTPStatusCode": 200, + "RequestId": "13caeb1b-b5d0-11e7-ae09-550cfe4b2358", + "HTTPHeaders": { + "x-amzn-requestid": "13caeb1b-b5d0-11e7-ae09-550cfe4b2358", + "date": "Fri, 20 Oct 2017 19:51:33 GMT", + "content-length": "913", + "content-type": "text/xml" + } + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_7.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_7.json new file mode 100644 index 000000000..905c04f48 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_7.json @@ -0,0 +1,45 @@ +{ + "status_code": 200, + "data": { + "Stacks": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "Description": "Basic template that creates an S3 bucket", + "Tags": [], + "Outputs": [ + { + "OutputKey": "TheName", + "OutputValue": "ansible-test-basic-yaml-mybucket-13m2y4v8bptj4" + } + ], + "EnableTerminationProtection": false, + "CreationTime": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 8, + "microsecond": 324000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "StackName": "ansible-test-basic-yaml", + "NotificationARNs": [], + "StackStatus": "CREATE_COMPLETE", + "DisableRollback": false, + "RollbackConfiguration": {} + } + ], + "ResponseMetadata": { + "RetryAttempts": 0, + "HTTPStatusCode": 200, + "RequestId": "16ea53bb-b5d0-11e7-ae09-550cfe4b2358", + "HTTPHeaders": { + "x-amzn-requestid": "16ea53bb-b5d0-11e7-ae09-550cfe4b2358", + "date": "Fri, 20 Oct 2017 19:51:39 GMT", + "content-length": "1115", + "content-type": "text/xml" + } + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.CreateStack_1.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.CreateStack_1.json new file mode 100644 index 000000000..9084936a4 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.CreateStack_1.json @@ 
-0,0 +1,17 @@ +{ + "status_code": 200, + "data": { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "ResponseMetadata": { + "RetryAttempts": 0, + "HTTPStatusCode": 200, + "RequestId": "03fbfc36-b5d0-11e7-ae09-550cfe4b2358", + "HTTPHeaders": { + "x-amzn-requestid": "03fbfc36-b5d0-11e7-ae09-550cfe4b2358", + "date": "Fri, 20 Oct 2017 19:51:07 GMT", + "content-length": "393", + "content-type": "text/xml" + } + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DeleteStack_1.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DeleteStack_1.json new file mode 100644 index 000000000..d526155a5 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DeleteStack_1.json @@ -0,0 +1,16 @@ +{ + "status_code": 200, + "data": { + "ResponseMetadata": { + "RetryAttempts": 0, + "HTTPStatusCode": 200, + "RequestId": "170d1e02-b5d0-11e7-ae09-550cfe4b2358", + "HTTPHeaders": { + "x-amzn-requestid": "170d1e02-b5d0-11e7-ae09-550cfe4b2358", + "date": "Fri, 20 Oct 2017 19:51:39 GMT", + "content-length": "212", + "content-type": "text/xml" + } + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_1.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_1.json new file mode 100644 index 000000000..399eab496 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_1.json @@ -0,0 +1,39 @@ +{ + "status_code": 200, + "data": { + "StackEvents": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "04032730-b5d0-11e7-86b8-503ac93168c5", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::CloudFormation::Stack", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 8, + "microsecond": 324000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "ResourceStatusReason": "User Initiated", + "StackName": "ansible-test-client-request-token-yaml", + "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf", + "LogicalResourceId": "ansible-test-client-request-token-yaml" + } + ], + "ResponseMetadata": { + "RetryAttempts": 0, + "HTTPStatusCode": 200, + "RequestId": "043d4a05-b5d0-11e7-ae09-550cfe4b2358", + "HTTPHeaders": { + "x-amzn-requestid": "043d4a05-b5d0-11e7-ae09-550cfe4b2358", + "date": "Fri, 20 Oct 2017 19:51:08 GMT", + "content-length": "1183", + "content-type": "text/xml" + } + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_2.json 
b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_2.json new file mode 100644 index 000000000..f57dbf536 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_2.json @@ -0,0 +1,83 @@ +{ + "status_code": 200, + "data": { + "StackEvents": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:12.754Z", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::S3::Bucket", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 12, + "microsecond": 754000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "ResourceStatusReason": "Resource creation Initiated", + "StackName": "ansible-test-client-request-token-yaml", + "ResourceProperties": "{}\n", + "PhysicalResourceId": "ansible-test-client-request-token-yaml-mybucket-13m2y4v8bptj4", + "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf", + "LogicalResourceId": "MyBucket" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:11.159Z", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::S3::Bucket", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 11, + "microsecond": 159000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "StackName": "ansible-test-client-request-token-yaml", + "ResourceProperties": "{}\n", + "PhysicalResourceId": "", + "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf", + "LogicalResourceId": "MyBucket" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "04032730-b5d0-11e7-86b8-503ac93168c5", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::CloudFormation::Stack", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 8, + "microsecond": 324000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "ResourceStatusReason": "User Initiated", + "StackName": "ansible-test-client-request-token-yaml", + "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf", + "LogicalResourceId": "ansible-test-client-request-token-yaml" + } + ], + "ResponseMetadata": { + "RetryAttempts": 0, + "HTTPStatusCode": 200, + "RequestId": "075d9d71-b5d0-11e7-ae09-550cfe4b2358", + "HTTPHeaders": { + "x-amzn-requestid": "075d9d71-b5d0-11e7-ae09-550cfe4b2358", + "vary": "Accept-Encoding", + "content-length": "2730", + "content-type": "text/xml", + "date": "Fri, 20 Oct 2017 19:51:13 GMT" + } + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_3.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_3.json new file mode 100644 index 
000000000..c8b4d694d --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_3.json @@ -0,0 +1,83 @@ +{ + "status_code": 200, + "data": { + "StackEvents": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:12.754Z", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::S3::Bucket", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 12, + "microsecond": 754000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "ResourceStatusReason": "Resource creation Initiated", + "StackName": "ansible-test-client-request-token-yaml", + "ResourceProperties": "{}\n", + "PhysicalResourceId": "ansible-test-client-request-token-yaml-mybucket-13m2y4v8bptj4", + "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf", + "LogicalResourceId": "MyBucket" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:11.159Z", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::S3::Bucket", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 11, + "microsecond": 159000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "StackName": "ansible-test-client-request-token-yaml", + "ResourceProperties": "{}\n", + "PhysicalResourceId": "", + "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf", + "LogicalResourceId": "MyBucket" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "04032730-b5d0-11e7-86b8-503ac93168c5", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::CloudFormation::Stack", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 8, + "microsecond": 324000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "ResourceStatusReason": "User Initiated", + "StackName": "ansible-test-client-request-token-yaml", + "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf", + "LogicalResourceId": "ansible-test-client-request-token-yaml" + } + ], + "ResponseMetadata": { + "RetryAttempts": 0, + "HTTPStatusCode": 200, + "RequestId": "0a7eb31b-b5d0-11e7-ae09-550cfe4b2358", + "HTTPHeaders": { + "x-amzn-requestid": "0a7eb31b-b5d0-11e7-ae09-550cfe4b2358", + "vary": "Accept-Encoding", + "content-length": "2730", + "content-type": "text/xml", + "date": "Fri, 20 Oct 2017 19:51:19 GMT" + } + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_4.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_4.json new file mode 100644 index 000000000..8bb03eded --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_4.json 
@@ -0,0 +1,83 @@ +{ + "status_code": 200, + "data": { + "StackEvents": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:12.754Z", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::S3::Bucket", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 12, + "microsecond": 754000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "ResourceStatusReason": "Resource creation Initiated", + "StackName": "ansible-test-client-request-token-yaml", + "ResourceProperties": "{}\n", + "PhysicalResourceId": "ansible-test-client-request-token-yaml-mybucket-13m2y4v8bptj4", + "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf", + "LogicalResourceId": "MyBucket" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:11.159Z", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::S3::Bucket", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 11, + "microsecond": 159000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "StackName": "ansible-test-client-request-token-yaml", + "ResourceProperties": "{}\n", + "PhysicalResourceId": "", + "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf", + "LogicalResourceId": "MyBucket" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "04032730-b5d0-11e7-86b8-503ac93168c5", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::CloudFormation::Stack", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 8, + "microsecond": 324000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "ResourceStatusReason": "User Initiated", + "StackName": "ansible-test-client-request-token-yaml", + "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf", + "LogicalResourceId": "ansible-test-client-request-token-yaml" + } + ], + "ResponseMetadata": { + "RetryAttempts": 0, + "HTTPStatusCode": 200, + "RequestId": "0d9e1c06-b5d0-11e7-ae09-550cfe4b2358", + "HTTPHeaders": { + "x-amzn-requestid": "0d9e1c06-b5d0-11e7-ae09-550cfe4b2358", + "vary": "Accept-Encoding", + "content-length": "2730", + "content-type": "text/xml", + "date": "Fri, 20 Oct 2017 19:51:24 GMT" + } + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_5.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_5.json new file mode 100644 index 000000000..311949d08 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_5.json @@ -0,0 +1,83 @@ +{ + "status_code": 200, + "data": { + "StackEvents": [ + { + "StackId": 
"arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:12.754Z", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::S3::Bucket", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 12, + "microsecond": 754000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "ResourceStatusReason": "Resource creation Initiated", + "StackName": "ansible-test-client-request-token-yaml", + "ResourceProperties": "{}\n", + "PhysicalResourceId": "ansible-test-client-request-token-yaml-mybucket-13m2y4v8bptj4", + "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf", + "LogicalResourceId": "MyBucket" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:11.159Z", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::S3::Bucket", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 11, + "microsecond": 159000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "StackName": "ansible-test-client-request-token-yaml", + "ResourceProperties": "{}\n", + "PhysicalResourceId": "", + "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf", + "LogicalResourceId": "MyBucket" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "04032730-b5d0-11e7-86b8-503ac93168c5", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::CloudFormation::Stack", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 8, + "microsecond": 324000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "ResourceStatusReason": "User Initiated", + "StackName": "ansible-test-client-request-token-yaml", + "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf", + "LogicalResourceId": "ansible-test-client-request-token-yaml" + } + ], + "ResponseMetadata": { + "RetryAttempts": 0, + "HTTPStatusCode": 200, + "RequestId": "10bd84ca-b5d0-11e7-ae09-550cfe4b2358", + "HTTPHeaders": { + "x-amzn-requestid": "10bd84ca-b5d0-11e7-ae09-550cfe4b2358", + "vary": "Accept-Encoding", + "content-length": "2730", + "content-type": "text/xml", + "date": "Fri, 20 Oct 2017 19:51:29 GMT" + } + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_6.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_6.json new file mode 100644 index 000000000..ddab94a51 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_6.json @@ -0,0 +1,104 @@ +{ + "status_code": 200, + "data": { + "StackEvents": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "MyBucket-CREATE_COMPLETE-2017-10-20T19:51:33.200Z", + 
"ResourceStatus": "CREATE_COMPLETE", + "ResourceType": "AWS::S3::Bucket", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 33, + "microsecond": 200000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "StackName": "ansible-test-client-request-token-yaml", + "ResourceProperties": "{}\n", + "PhysicalResourceId": "ansible-test-client-request-token-yaml-mybucket-13m2y4v8bptj4", + "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf", + "LogicalResourceId": "MyBucket" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:12.754Z", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::S3::Bucket", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 12, + "microsecond": 754000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "ResourceStatusReason": "Resource creation Initiated", + "StackName": "ansible-test-client-request-token-yaml", + "ResourceProperties": "{}\n", + "PhysicalResourceId": "ansible-test-client-request-token-yaml-mybucket-13m2y4v8bptj4", + "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf", + "LogicalResourceId": "MyBucket" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:11.159Z", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::S3::Bucket", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 11, + "microsecond": 159000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "StackName": "ansible-test-client-request-token-yaml", + "ResourceProperties": "{}\n", + "PhysicalResourceId": "", + "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf", + "LogicalResourceId": "MyBucket" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "04032730-b5d0-11e7-86b8-503ac93168c5", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::CloudFormation::Stack", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 8, + "microsecond": 324000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "ResourceStatusReason": "User Initiated", + "StackName": "ansible-test-client-request-token-yaml", + "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf", + "LogicalResourceId": "ansible-test-client-request-token-yaml" + } + ], + "ResponseMetadata": { + "RetryAttempts": 0, + "HTTPStatusCode": 200, + "RequestId": "13dbb3fd-b5d0-11e7-ae09-550cfe4b2358", + "HTTPHeaders": { + "x-amzn-requestid": "13dbb3fd-b5d0-11e7-ae09-550cfe4b2358", + "vary": "Accept-Encoding", + "content-length": "3490", + "content-type": "text/xml", + "date": "Fri, 20 Oct 2017 19:51:34 GMT" + } + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_7.json 
b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_7.json new file mode 100644 index 000000000..86da5fb45 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_7.json @@ -0,0 +1,124 @@ +{ + "status_code": 200, + "data": { + "StackEvents": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "140d7220-b5d0-11e7-933f-50a686be7356", + "ResourceStatus": "CREATE_COMPLETE", + "ResourceType": "AWS::CloudFormation::Stack", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 35, + "microsecond": 121000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "StackName": "ansible-test-client-request-token-yaml", + "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf", + "LogicalResourceId": "ansible-test-client-request-token-yaml" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "MyBucket-CREATE_COMPLETE-2017-10-20T19:51:33.200Z", + "ResourceStatus": "CREATE_COMPLETE", + "ResourceType": "AWS::S3::Bucket", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 33, + "microsecond": 200000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "StackName": "ansible-test-client-request-token-yaml", + "ResourceProperties": "{}\n", + "PhysicalResourceId": "ansible-test-client-request-token-yaml-mybucket-13m2y4v8bptj4", + "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf", + "LogicalResourceId": "MyBucket" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:12.754Z", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::S3::Bucket", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 12, + "microsecond": 754000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "ResourceStatusReason": "Resource creation Initiated", + "StackName": "ansible-test-client-request-token-yaml", + "ResourceProperties": "{}\n", + "PhysicalResourceId": "ansible-test-client-request-token-yaml-mybucket-13m2y4v8bptj4", + "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf", + "LogicalResourceId": "MyBucket" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:11.159Z", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::S3::Bucket", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 11, + "microsecond": 159000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "StackName": "ansible-test-client-request-token-yaml", + "ResourceProperties": "{}\n", + "PhysicalResourceId": "", + "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf", + "LogicalResourceId": "MyBucket" + }, + { + "StackId": 
"arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EventId": "04032730-b5d0-11e7-86b8-503ac93168c5", + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceType": "AWS::CloudFormation::Stack", + "Timestamp": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 8, + "microsecond": 324000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "ResourceStatusReason": "User Initiated", + "StackName": "ansible-test-client-request-token-yaml", + "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf", + "LogicalResourceId": "ansible-test-client-request-token-yaml" + } + ], + "ResponseMetadata": { + "RetryAttempts": 0, + "HTTPStatusCode": 200, + "RequestId": "16faf590-b5d0-11e7-ae09-550cfe4b2358", + "HTTPHeaders": { + "x-amzn-requestid": "16faf590-b5d0-11e7-ae09-550cfe4b2358", + "vary": "Accept-Encoding", + "content-length": "4276", + "content-type": "text/xml", + "date": "Fri, 20 Oct 2017 19:51:39 GMT" + } + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_1.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_1.json new file mode 100644 index 000000000..7734b0ca3 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_1.json @@ -0,0 +1,40 @@ +{ + "status_code": 200, + "data": { + "Stacks": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "EnableTerminationProtection": false, + "Description": "Basic template that creates an S3 bucket", + "Tags": [], + "StackStatusReason": "User Initiated", + "CreationTime": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 8, + "microsecond": 324000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "StackName": "ansible-test-client-request-token-yaml", + "NotificationARNs": [], + "StackStatus": "CREATE_IN_PROGRESS", + "DisableRollback": false, + "RollbackConfiguration": {} + } + ], + "ResponseMetadata": { + "RetryAttempts": 0, + "HTTPStatusCode": 200, + "RequestId": "042974db-b5d0-11e7-ae09-550cfe4b2358", + "HTTPHeaders": { + "x-amzn-requestid": "042974db-b5d0-11e7-ae09-550cfe4b2358", + "date": "Fri, 20 Oct 2017 19:51:08 GMT", + "content-length": "975", + "content-type": "text/xml" + } + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_2.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_2.json new file mode 100644 index 000000000..0a1e74d70 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_2.json @@ -0,0 +1,39 @@ +{ + "status_code": 200, + "data": { + "Stacks": [ + { + "StackId": 
"arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "Description": "Basic template that creates an S3 bucket", + "Tags": [], + "EnableTerminationProtection": false, + "CreationTime": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 8, + "microsecond": 324000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "StackName": "ansible-test-client-request-token-yaml", + "NotificationARNs": [], + "StackStatus": "CREATE_IN_PROGRESS", + "DisableRollback": false, + "RollbackConfiguration": {} + } + ], + "ResponseMetadata": { + "RetryAttempts": 0, + "HTTPStatusCode": 200, + "RequestId": "074b26dc-b5d0-11e7-ae09-550cfe4b2358", + "HTTPHeaders": { + "x-amzn-requestid": "074b26dc-b5d0-11e7-ae09-550cfe4b2358", + "date": "Fri, 20 Oct 2017 19:51:13 GMT", + "content-length": "913", + "content-type": "text/xml" + } + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_3.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_3.json new file mode 100644 index 000000000..12d5839f8 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_3.json @@ -0,0 +1,39 @@ +{ + "status_code": 200, + "data": { + "Stacks": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "Description": "Basic template that creates an S3 bucket", + "Tags": [], + "EnableTerminationProtection": false, + "CreationTime": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 8, + "microsecond": 324000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "StackName": "ansible-test-client-request-token-yaml", + "NotificationARNs": [], + "StackStatus": "CREATE_IN_PROGRESS", + "DisableRollback": false, + "RollbackConfiguration": {} + } + ], + "ResponseMetadata": { + "RetryAttempts": 0, + "HTTPStatusCode": 200, + "RequestId": "0a6cb1b3-b5d0-11e7-ae09-550cfe4b2358", + "HTTPHeaders": { + "x-amzn-requestid": "0a6cb1b3-b5d0-11e7-ae09-550cfe4b2358", + "date": "Fri, 20 Oct 2017 19:51:18 GMT", + "content-length": "913", + "content-type": "text/xml" + } + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_4.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_4.json new file mode 100644 index 000000000..a3cb0a8ca --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_4.json @@ -0,0 +1,39 @@ +{ + "status_code": 200, + "data": { + "Stacks": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "Description": "Basic template that creates an S3 bucket", + "Tags": [], + "EnableTerminationProtection": false, + "CreationTime": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 8, + "microsecond": 324000, + 
"year": 2017, + "day": 20, + "minute": 51 + }, + "StackName": "ansible-test-client-request-token-yaml", + "NotificationARNs": [], + "StackStatus": "CREATE_IN_PROGRESS", + "DisableRollback": false, + "RollbackConfiguration": {} + } + ], + "ResponseMetadata": { + "RetryAttempts": 0, + "HTTPStatusCode": 200, + "RequestId": "0d8cddf1-b5d0-11e7-ae09-550cfe4b2358", + "HTTPHeaders": { + "x-amzn-requestid": "0d8cddf1-b5d0-11e7-ae09-550cfe4b2358", + "date": "Fri, 20 Oct 2017 19:51:23 GMT", + "content-length": "913", + "content-type": "text/xml" + } + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_5.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_5.json new file mode 100644 index 000000000..251d71fa1 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_5.json @@ -0,0 +1,39 @@ +{ + "status_code": 200, + "data": { + "Stacks": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "Description": "Basic template that creates an S3 bucket", + "Tags": [], + "EnableTerminationProtection": false, + "CreationTime": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 8, + "microsecond": 324000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "StackName": "ansible-test-client-request-token-yaml", + "NotificationARNs": [], + "StackStatus": "CREATE_IN_PROGRESS", + "DisableRollback": false, + "RollbackConfiguration": {} + } + ], + "ResponseMetadata": { + "RetryAttempts": 0, + "HTTPStatusCode": 200, + "RequestId": "10ac94d5-b5d0-11e7-ae09-550cfe4b2358", + "HTTPHeaders": { + "x-amzn-requestid": "10ac94d5-b5d0-11e7-ae09-550cfe4b2358", + "date": "Fri, 20 Oct 2017 19:51:28 GMT", + "content-length": "913", + "content-type": "text/xml" + } + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_6.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_6.json new file mode 100644 index 000000000..2251125f6 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_6.json @@ -0,0 +1,39 @@ +{ + "status_code": 200, + "data": { + "Stacks": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "Description": "Basic template that creates an S3 bucket", + "Tags": [], + "EnableTerminationProtection": false, + "CreationTime": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 8, + "microsecond": 324000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "StackName": "ansible-test-client-request-token-yaml", + "NotificationARNs": [], + "StackStatus": "CREATE_IN_PROGRESS", + "DisableRollback": false, + "RollbackConfiguration": {} + } + ], + "ResponseMetadata": { + "RetryAttempts": 0, + "HTTPStatusCode": 200, + "RequestId": "13caeb1b-b5d0-11e7-ae09-550cfe4b2358", + 
"HTTPHeaders": { + "x-amzn-requestid": "13caeb1b-b5d0-11e7-ae09-550cfe4b2358", + "date": "Fri, 20 Oct 2017 19:51:33 GMT", + "content-length": "913", + "content-type": "text/xml" + } + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_7.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_7.json new file mode 100644 index 000000000..aa8c7fd09 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_7.json @@ -0,0 +1,45 @@ +{ + "status_code": 200, + "data": { + "Stacks": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5", + "Description": "Basic template that creates an S3 bucket", + "Tags": [], + "Outputs": [ + { + "OutputKey": "TheName", + "OutputValue": "ansible-test-client-request-token-yaml-mybucket-13m2y4v8bptj4" + } + ], + "EnableTerminationProtection": false, + "CreationTime": { + "hour": 19, + "__class__": "datetime", + "month": 10, + "second": 8, + "microsecond": 324000, + "year": 2017, + "day": 20, + "minute": 51 + }, + "StackName": "ansible-test-client-request-token-yaml", + "NotificationARNs": [], + "StackStatus": "CREATE_COMPLETE", + "DisableRollback": false, + "RollbackConfiguration": {} + } + ], + "ResponseMetadata": { + "RetryAttempts": 0, + "HTTPStatusCode": 200, + "RequestId": "16ea53bb-b5d0-11e7-ae09-550cfe4b2358", + "HTTPHeaders": { + "x-amzn-requestid": "16ea53bb-b5d0-11e7-ae09-550cfe4b2358", + "date": "Fri, 20 Oct 2017 19:51:39 GMT", + "content-length": "1115", + "content-type": "text/xml" + } + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/delete_nonexistent_stack/cloudformation.DescribeStackEvents_1.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/delete_nonexistent_stack/cloudformation.DescribeStackEvents_1.json new file mode 100644 index 000000000..109feacd9 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/delete_nonexistent_stack/cloudformation.DescribeStackEvents_1.json @@ -0,0 +1,22 @@ +{ + "status_code": 400, + "data": { + "ResponseMetadata": { + "RetryAttempts": 0, + "HTTPStatusCode": 400, + "RequestId": "179d9e46-b5d0-11e7-ae09-550cfe4b2358", + "HTTPHeaders": { + "x-amzn-requestid": "179d9e46-b5d0-11e7-ae09-550cfe4b2358", + "date": "Fri, 20 Oct 2017 19:51:40 GMT", + "content-length": "301", + "content-type": "text/xml", + "connection": "close" + } + }, + "Error": { + "Message": "Stack [ansible-test-nonexist] does not exist", + "Code": "ValidationError", + "Type": "Sender" + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/delete_nonexistent_stack/cloudformation.DescribeStackEvents_2.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/delete_nonexistent_stack/cloudformation.DescribeStackEvents_2.json new file mode 100644 index 000000000..589f92cc6 --- /dev/null +++ 
b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/delete_nonexistent_stack/cloudformation.DescribeStackEvents_2.json @@ -0,0 +1,22 @@ +{ + "status_code": 400, + "data": { + "ResponseMetadata": { + "RetryAttempts": 0, + "HTTPStatusCode": 400, + "RequestId": "17d80f44-b5d0-11e7-80c4-9f499f779cdb", + "HTTPHeaders": { + "x-amzn-requestid": "17d80f44-b5d0-11e7-80c4-9f499f779cdb", + "date": "Fri, 20 Oct 2017 19:51:40 GMT", + "content-length": "301", + "content-type": "text/xml", + "connection": "close" + } + }, + "Error": { + "Message": "Stack [ansible-test-nonexist] does not exist", + "Code": "ValidationError", + "Type": "Sender" + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/delete_nonexistent_stack/cloudformation.DescribeStacks_1.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/delete_nonexistent_stack/cloudformation.DescribeStacks_1.json new file mode 100644 index 000000000..ea227415c --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/delete_nonexistent_stack/cloudformation.DescribeStacks_1.json @@ -0,0 +1,22 @@ +{ + "status_code": 400, + "data": { + "ResponseMetadata": { + "RetryAttempts": 0, + "HTTPStatusCode": 400, + "RequestId": "175fab26-b5d0-11e7-9d9b-45815c77100a", + "HTTPHeaders": { + "x-amzn-requestid": "175fab26-b5d0-11e7-9d9b-45815c77100a", + "date": "Fri, 20 Oct 2017 19:51:40 GMT", + "content-length": "307", + "content-type": "text/xml", + "connection": "close" + } + }, + "Error": { + "Message": "Stack with id ansible-test-nonexist does not exist", + "Code": "ValidationError", + "Type": "Sender" + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/get_nonexistent_stack/cloudformation.DescribeStacks_1.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/get_nonexistent_stack/cloudformation.DescribeStacks_1.json new file mode 100644 index 000000000..cf29c6c76 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/get_nonexistent_stack/cloudformation.DescribeStacks_1.json @@ -0,0 +1,22 @@ +{ + "status_code": 400, + "data": { + "ResponseMetadata": { + "RetryAttempts": 0, + "HTTPStatusCode": 400, + "RequestId": "181566c8-b5d0-11e7-9d9b-45815c77100a", + "HTTPHeaders": { + "x-amzn-requestid": "181566c8-b5d0-11e7-9d9b-45815c77100a", + "date": "Fri, 20 Oct 2017 19:51:41 GMT", + "content-length": "307", + "content-type": "text/xml", + "connection": "close" + } + }, + "Error": { + "Message": "Stack with id ansible-test-nonexist does not exist", + "Code": "ValidationError", + "Type": "Sender" + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/invalid_template_json/cloudformation.CreateStack_1.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/invalid_template_json/cloudformation.CreateStack_1.json new file mode 100644 index 000000000..7ad6cac96 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/invalid_template_json/cloudformation.CreateStack_1.json @@ -0,0 +1,22 @@ +{ + "status_code": 400, + "data": { + "ResponseMetadata": { + "RetryAttempts": 0, + 
"HTTPStatusCode": 400, + "RequestId": "03b1107f-b5d0-11e7-ae09-550cfe4b2358", + "HTTPHeaders": { + "x-amzn-requestid": "03b1107f-b5d0-11e7-ae09-550cfe4b2358", + "date": "Fri, 20 Oct 2017 19:51:07 GMT", + "content-length": "320", + "content-type": "text/xml", + "connection": "close" + } + }, + "Error": { + "Message": "Template format error: JSON not well-formed. (line 4, column 4)", + "Code": "ValidationError", + "Type": "Sender" + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.CreateStack_1.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.CreateStack_1.json new file mode 100644 index 000000000..64c8e1f23 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.CreateStack_1.json @@ -0,0 +1,17 @@ +{ + "status_code": 200, + "data": { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "ResponseMetadata": { + "RequestId": "c741ebcd-3a0e-11e9-b25f-d1217e6893bf", + "HTTPStatusCode": 200, + "HTTPHeaders": { + "x-amzn-requestid": "c741ebcd-3a0e-11e9-b25f-d1217e6893bf", + "content-type": "text/xml", + "content-length": "407", + "date": "Tue, 26 Feb 2019 21:37:55 GMT" + }, + "RetryAttempts": 0 + } + } +} diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_1.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_1.json new file mode 100644 index 000000000..7a6a49644 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_1.json @@ -0,0 +1,38 @@ +{ + "status_code": 200, + "data": { + "StackEvents": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "EventId": "c74b1310-3a0e-11e9-9a48-067794494828", + "StackName": "ansible-test-on-create-failure-delete", + "LogicalResourceId": "ansible-test-on-create-failure-delete", + "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "ResourceType": "AWS::CloudFormation::Stack", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 37, + "second": 55, + "microsecond": 909000 + }, + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceStatusReason": "User Initiated" + } + ], + "ResponseMetadata": { + "RequestId": "c7b0b337-3a0e-11e9-b25f-d1217e6893bf", + "HTTPStatusCode": 200, + "HTTPHeaders": { + "x-amzn-requestid": "c7b0b337-3a0e-11e9-b25f-d1217e6893bf", + "content-type": "text/xml", + "content-length": "1153", + "date": "Tue, 26 Feb 2019 21:37:56 GMT" + }, + "RetryAttempts": 0 + } + } +} diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_2.json 
b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_2.json new file mode 100644 index 000000000..6218ed8b8 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_2.json @@ -0,0 +1,101 @@ +{ + "status_code": 200, + "data": { + "StackEvents": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "EventId": "ECRRepo-CREATE_FAILED-2019-02-26T21:38:01.107Z", + "StackName": "ansible-test-on-create-failure-delete", + "LogicalResourceId": "ECRRepo", + "PhysicalResourceId": "ansib-ecrre-8jlpw72yz5x8", + "ResourceType": "AWS::ECR::Repository", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 38, + "second": 1, + "microsecond": 107000 + }, + "ResourceStatus": "CREATE_FAILED", + "ResourceStatusReason": "Invalid parameter at 'PolicyText' failed to satisfy constraint: 'Invalid repository policy provided' (Service: AmazonECR; Status Code: 400; Error Code: InvalidParameterException; Request ID: ca5769ae-3a0e-11e9-a183-3f277586a4cb)", + "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "EventId": "ECRRepo-CREATE_IN_PROGRESS-2019-02-26T21:38:00.657Z", + "StackName": "ansible-test-on-create-failure-delete", + "LogicalResourceId": "ECRRepo", + "PhysicalResourceId": "ansib-ecrre-8jlpw72yz5x8", + "ResourceType": "AWS::ECR::Repository", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 38, + "second": 0, + "microsecond": 657000 + }, + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceStatusReason": "Resource creation Initiated", + "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "EventId": "ECRRepo-CREATE_IN_PROGRESS-2019-02-26T21:38:00.221Z", + "StackName": "ansible-test-on-create-failure-delete", + "LogicalResourceId": "ECRRepo", + "PhysicalResourceId": "", + "ResourceType": "AWS::ECR::Repository", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 38, + "second": 0, + "microsecond": 221000 + }, + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "EventId": "c74b1310-3a0e-11e9-9a48-067794494828", + "StackName": "ansible-test-on-create-failure-delete", + "LogicalResourceId": "ansible-test-on-create-failure-delete", + "PhysicalResourceId": 
"arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "ResourceType": "AWS::CloudFormation::Stack", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 37, + "second": 55, + "microsecond": 909000 + }, + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceStatusReason": "User Initiated" + } + ], + "ResponseMetadata": { + "RequestId": "caf667e9-3a0e-11e9-b25f-d1217e6893bf", + "HTTPStatusCode": 200, + "HTTPHeaders": { + "x-amzn-requestid": "caf667e9-3a0e-11e9-b25f-d1217e6893bf", + "content-type": "text/xml", + "content-length": "4312", + "vary": "Accept-Encoding", + "date": "Tue, 26 Feb 2019 21:38:01 GMT" + }, + "RetryAttempts": 0 + } + } +} diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_3.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_3.json new file mode 100644 index 000000000..cde6beb8e --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_3.json @@ -0,0 +1,121 @@ +{ + "status_code": 200, + "data": { + "StackEvents": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "EventId": "cafc8250-3a0e-11e9-86c5-02035744c0fa", + "StackName": "ansible-test-on-create-failure-delete", + "LogicalResourceId": "ansible-test-on-create-failure-delete", + "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "ResourceType": "AWS::CloudFormation::Stack", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 38, + "second": 2, + "microsecond": 76000 + }, + "ResourceStatus": "DELETE_IN_PROGRESS", + "ResourceStatusReason": "The following resource(s) failed to create: [ECRRepo]. . Delete requested by user." 
+ }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "EventId": "ECRRepo-CREATE_FAILED-2019-02-26T21:38:01.107Z", + "StackName": "ansible-test-on-create-failure-delete", + "LogicalResourceId": "ECRRepo", + "PhysicalResourceId": "ansib-ecrre-8jlpw72yz5x8", + "ResourceType": "AWS::ECR::Repository", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 38, + "second": 1, + "microsecond": 107000 + }, + "ResourceStatus": "CREATE_FAILED", + "ResourceStatusReason": "Invalid parameter at 'PolicyText' failed to satisfy constraint: 'Invalid repository policy provided' (Service: AmazonECR; Status Code: 400; Error Code: InvalidParameterException; Request ID: ca5769ae-3a0e-11e9-a183-3f277586a4cb)", + "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "EventId": "ECRRepo-CREATE_IN_PROGRESS-2019-02-26T21:38:00.657Z", + "StackName": "ansible-test-on-create-failure-delete", + "LogicalResourceId": "ECRRepo", + "PhysicalResourceId": "ansib-ecrre-8jlpw72yz5x8", + "ResourceType": "AWS::ECR::Repository", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 38, + "second": 0, + "microsecond": 657000 + }, + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceStatusReason": "Resource creation Initiated", + "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "EventId": "ECRRepo-CREATE_IN_PROGRESS-2019-02-26T21:38:00.221Z", + "StackName": "ansible-test-on-create-failure-delete", + "LogicalResourceId": "ECRRepo", + "PhysicalResourceId": "", + "ResourceType": "AWS::ECR::Repository", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 38, + "second": 0, + "microsecond": 221000 + }, + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "EventId": "c74b1310-3a0e-11e9-9a48-067794494828", + "StackName": "ansible-test-on-create-failure-delete", + "LogicalResourceId": "ansible-test-on-create-failure-delete", + "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "ResourceType": "AWS::CloudFormation::Stack", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 37, + "second": 55, + "microsecond": 909000 + }, + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceStatusReason": "User Initiated" + } + ], + "ResponseMetadata": { + "RequestId": 
"ce498af1-3a0e-11e9-b25f-d1217e6893bf", + "HTTPStatusCode": 200, + "HTTPHeaders": { + "x-amzn-requestid": "ce498af1-3a0e-11e9-b25f-d1217e6893bf", + "content-type": "text/xml", + "content-length": "5207", + "vary": "Accept-Encoding", + "date": "Tue, 26 Feb 2019 21:38:06 GMT" + }, + "RetryAttempts": 0 + } + } +} diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_4.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_4.json new file mode 100644 index 000000000..4f35d6ddc --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_4.json @@ -0,0 +1,180 @@ +{ + "status_code": 200, + "data": { + "StackEvents": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "EventId": "d19c8600-3a0e-11e9-a4ba-0a3524ef8042", + "StackName": "ansible-test-on-create-failure-delete", + "LogicalResourceId": "ansible-test-on-create-failure-delete", + "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "ResourceType": "AWS::CloudFormation::Stack", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 38, + "second": 13, + "microsecond": 177000 + }, + "ResourceStatus": "DELETE_COMPLETE" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "EventId": "ECRRepo-DELETE_COMPLETE-2019-02-26T21:38:12.486Z", + "StackName": "ansible-test-on-create-failure-delete", + "LogicalResourceId": "ECRRepo", + "PhysicalResourceId": "ansib-ecrre-8jlpw72yz5x8", + "ResourceType": "AWS::ECR::Repository", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 38, + "second": 12, + "microsecond": 486000 + }, + "ResourceStatus": "DELETE_COMPLETE", + "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "EventId": "ECRRepo-DELETE_IN_PROGRESS-2019-02-26T21:38:12.139Z", + "StackName": "ansible-test-on-create-failure-delete", + "LogicalResourceId": "ECRRepo", + "PhysicalResourceId": "ansib-ecrre-8jlpw72yz5x8", + "ResourceType": "AWS::ECR::Repository", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 38, + "second": 12, + "microsecond": 139000 + }, + "ResourceStatus": "DELETE_IN_PROGRESS", + "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "EventId": "cafc8250-3a0e-11e9-86c5-02035744c0fa", + "StackName": "ansible-test-on-create-failure-delete", + 
"LogicalResourceId": "ansible-test-on-create-failure-delete", + "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "ResourceType": "AWS::CloudFormation::Stack", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 38, + "second": 2, + "microsecond": 76000 + }, + "ResourceStatus": "DELETE_IN_PROGRESS", + "ResourceStatusReason": "The following resource(s) failed to create: [ECRRepo]. . Delete requested by user." + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "EventId": "ECRRepo-CREATE_FAILED-2019-02-26T21:38:01.107Z", + "StackName": "ansible-test-on-create-failure-delete", + "LogicalResourceId": "ECRRepo", + "PhysicalResourceId": "ansib-ecrre-8jlpw72yz5x8", + "ResourceType": "AWS::ECR::Repository", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 38, + "second": 1, + "microsecond": 107000 + }, + "ResourceStatus": "CREATE_FAILED", + "ResourceStatusReason": "Invalid parameter at 'PolicyText' failed to satisfy constraint: 'Invalid repository policy provided' (Service: AmazonECR; Status Code: 400; Error Code: InvalidParameterException; Request ID: ca5769ae-3a0e-11e9-a183-3f277586a4cb)", + "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "EventId": "ECRRepo-CREATE_IN_PROGRESS-2019-02-26T21:38:00.657Z", + "StackName": "ansible-test-on-create-failure-delete", + "LogicalResourceId": "ECRRepo", + "PhysicalResourceId": "ansib-ecrre-8jlpw72yz5x8", + "ResourceType": "AWS::ECR::Repository", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 38, + "second": 0, + "microsecond": 657000 + }, + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceStatusReason": "Resource creation Initiated", + "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "EventId": "ECRRepo-CREATE_IN_PROGRESS-2019-02-26T21:38:00.221Z", + "StackName": "ansible-test-on-create-failure-delete", + "LogicalResourceId": "ECRRepo", + "PhysicalResourceId": "", + "ResourceType": "AWS::ECR::Repository", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 38, + "second": 0, + "microsecond": 221000 + }, + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "EventId": "c74b1310-3a0e-11e9-9a48-067794494828", + "StackName": "ansible-test-on-create-failure-delete", + 
"LogicalResourceId": "ansible-test-on-create-failure-delete", + "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "ResourceType": "AWS::CloudFormation::Stack", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 37, + "second": 55, + "microsecond": 909000 + }, + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceStatusReason": "User Initiated" + } + ], + "ResponseMetadata": { + "RequestId": "d19fbb1b-3a0e-11e9-b25f-d1217e6893bf", + "HTTPStatusCode": 200, + "HTTPHeaders": { + "x-amzn-requestid": "d19fbb1b-3a0e-11e9-b25f-d1217e6893bf", + "content-type": "text/xml", + "content-length": "7857", + "vary": "Accept-Encoding", + "date": "Tue, 26 Feb 2019 21:38:12 GMT" + }, + "RetryAttempts": 0 + } + } +} diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_5.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_5.json new file mode 100644 index 000000000..68a743f89 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_5.json @@ -0,0 +1,180 @@ +{ + "status_code": 200, + "data": { + "StackEvents": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "EventId": "d19c8600-3a0e-11e9-a4ba-0a3524ef8042", + "StackName": "ansible-test-on-create-failure-delete", + "LogicalResourceId": "ansible-test-on-create-failure-delete", + "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "ResourceType": "AWS::CloudFormation::Stack", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 38, + "second": 13, + "microsecond": 177000 + }, + "ResourceStatus": "DELETE_COMPLETE" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "EventId": "ECRRepo-DELETE_COMPLETE-2019-02-26T21:38:12.486Z", + "StackName": "ansible-test-on-create-failure-delete", + "LogicalResourceId": "ECRRepo", + "PhysicalResourceId": "ansib-ecrre-8jlpw72yz5x8", + "ResourceType": "AWS::ECR::Repository", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 38, + "second": 12, + "microsecond": 486000 + }, + "ResourceStatus": "DELETE_COMPLETE", + "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "EventId": "ECRRepo-DELETE_IN_PROGRESS-2019-02-26T21:38:12.139Z", + "StackName": "ansible-test-on-create-failure-delete", + "LogicalResourceId": "ECRRepo", + "PhysicalResourceId": "ansib-ecrre-8jlpw72yz5x8", + "ResourceType": "AWS::ECR::Repository", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 38, + 
"second": 12, + "microsecond": 139000 + }, + "ResourceStatus": "DELETE_IN_PROGRESS", + "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "EventId": "cafc8250-3a0e-11e9-86c5-02035744c0fa", + "StackName": "ansible-test-on-create-failure-delete", + "LogicalResourceId": "ansible-test-on-create-failure-delete", + "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "ResourceType": "AWS::CloudFormation::Stack", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 38, + "second": 2, + "microsecond": 76000 + }, + "ResourceStatus": "DELETE_IN_PROGRESS", + "ResourceStatusReason": "The following resource(s) failed to create: [ECRRepo]. . Delete requested by user." + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "EventId": "ECRRepo-CREATE_FAILED-2019-02-26T21:38:01.107Z", + "StackName": "ansible-test-on-create-failure-delete", + "LogicalResourceId": "ECRRepo", + "PhysicalResourceId": "ansib-ecrre-8jlpw72yz5x8", + "ResourceType": "AWS::ECR::Repository", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 38, + "second": 1, + "microsecond": 107000 + }, + "ResourceStatus": "CREATE_FAILED", + "ResourceStatusReason": "Invalid parameter at 'PolicyText' failed to satisfy constraint: 'Invalid repository policy provided' (Service: AmazonECR; Status Code: 400; Error Code: InvalidParameterException; Request ID: ca5769ae-3a0e-11e9-a183-3f277586a4cb)", + "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "EventId": "ECRRepo-CREATE_IN_PROGRESS-2019-02-26T21:38:00.657Z", + "StackName": "ansible-test-on-create-failure-delete", + "LogicalResourceId": "ECRRepo", + "PhysicalResourceId": "ansib-ecrre-8jlpw72yz5x8", + "ResourceType": "AWS::ECR::Repository", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 38, + "second": 0, + "microsecond": 657000 + }, + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceStatusReason": "Resource creation Initiated", + "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "EventId": "ECRRepo-CREATE_IN_PROGRESS-2019-02-26T21:38:00.221Z", + "StackName": "ansible-test-on-create-failure-delete", + "LogicalResourceId": "ECRRepo", + "PhysicalResourceId": "", + "ResourceType": "AWS::ECR::Repository", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 38, + 
"second": 0, + "microsecond": 221000 + }, + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "EventId": "c74b1310-3a0e-11e9-9a48-067794494828", + "StackName": "ansible-test-on-create-failure-delete", + "LogicalResourceId": "ansible-test-on-create-failure-delete", + "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "ResourceType": "AWS::CloudFormation::Stack", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 37, + "second": 55, + "microsecond": 909000 + }, + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceStatusReason": "User Initiated" + } + ], + "ResponseMetadata": { + "RequestId": "d4fbddab-3a0e-11e9-b25f-d1217e6893bf", + "HTTPStatusCode": 200, + "HTTPHeaders": { + "x-amzn-requestid": "d4fbddab-3a0e-11e9-b25f-d1217e6893bf", + "content-type": "text/xml", + "content-length": "7857", + "vary": "Accept-Encoding", + "date": "Tue, 26 Feb 2019 21:38:18 GMT" + }, + "RetryAttempts": 0 + } + } +} diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_1.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_1.json new file mode 100644 index 000000000..cf5f86acb --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_1.json @@ -0,0 +1,42 @@ +{ + "status_code": 200, + "data": { + "Stacks": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "StackName": "ansible-test-on-create-failure-delete", + "CreationTime": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 37, + "second": 55, + "microsecond": 909000 + }, + "RollbackConfiguration": {}, + "StackStatus": "CREATE_IN_PROGRESS", + "StackStatusReason": "User Initiated", + "DisableRollback": false, + "NotificationARNs": [], + "Tags": [], + "EnableTerminationProtection": false, + "DriftInformation": { + "StackDriftStatus": "NOT_CHECKED" + } + } + ], + "ResponseMetadata": { + "RequestId": "c77fb823-3a0e-11e9-b25f-d1217e6893bf", + "HTTPStatusCode": 200, + "HTTPHeaders": { + "x-amzn-requestid": "c77fb823-3a0e-11e9-b25f-d1217e6893bf", + "content-type": "text/xml", + "content-length": "1041", + "date": "Tue, 26 Feb 2019 21:37:56 GMT" + }, + "RetryAttempts": 0 + } + } +} diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_2.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_2.json new file mode 100644 index 000000000..71a9f54b6 --- /dev/null +++ 
b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_2.json @@ -0,0 +1,41 @@ +{ + "status_code": 200, + "data": { + "Stacks": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "StackName": "ansible-test-on-create-failure-delete", + "CreationTime": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 37, + "second": 55, + "microsecond": 909000 + }, + "RollbackConfiguration": {}, + "StackStatus": "CREATE_IN_PROGRESS", + "DisableRollback": false, + "NotificationARNs": [], + "Tags": [], + "EnableTerminationProtection": false, + "DriftInformation": { + "StackDriftStatus": "NOT_CHECKED" + } + } + ], + "ResponseMetadata": { + "RequestId": "cad153b2-3a0e-11e9-b25f-d1217e6893bf", + "HTTPStatusCode": 200, + "HTTPHeaders": { + "x-amzn-requestid": "cad153b2-3a0e-11e9-b25f-d1217e6893bf", + "content-type": "text/xml", + "content-length": "979", + "date": "Tue, 26 Feb 2019 21:38:01 GMT" + }, + "RetryAttempts": 0 + } + } +} diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_3.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_3.json new file mode 100644 index 000000000..c2028183b --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_3.json @@ -0,0 +1,52 @@ +{ + "status_code": 200, + "data": { + "Stacks": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "StackName": "ansible-test-on-create-failure-delete", + "CreationTime": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 37, + "second": 55, + "microsecond": 909000 + }, + "DeletionTime": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 38, + "second": 2, + "microsecond": 76000 + }, + "RollbackConfiguration": {}, + "StackStatus": "DELETE_IN_PROGRESS", + "StackStatusReason": "The following resource(s) failed to create: [ECRRepo]. . 
Delete requested by user.", + "DisableRollback": false, + "NotificationARNs": [], + "Tags": [], + "EnableTerminationProtection": false, + "DriftInformation": { + "StackDriftStatus": "NOT_CHECKED" + } + } + ], + "ResponseMetadata": { + "RequestId": "ce24289a-3a0e-11e9-b25f-d1217e6893bf", + "HTTPStatusCode": 200, + "HTTPHeaders": { + "x-amzn-requestid": "ce24289a-3a0e-11e9-b25f-d1217e6893bf", + "content-type": "text/xml", + "content-length": "1171", + "date": "Tue, 26 Feb 2019 21:38:06 GMT" + }, + "RetryAttempts": 0 + } + } +} diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_4.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_4.json new file mode 100644 index 000000000..89f835531 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_4.json @@ -0,0 +1,51 @@ +{ + "status_code": 200, + "data": { + "Stacks": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "StackName": "ansible-test-on-create-failure-delete", + "CreationTime": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 37, + "second": 55, + "microsecond": 909000 + }, + "DeletionTime": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 38, + "second": 2, + "microsecond": 76000 + }, + "RollbackConfiguration": {}, + "StackStatus": "DELETE_IN_PROGRESS", + "DisableRollback": false, + "NotificationARNs": [], + "Tags": [], + "EnableTerminationProtection": false, + "DriftInformation": { + "StackDriftStatus": "NOT_CHECKED" + } + } + ], + "ResponseMetadata": { + "RequestId": "d16c27f2-3a0e-11e9-b25f-d1217e6893bf", + "HTTPStatusCode": 200, + "HTTPHeaders": { + "x-amzn-requestid": "d16c27f2-3a0e-11e9-b25f-d1217e6893bf", + "content-type": "text/xml", + "content-length": "1041", + "date": "Tue, 26 Feb 2019 21:38:12 GMT" + }, + "RetryAttempts": 0 + } + } +} diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_5.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_5.json new file mode 100644 index 000000000..739c82937 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_5.json @@ -0,0 +1,50 @@ +{ + "status_code": 200, + "data": { + "Stacks": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828", + "StackName": "ansible-test-on-create-failure-delete", + "CreationTime": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 37, + "second": 55, + "microsecond": 909000 + }, + "DeletionTime": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 38, + "second": 2, + "microsecond": 76000 + }, + "RollbackConfiguration": {}, + "StackStatus": "DELETE_COMPLETE", + "DisableRollback": false, + "NotificationARNs": [], + "Tags": [], + "DriftInformation": { + 
"StackDriftStatus": "NOT_CHECKED" + } + } + ], + "ResponseMetadata": { + "RequestId": "d4c90dd6-3a0e-11e9-b25f-d1217e6893bf", + "HTTPStatusCode": 200, + "HTTPHeaders": { + "x-amzn-requestid": "d4c90dd6-3a0e-11e9-b25f-d1217e6893bf", + "content-type": "text/xml", + "content-length": "965", + "date": "Tue, 26 Feb 2019 21:38:18 GMT" + }, + "RetryAttempts": 0 + } + } +} diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.CreateStack_1.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.CreateStack_1.json new file mode 100644 index 000000000..86f1945fd --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.CreateStack_1.json @@ -0,0 +1,17 @@ +{ + "status_code": 200, + "data": { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-do-nothing/a39dd0a0-3a0f-11e9-96ca-02f46dd00950", + "ResponseMetadata": { + "RequestId": "a396a58a-3a0f-11e9-b7db-3fe3824c73cb", + "HTTPStatusCode": 200, + "HTTPHeaders": { + "x-amzn-requestid": "a396a58a-3a0f-11e9-b7db-3fe3824c73cb", + "content-type": "text/xml", + "content-length": "411", + "date": "Tue, 26 Feb 2019 21:44:05 GMT" + }, + "RetryAttempts": 0 + } + } +} diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DeleteStack_1.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DeleteStack_1.json new file mode 100644 index 000000000..1a3a67c64 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DeleteStack_1.json @@ -0,0 +1,16 @@ +{ + "status_code": 200, + "data": { + "ResponseMetadata": { + "RequestId": "a78f0832-3a0f-11e9-b7db-3fe3824c73cb", + "HTTPStatusCode": 200, + "HTTPHeaders": { + "x-amzn-requestid": "a78f0832-3a0f-11e9-b7db-3fe3824c73cb", + "content-type": "text/xml", + "content-length": "212", + "date": "Tue, 26 Feb 2019 21:44:11 GMT" + }, + "RetryAttempts": 0 + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStackEvents_1.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStackEvents_1.json new file mode 100644 index 000000000..58d7a89e4 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStackEvents_1.json @@ -0,0 +1,38 @@ +{ + "status_code": 200, + "data": { + "StackEvents": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-do-nothing/a39dd0a0-3a0f-11e9-96ca-02f46dd00950", + "EventId": "a39e6ce0-3a0f-11e9-96ca-02f46dd00950", + "StackName": "ansible-test-on-create-failure-do-nothing", + "LogicalResourceId": "ansible-test-on-create-failure-do-nothing", + "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-do-nothing/a39dd0a0-3a0f-11e9-96ca-02f46dd00950", + "ResourceType": 
"AWS::CloudFormation::Stack", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 44, + "second": 5, + "microsecond": 553000 + }, + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceStatusReason": "User Initiated" + } + ], + "ResponseMetadata": { + "RequestId": "a406cc84-3a0f-11e9-b7db-3fe3824c73cb", + "HTTPStatusCode": 200, + "HTTPHeaders": { + "x-amzn-requestid": "a406cc84-3a0f-11e9-b7db-3fe3824c73cb", + "content-type": "text/xml", + "content-length": "1169", + "date": "Tue, 26 Feb 2019 21:44:06 GMT" + }, + "RetryAttempts": 0 + } + } +} diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStackEvents_2.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStackEvents_2.json new file mode 100644 index 000000000..0a7e32e46 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStackEvents_2.json @@ -0,0 +1,121 @@ +{ + "status_code": 200, + "data": { + "StackEvents": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-do-nothing/a39dd0a0-3a0f-11e9-96ca-02f46dd00950", + "EventId": "a6c32c80-3a0f-11e9-ac5e-06deb474fa52", + "StackName": "ansible-test-on-create-failure-do-nothing", + "LogicalResourceId": "ansible-test-on-create-failure-do-nothing", + "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-do-nothing/a39dd0a0-3a0f-11e9-96ca-02f46dd00950", + "ResourceType": "AWS::CloudFormation::Stack", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 44, + "second": 10, + "microsecond": 804000 + }, + "ResourceStatus": "CREATE_FAILED", + "ResourceStatusReason": "The following resource(s) failed to create: [ECRRepo]. 
" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-do-nothing/a39dd0a0-3a0f-11e9-96ca-02f46dd00950", + "EventId": "ECRRepo-CREATE_FAILED-2019-02-26T21:44:09.905Z", + "StackName": "ansible-test-on-create-failure-do-nothing", + "LogicalResourceId": "ECRRepo", + "PhysicalResourceId": "ansib-ecrre-a8g0mh5il4t5", + "ResourceType": "AWS::ECR::Repository", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 44, + "second": 9, + "microsecond": 905000 + }, + "ResourceStatus": "CREATE_FAILED", + "ResourceStatusReason": "Invalid parameter at 'PolicyText' failed to satisfy constraint: 'Invalid repository policy provided' (Service: AmazonECR; Status Code: 400; Error Code: InvalidParameterException; Request ID: a62a6f71-3a0f-11e9-9164-457e0a3a5e1b)", + "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-do-nothing/a39dd0a0-3a0f-11e9-96ca-02f46dd00950", + "EventId": "ECRRepo-CREATE_IN_PROGRESS-2019-02-26T21:44:09.497Z", + "StackName": "ansible-test-on-create-failure-do-nothing", + "LogicalResourceId": "ECRRepo", + "PhysicalResourceId": "ansib-ecrre-a8g0mh5il4t5", + "ResourceType": "AWS::ECR::Repository", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 44, + "second": 9, + "microsecond": 497000 + }, + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceStatusReason": "Resource creation Initiated", + "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-do-nothing/a39dd0a0-3a0f-11e9-96ca-02f46dd00950", + "EventId": "ECRRepo-CREATE_IN_PROGRESS-2019-02-26T21:44:09.076Z", + "StackName": "ansible-test-on-create-failure-do-nothing", + "LogicalResourceId": "ECRRepo", + "PhysicalResourceId": "", + "ResourceType": "AWS::ECR::Repository", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 44, + "second": 9, + "microsecond": 76000 + }, + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-do-nothing/a39dd0a0-3a0f-11e9-96ca-02f46dd00950", + "EventId": "a39e6ce0-3a0f-11e9-96ca-02f46dd00950", + "StackName": "ansible-test-on-create-failure-do-nothing", + "LogicalResourceId": "ansible-test-on-create-failure-do-nothing", + "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-do-nothing/a39dd0a0-3a0f-11e9-96ca-02f46dd00950", + "ResourceType": "AWS::CloudFormation::Stack", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 44, + "second": 5, + "microsecond": 553000 + }, + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceStatusReason": "User Initiated" + } + ], + 
"ResponseMetadata": { + "RequestId": "a75fbad0-3a0f-11e9-b7db-3fe3824c73cb", + "HTTPStatusCode": 200, + "HTTPHeaders": { + "x-amzn-requestid": "a75fbad0-3a0f-11e9-b7db-3fe3824c73cb", + "content-type": "text/xml", + "content-length": "5231", + "vary": "Accept-Encoding", + "date": "Tue, 26 Feb 2019 21:44:11 GMT" + }, + "RetryAttempts": 0 + } + } +} diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStacks_1.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStacks_1.json new file mode 100644 index 000000000..532143313 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStacks_1.json @@ -0,0 +1,42 @@ +{ + "status_code": 200, + "data": { + "Stacks": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-do-nothing/a39dd0a0-3a0f-11e9-96ca-02f46dd00950", + "StackName": "ansible-test-on-create-failure-do-nothing", + "CreationTime": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 44, + "second": 5, + "microsecond": 553000 + }, + "RollbackConfiguration": {}, + "StackStatus": "CREATE_IN_PROGRESS", + "StackStatusReason": "User Initiated", + "DisableRollback": true, + "NotificationARNs": [], + "Tags": [], + "EnableTerminationProtection": false, + "DriftInformation": { + "StackDriftStatus": "NOT_CHECKED" + } + } + ], + "ResponseMetadata": { + "RequestId": "a3d44acf-3a0f-11e9-b7db-3fe3824c73cb", + "HTTPStatusCode": 200, + "HTTPHeaders": { + "x-amzn-requestid": "a3d44acf-3a0f-11e9-b7db-3fe3824c73cb", + "content-type": "text/xml", + "content-length": "1048", + "date": "Tue, 26 Feb 2019 21:44:05 GMT" + }, + "RetryAttempts": 0 + } + } +} diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStacks_2.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStacks_2.json new file mode 100644 index 000000000..df17f5a73 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStacks_2.json @@ -0,0 +1,42 @@ +{ + "status_code": 200, + "data": { + "Stacks": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-do-nothing/a39dd0a0-3a0f-11e9-96ca-02f46dd00950", + "StackName": "ansible-test-on-create-failure-do-nothing", + "CreationTime": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 44, + "second": 5, + "microsecond": 553000 + }, + "RollbackConfiguration": {}, + "StackStatus": "CREATE_FAILED", + "StackStatusReason": "The following resource(s) failed to create: [ECRRepo]. 
", + "DisableRollback": true, + "NotificationARNs": [], + "Tags": [], + "EnableTerminationProtection": false, + "DriftInformation": { + "StackDriftStatus": "NOT_CHECKED" + } + } + ], + "ResponseMetadata": { + "RequestId": "a7301f4a-3a0f-11e9-b7db-3fe3824c73cb", + "HTTPStatusCode": 200, + "HTTPHeaders": { + "x-amzn-requestid": "a7301f4a-3a0f-11e9-b7db-3fe3824c73cb", + "content-type": "text/xml", + "content-length": "1084", + "date": "Tue, 26 Feb 2019 21:44:11 GMT" + }, + "RetryAttempts": 0 + } + } +} diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.CreateStack_1.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.CreateStack_1.json new file mode 100644 index 000000000..f71422b92 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.CreateStack_1.json @@ -0,0 +1,17 @@ +{ + "status_code": 200, + "data": { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014", + "ResponseMetadata": { + "RequestId": "9139de54-3a0f-11e9-b938-97983b40cabe", + "HTTPStatusCode": 200, + "HTTPHeaders": { + "x-amzn-requestid": "9139de54-3a0f-11e9-b938-97983b40cabe", + "content-type": "text/xml", + "content-length": "409", + "date": "Tue, 26 Feb 2019 21:43:34 GMT" + }, + "RetryAttempts": 0 + } + } +} diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DeleteStack_1.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DeleteStack_1.json new file mode 100644 index 000000000..111dc90d8 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DeleteStack_1.json @@ -0,0 +1,16 @@ +{ + "status_code": 200, + "data": { + "ResponseMetadata": { + "RequestId": "988b3097-3a0f-11e9-b938-97983b40cabe", + "HTTPStatusCode": 200, + "HTTPHeaders": { + "x-amzn-requestid": "988b3097-3a0f-11e9-b938-97983b40cabe", + "content-type": "text/xml", + "content-length": "212", + "date": "Tue, 26 Feb 2019 21:43:46 GMT" + }, + "RetryAttempts": 0 + } + } +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStackEvents_1.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStackEvents_1.json new file mode 100644 index 000000000..2bcac7f0e --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStackEvents_1.json @@ -0,0 +1,38 @@ +{ + "status_code": 200, + "data": { + "StackEvents": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014", + "EventId": "9140bc10-3a0f-11e9-94bf-0a9edf17d014", + "StackName": "ansible-test-on-create-failure-rollback", + "LogicalResourceId": "ansible-test-on-create-failure-rollback", + "PhysicalResourceId": 
"arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014", + "ResourceType": "AWS::CloudFormation::Stack", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 43, + "second": 34, + "microsecond": 740000 + }, + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceStatusReason": "User Initiated" + } + ], + "ResponseMetadata": { + "RequestId": "9199b1a7-3a0f-11e9-b938-97983b40cabe", + "HTTPStatusCode": 200, + "HTTPHeaders": { + "x-amzn-requestid": "9199b1a7-3a0f-11e9-b938-97983b40cabe", + "content-type": "text/xml", + "content-length": "1161", + "date": "Tue, 26 Feb 2019 21:43:35 GMT" + }, + "RetryAttempts": 0 + } + } +} diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStackEvents_2.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStackEvents_2.json new file mode 100644 index 000000000..3992fd397 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStackEvents_2.json @@ -0,0 +1,121 @@ +{ + "status_code": 200, + "data": { + "StackEvents": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014", + "EventId": "945b90a0-3a0f-11e9-adaf-0211d8bec7e2", + "StackName": "ansible-test-on-create-failure-rollback", + "LogicalResourceId": "ansible-test-on-create-failure-rollback", + "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014", + "ResourceType": "AWS::CloudFormation::Stack", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 43, + "second": 39, + "microsecond": 920000 + }, + "ResourceStatus": "ROLLBACK_IN_PROGRESS", + "ResourceStatusReason": "The following resource(s) failed to create: [ECRRepo]. . Rollback requested by user." 
+ }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014", + "EventId": "ECRRepo-CREATE_FAILED-2019-02-26T21:43:39.210Z", + "StackName": "ansible-test-on-create-failure-rollback", + "LogicalResourceId": "ECRRepo", + "PhysicalResourceId": "ansib-ecrre-1lsnxu2zpb20l", + "ResourceType": "AWS::ECR::Repository", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 43, + "second": 39, + "microsecond": 210000 + }, + "ResourceStatus": "CREATE_FAILED", + "ResourceStatusReason": "Invalid parameter at 'PolicyText' failed to satisfy constraint: 'Invalid repository policy provided' (Service: AmazonECR; Status Code: 400; Error Code: InvalidParameterException; Request ID: 93e0bb60-3a0f-11e9-a53c-7162bb423e4d)", + "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014", + "EventId": "ECRRepo-CREATE_IN_PROGRESS-2019-02-26T21:43:38.793Z", + "StackName": "ansible-test-on-create-failure-rollback", + "LogicalResourceId": "ECRRepo", + "PhysicalResourceId": "ansib-ecrre-1lsnxu2zpb20l", + "ResourceType": "AWS::ECR::Repository", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 43, + "second": 38, + "microsecond": 793000 + }, + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceStatusReason": "Resource creation Initiated", + "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014", + "EventId": "ECRRepo-CREATE_IN_PROGRESS-2019-02-26T21:43:38.266Z", + "StackName": "ansible-test-on-create-failure-rollback", + "LogicalResourceId": "ECRRepo", + "PhysicalResourceId": "", + "ResourceType": "AWS::ECR::Repository", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 43, + "second": 38, + "microsecond": 266000 + }, + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014", + "EventId": "9140bc10-3a0f-11e9-94bf-0a9edf17d014", + "StackName": "ansible-test-on-create-failure-rollback", + "LogicalResourceId": "ansible-test-on-create-failure-rollback", + "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014", + "ResourceType": "AWS::CloudFormation::Stack", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 43, + "second": 34, + "microsecond": 740000 + }, + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceStatusReason": "User Initiated" + } + ], + "ResponseMetadata": { + 
"RequestId": "94e16307-3a0f-11e9-b938-97983b40cabe", + "HTTPStatusCode": 200, + "HTTPHeaders": { + "x-amzn-requestid": "94e16307-3a0f-11e9-b938-97983b40cabe", + "content-type": "text/xml", + "content-length": "5241", + "vary": "Accept-Encoding", + "date": "Tue, 26 Feb 2019 21:43:40 GMT" + }, + "RetryAttempts": 0 + } + } +} diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStackEvents_3.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStackEvents_3.json new file mode 100644 index 000000000..e272c734b --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStackEvents_3.json @@ -0,0 +1,180 @@ +{ + "status_code": 200, + "data": { + "StackEvents": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014", + "EventId": "9743bc70-3a0f-11e9-b335-0ade61d04ee6", + "StackName": "ansible-test-on-create-failure-rollback", + "LogicalResourceId": "ansible-test-on-create-failure-rollback", + "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014", + "ResourceType": "AWS::CloudFormation::Stack", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 43, + "second": 44, + "microsecond": 797000 + }, + "ResourceStatus": "ROLLBACK_COMPLETE" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014", + "EventId": "ECRRepo-DELETE_COMPLETE-2019-02-26T21:43:43.908Z", + "StackName": "ansible-test-on-create-failure-rollback", + "LogicalResourceId": "ECRRepo", + "PhysicalResourceId": "ansib-ecrre-1lsnxu2zpb20l", + "ResourceType": "AWS::ECR::Repository", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 43, + "second": 43, + "microsecond": 908000 + }, + "ResourceStatus": "DELETE_COMPLETE", + "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014", + "EventId": "ECRRepo-DELETE_IN_PROGRESS-2019-02-26T21:43:43.478Z", + "StackName": "ansible-test-on-create-failure-rollback", + "LogicalResourceId": "ECRRepo", + "PhysicalResourceId": "ansib-ecrre-1lsnxu2zpb20l", + "ResourceType": "AWS::ECR::Repository", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 43, + "second": 43, + "microsecond": 478000 + }, + "ResourceStatus": "DELETE_IN_PROGRESS", + "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014", + "EventId": "945b90a0-3a0f-11e9-adaf-0211d8bec7e2", + "StackName": 
"ansible-test-on-create-failure-rollback", + "LogicalResourceId": "ansible-test-on-create-failure-rollback", + "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014", + "ResourceType": "AWS::CloudFormation::Stack", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 43, + "second": 39, + "microsecond": 920000 + }, + "ResourceStatus": "ROLLBACK_IN_PROGRESS", + "ResourceStatusReason": "The following resource(s) failed to create: [ECRRepo]. . Rollback requested by user." + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014", + "EventId": "ECRRepo-CREATE_FAILED-2019-02-26T21:43:39.210Z", + "StackName": "ansible-test-on-create-failure-rollback", + "LogicalResourceId": "ECRRepo", + "PhysicalResourceId": "ansib-ecrre-1lsnxu2zpb20l", + "ResourceType": "AWS::ECR::Repository", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 43, + "second": 39, + "microsecond": 210000 + }, + "ResourceStatus": "CREATE_FAILED", + "ResourceStatusReason": "Invalid parameter at 'PolicyText' failed to satisfy constraint: 'Invalid repository policy provided' (Service: AmazonECR; Status Code: 400; Error Code: InvalidParameterException; Request ID: 93e0bb60-3a0f-11e9-a53c-7162bb423e4d)", + "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014", + "EventId": "ECRRepo-CREATE_IN_PROGRESS-2019-02-26T21:43:38.793Z", + "StackName": "ansible-test-on-create-failure-rollback", + "LogicalResourceId": "ECRRepo", + "PhysicalResourceId": "ansib-ecrre-1lsnxu2zpb20l", + "ResourceType": "AWS::ECR::Repository", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 43, + "second": 38, + "microsecond": 793000 + }, + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceStatusReason": "Resource creation Initiated", + "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014", + "EventId": "ECRRepo-CREATE_IN_PROGRESS-2019-02-26T21:43:38.266Z", + "StackName": "ansible-test-on-create-failure-rollback", + "LogicalResourceId": "ECRRepo", + "PhysicalResourceId": "", + "ResourceType": "AWS::ECR::Repository", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 43, + "second": 38, + "microsecond": 266000 + }, + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}" + }, + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014", + "EventId": 
"9140bc10-3a0f-11e9-94bf-0a9edf17d014", + "StackName": "ansible-test-on-create-failure-rollback", + "LogicalResourceId": "ansible-test-on-create-failure-rollback", + "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014", + "ResourceType": "AWS::CloudFormation::Stack", + "Timestamp": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 43, + "second": 34, + "microsecond": 740000 + }, + "ResourceStatus": "CREATE_IN_PROGRESS", + "ResourceStatusReason": "User Initiated" + } + ], + "ResponseMetadata": { + "RequestId": "982d0bff-3a0f-11e9-b938-97983b40cabe", + "HTTPStatusCode": 200, + "HTTPHeaders": { + "x-amzn-requestid": "982d0bff-3a0f-11e9-b938-97983b40cabe", + "content-type": "text/xml", + "content-length": "7911", + "vary": "Accept-Encoding", + "date": "Tue, 26 Feb 2019 21:43:45 GMT" + }, + "RetryAttempts": 0 + } + } +} diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStacks_1.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStacks_1.json new file mode 100644 index 000000000..25facea18 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStacks_1.json @@ -0,0 +1,42 @@ +{ + "status_code": 200, + "data": { + "Stacks": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014", + "StackName": "ansible-test-on-create-failure-rollback", + "CreationTime": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 43, + "second": 34, + "microsecond": 740000 + }, + "RollbackConfiguration": {}, + "StackStatus": "CREATE_IN_PROGRESS", + "StackStatusReason": "User Initiated", + "DisableRollback": false, + "NotificationARNs": [], + "Tags": [], + "EnableTerminationProtection": false, + "DriftInformation": { + "StackDriftStatus": "NOT_CHECKED" + } + } + ], + "ResponseMetadata": { + "RequestId": "91725383-3a0f-11e9-b938-97983b40cabe", + "HTTPStatusCode": 200, + "HTTPHeaders": { + "x-amzn-requestid": "91725383-3a0f-11e9-b938-97983b40cabe", + "content-type": "text/xml", + "content-length": "1045", + "date": "Tue, 26 Feb 2019 21:43:35 GMT" + }, + "RetryAttempts": 0 + } + } +} diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStacks_2.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStacks_2.json new file mode 100644 index 000000000..55a80d8af --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStacks_2.json @@ -0,0 +1,52 @@ +{ + "status_code": 200, + "data": { + "Stacks": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014", + "StackName": "ansible-test-on-create-failure-rollback", + "CreationTime": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 43, + "second": 34, + 
"microsecond": 740000 + }, + "DeletionTime": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 43, + "second": 39, + "microsecond": 920000 + }, + "RollbackConfiguration": {}, + "StackStatus": "ROLLBACK_IN_PROGRESS", + "StackStatusReason": "The following resource(s) failed to create: [ECRRepo]. . Rollback requested by user.", + "DisableRollback": false, + "NotificationARNs": [], + "Tags": [], + "EnableTerminationProtection": false, + "DriftInformation": { + "StackDriftStatus": "NOT_CHECKED" + } + } + ], + "ResponseMetadata": { + "RequestId": "94bb1651-3a0f-11e9-b938-97983b40cabe", + "HTTPStatusCode": 200, + "HTTPHeaders": { + "x-amzn-requestid": "94bb1651-3a0f-11e9-b938-97983b40cabe", + "content-type": "text/xml", + "content-length": "1179", + "date": "Tue, 26 Feb 2019 21:43:40 GMT" + }, + "RetryAttempts": 0 + } + } +} diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStacks_3.json b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStacks_3.json new file mode 100644 index 000000000..7c00a8364 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStacks_3.json @@ -0,0 +1,51 @@ +{ + "status_code": 200, + "data": { + "Stacks": [ + { + "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014", + "StackName": "ansible-test-on-create-failure-rollback", + "CreationTime": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 43, + "second": 34, + "microsecond": 740000 + }, + "DeletionTime": { + "__class__": "datetime", + "year": 2019, + "month": 2, + "day": 26, + "hour": 21, + "minute": 43, + "second": 39, + "microsecond": 920000 + }, + "RollbackConfiguration": {}, + "StackStatus": "ROLLBACK_COMPLETE", + "DisableRollback": false, + "NotificationARNs": [], + "Tags": [], + "EnableTerminationProtection": false, + "DriftInformation": { + "StackDriftStatus": "NOT_CHECKED" + } + } + ], + "ResponseMetadata": { + "RequestId": "98016814-3a0f-11e9-b938-97983b40cabe", + "HTTPStatusCode": 200, + "HTTPHeaders": { + "x-amzn-requestid": "98016814-3a0f-11e9-b938-97983b40cabe", + "content-type": "text/xml", + "content-length": "1044", + "date": "Tue, 26 Feb 2019 21:43:45 GMT" + }, + "RetryAttempts": 0 + } + } +} diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_cloudformation.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_cloudformation.py new file mode 100644 index 000000000..f46bc1113 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_cloudformation.py @@ -0,0 +1,227 @@ +# (c) 2017 Red Hat Inc. +# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +# Magic... 
+from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import maybe_sleep, placeboify # pylint: disable=unused-import + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import boto_exception +from ansible_collections.amazon.aws.plugins.module_utils.modules import _RetryingBotoClientWrapper +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + +from ansible_collections.amazon.aws.plugins.modules import cloudformation as cfn_module + +basic_yaml_tpl = """ +--- +AWSTemplateFormatVersion: '2010-09-09' +Description: 'Basic template that creates an S3 bucket' +Resources: + MyBucket: + Type: "AWS::S3::Bucket" +Outputs: + TheName: + Value: + !Ref MyBucket +""" + +bad_json_tpl = """{ + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Broken template, no comma here ->" + "Resources": { + "MyBucket": { + "Type": "AWS::S3::Bucket" + } + } +}""" + +failing_yaml_tpl = """ +--- +AWSTemplateFormatVersion: 2010-09-09 +Resources: + ECRRepo: + Type: AWS::ECR::Repository + Properties: + RepositoryPolicyText: + Version: 3000-10-17 # <--- invalid version + Statement: + - Effect: Allow + Action: + - 'ecr:*' + Principal: + AWS: !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:root +""" + +default_events_limit = 10 + + +class FakeModule(object): + def __init__(self, **kwargs): + self.params = kwargs + + def fail_json(self, *args, **kwargs): + self.exit_args = args + self.exit_kwargs = kwargs + raise Exception('FAIL') + + def fail_json_aws(self, *args, **kwargs): + self.exit_args = args + self.exit_kwargs = kwargs + raise Exception('FAIL') + + def exit_json(self, *args, **kwargs): + self.exit_args = args + self.exit_kwargs = kwargs + raise Exception('EXIT') + + +def _create_wrapped_client(placeboify): + connection = placeboify.client('cloudformation') + retry_decorator = AWSRetry.jittered_backoff() + wrapped_conn = _RetryingBotoClientWrapper(connection, retry_decorator) + return wrapped_conn + + +def test_invalid_template_json(placeboify): + connection = _create_wrapped_client(placeboify) + params = { + 'StackName': 'ansible-test-wrong-json', + 'TemplateBody': bad_json_tpl, + } + m = FakeModule(disable_rollback=False) + with pytest.raises(Exception) as exc_info: + cfn_module.create_stack(m, params, connection, default_events_limit) + pytest.fail('Expected malformed JSON to have caused the call to fail') + + assert exc_info.match('FAIL') + assert "ValidationError" in boto_exception(m.exit_args[0]) + + +def test_client_request_token_s3_stack(maybe_sleep, placeboify): + connection = _create_wrapped_client(placeboify) + params = { + 'StackName': 'ansible-test-client-request-token-yaml', + 'TemplateBody': basic_yaml_tpl, + 'ClientRequestToken': '3faf3fb5-b289-41fc-b940-44151828f6cf', + } + m = FakeModule(disable_rollback=False) + result = cfn_module.create_stack(m, params, connection, default_events_limit) + assert result['changed'] + assert len(result['events']) > 1 + # require that the final recorded stack state was CREATE_COMPLETE + # events are retrieved newest-first, so 0 is the latest + assert 'CREATE_COMPLETE' in result['events'][0] + connection.delete_stack(StackName='ansible-test-client-request-token-yaml') + + +def test_basic_s3_stack(maybe_sleep, placeboify): + connection = _create_wrapped_client(placeboify) + params = { + 'StackName': 'ansible-test-basic-yaml', + 'TemplateBody': basic_yaml_tpl + } + m = FakeModule(disable_rollback=False) + result = cfn_module.create_stack(m, params, connection, default_events_limit) + assert 
result['changed'] + assert len(result['events']) > 1 + # require that the final recorded stack state was CREATE_COMPLETE + # events are retrieved newest-first, so 0 is the latest + assert 'CREATE_COMPLETE' in result['events'][0] + connection.delete_stack(StackName='ansible-test-basic-yaml') + + +def test_delete_nonexistent_stack(maybe_sleep, placeboify): + connection = _create_wrapped_client(placeboify) + # module is only used if we threw an unexpected error + module = None + result = cfn_module.stack_operation(module, connection, 'ansible-test-nonexist', 'DELETE', default_events_limit) + assert result['changed'] + assert 'Stack does not exist.' in result['log'] + + +def test_get_nonexistent_stack(placeboify): + connection = _create_wrapped_client(placeboify) + # module is only used if we threw an unexpected error + module = None + assert cfn_module.get_stack_facts(module, connection, 'ansible-test-nonexist') is None + + +def test_missing_template_body(): + m = FakeModule() + with pytest.raises(Exception) as exc_info: + cfn_module.create_stack( + module=m, + stack_params={}, + cfn=None, + events_limit=default_events_limit + ) + pytest.fail('Expected module to have failed with no template') + + assert exc_info.match('FAIL') + assert not m.exit_args + assert "Either 'template', 'template_body' or 'template_url' is required when the stack does not exist." == m.exit_kwargs['msg'] + + +def test_on_create_failure_delete(maybe_sleep, placeboify): + m = FakeModule( + on_create_failure='DELETE', + disable_rollback=False, + ) + connection = _create_wrapped_client(placeboify) + params = { + 'StackName': 'ansible-test-on-create-failure-delete', + 'TemplateBody': failing_yaml_tpl + } + result = cfn_module.create_stack(m, params, connection, default_events_limit) + assert result['changed'] + assert result['failed'] + assert len(result['events']) > 1 + # require that the final recorded stack state was DELETE_COMPLETE + # events are retrieved newest-first, so 0 is the latest + assert 'DELETE_COMPLETE' in result['events'][0] + + +def test_on_create_failure_rollback(maybe_sleep, placeboify): + m = FakeModule( + on_create_failure='ROLLBACK', + disable_rollback=False, + ) + connection = _create_wrapped_client(placeboify) + params = { + 'StackName': 'ansible-test-on-create-failure-rollback', + 'TemplateBody': failing_yaml_tpl + } + result = cfn_module.create_stack(m, params, connection, default_events_limit) + assert result['changed'] + assert result['failed'] + assert len(result['events']) > 1 + # require that the final recorded stack state was ROLLBACK_COMPLETE + # events are retrieved newest-first, so 0 is the latest + assert 'ROLLBACK_COMPLETE' in result['events'][0] + connection.delete_stack(StackName=params['StackName']) + + +def test_on_create_failure_do_nothing(maybe_sleep, placeboify): + m = FakeModule( + on_create_failure='DO_NOTHING', + disable_rollback=False, + ) + connection = _create_wrapped_client(placeboify) + params = { + 'StackName': 'ansible-test-on-create-failure-do-nothing', + 'TemplateBody': failing_yaml_tpl + } + result = cfn_module.create_stack(m, params, connection, default_events_limit) + assert result['changed'] + assert result['failed'] + assert len(result['events']) > 1 + # require that the final recorded stack state was CREATE_FAILED + # events are retrieved newest-first, so 0 is the latest + assert 'CREATE_FAILED' in result['events'][0] + connection.delete_stack(StackName=params['StackName']) diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_ami.py 
b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_ami.py new file mode 100644 index 000000000..5e8140d4a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_ami.py @@ -0,0 +1,44 @@ +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from unittest.mock import MagicMock, Mock, patch, call + +import pytest + +from ansible_collections.amazon.aws.plugins.modules import ec2_ami + +module_name = "ansible_collections.amazon.aws.plugins.modules.ec2_ami" + + +@patch(module_name + ".get_image_by_id") +def test_create_image_uefi_data(m_get_image_by_id): + module = MagicMock() + connection = MagicMock() + + m_get_image_by_id.return_value = { + "ImageId": "ami-0c7a795306730b288", + "BootMode": "uefi", + "TpmSupport": "v2.0", + } + + module.params = { + "name": "my-image", + "boot_mode": "uefi", + "tpm_support": "v2.0", + "uefi_data": "QU1aTlVFRkk9xcN0AAAAAHj5a7fZ9+3aT2gcVRgA8Ek3NipiPST0pCiCIlTJtj20FzENCcQa", + } + + ec2_ami.create_image(module, connection) + assert connection.register_image.call_count == 1 + connection.register_image.assert_has_calls( + [ + call( + aws_retry=True, + Description=None, + Name="my-image", + BootMode="uefi", + TpmSupport="v2.0", + UefiData="QU1aTlVFRkk9xcN0AAAAAHj5a7fZ9+3aT2gcVRgA8Ek3NipiPST0pCiCIlTJtj20FzENCcQa" + ) + ] + ) diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_key.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_key.py new file mode 100644 index 000000000..2660ced63 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_key.py @@ -0,0 +1,654 @@ +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from unittest.mock import MagicMock +from unittest.mock import patch +from unittest.mock import call, ANY + +import pytest +import botocore +import datetime +from dateutil.tz import tzutc +from ansible.module_utils._text import to_bytes + +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code + +from ansible_collections.amazon.aws.plugins.modules import ec2_key + +module_name = "ansible_collections.amazon.aws.plugins.modules.ec2_key" + + +def raise_botocore_exception_clienterror(action): + + params = { + 'Error': { + 'Code': 1, + 'Message': 'error creating key' + }, + 'ResponseMetadata': { + 'RequestId': '01234567-89ab-cdef-0123-456789abcdef' + } + } + + if action == 'create_key_pair': + params['Error']['Message'] = 'error creating key' + + elif action == 'describe_key_pair': + params['Error']['Code'] = 'InvalidKeyPair.NotFound' + params['Error']['Message'] = 'The key pair does not exist' + + elif action == 'import_key_pair': + params['Error']['Message'] = 'error importing key' + + elif action == 'delete_key_pair': + params['Error']['Message'] = 'error deleting key' + + return botocore.exceptions.ClientError(params, action) + + +def test__import_key_pair(): + ec2_client = MagicMock() + name = 'my_keypair' + key_material = "ssh-rsa AAAAB3NzaC1yc2EAA email@example.com" + + expected_params = { + 'KeyName': name, + 'PublicKeyMaterial': to_bytes(key_material), + } + + ec2_client.import_key_pair.return_value = { + 'KeyFingerprint': 'd7:ff:a6:63:18:64:9c:57:a1:ee:ca:a4:ad:c2:81:62', + 'KeyName': 'my_keypair', + 'KeyPairId': 'key-012345678905a208d' + } + + result = ec2_key._import_key_pair(ec2_client, name, key_material) + + assert result == 
ec2_client.import_key_pair.return_value + assert ec2_client.import_key_pair.call_count == 1 + ec2_client.import_key_pair.assert_called_with(aws_retry=True, **expected_params) + + +def test_api_failure__import_key_pair(): + ec2_client = MagicMock() + name = 'my_keypair' + key_material = "ssh-rsa AAAAB3NzaC1yc2EAA email@example.com" + + expected_params = { + 'KeyName': name, + 'PublicKeyMaterial': to_bytes(key_material), + } + + ec2_client.import_key_pair.side_effect = raise_botocore_exception_clienterror('import_key_pair') + + with pytest.raises(ec2_key.Ec2KeyFailure): + ec2_key._import_key_pair(ec2_client, name, key_material) + + +def test_extract_key_data_describe_key_pairs(): + + key = { + "CreateTime": datetime.datetime(2022, 9, 15, 20, 10, 15, tzinfo=tzutc()), + "KeyFingerprint": "11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa", + "KeyName": "my_keypair", + "KeyPairId": "key-043046ef2a9a80b56", + "Tags": [], + } + + key_type = "rsa" + + expected_result = { + "name": "my_keypair", + "fingerprint": "11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa", + "id": "key-043046ef2a9a80b56", + "tags": {}, + "type": "rsa" + } + + result = ec2_key.extract_key_data(key, key_type) + + assert result == expected_result + + +def test_extract_key_data_create_key_pair(): + + key = { + 'KeyFingerprint': '11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa', + 'KeyName': 'my_keypair', + 'KeyPairId': 'key-043046ef2a9a80b56' + } + + key_type = "rsa" + + expected_result = { + "name": "my_keypair", + "fingerprint": "11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa", + "id": "key-043046ef2a9a80b56", + "tags": {}, + "type": "rsa" + } + + result = ec2_key.extract_key_data(key, key_type) + + assert result == expected_result + + +@patch(module_name + '.delete_key_pair') +@patch(module_name + '._import_key_pair') +@patch(module_name + '.find_key_pair') +def test_get_key_fingerprint(m_find_key_pair, m_import_key_pair, m_delete_key_pair): + + module = MagicMock() + ec2_client = MagicMock() + + m_find_key_pair.return_value = None + + m_import_key_pair.return_value = { + 'KeyFingerprint': 'd7:ff:a6:63:18:64:9c:57:a1:ee:ca:a4:ad:c2:81:62', + 'KeyName': 'my_keypair', + 'KeyPairId': 'key-043046ef2a9a80b56' + } + + m_delete_key_pair.return_value = { + 'changed': True, + 'key': None, + 'msg': 'key deleted' + } + + expected_result = 'd7:ff:a6:63:18:64:9c:57:a1:ee:ca:a4:ad:c2:81:62' + + key_material = "ssh-rsa AAAAB3NzaC1yc2EAA email@example.com" + + result = ec2_key.get_key_fingerprint(module, ec2_client, key_material) + + assert result == expected_result + assert m_find_key_pair.call_count == 1 + assert m_import_key_pair.call_count == 1 + assert m_delete_key_pair.call_count == 1 + + +def test_find_key_pair(): + ec2_client = MagicMock() + name = 'my_keypair' + + ec2_client.describe_key_pairs.return_value = { + 'KeyPairs': [ + { + 'CreateTime': datetime.datetime(2022, 9, 15, 20, 10, 15, tzinfo=tzutc()), + 'KeyFingerprint': '11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa', + 'KeyName': 'my_keypair', + 'KeyPairId': 'key-043046ef2a9a80b56', + 'KeyType': 'rsa', + 'Tags': [] + } + ], + } + + ec2_key.find_key_pair(ec2_client, name) + + assert ec2_client.describe_key_pairs.call_count == 1 + ec2_client.describe_key_pairs.assert_called_with(aws_retry=True, KeyNames=[name]) + + +def test_api_failure_find_key_pair(): + ec2_client = MagicMock() + name = 'non_existing_keypair' + + ec2_client.describe_key_pairs.side_effect = botocore.exceptions.BotoCoreError + + with 
pytest.raises(ec2_key.Ec2KeyFailure): + ec2_key.find_key_pair(ec2_client, name) + + +def test_invalid_key_pair_find_key_pair(): + ec2_client = MagicMock() + name = 'non_existing_keypair' + + ec2_client.describe_key_pairs.side_effect = raise_botocore_exception_clienterror('describe_key_pair') + + result = ec2_key.find_key_pair(ec2_client, name) + + assert result is None + + +def test__create_key_pair(): + ec2_client = MagicMock() + name = 'my_keypair' + tag_spec = None + key_type = None + + expected_params = {'KeyName': name} + + ec2_client.create_key_pair.return_value = { + "KeyFingerprint": "d7:ff:a6:63:18:64:9c:57:a1:ee:ca:a4:ad:c2:81:62", + "KeyMaterial": ( + "-----BEGIN RSA PRIVATE KEY-----\n" # gitleaks:allow + "MIIEXm7/Bi9wba2m0Qtclu\nCXQw2paSIZb\n" + "-----END RSA PRIVATE KEY-----" + ), + "KeyName": "my_keypair", + "KeyPairId": "key-012345678905a208d", + } + + result = ec2_key._create_key_pair(ec2_client, name, tag_spec, key_type) + + assert result == ec2_client.create_key_pair.return_value + assert ec2_client.create_key_pair.call_count == 1 + ec2_client.create_key_pair.assert_called_with(aws_retry=True, **expected_params) + + +def test_api_failure__create_key_pair(): + ec2_client = MagicMock() + name = 'my_keypair' + tag_spec = None + key_type = None + + ec2_client.create_key_pair.side_effect = raise_botocore_exception_clienterror('create_key_pair') + + with pytest.raises(ec2_key.Ec2KeyFailure): + ec2_key._create_key_pair(ec2_client, name, tag_spec, key_type) + + +@patch(module_name + '.extract_key_data') +@patch(module_name + '._import_key_pair') +def test_create_new_key_pair_key_material(m_import_key_pair, m_extract_key_data): + module = MagicMock() + ec2_client = MagicMock() + + name = 'my_keypair' + key_material = "ssh-rsa AAAAB3NzaC1yc2EAA email@example.com" + key_type = 'rsa' + tags = None + + module.check_mode = False + + m_import_key_pair.return_value = { + 'KeyFingerprint': 'd7:ff:a6:63:18:64:9c:57:a1:ee:ca:a4:ad:c2:81:62', + 'KeyName': 'my_keypair', + 'KeyPairId': 'key-012345678905a208d' + } + + m_extract_key_data.return_value = { + "name": "my_keypair", + "fingerprint": "11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa", + "id": "key-043046ef2a9a80b56", + "tags": {}, + "type": "rsa" + } + + expected_result = {'changed': True, 'key': m_extract_key_data.return_value, 'msg': 'key pair created'} + + result = ec2_key.create_new_key_pair(ec2_client, name, key_material, key_type, tags, module.check_mode) + + assert result == expected_result + assert m_import_key_pair.call_count == 1 + assert m_extract_key_data.call_count == 1 + + +@patch(module_name + '.extract_key_data') +@patch(module_name + '._create_key_pair') +def test_create_new_key_pair_no_key_material(m_create_key_pair, m_extract_key_data): + module = MagicMock() + ec2_client = MagicMock() + + name = 'my_keypair' + key_type = 'rsa' + key_material = None + tags = None + + module.check_mode = False + + m_create_key_pair.return_value = { + 'KeyFingerprint': 'd7:ff:a6:63:18:64:9c:57:a1:ee:ca:a4:ad:c2:81:62', + 'KeyName': 'my_keypair', + 'KeyPairId': 'key-012345678905a208d' + } + + m_extract_key_data.return_value = { + "name": "my_keypair", + "fingerprint": "11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa", + "id": "key-043046ef2a9a80b56", + "tags": {}, + "type": "rsa" + } + + expected_result = {'changed': True, 'key': m_extract_key_data.return_value, 'msg': 'key pair created'} + + result = ec2_key.create_new_key_pair(ec2_client, name, key_material, key_type, tags, module.check_mode) + + assert result 
== expected_result + assert m_create_key_pair.call_count == 1 + assert m_extract_key_data.call_count == 1 + + +def test__delete_key_pair(): + ec2_client = MagicMock() + + key_name = 'my_keypair' + ec2_key._delete_key_pair(ec2_client, key_name) + + assert ec2_client.delete_key_pair.call_count == 1 + ec2_client.delete_key_pair.assert_called_with(aws_retry=True, KeyName=key_name) + + +def test_api_failure__delete_key_pair(): + ec2_client = MagicMock() + name = 'my_keypair' + + ec2_client.delete_key_pair.side_effect = raise_botocore_exception_clienterror('delete_key_pair') + + with pytest.raises(ec2_key.Ec2KeyFailure): + ec2_key._delete_key_pair(ec2_client, name) + + +@patch(module_name + '.extract_key_data') +@patch(module_name + '._import_key_pair') +@patch(module_name + '.delete_key_pair') +@patch(module_name + '.get_key_fingerprint') +def test_update_key_pair_by_key_material_update_needed(m_get_key_fingerprint, m_delete_key_pair, m__import_key_pair, m_extract_key_data): + module = MagicMock() + ec2_client = MagicMock() + + name = 'my_keypair' + key_material = "ssh-rsa AAAAB3NzaC1yc2EAA email@example.com" + tag_spec = None + key = { + "KeyName": "my_keypair", + "KeyFingerprint": "11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa", + "KeyPairId": "key-043046ef2a9a80b56", + "Tags": {}, + } + + module.check_mode = False + + m_get_key_fingerprint.return_value = 'd7:ff:a6:63:18:64:9c:57:a1:ee:ca:a4:ad:c2:81:62' + m_delete_key_pair.return_value = None + m__import_key_pair.return_value = { + 'KeyFingerprint': '11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa', + 'KeyName': 'my_keypair', + 'KeyPairId': 'key-043046ef2a9a80b56', + 'Tags': {}, + } + m_extract_key_data.return_value = { + "name": "my_keypair", + "fingerprint": "d7:ff:a6:63:18:64:9c:57:a1:ee:ca:a4:ad:c2:81:62", + "id": "key-012345678905a208d", + "tags": {}, + } + + expected_result = {'changed': True, 'key': m_extract_key_data.return_value, 'msg': "key pair updated"} + + result = ec2_key.update_key_pair_by_key_material(module.check_mode, ec2_client, name, key, key_material, tag_spec) + + assert result == expected_result + assert m_get_key_fingerprint.call_count == 1 + assert m_delete_key_pair.call_count == 1 + assert m__import_key_pair.call_count == 1 + assert m_extract_key_data.call_count == 1 + m_get_key_fingerprint.assert_called_with(module.check_mode, ec2_client, key_material) + m_delete_key_pair.assert_called_with(module.check_mode, ec2_client, name, finish_task=False) + m__import_key_pair.assert_called_with(ec2_client, name, key_material, tag_spec) + m_extract_key_data.assert_called_with(key) + + +@patch(module_name + ".extract_key_data") +@patch(module_name + ".get_key_fingerprint") +def test_update_key_pair_by_key_material_key_exists(m_get_key_fingerprint, m_extract_key_data): + ec2_client = MagicMock() + + key_material = MagicMock() + key_fingerprint = MagicMock() + tag_spec = MagicMock() + key_id = MagicMock() + key_name = MagicMock() + key = { + "KeyName": key_name, + "KeyFingerprint": key_fingerprint, + "KeyPairId": key_id, + "Tags": {}, + } + + check_mode = False + m_get_key_fingerprint.return_value = key_fingerprint + m_extract_key_data.return_value = { + "name": key_name, + "fingerprint": key_fingerprint, + "id": key_id, + "tags": {}, + } + + expected_result = {"changed": False, "key": m_extract_key_data.return_value, "msg": "key pair already exists"} + + assert expected_result == ec2_key.update_key_pair_by_key_material( + check_mode, ec2_client, key_name, key, key_material, tag_spec + ) + + 
m_get_key_fingerprint.assert_called_once_with(check_mode, ec2_client, key_material)
+    m_extract_key_data.assert_called_once_with(key)
+
+
+@patch(module_name + ".extract_key_data")
+@patch(module_name + "._create_key_pair")
+@patch(module_name + ".delete_key_pair")
+def test_update_key_pair_by_key_type_update_needed(m_delete_key_pair, m__create_key_pair, m_extract_key_data):
+    module = MagicMock()
+    ec2_client = MagicMock()
+
+    name = 'my_keypair'
+    key_type = 'rsa'
+    tag_spec = None
+
+    module.check_mode = False
+
+    m_delete_key_pair.return_value = None
+    m__create_key_pair.return_value = {
+        'KeyFingerprint': '11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa',
+        'Name': 'my_keypair',
+        'Id': 'key-043046ef2a9a80b56',
+        'Tags': {},
+        'Type': 'rsa'
+    }
+    m_extract_key_data.return_value = {
+        "name": "my_keypair",
+        "fingerprint": "11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa",
+        "id": "key-043046ef2a9a80b56",
+        "tags": {},
+        "type": "rsa"
+    }
+
+    expected_result = {"changed": True, "key": m_extract_key_data.return_value, "msg": "key pair updated"}
+
+    result = ec2_key.update_key_pair_by_key_type(module.check_mode, ec2_client, name, key_type, tag_spec)
+
+    assert result == expected_result
+    assert m_delete_key_pair.call_count == 1
+    assert m__create_key_pair.call_count == 1
+    assert m_extract_key_data.call_count == 1
+    m_delete_key_pair.assert_called_with(module.check_mode, ec2_client, name, finish_task=False)
+    m__create_key_pair.assert_called_with(ec2_client, name, tag_spec, key_type)
+    m_extract_key_data.assert_called_with(m__create_key_pair.return_value, key_type)
+
+
+@patch(module_name + '.update_key_pair_by_key_material')
+def test_handle_existing_key_pair_update_key_material_with_force(m_update_key_pair_by_key_material):
+    module = MagicMock()
+    ec2_client = MagicMock()
+
+    name = 'my_keypair'
+    key = {
+        "KeyName": "my_keypair",
+        "KeyFingerprint": "11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa",
+        "KeyPairId": "key-043046ef2a9a80b56",
+        "Tags": {},
+        "KeyType": "rsa"
+    }
+
+    module.params = {
+        'key_material': "ssh-rsa AAAAB3NzaC1yc2EAA email@example.com",
+        'force': True,
+        'key_type': 'rsa',
+        'tags': None,
+        'purge_tags': True,
+        'tag_spec': None
+    }
+
+    key_data = {
+        "name": "my_keypair",
+        "fingerprint": "d7:ff:a6:63:18:64:9c:57:a1:ee:ca:a4:ad:c2:81:62",
+        "id": "key-012345678905a208d",
+        "tags": {},
+    }
+
+    m_update_key_pair_by_key_material.return_value = {'changed': True, 'key': key_data, 'msg': "key pair updated"}
+
+    expected_result = {'changed': True, 'key': key_data, 'msg': "key pair updated"}
+
+    result = ec2_key.handle_existing_key_pair_update(module, ec2_client, name, key)
+
+    assert result == expected_result
+    assert m_update_key_pair_by_key_material.call_count == 1
+
+
+@patch(module_name + '.update_key_pair_by_key_type')
+def test_handle_existing_key_pair_update_key_type(m_update_key_pair_by_key_type):
+    module = MagicMock()
+    ec2_client = MagicMock()
+
+    name = 'my_keypair'
+    key = {
+        "KeyName": "my_keypair",
+        "KeyFingerprint": "11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa",
+        "KeyPairId": "key-043046ef2a9a80b56",
+        "Tags": {},
+        "KeyType": "ed25519"
+    }
+
+    module.params = {
+        'key_material': "ssh-rsa AAAAB3NzaC1yc2EAA email@example.com",
+        'force': False,
+        'key_type': 'rsa',
+        'tags': None,
+        'purge_tags': True,
+        'tag_spec': None
+    }
+
+    key_data = {
+        "name": "my_keypair",
+        "fingerprint": "d7:ff:a6:63:18:64:9c:57:a1:ee:ca:a4:ad:c2:81:62",
+        "id": "key-012345678905a208d",
+        "tags": {},
+    }
+
+    
m_update_key_pair_by_key_type.return_value = {'changed': True, 'key': key_data, 'msg': "key pair updated"} + + expected_result = {'changed': True, 'key': key_data, 'msg': "key pair updated"} + + result = ec2_key.handle_existing_key_pair_update(module, ec2_client, name, key) + + assert result == expected_result + assert m_update_key_pair_by_key_type.call_count == 1 + + +@patch(module_name + '.extract_key_data') +def test_handle_existing_key_pair_else(m_extract_key_data): + module = MagicMock() + ec2_client = MagicMock() + + name = 'my_keypair' + key = { + "KeyName": "my_keypair", + "KeyFingerprint": "11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa", + "KeyPairId": "key-043046ef2a9a80b56", + "Tags": {}, + "KeyType": "rsa" + } + + module.params = { + 'key_material': "ssh-rsa AAAAB3NzaC1yc2EAA email@example.com", + 'force': False, + 'key_type': 'rsa', + 'tags': None, + 'purge_tags': True, + 'tag_spec': None + } + + m_extract_key_data.return_value = { + "name": "my_keypair", + "fingerprint": "11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa", + "id": "key-043046ef2a9a80b56", + "tags": {}, + "type": "rsa" + } + + expected_result = {"changed": False, "key": m_extract_key_data.return_value, "msg": "key pair already exists"} + + result = ec2_key.handle_existing_key_pair_update(module, ec2_client, name, key) + + assert result == expected_result + assert m_extract_key_data.call_count == 1 + + +@patch(module_name + '._delete_key_pair') +@patch(module_name + '.find_key_pair') +def test_delete_key_pair_key_exists(m_find_key_pair, m_delete_key_pair): + module = MagicMock() + ec2_client = MagicMock() + + name = 'my_keypair' + + module.check_mode = False + + m_find_key_pair.return_value = { + 'KeyPairs': [ + { + 'CreateTime': datetime.datetime(2022, 9, 15, 20, 10, 15, tzinfo=tzutc()), + 'KeyFingerprint': '11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa', + 'KeyName': 'my_keypair', + 'KeyPairId': 'key-043046ef2a9a80b56', + 'KeyType': 'rsa', + 'Tags': [] + } + ], + } + + expected_result = {'changed': True, 'key': None, 'msg': 'key deleted'} + + result = ec2_key.delete_key_pair(module.check_mode, ec2_client, name) + + assert m_find_key_pair.call_count == 1 + m_find_key_pair.assert_called_with(ec2_client, name) + assert m_delete_key_pair.call_count == 1 + m_delete_key_pair.assert_called_with(ec2_client, name) + assert result == expected_result + + +@patch(module_name + '._delete_key_pair') +@patch(module_name + '.find_key_pair') +def test_delete_key_pair_key_not_exist(m_find_key_pair, m_delete_key_pair): + module = MagicMock() + ec2_client = MagicMock() + + name = 'my_keypair' + + module.check_mode = False + + m_find_key_pair.return_value = None + + expected_result = {'key': None, 'msg': 'key did not exist'} + + result = ec2_key.delete_key_pair(module.check_mode, ec2_client, name) + + assert m_find_key_pair.call_count == 1 + m_find_key_pair.assert_called_with(ec2_client, name) + assert m_delete_key_pair.call_count == 0 + assert result == expected_result + + +@patch(module_name + ".AnsibleAWSModule") +def test_main_success(m_AnsibleAWSModule): + m_module = MagicMock() + m_AnsibleAWSModule.return_value = m_module + + ec2_key.main() + + m_module.client.assert_called_with("ec2", retry_decorator=ANY) diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_security_group.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_security_group.py new file mode 100644 index 000000000..1ebbe86c6 --- /dev/null +++ 
b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_security_group.py
@@ -0,0 +1,83 @@
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.amazon.aws.plugins.modules import ec2_security_group as group_module
+
+
+def test_from_permission():
+    internal_http = {
+        'FromPort': 80,
+        'IpProtocol': 'tcp',
+        'IpRanges': [
+            {
+                'CidrIp': '10.0.0.0/8',
+                'Description': 'Foo Bar Baz'
+            },
+        ],
+        'Ipv6Ranges': [
+            {'CidrIpv6': 'fe80::94cc:8aff:fef6:9cc/64'},
+        ],
+        'PrefixListIds': [],
+        'ToPort': 80,
+        'UserIdGroupPairs': [],
+    }
+    perms = list(group_module.rule_from_group_permission(internal_http))
+    assert len(perms) == 2
+    assert perms[0].target == '10.0.0.0/8'
+    assert perms[0].target_type == 'ipv4'
+    assert perms[0].description == 'Foo Bar Baz'
+    assert perms[1].target == 'fe80::94cc:8aff:fef6:9cc/64'
+
+    global_egress = {
+        'IpProtocol': '-1',
+        'IpRanges': [{'CidrIp': '0.0.0.0/0'}],
+        'Ipv6Ranges': [],
+        'PrefixListIds': [],
+        'UserIdGroupPairs': []
+    }
+    perms = list(group_module.rule_from_group_permission(global_egress))
+    assert len(perms) == 1
+    assert perms[0].target == '0.0.0.0/0'
+    assert perms[0].port_range == (None, None)
+
+    internal_prefix_http = {
+        'FromPort': 80,
+        'IpProtocol': 'tcp',
+        'PrefixListIds': [
+            {'PrefixListId': 'p-1234'}
+        ],
+        'ToPort': 80,
+        'UserIdGroupPairs': [],
+    }
+    perms = list(group_module.rule_from_group_permission(internal_prefix_http))
+    assert len(perms) == 1
+    assert perms[0].target == 'p-1234'
+
+
+def test_rule_to_permission():
+    tests = [
+        group_module.Rule((22, 22), 'udp', 'sg-1234567890', 'group', None),
+        group_module.Rule((1, 65535), 'tcp', '0.0.0.0/0', 'ipv4', "All TCP from everywhere"),
+        group_module.Rule((443, 443), 'tcp', 'ip-123456', 'ip_prefix', "Traffic to privatelink IPs"),
+        group_module.Rule((443, 443), 'tcp', 'feed:dead:::beef/64', 'ipv6', None),
+    ]
+    for test in tests:
+        perm = group_module.to_permission(test)
+        assert (perm['FromPort'], perm['ToPort']) == test.port_range
+        assert perm['IpProtocol'] == test.protocol
+
+
+def test_validate_ip():
+    class Warner(object):
+        def warn(self, msg):
+            return
+    ips = [
+        ('10.1.1.1/24', '10.1.1.0/24'),
+        ('192.168.56.101/16', '192.168.0.0/16'),
+        # Don't modify IPv6 CIDRs, AWS supports /128 and device ranges
+        ('fc00:8fe0:fe80:b897:8990:8a7c:99bf:323d/128', 'fc00:8fe0:fe80:b897:8990:8a7c:99bf:323d/128'),
+    ]
+
+    for ip, net in ips:
+        assert group_module.validate_ip(Warner(), ip) == net
diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_vpc_dhcp_option.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_vpc_dhcp_option.py
new file mode 100644
index 000000000..73726590f
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_vpc_dhcp_option.py
@@ -0,0 +1,71 @@
+# (c) 2021 Red Hat Inc.
+#
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# Magic... 
Incorrectly identified by pylint as unused +from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify # pylint: disable=unused-import +from ansible_collections.amazon.aws.tests.unit.compat.mock import patch + +from ansible_collections.amazon.aws.plugins.modules import ec2_vpc_dhcp_option as dhcp_module +from ansible_collections.amazon.aws.tests.unit.plugins.modules.utils import ModuleTestCase + +test_module_params = {'domain_name': 'us-west-2.compute.internal', + 'dns_servers': ['AmazonProvidedDNS'], + 'ntp_servers': ['10.10.2.3', '10.10.4.5'], + 'netbios_name_servers': ['10.20.2.3', '10.20.4.5'], + 'netbios_node_type': 2} + +test_create_config = [{'Key': 'domain-name', 'Values': [{'Value': 'us-west-2.compute.internal'}]}, + {'Key': 'domain-name-servers', 'Values': [{'Value': 'AmazonProvidedDNS'}]}, + {'Key': 'ntp-servers', 'Values': [{'Value': '10.10.2.3'}, {'Value': '10.10.4.5'}]}, + {'Key': 'netbios-name-servers', 'Values': [{'Value': '10.20.2.3'}, {'Value': '10.20.4.5'}]}, + {'Key': 'netbios-node-type', 'Values': 2}] + + +test_create_option_set = [{'Key': 'domain-name', 'Values': ['us-west-2.compute.internal']}, + {'Key': 'domain-name-servers', 'Values': ['AmazonProvidedDNS']}, + {'Key': 'ntp-servers', 'Values': ['10.10.2.3', '10.10.4.5']}, + {'Key': 'netbios-name-servers', 'Values': ['10.20.2.3', '10.20.4.5']}, + {'Key': 'netbios-node-type', 'Values': ['2']}] + +test_normalize_config = {'domain-name': ['us-west-2.compute.internal'], + 'domain-name-servers': ['AmazonProvidedDNS'], + 'ntp-servers': ['10.10.2.3', '10.10.4.5'], + 'netbios-name-servers': ['10.20.2.3', '10.20.4.5'], + 'netbios-node-type': '2' + } + + +class FakeModule(object): + def __init__(self, **kwargs): + self.params = kwargs + + def fail_json(self, *args, **kwargs): + self.exit_args = args + self.exit_kwargs = kwargs + raise Exception('FAIL') + + def fail_json_aws(self, *args, **kwargs): + self.exit_args = args + self.exit_kwargs = kwargs + raise Exception('FAIL') + + def exit_json(self, *args, **kwargs): + self.exit_args = args + self.exit_kwargs = kwargs + raise Exception('EXIT') + + +@patch.object(dhcp_module.AnsibleAWSModule, 'client') +class TestDhcpModule(ModuleTestCase): + + def test_normalize_config(self, client_mock): + result = dhcp_module.normalize_ec2_vpc_dhcp_config(test_create_config) + + print(result) + print(test_normalize_config) + assert result == test_normalize_config diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_kms_key.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_kms_key.py new file mode 100644 index 000000000..5a53e2ddb --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_kms_key.py @@ -0,0 +1,82 @@ +# +# (c) 2022 Red Hat Inc. 
+#
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+import pytest
+
+from unittest.mock import MagicMock, call, patch
+from ansible_collections.amazon.aws.plugins.modules import kms_key
+
+
+module_name = "ansible_collections.amazon.aws.plugins.modules.kms_key"
+key_details = {
+    "KeyMetadata": {
+        "aliases": ["mykey"],
+        "Arn": "arn:aws:kms:us-east-1:12345678:key/mrk-12345678",
+        "customer_master_key_spec": "SYMMETRIC_DEFAULT",
+        "description": "",
+        "enable_key_rotation": False,
+        "enabled": True,
+        "encryption_algorithms": ["SYMMETRIC_DEFAULT"],
+        "grants": [],
+        "key_arn": "arn:aws:kms:us-east-1:12345678:key/mrk-12345678",
+        "key_id": "mrk-12345678",
+        "key_manager": "CUSTOMER",
+        "key_policies": [
+            {
+                "Id": "key-default-1",
+                "Statement": [
+                    {
+                        "Action": "kms:*",
+                        "Effect": "Allow",
+                        "Principal": {"AWS": "arn:aws:iam::12345678:root"},
+                        "Resource": "*",
+                        "Sid": "Enable IAM User Permissions",
+                    }
+                ],
+                "Version": "2012-10-17",
+            }
+        ],
+        "key_spec": "SYMMETRIC_DEFAULT",
+        "key_state": "Enabled",
+        "key_usage": "ENCRYPT_DECRYPT",
+        "multi_region": True,
+        "multi_region_configuration": {
+            "multi_region_key_type": "PRIMARY",
+            "primary_key": {
+                "arn": "arn:aws:kms:us-east-1:12345678:key/mrk-12345678",
+                "region": "us-east-1",
+            },
+            "replica_keys": [],
+        },
+        "origin": "AWS_KMS",
+        "tags": {"Hello": "World2"},
+    }
+}
+
+
+@patch(module_name + ".get_kms_metadata_with_backoff")
+def test_fetch_key_metadata(m_get_kms_metadata_with_backoff):
+
+    module = MagicMock()
+    kms_client = MagicMock()
+
+    m_get_kms_metadata_with_backoff.return_value = key_details
+    kms_key.fetch_key_metadata(kms_client, module, "mrk-12345678", "mykey")
+    assert m_get_kms_metadata_with_backoff.call_count == 1
+
+
+def test_validate_params():
+
+    module = MagicMock()
+    module.params = {
+        "state": "present",
+        "multi_region": True
+    }
+
+    kms_key.validate_params(module, key_details["KeyMetadata"])
+    module.fail_json.assert_called_with(
+        msg="You cannot change the multi-region property on an existing key."
+    )
diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_lambda_layer.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_lambda_layer.py
new file mode 100644
index 000000000..451a61766
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_lambda_layer.py
@@ -0,0 +1,493 @@
+#
+# (c) 2022 Red Hat Inc.
+# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from unittest.mock import MagicMock, call, patch +from ansible_collections.amazon.aws.plugins.modules import lambda_layer + + +def raise_lambdalayer_exception(e=None, m=None): + e = e or "lambda layer exc" + m = m or "unit testing" + return lambda_layer.LambdaLayerFailure(exc=e, msg=m) + + +mod_list_layer = 'ansible_collections.amazon.aws.plugins.modules.lambda_layer.list_layer_versions' +mod_create_layer = 'ansible_collections.amazon.aws.plugins.modules.lambda_layer.create_layer_version' +mod_delete_layer = 'ansible_collections.amazon.aws.plugins.modules.lambda_layer.delete_layer_version' + + +@pytest.mark.parametrize( + "params,api_result,calls,ansible_result", + [ + ( + { + "name": "testlayer", + "version": 4 + }, + [], + [], + {"changed": False, "layer_versions": []} + ), + ( + { + "name": "testlayer", + "version": 4 + }, + [ + { + 'compatible_runtimes': ["python3.7"], + 'created_date': "2022-09-29T10:31:35.977+0000", + 'layer_version_arn': "arn:aws:lambda:eu-west-2:123456789012:layer:testlayer:2", + "license_info": "MIT", + 'version': 2, + 'compatible_architectures': [ + 'arm64' + ] + }, + { + "created_date": "2022-09-29T10:31:26.341+0000", + "description": "lambda layer first version", + "layer_version_arn": "arn:aws:lambda:eu-west-2:123456789012:layer:testlayer:1", + "version": 1 + } + ], + [], + {"changed": False, "layer_versions": []} + ), + ( + { + "name": "testlayer", + "version": 2 + }, + [ + { + 'compatible_runtimes': ["python3.7"], + 'created_date': "2022-09-29T10:31:35.977+0000", + 'layer_version_arn': "arn:aws:lambda:eu-west-2:123456789012:layer:testlayer:2", + "license_info": "MIT", + 'version': 2, + 'compatible_architectures': [ + 'arm64' + ] + }, + { + "created_date": "2022-09-29T10:31:26.341+0000", + "description": "lambda layer first version", + "layer_version_arn": "arn:aws:lambda:eu-west-2:123456789012:layer:testlayer:1", + "version": 1 + } + ], + [ + call(LayerName='testlayer', VersionNumber=2) + ], + { + "changed": True, + "layer_versions": [ + { + 'compatible_runtimes': ["python3.7"], + 'created_date': "2022-09-29T10:31:35.977+0000", + 'layer_version_arn': "arn:aws:lambda:eu-west-2:123456789012:layer:testlayer:2", + "license_info": "MIT", + 'version': 2, + 'compatible_architectures': [ + 'arm64' + ] + } + ] + } + ), + ( + { + "name": "testlayer", + "version": -1 + }, + [ + { + 'compatible_runtimes': ["python3.7"], + 'created_date': "2022-09-29T10:31:35.977+0000", + 'layer_version_arn': "arn:aws:lambda:eu-west-2:123456789012:layer:testlayer:2", + "license_info": "MIT", + 'version': 2, + 'compatible_architectures': [ + 'arm64' + ] + }, + { + "created_date": "2022-09-29T10:31:26.341+0000", + "description": "lambda layer first version", + "layer_version_arn": "arn:aws:lambda:eu-west-2:123456789012:layer:testlayer:1", + "version": 1 + } + ], + [ + call(LayerName='testlayer', VersionNumber=2), + call(LayerName='testlayer', VersionNumber=1) + ], + { + "changed": True, + "layer_versions": [ + { + 'compatible_runtimes': ["python3.7"], + 'created_date': "2022-09-29T10:31:35.977+0000", + 'layer_version_arn': "arn:aws:lambda:eu-west-2:123456789012:layer:testlayer:2", + "license_info": "MIT", + 'version': 2, + 'compatible_architectures': [ + 'arm64' + ] + }, + { + "created_date": "2022-09-29T10:31:26.341+0000", + "description": "lambda 
layer first version", + "layer_version_arn": "arn:aws:lambda:eu-west-2:123456789012:layer:testlayer:1", + "version": 1 + } + ] + } + ) + ] +) +@patch(mod_list_layer) +def test_delete_layer(m_list_layer, params, api_result, calls, ansible_result): + + lambda_client = MagicMock() + lambda_client.delete_layer_version.return_value = None + + m_list_layer.return_value = api_result + result = lambda_layer.delete_layer_version(lambda_client, params) + assert result == ansible_result + + m_list_layer.assert_called_once_with( + lambda_client, params.get("name") + ) + + if not calls: + lambda_client.delete_layer_version.assert_not_called() + else: + lambda_client.delete_layer_version.assert_has_calls(calls, any_order=True) + + +@patch(mod_list_layer) +def test_delete_layer_check_mode(m_list_layer): + + lambda_client = MagicMock() + lambda_client.delete_layer_version.return_value = None + + m_list_layer.return_value = [ + { + 'compatible_runtimes': ["python3.7"], + 'created_date': "2022-09-29T10:31:35.977+0000", + 'layer_version_arn': "arn:aws:lambda:eu-west-2:123456789012:layer:testlayer:2", + "license_info": "MIT", + 'version': 2, + 'compatible_architectures': [ + 'arm64' + ] + }, + { + "created_date": "2022-09-29T10:31:26.341+0000", + "description": "lambda layer first version", + "layer_version_arn": "arn:aws:lambda:eu-west-2:123456789012:layer:testlayer:1", + "version": 1 + } + ] + params = {"name": "testlayer", "version": -1} + result = lambda_layer.delete_layer_version(lambda_client, params, check_mode=True) + ansible_result = { + "changed": True, + "layer_versions": [ + { + 'compatible_runtimes': ["python3.7"], + 'created_date': "2022-09-29T10:31:35.977+0000", + 'layer_version_arn': "arn:aws:lambda:eu-west-2:123456789012:layer:testlayer:2", + "license_info": "MIT", + 'version': 2, + 'compatible_architectures': [ + 'arm64' + ] + }, + { + "created_date": "2022-09-29T10:31:26.341+0000", + "description": "lambda layer first version", + "layer_version_arn": "arn:aws:lambda:eu-west-2:123456789012:layer:testlayer:1", + "version": 1 + } + ] + } + assert result == ansible_result + + m_list_layer.assert_called_once_with( + lambda_client, params.get("name") + ) + lambda_client.delete_layer_version.assert_not_called() + + +@patch(mod_list_layer) +def test_delete_layer_failure(m_list_layer): + + lambda_client = MagicMock() + lambda_client.delete_layer_version.side_effect = raise_lambdalayer_exception() + + m_list_layer.return_value = [ + { + "created_date": "2022-09-29T10:31:26.341+0000", + "description": "lambda layer first version", + "layer_version_arn": "arn:aws:lambda:eu-west-2:123456789012:layer:testlayer:1", + "version": 1 + } + ] + params = {"name": "testlayer", "version": 1} + with pytest.raises(lambda_layer.LambdaLayerFailure): + lambda_layer.delete_layer_version(lambda_client, params) + + +@pytest.mark.parametrize( + "b_s3content", + [ + (True), + (False) + ] +) +@patch(mod_list_layer) +def test_create_layer(m_list_layer, b_s3content, tmp_path): + params = { + "name": "testlayer", + "description": "ansible units testing sample layer", + "content": {}, + "license_info": "MIT" + } + + lambda_client = MagicMock() + + lambda_client.publish_layer_version.return_value = { + 'CompatibleRuntimes': [ + 'python3.6', + 'python3.7', + ], + 'Content': { + 'CodeSha256': 'tv9jJO+rPbXUUXuRKi7CwHzKtLDkDRJLB3cC3Z/ouXo=', + 'CodeSize': 169, + 'Location': 'https://awslambda-us-west-2-layers.s3.us-west-2.amazonaws.com/snapshots/123456789012/my-layer-4aaa2fbb', + }, + 'CreatedDate': '2018-11-14T23:03:52.894+0000', + 
'Description': "ansible units testing sample layer", + 'LayerArn': 'arn:aws:lambda:us-west-2:123456789012:layer:my-layer', + 'LayerVersionArn': 'arn:aws:lambda:us-west-2:123456789012:layer:testlayer:1', + 'LicenseInfo': 'MIT', + 'Version': 1, + 'ResponseMetadata': { + 'http_header': 'true', + }, + } + + expected = { + "changed": True, + "layer_versions": [ + { + 'compatible_runtimes': ['python3.6', 'python3.7'], + 'content': { + 'code_sha256': 'tv9jJO+rPbXUUXuRKi7CwHzKtLDkDRJLB3cC3Z/ouXo=', + 'code_size': 169, + 'location': 'https://awslambda-us-west-2-layers.s3.us-west-2.amazonaws.com/snapshots/123456789012/my-layer-4aaa2fbb' + }, + 'created_date': '2018-11-14T23:03:52.894+0000', + 'description': 'ansible units testing sample layer', + 'layer_arn': 'arn:aws:lambda:us-west-2:123456789012:layer:my-layer', + 'layer_version_arn': 'arn:aws:lambda:us-west-2:123456789012:layer:testlayer:1', + 'license_info': 'MIT', + 'version': 1 + } + ] + } + + if b_s3content: + params["content"] = { + "s3_bucket": "mybucket", + "s3_key": "mybucket-key", + "s3_object_version": "v1" + } + content_arg = { + "S3Bucket": "mybucket", + "S3Key": "mybucket-key", + "S3ObjectVersion": "v1" + } + else: + binary_data = b"simple lambda layer content" + test_dir = tmp_path / "lambda_layer" + test_dir.mkdir() + zipfile = test_dir / "lambda.zip" + zipfile.write_bytes(binary_data) + + params["content"] = {"zip_file": str(zipfile)} + content_arg = { + "ZipFile": binary_data, + } + + result = lambda_layer.create_layer_version(lambda_client, params) + + assert result == expected + + lambda_client.publish_layer_version.assert_called_with( + LayerName="testlayer", + Description="ansible units testing sample layer", + LicenseInfo="MIT", + Content=content_arg, + ) + + m_list_layer.assert_not_called() + + +@patch(mod_list_layer) +def test_create_layer_check_mode(m_list_layer): + params = { + "name": "testlayer", + "description": "ansible units testing sample layer", + "content": { + "s3_bucket": "mybucket", + "s3_key": "mybucket-key", + "s3_object_version": "v1" + }, + "license_info": "MIT" + } + + lambda_client = MagicMock() + + result = lambda_layer.create_layer_version(lambda_client, params, check_mode=True) + assert result == {"msg": "Create operation skipped - running in check mode", "changed": True} + + m_list_layer.assert_not_called() + lambda_client.publish_layer_version.assert_not_called() + + +def test_create_layer_failure(): + params = { + "name": "testlayer", + "description": "ansible units testing sample layer", + "content": { + "s3_bucket": "mybucket", + "s3_key": "mybucket-key", + "s3_object_version": "v1" + }, + "compatible_runtimes": [ + "nodejs", + "python3.9" + ], + "compatible_architectures": [ + 'x86_64', + 'arm64' + ] + } + lambda_client = MagicMock() + lambda_client.publish_layer_version.side_effect = raise_lambdalayer_exception() + + with pytest.raises(lambda_layer.LambdaLayerFailure): + lambda_layer.create_layer_version(lambda_client, params) + + +def test_create_layer_using_unexisting_file(): + params = { + "name": "testlayer", + "description": "ansible units testing sample layer", + "content": { + "zip_file": "this_file_does_not_exist", + }, + "compatible_runtimes": [ + "nodejs", + "python3.9" + ], + "compatible_architectures": [ + 'x86_64', + 'arm64' + ] + } + + lambda_client = MagicMock() + + lambda_client.publish_layer_version.return_value = {} + with pytest.raises(FileNotFoundError): + lambda_layer.create_layer_version(lambda_client, params) + + lambda_client.publish_layer_version.assert_not_called() + + 
+@pytest.mark.parametrize( + "params,failure", + [ + ( + {"name": "test-layer"}, + False + ), + ( + {"name": "test-layer", "state": "absent"}, + False + ), + ( + {"name": "test-layer"}, + True + ), + ( + {"name": "test-layer", "state": "absent"}, + True + ), + ] +) +@patch(mod_create_layer) +@patch(mod_delete_layer) +def test_execute_module(m_delete_layer, m_create_layer, params, failure): + + module = MagicMock() + module.params = params + module.check_mode = False + module.exit_json.side_effect = SystemExit(1) + module.fail_json_aws.side_effect = SystemExit(2) + + lambda_client = MagicMock() + + state = params.get("state", "present") + result = {"changed": True, "layers_versions": {}} + + if not failure: + if state == "present": + m_create_layer.return_value = result + with pytest.raises(SystemExit): + lambda_layer.execute_module(module, lambda_client) + + module.exit_json.assert_called_with(**result) + module.fail_json_aws.assert_not_called() + m_create_layer.assert_called_with( + lambda_client, params, module.check_mode + ) + m_delete_layer.assert_not_called() + + elif state == "absent": + m_delete_layer.return_value = result + with pytest.raises(SystemExit): + lambda_layer.execute_module(module, lambda_client) + + module.exit_json.assert_called_with(**result) + module.fail_json_aws.assert_not_called() + m_delete_layer.assert_called_with( + lambda_client, params, module.check_mode + ) + m_create_layer.assert_not_called() + else: + exc = "lambdalayer_execute_module_exception" + msg = "this_exception_is_used_for_unit_testing" + m_create_layer.side_effect = raise_lambdalayer_exception(exc, msg) + m_delete_layer.side_effect = raise_lambdalayer_exception(exc, msg) + + with pytest.raises(SystemExit): + lambda_layer.execute_module(module, lambda_client) + + module.exit_json.assert_not_called() + module.fail_json_aws.assert_called_with( + exc, msg=msg + ) diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_lambda_layer_info.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_lambda_layer_info.py new file mode 100644 index 000000000..25a1f15ac --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_lambda_layer_info.py @@ -0,0 +1,358 @@ +# +# (c) 2022 Red Hat Inc. 
+# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest +from botocore.exceptions import BotoCoreError + +from unittest.mock import MagicMock, call, patch +from ansible_collections.amazon.aws.plugins.modules import lambda_layer_info + + +mod__list_layer_versions = 'ansible_collections.amazon.aws.plugins.modules.lambda_layer_info._list_layer_versions' +mod__list_layers = 'ansible_collections.amazon.aws.plugins.modules.lambda_layer_info._list_layers' +mod_list_layer_versions = 'ansible_collections.amazon.aws.plugins.modules.lambda_layer_info.list_layer_versions' +mod_list_layers = 'ansible_collections.amazon.aws.plugins.modules.lambda_layer_info.list_layers' + + +list_layers_paginate_result = { + 'NextMarker': '002', + 'Layers': [ + { + 'LayerName': "test-layer-01", + 'LayerArn': "arn:aws:lambda:eu-west-2:123456789012:layer:test-layer-01", + 'LatestMatchingVersion': { + 'LayerVersionArn': "arn:aws:lambda:eu-west-2:123456789012:layer:test-layer-01:1", + 'Version': 1, + 'Description': "lambda layer created for unit tests", + 'CreatedDate': "2022-09-29T10:31:26.341+0000", + 'CompatibleRuntimes': [ + 'nodejs', + 'nodejs4.3', + 'nodejs6.10' + ], + 'LicenseInfo': 'MIT', + 'CompatibleArchitectures': [ + 'arm64' + ] + } + }, + { + 'LayerName': "test-layer-02", + 'LayerArn': "arn:aws:lambda:eu-west-2:123456789012:layer:test-layer-02", + 'LatestMatchingVersion': { + 'LayerVersionArn': "arn:aws:lambda:eu-west-2:123456789012:layer:test-layer-02:1", + 'Version': 1, + 'CreatedDate': "2022-09-29T10:31:26.341+0000", + 'CompatibleArchitectures': [ + 'arm64' + ] + } + }, + ], + 'ResponseMetadata': { + 'http': 'true', + }, +} + +list_layers_result = [ + { + 'layer_name': "test-layer-01", + 'layer_arn': "arn:aws:lambda:eu-west-2:123456789012:layer:test-layer-01", + 'layer_version_arn': "arn:aws:lambda:eu-west-2:123456789012:layer:test-layer-01:1", + 'version': 1, + 'description': "lambda layer created for unit tests", + 'created_date': "2022-09-29T10:31:26.341+0000", + 'compatible_runtimes': [ + 'nodejs', + 'nodejs4.3', + 'nodejs6.10' + ], + 'license_info': 'MIT', + 'compatible_architectures': [ + 'arm64' + ] + }, + { + 'layer_name': "test-layer-02", + 'layer_arn': "arn:aws:lambda:eu-west-2:123456789012:layer:test-layer-02", + 'layer_version_arn': "arn:aws:lambda:eu-west-2:123456789012:layer:test-layer-02:1", + 'version': 1, + 'created_date': "2022-09-29T10:31:26.341+0000", + 'compatible_architectures': [ + 'arm64' + ] + } +] + + +list_layers_versions_paginate_result = { + 'LayerVersions': [ + { + 'CompatibleRuntimes': ["python3.7"], + 'CreatedDate': "2022-09-29T10:31:35.977+0000", + 'LayerVersionArn': "arn:aws:lambda:eu-west-2:123456789012:layer:layer-01:2", + "LicenseInfo": "MIT", + 'Version': 2, + 'CompatibleArchitectures': [ + 'arm64' + ] + }, + { + "CompatibleRuntimes": ["python3.7"], + "CreatedDate": "2022-09-29T10:31:26.341+0000", + "Description": "lambda layer first version", + "LayerVersionArn": "arn:aws:lambda:eu-west-2:123456789012:layer:layer-01:1", + "LicenseInfo": "GPL-3.0-only", + "Version": 1 + } + ], + 'ResponseMetadata': { + 'http': 'true', + }, + 'NextMarker': '001', +} + + +list_layers_versions_result = [ + { + "compatible_runtimes": ["python3.7"], + "created_date": "2022-09-29T10:31:35.977+0000", + "layer_version_arn": "arn:aws:lambda:eu-west-2:123456789012:layer:layer-01:2", + "license_info": "MIT", + 
"version": 2, + 'compatible_architectures': [ + 'arm64' + ] + }, + { + "compatible_runtimes": ["python3.7"], + "created_date": "2022-09-29T10:31:26.341+0000", + "description": "lambda layer first version", + "layer_version_arn": "arn:aws:lambda:eu-west-2:123456789012:layer:layer-01:1", + "license_info": "GPL-3.0-only", + "version": 1 + } +] + + +@pytest.mark.parametrize( + "params,call_args", + [ + ( + { + "compatible_runtime": "nodejs", + "compatible_architecture": "arm64" + }, + { + "CompatibleRuntime": "nodejs", + "CompatibleArchitecture": "arm64" + } + ), + ( + { + "compatible_runtime": "nodejs", + }, + { + "CompatibleRuntime": "nodejs", + } + ), + ( + { + "compatible_architecture": "arm64" + }, + { + "CompatibleArchitecture": "arm64" + } + ), + ( + {}, {} + ) + ] +) +@patch(mod__list_layers) +def test_list_layers_with_latest_version(m__list_layers, params, call_args): + + lambda_client = MagicMock() + + m__list_layers.return_value = list_layers_paginate_result + layers = lambda_layer_info.list_layers(lambda_client, **params) + + m__list_layers.assert_has_calls( + [ + call(lambda_client, **call_args) + ] + ) + assert layers == list_layers_result + + +@pytest.mark.parametrize( + "params,call_args", + [ + ( + { + "name": "layer-01", + "compatible_runtime": "nodejs", + "compatible_architecture": "arm64" + }, + { + "LayerName": "layer-01", + "CompatibleRuntime": "nodejs", + "CompatibleArchitecture": "arm64" + } + ), + ( + { + "name": "layer-01", + "compatible_runtime": "nodejs", + }, + { + "LayerName": "layer-01", + "CompatibleRuntime": "nodejs", + } + ), + ( + { + "name": "layer-01", + "compatible_architecture": "arm64" + }, + { + "LayerName": "layer-01", + "CompatibleArchitecture": "arm64" + } + ), + ( + {"name": "layer-01"}, {"LayerName": "layer-01"} + ) + ] +) +@patch(mod__list_layer_versions) +def test_list_layer_versions(m__list_layer_versions, params, call_args): + + lambda_client = MagicMock() + + m__list_layer_versions.return_value = list_layers_versions_paginate_result + layers = lambda_layer_info.list_layer_versions(lambda_client, **params) + + m__list_layer_versions.assert_has_calls( + [ + call(lambda_client, **call_args) + ] + ) + assert layers == list_layers_versions_result + + +def raise_botocore_exception(): + return BotoCoreError(error="failed", operation="list_layers") + + +@pytest.mark.parametrize( + "params", + [ + ( + { + "name": "test-layer", + "compatible_runtime": "nodejs", + "compatible_architecture": "arm64" + } + ), + ( + { + "compatible_runtime": "nodejs", + "compatible_architecture": "arm64" + } + ) + ] +) +@patch(mod__list_layers) +@patch(mod__list_layer_versions) +def test_list_layers_with_failure(m__list_layer_versions, m__list_layers, params): + + lambda_client = MagicMock() + + if "name" in params: + m__list_layer_versions.side_effect = raise_botocore_exception() + test_function = lambda_layer_info.list_layer_versions + else: + m__list_layers.side_effect = raise_botocore_exception() + test_function = lambda_layer_info.list_layers + + with pytest.raises(lambda_layer_info.LambdaLayerInfoFailure): + test_function(lambda_client, **params) + + +def raise_layer_info_exception(exc, msg): + return lambda_layer_info.LambdaLayerInfoFailure(exc=exc, msg=msg) + + +@pytest.mark.parametrize( + "params,failure", + [ + ( + { + "name": "test-layer", + "compatible_runtime": "nodejs", + "compatible_architecture": "arm64" + }, + False + ), + ( + { + "compatible_runtime": "nodejs", + "compatible_architecture": "arm64" + }, + False + ), + ( + { + "name": "test-layer", + 
"compatible_runtime": "nodejs", + "compatible_architecture": "arm64" + }, + True + ) + ] +) +@patch(mod_list_layers) +@patch(mod_list_layer_versions) +def test_execute_module(m_list_layer_versions, m_list_layers, params, failure): + + lambda_client = MagicMock() + + module = MagicMock() + module.params = params + module.exit_json.side_effect = SystemExit(1) + module.fail_json_aws.side_effect = SystemExit(2) + + method_called, method_not_called = m_list_layers, m_list_layer_versions + if "name" in params: + method_not_called, method_called = m_list_layers, m_list_layer_versions + + if failure: + exc = "lambda_layer_exception" + msg = "this exception has been generated for unit tests" + + method_called.side_effect = raise_layer_info_exception(exc, msg) + + with pytest.raises(SystemExit): + lambda_layer_info.execute_module(module, lambda_client) + + module.fail_json_aws.assert_called_with(exception=exc, msg=msg) + + else: + result = {"A": "valueA", "B": "valueB"} + method_called.return_value = result + + with pytest.raises(SystemExit): + lambda_layer_info.execute_module(module, lambda_client) + + module.exit_json.assert_called_with( + changed=False, layers_versions=result + ) + method_called.assert_called_with(lambda_client, **params) + method_not_called.list_layers.assert_not_called() diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_s3_object.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_s3_object.py new file mode 100644 index 000000000..b02513072 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_s3_object.py @@ -0,0 +1,29 @@ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.module_utils.six.moves.urllib.parse import urlparse + +from ansible_collections.amazon.aws.plugins.modules import s3_object + + +class TestUrlparse(): + + def test_urlparse(self): + actual = urlparse("http://test.com/here") + assert actual.scheme == "http" + assert actual.netloc == "test.com" + assert actual.path == "/here" + + def test_is_fakes3(self): + actual = s3_object.is_fakes3("fakes3://bla.blubb") + assert actual is True + + def test_get_s3_connection(self): + aws_connect_kwargs = dict(aws_access_key_id="access_key", + aws_secret_access_key="secret_key") + location = None + rgw = True + s3_url = "http://bla.blubb" + actual = s3_object.get_s3_connection(None, aws_connect_kwargs, location, rgw, s3_url) + assert "bla.blubb" in str(actual._endpoint) diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/utils.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/utils.py new file mode 100644 index 000000000..058a5b605 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/utils.py @@ -0,0 +1,50 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + +from ansible_collections.amazon.aws.tests.unit.compat import unittest +from ansible_collections.amazon.aws.tests.unit.compat.mock import patch +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes + + +def set_module_args(args): + if '_ansible_remote_tmp' not in args: + args['_ansible_remote_tmp'] = '/tmp' + if '_ansible_keep_remote_files' not in args: + args['_ansible_keep_remote_files'] = False + + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) + + +class AnsibleExitJson(Exception): + pass + + +class AnsibleFailJson(Exception): 
+ pass + + +def exit_json(*args, **kwargs): + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +class ModuleTestCase(unittest.TestCase): + + def setUp(self): + self.mock_module = patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json) + self.mock_module.start() + self.mock_sleep = patch('time.sleep') + self.mock_sleep.start() + set_module_args({}) + self.addCleanup(self.mock_module.stop) + self.addCleanup(self.mock_sleep.stop) diff --git a/ansible_collections/amazon/aws/tests/unit/requirements.txt b/ansible_collections/amazon/aws/tests/unit/requirements.txt new file mode 100644 index 000000000..49f392832 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/requirements.txt @@ -0,0 +1,5 @@ +# Our code is based on the AWS SDKs +botocore +boto3 + +placebo diff --git a/ansible_collections/amazon/aws/tests/unit/utils/amazon_placebo_fixtures.py b/ansible_collections/amazon/aws/tests/unit/utils/amazon_placebo_fixtures.py new file mode 100644 index 000000000..6912c2e32 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/utils/amazon_placebo_fixtures.py @@ -0,0 +1,213 @@ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import errno +import os +import time +import mock +import pytest + +boto3 = pytest.importorskip("boto3") +botocore = pytest.importorskip("botocore") +placebo = pytest.importorskip("placebo") + +""" +Using Placebo to test modules using boto3: + +This is an example test, using the placeboify fixture to test that a module +will fail if resources it depends on don't exist. + +> from placebo_fixtures import placeboify, scratch_vpc +> +> def test_create_with_nonexistent_launch_config(placeboify): +> connection = placeboify.client('autoscaling') +> module = FakeModule('test-asg-created', None, min_size=0, max_size=0, desired_capacity=0) +> with pytest.raises(FailJSON) as excinfo: +> asg_module.create_autoscaling_group(connection, module) +> .... asserts based on module state/exceptions .... + +In more advanced cases, use unrecorded resource fixtures to fill in ARNs/IDs of +things modules depend on, such as: + +> def test_create_in_vpc(placeboify, scratch_vpc): +> connection = placeboify.client('autoscaling') +> module = FakeModule(name='test-asg-created', +> min_size=0, max_size=0, desired_capacity=0, +> availability_zones=[s['az'] for s in scratch_vpc['subnets']], +> vpc_zone_identifier=[s['id'] for s in scratch_vpc['subnets']], +> ) +> ..... so on and so forth .... +""" + + +@pytest.fixture +def placeboify(request, monkeypatch): + """This fixture puts a recording/replaying harness around `boto3_conn` + + Placeboify patches the `boto3_conn` function in ec2 module_utils to return + a boto3 session that in recording or replaying mode, depending on the + PLACEBO_RECORD environment variable. Unset PLACEBO_RECORD (the common case + for just running tests) will put placebo in replay mode, set PLACEBO_RECORD + to any value to turn off replay & operate on real AWS resources. + + The recorded sessions are stored in the test file's directory, under the + namespace `placebo_recordings/{testfile name}/{test function name}` to + distinguish them. 
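+
+    A typical invocation of each mode looks like this (paths illustrative):
+
+    > # replay mode (default) - requires recordings to already exist
+    > pytest tests/unit/plugins/modules/test_cloudformation.py
+    > # record mode - runs against real AWS credentials and (re)writes recordings
+    > PLACEBO_RECORD=1 pytest tests/unit/plugins/modules/test_cloudformation.py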
+ """ + session = boto3.Session(region_name='us-west-2') + + recordings_path = os.path.join( + request.fspath.dirname, + 'placebo_recordings', + request.fspath.basename.replace('.py', ''), + request.function.__name__ + # remove the test_ prefix from the function & file name + ).replace('test_', '') + + if not os.getenv('PLACEBO_RECORD'): + if not os.path.isdir(recordings_path): + raise NotImplementedError('Missing Placebo recordings in directory: %s' % recordings_path) + else: + try: + # make sure the directory for placebo test recordings is available + os.makedirs(recordings_path) + except OSError as e: + if e.errno != errno.EEXIST: + raise + + pill = placebo.attach(session, data_path=recordings_path) + if os.getenv('PLACEBO_RECORD'): + pill.record() + else: + pill.playback() + + def boto3_middleman_connection(module, conn_type, resource, region='us-west-2', **kwargs): + if conn_type != 'client': + # TODO support resource-based connections + raise ValueError('Mocker only supports client, not %s' % conn_type) + return session.client(resource, region_name=region) + + import ansible_collections.amazon.aws.plugins.module_utils.ec2 + monkeypatch.setattr( + ansible_collections.amazon.aws.plugins.module_utils.ec2, + 'boto3_conn', + boto3_middleman_connection, + ) + yield session + + # tear down + pill.stop() + + +@pytest.fixture(scope='module') +def basic_launch_config(): + """Create an EC2 launch config whose creation *is not* recorded and return its name + + This fixture is module-scoped, since launch configs are immutable and this + can be reused for many tests. + """ + if not os.getenv('PLACEBO_RECORD'): + yield 'pytest_basic_lc' + return + + # use a *non recording* session to make the launch config + # since that's a prereq of the ec2_asg module, and isn't what + # we're testing. 
+    asg = boto3.client('autoscaling')
+    asg.create_launch_configuration(
+        LaunchConfigurationName='pytest_basic_lc',
+        ImageId='ami-9be6f38c',  # Amazon Linux 2016.09 us-east-1 AMI, can be any valid AMI
+        SecurityGroups=[],
+        UserData='#!/bin/bash\necho hello world',
+        InstanceType='t2.micro',
+        InstanceMonitoring={'Enabled': False},
+        AssociatePublicIpAddress=True
+    )
+
+    yield 'pytest_basic_lc'
+
+    try:
+        asg.delete_launch_configuration(LaunchConfigurationName='pytest_basic_lc')
+    except botocore.exceptions.ClientError as e:
+        if 'not found' in e.response['Error']['Message']:
+            return
+        raise
+
+
+@pytest.fixture(scope='module')
+def scratch_vpc():
+    if not os.getenv('PLACEBO_RECORD'):
+        yield {
+            'vpc_id': 'vpc-123456',
+            'cidr_range': '10.0.0.0/16',
+            'subnets': [
+                {
+                    'id': 'subnet-123456',
+                    'az': 'us-east-1d',
+                },
+                {
+                    'id': 'subnet-654321',
+                    'az': 'us-east-1e',
+                },
+            ]
+        }
+        return
+
+    # use a *non recording* session to make the base VPC and subnets
+    ec2 = boto3.client('ec2')
+    vpc_resp = ec2.create_vpc(
+        CidrBlock='10.0.0.0/16',
+        AmazonProvidedIpv6CidrBlock=False,
+    )
+    subnets = (
+        ec2.create_subnet(
+            VpcId=vpc_resp['Vpc']['VpcId'],
+            CidrBlock='10.0.0.0/24',
+        ),
+        ec2.create_subnet(
+            VpcId=vpc_resp['Vpc']['VpcId'],
+            CidrBlock='10.0.1.0/24',
+        )
+    )
+    time.sleep(3)
+
+    yield {
+        'vpc_id': vpc_resp['Vpc']['VpcId'],
+        'cidr_range': '10.0.0.0/16',
+        'subnets': [
+            {
+                'id': s['Subnet']['SubnetId'],
+                'az': s['Subnet']['AvailabilityZone'],
+            } for s in subnets
+        ]
+    }
+
+    try:
+        for s in subnets:
+            try:
+                ec2.delete_subnet(SubnetId=s['Subnet']['SubnetId'])
+            except botocore.exceptions.ClientError as e:
+                if 'not found' in e.response['Error']['Message']:
+                    continue
+                raise
+        ec2.delete_vpc(VpcId=vpc_resp['Vpc']['VpcId'])
+    except botocore.exceptions.ClientError as e:
+        if 'not found' in e.response['Error']['Message']:
+            return
+        raise
+
+
+@pytest.fixture(scope='module')
+def maybe_sleep():
+    """If placebo is reading saved sessions, make sleep always take 0 seconds.
+
+    AWS modules often perform polling or retries, but when using recorded
+    sessions there's no reason to wait. We can still exercise retry and other
+    code paths without waiting for wall-clock time to pass."""
+    if not os.getenv('PLACEBO_RECORD'):
+        p = mock.patch('time.sleep', return_value=None)
+        p.start()
+        yield
+        p.stop()
+    else:
+        yield
diff --git a/ansible_collections/amazon/aws/tox.ini b/ansible_collections/amazon/aws/tox.ini
new file mode 100644
index 000000000..292a97001
--- /dev/null
+++ b/ansible_collections/amazon/aws/tox.ini
@@ -0,0 +1,27 @@
+[tox]
+skipsdist=True
+envlist = clean,ansible{2.12,2.13}-py{38,39,310}-{with_constraints,without_constraints}
+
+[testenv]
+deps =
+  pytest
+  pytest-cov
+  ansible2.12: ansible-core>2.12,<2.13
+  ansible2.13: ansible-core>2.13,<2.14
+  !ansible2.12-!ansible2.13: ansible-core
+  pytest-ansible-units
+  -rtest-requirements.txt
+  with_constraints: -rtests/unit/constraints.txt
+commands = pytest --cov-report html --cov plugins/callback --cov plugins/inventory --cov plugins/lookup --cov plugins/module_utils --cov plugins/modules plugins {posargs:tests/}
+
+[testenv:clean]
+deps = coverage
+skip_install = true
+commands = coverage erase
+
+[testenv:complexity-report]
+deps =
+  # See: https://github.com/lordmauve/flake8-html/issues/30
+  flake8>=3.3.0,<5.0.0
+  flake8-html
+commands = -flake8 --select C90 --max-complexity 10 --format=html --htmldir={posargs} plugins
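With the fixtures above in place, a placebo-backed unit test can stay very
small. The following sketch is illustrative only and is not part of the patch:
the test name and the describe_regions call are arbitrary choices, and it
assumes either a recording already exists under placebo_recordings/ or that
PLACEBO_RECORD is set in the environment.

    def test_describe_regions_replayed(placeboify, maybe_sleep):
        # placeboify yields a boto3 Session whose clients record or replay
        connection = placeboify.client('ec2')
        response = connection.describe_regions()
        # under replay this returns canned data without touching AWS
        assert 'Regions' in response

Because maybe_sleep patches time.sleep during replay, even modules that poll
in a retry loop finish quickly against recorded data.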