author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-13 12:04:41 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-13 12:04:41 +0000
commit     975f66f2eebe9dadba04f275774d4ab83f74cf25 (patch)
tree       89bd26a93aaae6a25749145b7e4bca4a1e75b2be /ansible_collections/amazon/aws/tests/integration/targets
parent     Initial commit. (diff)
download   ansible-975f66f2eebe9dadba04f275774d4ab83f74cf25.tar.xz
           ansible-975f66f2eebe9dadba04f275774d4ab83f74cf25.zip

Adding upstream version 7.7.0+dfsg. (upstream/7.7.0+dfsg)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'ansible_collections/amazon/aws/tests/integration/targets')
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/aliases | 7
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/inventory | 8
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/main.yml | 35
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/meta/main.yml | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/defaults/main.yml | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/create_update_delete.yml | 593
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/env_cleanup.yml | 116
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/env_setup.yml | 51
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/instance_detach.yml | 256
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/main.yml | 40
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/tag_operations.yml | 339
-rwxr-xr-x  ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/runme.sh | 12
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/aliases | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/main.yml | 5
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/tasks/main.yml | 193
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/aliases | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/tasks/main.yaml | 18
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/aliases | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/inventory | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/main.yml | 35
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/meta/main.yml | 1
-rwxr-xr-x  ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/runme.sh | 19
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/cloudformation/aliases | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/cloudformation/defaults/main.yml | 8
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/cloudformation/files/cf_template.json | 37
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/cloudformation/files/update_policy.json | 10
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/cloudformation/meta/main.yml | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/cloudformation/tasks/main.yml | 491
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/aliases | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/defaults/main.yml | 8
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/main.yml | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/meta/main.yml | 1
-rwxr-xr-x  ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/runme.sh | 27
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/tasks/main.yml | 1595
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/tasks/tagging.yml | 252
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/cloudtrail-no-kms-assume-policy.j2 | 11
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/cloudtrail-no-kms-policy.j2 | 11
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/cloudwatch-assume-policy.j2 | 13
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/cloudwatch-policy.j2 | 17
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/kms-policy.j2 | 34
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/s3-policy.j2 | 34
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/sns-policy.j2 | 34
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/aliases | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/defaults/main.yml | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/meta/main.yml | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/env_cleanup.yml | 94
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/env_setup.yml | 62
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/main.yml | 518
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/aliases | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/defaults/main.yml | 9
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/tasks/main.yml | 96
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/aliases | 5
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/defaults/main.yml | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/cloudwatchlogs_tests.yml | 151
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/create-delete-tags.yml | 444
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/main.yml | 16
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/aliases | 5
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/defaults/main.yml | 11
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/meta/main.yml | 5
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/tasks/main.yml | 786
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/vars/main.yml | 20
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/aliases | 5
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/defaults/main.yml | 5
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/meta/main.yml | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/tasks/main.yml | 1442
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/aliases | 3
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/defaults/main.yml | 16
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/meta/main.yml | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/main.yaml | 159
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_attachment.yaml | 278
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_create_attached_multiple.yml | 121
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_deletion.yaml | 118
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_eni_basic_creation.yaml | 263
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_ipaddress_assign.yaml | 325
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_delete_on_termination.yaml | 214
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_source_dest_check.yaml | 98
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_tags.yaml | 251
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/aliases | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/defaults/main.yml | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/meta/main.yml | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/tasks/main.yml | 110
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/aliases | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/defaults/main.yml | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/meta/main.yml | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/tasks/main.yml | 208
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/aliases | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/defaults/main.yml | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/meta/main.yml | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/tasks/main.yml | 85
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/aliases | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/defaults/main.yml | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/meta/main.yml | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/tasks/main.yml | 63
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/aliases | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/defaults/main.yml | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/meta/main.yml | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/tasks/main.yml | 31
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/aliases | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/defaults/main.yml | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/meta/main.yml | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/tasks/main.yml | 161
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/aliases | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/defaults/main.yml | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/meta/main.yml | 9
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/tasks/main.yml | 145
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/aliases | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/defaults/main.yml | 7
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/files/assume-role-policy.json | 13
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/meta/main.yml | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/tasks/main.yml | 131
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/aliases | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/defaults/main.yml | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/meta/main.yml | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/tasks/main.yml | 699
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/aliases | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/defaults/main.yml | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/meta/main.yml | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/tasks/main.yml | 443
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/aliases | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/defaults/main.yml | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/meta/main.yml | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/tasks/main.yml | 58
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/aliases | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/defaults/main.yml | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/meta/main.yml | 9
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/tasks/main.yml | 98
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/aliases | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/defaults/main.yml | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/meta/main.yml | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/tasks/main.yml | 87
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/aliases | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/defaults/main.yml | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/meta/main.yml | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/tasks/main.yml | 133
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/aliases | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/defaults/main.yml | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/meta/main.yml | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/tasks/main.yml | 179
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/aliases | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/defaults/main.yml | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/meta/main.yml | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/tasks/main.yml | 250
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/aliases | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/defaults/main.yml | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/meta/main.yml | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/tasks/main.yml | 63
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_key/aliases | 5
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_key/defaults/main.yml | 3
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_key/meta/main.yml | 5
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_key/tasks/main.yml | 461
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/aliases | 5
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/meta/main.yml | 7
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/setup.yml | 182
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/teardown.yml | 84
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/test_metadata.yml | 18
-rwxr-xr-x  ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/runme.sh | 22
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/templates/inventory.j2 | 34
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/aliases | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/defaults/main.yml | 7
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/data_validation.yml | 33
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/diff_mode.yml | 167
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/egress_tests.yml | 177
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/group_info.yml | 96
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/icmp_verbs.yml | 221
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/ipv6_default_tests.yml | 90
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/main.yml | 1368
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/multi_account.yml | 124
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/multi_nested_target.yml | 213
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/numeric_protos.yml | 60
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/rule_group_create.yml | 127
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/aliases | 10
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/defaults/main.yml | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/meta/main.yml | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/tasks/main.yml | 400
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/aliases | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/defaults/main.yml | 14
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/meta/main.yml | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/tasks/main.yaml | 315
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/tasks/terminate_associated_instances.yml | 109
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/aliases | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/defaults/main.yml | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/tasks/main.yml | 136
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/vars/main.yml | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/aliases | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/defaults/main.yml | 8
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/meta/main.yml | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/tasks/main.yml | 1002
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/aliases | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/defaults/main.yml | 5
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/tasks/main.yml | 948
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/aliases | 3
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/defaults/main.yml | 7
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/tasks/main.yml | 862
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/aliases | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/defaults/main.yml | 3
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/tasks/main.yml | 135
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/aliases | 3
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/defaults/main.yml | 3
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/tasks/main.yml | 550
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/aliases | 5
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/defaults/main.yml | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/tasks/main.yml | 978
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/aliases | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/defaults/main.yml | 8
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/tasks/main.yml | 1560
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/aliases | 5
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/defaults/main.yml | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/meta/main.yml | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/tasks/main.yml | 1499
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/aliases | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/defaults/main.yml | 9
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/meta/main.yml | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/tasks/main.yml | 683
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/aliases | 3
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/defaults/main.yml | 28
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/tasks/main.yml | 1558
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/templates/policy.json | 13
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/aliases | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/defaults/main.yml | 170
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/meta/main.yml | 3
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/basic_internal.yml | 292
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/basic_public.yml | 273
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_instances.yml | 9
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_s3.yml | 32
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_vpc.yml | 29
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/complex_changes.yml | 330
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/describe_region.yml | 10
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/https_listeners.yml | 132
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/main.yml | 58
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/missing_params.yml | 203
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/schema_change.yml | 189
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_instances.yml | 25
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_s3.yml | 26
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_vpc.yml | 103
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_changes.yml | 79
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_cross_az.yml | 100
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_draining_timeout.yml | 148
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_healthcheck.yml | 116
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_idle_timeout.yml | 50
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_instances.yml | 415
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_listeners.yml | 196
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_logging.yml | 587
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_proxy_policy.yml | 141
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_securitygroups.yml | 106
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_stickiness.yml | 390
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_tags.yml | 141
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/templates/s3_policy.j2 | 15
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/vars/main.yml | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/iam_policy/aliases | 8
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/iam_policy/defaults/main.yml | 5
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/iam_policy/files/no_access.json | 10
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/iam_policy/files/no_access_with_id.json | 11
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/iam_policy/files/no_access_with_second_id.json | 11
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/iam_policy/files/no_trust.json | 10
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/iam_policy/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/iam_policy/tasks/main.yml | 70
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/iam_policy/tasks/object.yml | 1169
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/iam_user/aliases | 9
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/iam_user/defaults/main.yml | 10
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/iam_user/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/main.yml | 798
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/aliases | 3
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/create_environment_script.yml | 9
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/create_inventory_config.yml | 11
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/empty_inventory_config.yml | 9
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/populate_cache.yml | 55
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/setup.yml | 52
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/tear_down.yml | 31
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml | 9
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_inventory_cache.yml | 18
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory.yml | 78
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_concatenation.yml | 56
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml | 69
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostnames_using_tags.yml | 62
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostnames_using_tags_classic.yml | 62
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostvars_prefix_suffix.yml | 65
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_include_or_exclude_filters.yml | 103
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_literal_string.yml | 56
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_use_contrib_script_keys.yml | 57
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_refresh_inventory.yml | 61
-rwxr-xr-x  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/runme.sh | 67
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory.yml.j2 | 14
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_cache.yml.j2 | 14
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_concatenation.yml.j2 | 15
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_constructed.yml.j2 | 22
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostnames_using_tags.yml.j2 | 21
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostnames_using_tags_classic.yml.j2 | 21
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostvars_prefix_suffix.yml.j2 | 19
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_include_or_exclude_filters.yml.j2 | 23
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_literal_string.yml.j2 | 15
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_template.yml.j2 | 14
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_use_contrib_script_keys.yml.j2 | 15
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/test.aws_ec2.yml | 0
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/aliases | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/create_inventory_config.yml | 11
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/empty_inventory_config.yml | 9
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/populate_cache.yml | 57
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_invalid_aws_rds_inventory_config.yml | 9
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_cache.yml | 18
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_with_hostvars_prefix_suffix.yml | 63
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory.yml | 77
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory_with_constructed.yml | 65
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_refresh_inventory.yml | 67
-rwxr-xr-x  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/runme.sh | 47
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory.j2 | 10
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_cache.j2 | 13
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_constructed.j2 | 17
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_hostvars_prefix_suffix.j2 | 16
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/test.aws_rds.yml | 0
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/kms_key/aliases | 11
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/kms_key/inventory | 12
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/kms_key/main.yml | 9
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/kms_key/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/defaults/main.yml | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/main.yml | 11
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_grants.yml | 350
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_modify.yml | 279
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_multi_region.yml | 100
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_states.yml | 522
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_tagging.yml | 187
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/templates/console-policy-no-key-rotation.j2 | 81
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/templates/console-policy.j2 | 72
-rwxr-xr-x  ansible_collections/amazon/aws/tests/integration/targets/kms_key/runme.sh | 12
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda/aliases | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda/defaults/main.yml | 13
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda/files/mini_lambda.py | 48
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda/files/minimal_trust_policy.json | 12
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda/meta/main.yml | 5
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda/tasks/main.yml | 788
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda/tasks/tagging.yml | 246
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/aliases | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/defaults/main.yml | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/files/mini_lambda.py | 48
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/files/minimal_trust_policy.json | 12
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/tasks/main.yml | 622
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda_event/aliases | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda_event/defaults/main.yml | 10
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda_event/files/mini_lambda.py | 48
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda_event/files/minimal_trust_policy.json | 12
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda_event/meta/main.yml | 5
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/main.yml | 117
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/setup.yml | 83
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/teardown.yml | 33
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda_layer/aliases | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda_layer/defaults/main.yml | 10
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda_layer/tasks/main.yml | 248
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/aliases | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/defaults/main.yml | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/files/mini_http_lambda.py | 40
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/files/minimal_trust_policy.json | 12
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/tasks/main.yml | 144
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/templates/endpoint-test-swagger-api.yml.j2 | 39
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/legacy_missing_tests/README.md | 5
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/legacy_missing_tests/aliases | 5
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/legacy_missing_tests/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_account_attribute/aliases | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_account_attribute/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_account_attribute/tasks/main.yaml | 130
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/aliases | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/tasks/main.yaml | 120
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_service_ip_ranges/aliases | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_service_ip_ranges/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_service_ip_ranges/tasks/main.yaml | 148
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/aliases | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/defaults/main.yml | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/tasks/main.yml | 276
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/aliases | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/main.yml | 12
-rwxr-xr-x  ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/record.sh | 23
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/recording.tar.gz | bin 0 -> 967 bytes
-rwxr-xr-x  ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/runme.sh | 15
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/aliases | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/inventory | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/main.yml | 8
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/files/amazonroot.pem | 20
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/files/isrg-x1.pem | 31
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/library/example_module.py | 46
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/meta/main.yml | 3
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/ca_bundle.yml | 202
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/credentials.yml | 281
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/endpoints.yml | 123
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/main.yml | 12
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/profiles.yml | 74
-rwxr-xr-x  ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/runme.sh | 16
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/setup.yml | 40
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/templates/boto_config.j2 | 5
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/templates/session_credentials.yml.j2 | 3
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/aliases | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/inventory | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/main.yml | 7
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/library/example_module.py | 39
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/meta/main.yml | 3
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/tasks/main.yml | 36
-rwxr-xr-x  ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/runme.sh | 8
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/aliases | 5
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/inventory | 23
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/main.yml | 10
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/defaults/main.yml | 36
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/main.yml | 10
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_create.yml | 123
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_create_sgs.yml | 208
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_modify.yml | 270
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_promote.yml | 187
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_restore.yml | 185
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_tag.yml | 290
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/vars/main.yml | 1
-rwxr-xr-x  ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/runme.sh | 12
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/aliases | 7
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/defaults/main.yml | 7
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/meta/main.yml | 5
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/tasks/main.yml | 79
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/aliases | 5
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/defaults/main.yml | 13
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/tasks/main.yml | 480
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/vars/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_aurora/aliases | 3
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_aurora/defaults/main.yml | 9
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_aurora/tasks/main.yml | 122
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/aliases | 3
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/defaults/main.yml | 16
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/files/enhanced_monitoring_assume_policy.json | 13
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/files/s3_integration_policy.json | 16
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/files/s3_integration_trust_policy.json | 13
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/tasks/main.yml | 205
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/aliases | 3
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/defaults/main.yml | 9
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/tasks/main.yml | 206
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_processor/aliases | 3
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_processor/defaults/main.yml | 12
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_processor/tasks/main.yml | 141
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_replica/aliases | 3
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_replica/defaults/main.yml | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_replica/tasks/main.yml | 234
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_restore/aliases | 3
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_restore/defaults/main.yml | 5
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_restore/tasks/main.yml | 131
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_sgroups/aliases | 3
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_sgroups/defaults/main.yml | 5
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_sgroups/tasks/main.yml | 332
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/aliases | 5
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/defaults/main.yml | 14
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/tasks/main.yml | 505
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/vars/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot_mgmt/aliases | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot_mgmt/defaults/main.yml | 9
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot_mgmt/tasks/main.yml | 224
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_states/aliases | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_states/defaults/main.yml | 5
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_states/tasks/main.yml | 320
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/aliases | 3
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/defaults/main.yml | 7
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/tasks/main.yml | 202
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/tasks/test_tagging_gp3.yml | 190
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_upgrade/aliases | 3
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_upgrade/defaults/main.yml | 10
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_upgrade/tasks/main.yml | 128
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/aliases | 3
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/defaults/main.yml | 17
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/tasks/main.yml | 948
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/aliases | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/defaults/main.yml | 29
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/tasks/main.yml | 517
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/aliases | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/defaults/main.yml | 9
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/main.yml | 112
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/params.yml | 29
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/tests.yml | 675
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/route53/aliases | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/route53/defaults/main.yml | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/route53/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/route53/tasks/main.yml | 1126
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/route53/vars/main.yml | 0
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/aliases | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/defaults/main.yml | 36
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/meta/main.yml | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/create_multiple_health_checks.yml | 134
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/main.yml | 1822
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/update_delete_by_id.yml | 303
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/route53_zone/aliases | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/route53_zone/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/route53_zone/tasks/main.yml | 617
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/aliases | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/inventory | 17
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/main.yml | 12
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/meta/main.yml | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/defaults/main.yml | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/acl.yml | 68
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/complex.yml | 150
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/dotted.yml | 55
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_bucket_key.yml | 103
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_kms.yml | 92
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_sse.yml | 93
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/main.yml | 20
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/missing.yml | 28
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/object_lock.yml | 131
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/ownership_controls.yml | 143
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/public_access.yml | 115
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/simple.yml | 67
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/tags.yml | 257
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/templates/policy-updated.json | 12
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/templates/policy.json | 12
-rwxr-xr-x  ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/runme.sh | 12
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_object/aliases | 3
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_object/defaults/main.yml | 5
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_object/files/hello.txt | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_object/files/test.png | bin 0 -> 99 bytes
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_object/meta/main.yml | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_object.yml | 135
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_object_acl_disabled_bucket.yml | 111
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/delete_bucket.yml | 25
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/main.yml | 1092
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_object/templates/policy.json.j2 | 21
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_object/templates/put-template.txt.j2 | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/defaults/main.yml | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/handlers/main.yml | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/tasks/cleanup.yml | 5
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/tasks/main.yml | 43
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/defaults/main.yml | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/tasks/main.yml | 53
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/defaults/main.yml | 24
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/handlers/main.yml | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/tasks/cleanup.yml | 118
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/tasks/main.yml | 88
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml | 5
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/meta/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml | 5
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml | 12
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml | 10
-rw-r--r--ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows-cleanup.yml4
-rw-r--r--ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows.yml10
-rw-r--r--ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/files/ec2-fingerprint.py34
-rw-r--r--ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/meta/main.yml1
-rw-r--r--ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/tasks/main.yml71
561 files changed, 56394 insertions, 0 deletions
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/aliases b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/aliases
new file mode 100644
index 000000000..5619cbdc8
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/aliases
@@ -0,0 +1,7 @@
+# reason: slow
+# Tests take around 30 minutes
+
+slow
+cloud/aws
+
+autoscaling_group_info
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/inventory b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/inventory
new file mode 100644
index 000000000..edc19ef5f
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/inventory
@@ -0,0 +1,8 @@
+[tests]
+create_update_delete
+tag_operations
+instance_detach
+
+[all:vars]
+ansible_connection=local
+ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/main.yml
new file mode 100644
index 000000000..d2479e44f
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/main.yml
@@ -0,0 +1,35 @@
+# Beware: most of our tests here are run in parallel.
+# To add new tests you'll need to add a new host to the inventory and a matching
+# '{{ inventory_hostname }}'.yml file in roles/ec2_asg/tasks/
+
+
+# Prepare the VPC and figure out which AMI to use
+- hosts: all
+ gather_facts: no
+ tasks:
+ - module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+ - include_role:
+ name: setup_ec2_facts
+ - include_role:
+ name: ec2_asg
+ tasks_from: env_setup.yml
+ rescue:
+ - include_role:
+ name: ec2_asg
+ tasks_from: env_cleanup.yml
+ run_once: yes
+ - fail:
+ msg: Environment preparation failed
+ run_once: yes
+- hosts: all
+ gather_facts: no
+ strategy: free
+ serial: 6
+ roles:
+ - ec2_asg
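
As a minimal sketch of the workflow described in the comment at the top of main.yml (the test name "scaling_policies" and the task body below are hypothetical and not part of this patch), a new test would be wired in by adding a host to the [tests] group of the inventory file and creating a matching task file under roles/ec2_asg/tasks/:

    # inventory: add the new host under [tests]
    #   scaling_policies
    #
    # roles/ec2_asg/tasks/scaling_policies.yml (hypothetical stub)
    - name: placeholder task for the hypothetical scaling_policies test
      debug:
        msg: "assertions against {{ resource_prefix }}-asg would go here"

The free-strategy play at the end of main.yml then runs the new host in parallel with the existing tests, and roles/ec2_asg/tasks/main.yml picks the file up through include_tasks: '{{ inventory_hostname }}.yml'.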
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/meta/main.yml
new file mode 100644
index 000000000..1d40168d0
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+- setup_ec2_facts
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/defaults/main.yml
new file mode 100644
index 000000000..da86a186e
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/defaults/main.yml
@@ -0,0 +1,2 @@
+load_balancer_name: '{{ tiny_prefix }}-lb'
+ec2_asg_setup_run_once: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/create_update_delete.yml b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/create_update_delete.yml
new file mode 100644
index 000000000..0e57eaa50
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/create_update_delete.yml
@@ -0,0 +1,593 @@
+# tasks file for test_ec2_asg
+
+ # ============================================================
+
+- name: Test create/update/delete AutoScalingGroups with ec2_asg
+ block:
+
+ # ============================================================
+
+ - name: test without specifying required module options
+ ec2_asg:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ ignore_errors: true
+ register: result
+ - name: assert name is a required module option
+ assert:
+ that:
+ - "result.msg == 'missing required arguments: name'"
+
+
+ - name: ensure launch configs exist
+ ec2_lc:
+ name: '{{ item }}'
+ assign_public_ip: true
+ image_id: '{{ ec2_ami_id }}'
+ user_data: |
+ #cloud-config
+ package_upgrade: true
+ package_update: true
+ packages:
+ - httpd
+ runcmd:
+ - "service httpd start"
+ security_groups: '{{ sg.group_id }}'
+ instance_type: t3.micro
+ loop:
+ - '{{ resource_prefix }}-lc'
+ - '{{ resource_prefix }}-lc-2'
+
+ # ============================================================
+
+ - name: launch asg and wait for instances to be deemed healthy (no ELB)
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg'
+ launch_config_name: '{{ resource_prefix }}-lc'
+ desired_capacity: 1
+ min_size: 1
+ max_size: 1
+ vpc_zone_identifier: '{{ testing_subnet.subnet.id }}'
+ state: present
+ wait_for_instances: yes
+ register: output
+ - assert:
+ that:
+ - output.viable_instances == 1
+
+ - name: Enable metrics collection - check_mode
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg'
+ metrics_collection: yes
+ register: output
+ check_mode: true
+ - assert:
+ that:
+ - output is changed
+ - output is not failed
+ - '"autoscaling:UpdateAutoScalingGroup" not in output.resource_actions'
+
+ - name: Enable metrics collection
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg'
+ metrics_collection: yes
+ register: output
+ - assert:
+ that:
+ - output is changed
+
+ - name: Enable metrics collection (idempotency)
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg'
+ metrics_collection: yes
+ register: output
+ - assert:
+ that:
+ - output is not changed
+
+ - name: Disable metrics collection - check_mode
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg'
+ metrics_collection: no
+ register: output
+ check_mode: true
+ - assert:
+ that:
+ - output is changed
+ - output is not failed
+ - '"autoscaling:UpdateAutoScalingGroup" not in output.resource_actions'
+
+
+ - name: Disable metrics collection
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg'
+ metrics_collection: no
+ register: output
+ - assert:
+ that:
+ - output is changed
+
+ - name: Disable metrics collection (idempotency)
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg'
+ metrics_collection: no
+ register: output
+ - assert:
+ that:
+ - output is not changed
+
+ - name: kill asg
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg'
+ state: absent
+ wait_timeout: 800
+ async: 400
+ - name: launch asg and do not wait for instances to be deemed healthy (no ELB)
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg'
+ launch_config_name: '{{ resource_prefix }}-lc'
+ desired_capacity: 1
+ min_size: 1
+ max_size: 1
+ vpc_zone_identifier: '{{ testing_subnet.subnet.id }}'
+ wait_for_instances: no
+ state: present
+ register: output
+ - assert:
+ that:
+ - output.viable_instances == 0
+
+ - name: kill asg
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg'
+ state: absent
+ wait_timeout: 800
+ register: output
+ retries: 3
+ until: output is succeeded
+ delay: 10
+ async: 400
+ - name: create asg with asg metrics enabled
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg'
+ metrics_collection: true
+ launch_config_name: '{{ resource_prefix }}-lc'
+ desired_capacity: 0
+ min_size: 0
+ max_size: 0
+ vpc_zone_identifier: '{{ testing_subnet.subnet.id }}'
+ state: present
+ register: output
+ - assert:
+ that:
+ - "'Group' in output.metrics_collection.0.Metric"
+
+ - name: kill asg
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg'
+ state: absent
+ wait_timeout: 800
+ async: 400
+ - name: launch load balancer
+ ec2_elb_lb:
+ name: '{{ load_balancer_name }}'
+ state: present
+ security_group_ids:
+ - '{{ sg.group_id }}'
+ subnets: '{{ testing_subnet.subnet.id }}'
+ connection_draining_timeout: 60
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ health_check:
+ ping_protocol: tcp
+ ping_port: 80
+ ping_path: /
+ response_timeout: 5
+ interval: 10
+ unhealthy_threshold: 4
+ healthy_threshold: 2
+ register: load_balancer
+ - name: launch asg and wait for instances to be deemed healthy (ELB)
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg'
+ launch_config_name: '{{ resource_prefix }}-lc'
+ health_check_type: ELB
+ desired_capacity: 1
+ min_size: 1
+ max_size: 1
+ health_check_period: 300
+ vpc_zone_identifier: '{{ testing_subnet.subnet.id }}'
+ load_balancers: '{{ load_balancer_name }}'
+ wait_for_instances: yes
+ wait_timeout: 900
+ state: present
+ register: output
+ - assert:
+ that:
+ - output.viable_instances == 1
+
+ # ============================================================
+
+ # grow scaling group to 3
+ - name: add 2 more instances and wait for instances to be deemed healthy (ELB)
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg'
+ launch_config_name: '{{ resource_prefix }}-lc'
+ health_check_type: ELB
+ desired_capacity: 3
+ min_size: 3
+ max_size: 5
+ health_check_period: 600
+ vpc_zone_identifier: '{{ testing_subnet.subnet.id }}'
+ load_balancers: '{{ load_balancer_name }}'
+ wait_for_instances: yes
+ wait_timeout: 1200
+ state: present
+ register: output
+ - assert:
+ that:
+ - output.viable_instances == 3
+
+ # ============================================================
+
+ # Test max_instance_lifetime option
+ - name: enable asg max_instance_lifetime
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg'
+ max_instance_lifetime: 604801
+ register: output
+ - name: ensure max_instance_lifetime is set
+ assert:
+ that:
+ - output.max_instance_lifetime == 604801
+
+ - name: run without max_instance_lifetime
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg'
+ launch_config_name: '{{ resource_prefix }}-lc'
+ register: output
+ - name: ensure max_instance_lifetime is not affected by defaults
+ assert:
+ that:
+ - output.max_instance_lifetime == 604801
+
+ - name: disable asg max_instance_lifetime
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg'
+ launch_config_name: '{{ resource_prefix }}-lc'
+ max_instance_lifetime: 0
+ register: output
+ - name: ensure max_instance_lifetime is not set
+ assert:
+ that:
+ - not output.max_instance_lifetime
+
+ # ============================================================
+
+ # perform rolling replace with different launch configuration
+ - name: perform rolling update to the new launch configuration
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg'
+ launch_config_name: '{{ resource_prefix }}-lc-2'
+ health_check_type: ELB
+ desired_capacity: 3
+ min_size: 1
+ max_size: 5
+ health_check_period: 900
+ load_balancers: '{{ load_balancer_name }}'
+ vpc_zone_identifier: '{{ testing_subnet.subnet.id }}'
+ wait_for_instances: yes
+ replace_all_instances: yes
+ wait_timeout: 1800
+ state: present
+ register: output
+ - assert:
+ that:
+ - item.value.launch_config_name == '{{ resource_prefix }}-lc-2'
+ loop: '{{ output.instance_facts | dict2items }}'
+ - assert:
+ that:
+ - output.viable_instances == 3
+
+ # ============================================================
+
+ # perform rolling replace with the original launch configuration
+ - name: perform rolling update back to the original launch configuration while removing the load balancer
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg'
+ launch_config_name: '{{ resource_prefix }}-lc'
+ health_check_type: EC2
+ desired_capacity: 3
+ min_size: 1
+ max_size: 5
+ health_check_period: 900
+ load_balancers: []
+ vpc_zone_identifier: '{{ testing_subnet.subnet.id }}'
+ wait_for_instances: yes
+ replace_all_instances: yes
+ wait_timeout: 1800
+ state: present
+ register: output
+ - assert:
+ that:
+ - item.value.launch_config_name == '{{ resource_prefix }}-lc'
+ loop: '{{ output.instance_facts | dict2items }}'
+ - assert:
+ that:
+ - output.viable_instances == 3
+
+ # ============================================================
+
+ # perform rolling replace with new launch configuration and lc_check:false
+ - name: 'perform rolling update to the new launch configuration with lc_check: false'
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg'
+ launch_config_name: '{{ resource_prefix }}-lc-2'
+ health_check_type: EC2
+ desired_capacity: 3
+ min_size: 1
+ max_size: 5
+ health_check_period: 900
+ load_balancers: []
+ vpc_zone_identifier: '{{ testing_subnet.subnet.id }}'
+ wait_for_instances: yes
+ replace_all_instances: yes
+ replace_batch_size: 3
+ lc_check: false
+ wait_timeout: 1800
+ state: present
+ - name: get ec2_asg info
+ ec2_asg_info:
+ name: '{{ resource_prefix }}-asg'
+ register: output
+ - assert:
+ that:
+ - output.results[0].instances | length == 3
+
+ # ============================================================
+
+ - name: kill asg
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg'
+ state: absent
+ wait_timeout: 800
+ async: 400
+ - name: 'new asg with lc_check: false'
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg'
+ launch_config_name: '{{ resource_prefix }}-lc'
+ health_check_type: EC2
+ desired_capacity: 3
+ min_size: 1
+ max_size: 5
+ health_check_period: 900
+ load_balancers: []
+ vpc_zone_identifier: '{{ testing_subnet.subnet.id }}'
+ wait_for_instances: yes
+ replace_all_instances: yes
+ replace_batch_size: 3
+ lc_check: false
+ wait_timeout: 1800
+ state: present
+ - name: get ec2_asg information
+ ec2_asg_info:
+ name: '{{ resource_prefix }}-asg'
+ register: output
+ - assert:
+ that:
+ - output.results[0].instances | length == 3
+
+ # we need a launch template, otherwise we cannot test the mixed instance policy
+ - name: create launch template for autoscaling group to test its mixed instances
+ policy
+ ec2_launch_template:
+ template_name: '{{ resource_prefix }}-lt'
+ image_id: '{{ ec2_ami_id }}'
+ instance_type: t3.micro
+ credit_specification:
+ cpu_credits: standard
+ network_interfaces:
+ - associate_public_ip_address: yes
+ delete_on_termination: yes
+ device_index: 0
+ groups:
+ - '{{ sg.group_id }}'
+
+ - name: update autoscaling group with mixed-instances policy using mixed instance
+ types - check_mode
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg'
+ launch_template:
+ launch_template_name: '{{ resource_prefix }}-lt'
+ desired_capacity: 1
+ min_size: 1
+ max_size: 1
+ vpc_zone_identifier: '{{ testing_subnet.subnet.id }}'
+ state: present
+ mixed_instances_policy:
+ instance_types:
+ - t3.micro
+ - t2.nano
+ wait_for_instances: yes
+ register: output
+ check_mode: true
+ - assert:
+ that:
+ - output is changed
+ - output is not failed
+ - '"autoscaling:CreateOrUpdateTags" not in output.resource_actions'
+
+ - name: update autoscaling group with mixed-instances policy using mixed instance
+ types
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg'
+ launch_template:
+ launch_template_name: '{{ resource_prefix }}-lt'
+ desired_capacity: 1
+ min_size: 1
+ max_size: 1
+ vpc_zone_identifier: '{{ testing_subnet.subnet.id }}'
+ state: present
+ mixed_instances_policy:
+ instance_types:
+ - t3.micro
+ - t2.nano
+ wait_for_instances: yes
+ register: output
+ - assert:
+ that:
+ - output.mixed_instances_policy | length == 2
+ - output.mixed_instances_policy[0] == 't3.micro'
+ - output.mixed_instances_policy[1] == 't2.nano'
+
+ - name: update autoscaling group with mixed-instances policy with instances_distribution
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg'
+ launch_template:
+ launch_template_name: '{{ resource_prefix }}-lt'
+ desired_capacity: 1
+ min_size: 1
+ max_size: 1
+ vpc_zone_identifier: '{{ testing_subnet.subnet.id }}'
+ state: present
+ mixed_instances_policy:
+ instance_types:
+ - t3.micro
+ - t2.nano
+ instances_distribution:
+ on_demand_percentage_above_base_capacity: 0
+ spot_allocation_strategy: capacity-optimized
+ wait_for_instances: yes
+ register: output
+ - assert:
+ that:
+ - output.mixed_instances_policy_full['launch_template']['overrides'][0]['instance_type']
+ == 't3.micro'
+ - output.mixed_instances_policy_full['launch_template']['overrides'][1]['instance_type']
+ == 't2.nano'
+ - output.mixed_instances_policy_full['instances_distribution']['on_demand_percentage_above_base_capacity']
+ == 0
+ - output.mixed_instances_policy_full['instances_distribution']['spot_allocation_strategy']
+ == 'capacity-optimized'
+
+ # ============================================================
+
+ # Target group names have max length of 32 characters
+ - set_fact:
+ tg1_name: "ansible-test-{{tiny_prefix}}-asg-t1"
+ tg2_name: "ansible-test-{{tiny_prefix}}-asg-t2"
+ - name: create target group 1
+ elb_target_group:
+ name: '{{ tg1_name }}'
+ protocol: tcp
+ port: 80
+ health_check_protocol: tcp
+ health_check_port: 80
+ healthy_threshold_count: 2
+ unhealthy_threshold_count: 2
+ vpc_id: '{{ testing_vpc.vpc.id }}'
+ state: present
+ register: out_tg1
+ - name: create target group 2
+ elb_target_group:
+ name: '{{ tg2_name }}'
+ protocol: tcp
+ port: 80
+ health_check_protocol: tcp
+ health_check_port: 80
+ healthy_threshold_count: 2
+ unhealthy_threshold_count: 2
+ vpc_id: '{{ testing_vpc.vpc.id }}'
+ state: present
+ register: out_tg2
+ - name: update autoscaling group with tg1
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg'
+ launch_template:
+ launch_template_name: '{{ resource_prefix }}-lt'
+ target_group_arns:
+ - '{{ out_tg1.target_group_arn }}'
+ desired_capacity: 1
+ min_size: 1
+ max_size: 1
+ state: present
+ wait_for_instances: yes
+ register: output
+ - assert:
+ that:
+ - output.target_group_arns[0] == out_tg1.target_group_arn
+
+ - name: update autoscaling group add tg2
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg'
+ launch_template:
+ launch_template_name: '{{ resource_prefix }}-lt'
+ target_group_arns:
+ - '{{ out_tg1.target_group_arn }}'
+ - '{{ out_tg2.target_group_arn }}'
+ desired_capacity: 1
+ min_size: 1
+ max_size: 1
+ state: present
+ wait_for_instances: yes
+ register: output
+ - assert:
+ that:
+ - output.target_group_arns | length == 2
+
+ - name: update autoscaling group remove tg1
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg'
+ launch_template:
+ launch_template_name: '{{ resource_prefix }}-lt'
+ target_group_arns:
+ - '{{ out_tg2.target_group_arn }}'
+ desired_capacity: 1
+ min_size: 1
+ max_size: 1
+ state: present
+ wait_for_instances: yes
+ register: output
+ - assert:
+ that:
+ - output.target_group_arns | length == 1
+ - output.target_group_arns[0] == out_tg2.target_group_arn
+
+ - name: update autoscaling group remove tg2 and add tg1
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg'
+ launch_template:
+ launch_template_name: '{{ resource_prefix }}-lt'
+ target_group_arns:
+ - '{{ out_tg1.target_group_arn }}'
+ desired_capacity: 1
+ min_size: 1
+ max_size: 1
+ state: present
+ wait_for_instances: yes
+ register: output
+ - assert:
+ that:
+ - output.target_group_arns | length == 1
+ - output.target_group_arns[0] == out_tg1.target_group_arn
+
+ - name: target group no change
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg'
+ launch_template:
+ launch_template_name: '{{ resource_prefix }}-lt'
+ target_group_arns:
+ - '{{ out_tg1.target_group_arn }}'
+ desired_capacity: 1
+ min_size: 1
+ max_size: 1
+ state: present
+ wait_for_instances: yes
+ register: output
+ - assert:
+ that:
+ - output.target_group_arns | length == 1
+ - output.target_group_arns[0] == out_tg1.target_group_arn
+ - output.changed == false
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/env_cleanup.yml b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/env_cleanup.yml
new file mode 100644
index 000000000..e2e6c02f6
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/env_cleanup.yml
@@ -0,0 +1,116 @@
+- name: kill asg
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg'
+ state: absent
+ register: removed
+ until: removed is not failed
+ ignore_errors: true
+ retries: 10
+- name: remove target group
+ elb_target_group:
+ name: '{{ item }}'
+ state: absent
+ register: removed
+ until: removed is not failed
+ ignore_errors: true
+ retries: 10
+ loop:
+ - '{{ tg1_name }}'
+ - '{{ tg2_name }}'
+
+- name: remove the load balancer
+ ec2_elb_lb:
+ name: '{{ load_balancer_name }}'
+ state: absent
+ security_group_ids:
+ - '{{ sg.group_id }}'
+ subnets: '{{ testing_subnet.subnet.id }}'
+ wait: true
+ connection_draining_timeout: 60
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ health_check:
+ ping_protocol: tcp
+ ping_port: 80
+ ping_path: /
+ response_timeout: 5
+ interval: 10
+ unhealthy_threshold: 4
+ healthy_threshold: 2
+ register: removed
+ until: removed is not failed
+ ignore_errors: true
+ retries: 10
+- name: remove launch configs
+ ec2_lc:
+ name: '{{ item }}'
+ state: absent
+ register: removed
+ until: removed is not failed
+ ignore_errors: true
+ retries: 10
+ loop:
+ - '{{ resource_prefix }}-lc'
+ - '{{ resource_prefix }}-lc-2'
+
+- name: delete launch template
+ ec2_launch_template:
+ name: '{{ resource_prefix }}-lt'
+ state: absent
+ register: del_lt
+ retries: 10
+ until: del_lt is not failed
+ ignore_errors: true
+- name: remove the security group
+ ec2_group:
+ name: '{{ resource_prefix }}-sg'
+ description: a security group for ansible tests
+ vpc_id: '{{ testing_vpc.vpc.id }}'
+ state: absent
+ register: removed
+ until: removed is not failed
+ ignore_errors: true
+ retries: 10
+- name: remove routing rules
+ ec2_vpc_route_table:
+ state: absent
+ vpc_id: '{{ testing_vpc.vpc.id }}'
+ tags:
+ created: '{{ resource_prefix }}-route'
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: '{{ igw.gateway_id }}'
+ subnets:
+ - '{{ testing_subnet.subnet.id }}'
+ register: removed
+ until: removed is not failed
+ ignore_errors: true
+ retries: 10
+- name: remove internet gateway
+ ec2_vpc_igw:
+ vpc_id: '{{ testing_vpc.vpc.id }}'
+ state: absent
+ register: removed
+ until: removed is not failed
+ ignore_errors: true
+ retries: 10
+- name: remove the subnet
+ ec2_vpc_subnet:
+ state: absent
+ vpc_id: '{{ testing_vpc.vpc.id }}'
+ cidr: 10.55.77.0/24
+ register: removed
+ until: removed is not failed
+ ignore_errors: true
+ retries: 10
+- name: remove the VPC
+ ec2_vpc_net:
+ name: '{{ resource_prefix }}-vpc'
+ cidr_block: 10.55.77.0/24
+ state: absent
+ register: removed
+ until: removed is not failed
+ ignore_errors: true
+ retries: 10
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/env_setup.yml b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/env_setup.yml
new file mode 100644
index 000000000..2bff18c5f
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/env_setup.yml
@@ -0,0 +1,51 @@
+- name: Set up the environment for the ec2_asg integration tests
+ run_once: '{{ ec2_asg_setup_run_once }}'
+ block:
+
+ # Set up the testing dependencies: VPC, internet gateway, subnet, route table, and security group
+ - name: Create VPC for use in testing
+ ec2_vpc_net:
+ name: '{{ resource_prefix }}-vpc'
+ cidr_block: 10.55.77.0/24
+ tenancy: default
+ register: testing_vpc
+ - name: Create internet gateway for use in testing
+ ec2_vpc_igw:
+ vpc_id: '{{ testing_vpc.vpc.id }}'
+ state: present
+ register: igw
+ - name: Create subnet for use in testing
+ ec2_vpc_subnet:
+ state: present
+ vpc_id: '{{ testing_vpc.vpc.id }}'
+ cidr: 10.55.77.0/24
+ az: '{{ aws_region }}a'
+ resource_tags:
+ Name: '{{ resource_prefix }}-subnet'
+ register: testing_subnet
+ - name: create routing rules
+ ec2_vpc_route_table:
+ vpc_id: '{{ testing_vpc.vpc.id }}'
+ tags:
+ created: '{{ resource_prefix }}-route'
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: '{{ igw.gateway_id }}'
+ subnets:
+ - '{{ testing_subnet.subnet.id }}'
+
+ - name: create a security group with the vpc created in the ec2_setup
+ ec2_group:
+ name: '{{ resource_prefix }}-sg'
+ description: a security group for ansible tests
+ vpc_id: '{{ testing_vpc.vpc.id }}'
+ rules:
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ register: sg
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/instance_detach.yml b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/instance_detach.yml
new file mode 100644
index 000000000..a938ce5b0
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/instance_detach.yml
@@ -0,0 +1,256 @@
+- name: Running instance detach tests
+ block:
+ #----------------------------------------------------------------------
+ - name: create a launch configuration
+ ec2_lc:
+ name: '{{ resource_prefix }}-lc-detach-test'
+ image_id: '{{ ec2_ami_id }}'
+ region: '{{ aws_region }}'
+ instance_type: t2.micro
+ assign_public_ip: yes
+ register: create_lc
+ - name: ensure that lc is created
+ assert:
+ that:
+ - create_lc is changed
+ - create_lc.failed is false
+ - '"autoscaling:CreateLaunchConfiguration" in create_lc.resource_actions'
+
+ #----------------------------------------------------------------------
+
+ - name: create an AutoScalingGroup to be used for instance_detach test - check_mode
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg-detach-test'
+ launch_config_name: '{{ resource_prefix }}-lc-detach-test'
+ health_check_period: 60
+ health_check_type: ELB
+ replace_all_instances: yes
+ min_size: 3
+ max_size: 6
+ desired_capacity: 3
+ region: '{{ aws_region }}'
+ register: create_asg
+ check_mode: true
+ - assert:
+ that:
+ - create_asg is changed
+ - create_asg is not failed
+ - '"autoscaling:CreateAutoScalingGroup" not in create_asg.resource_actions'
+
+ - name: create an AutoScalingGroup to be used for instance_detach test
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg-detach-test'
+ launch_config_name: '{{ resource_prefix }}-lc-detach-test'
+ health_check_period: 60
+ health_check_type: ELB
+ replace_all_instances: yes
+ min_size: 3
+ max_size: 6
+ desired_capacity: 3
+ region: '{{ aws_region }}'
+ register: create_asg
+ - name: ensure that AutoScalingGroup is created
+ assert:
+ that:
+ - create_asg is changed
+ - create_asg.failed is false
+ - create_asg.instances | length == 3
+ - create_asg.desired_capacity == 3
+ - create_asg.in_service_instances == 3
+ - '"autoscaling:CreateAutoScalingGroup" in create_asg.resource_actions'
+
+ - name: gather info about asg, get instance ids
+ ec2_asg_info:
+ name: '{{ resource_prefix }}-asg-detach-test'
+ register: asg_info
+ - set_fact:
+ init_instance_1: '{{ asg_info.results[0].instances[0].instance_id }}'
+ init_instance_2: '{{ asg_info.results[0].instances[1].instance_id }}'
+ init_instance_3: '{{ asg_info.results[0].instances[2].instance_id }}'
+ - name: Gather information about the initial ASG instances
+ amazon.aws.ec2_instance_info:
+ instance_ids:
+ - '{{ init_instance_1 }}'
+ - '{{ init_instance_2 }}'
+ - '{{ init_instance_3 }}'
+ register: instances_info
+ - assert:
+ that:
+ - asg_info.results[0].instances | length == 3
+ - "'{{ instances_info.instances[0].state.name }}' == 'running'"
+ - "'{{ instances_info.instances[1].state.name }}' == 'running'"
+ - "'{{ instances_info.instances[2].state.name }}' == 'running'"
+
+ #----------------------------------------------------------------------
+
+ - name: detach 2 instances from the asg and replace them with other instances - check_mode
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg-detach-test'
+ launch_config_name: '{{ resource_prefix }}-lc-detach-test'
+ health_check_period: 60
+ health_check_type: ELB
+ min_size: 3
+ max_size: 3
+ desired_capacity: 3
+ region: '{{ aws_region }}'
+ detach_instances:
+ - '{{ init_instance_1 }}'
+ - '{{ init_instance_2 }}'
+ register: detach_result
+ check_mode: true
+ - assert:
+ that:
+ - detach_result is changed
+ - detach_result is not failed
+ - '"autoscaling:DetachInstances" not in detach_result.resource_actions'
+
+ - name: detach 2 instances from the asg and replace them with other instances
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg-detach-test'
+ launch_config_name: '{{ resource_prefix }}-lc-detach-test'
+ health_check_period: 60
+ health_check_type: ELB
+ min_size: 3
+ max_size: 3
+ desired_capacity: 3
+ region: '{{ aws_region }}'
+ detach_instances:
+ - '{{ init_instance_1 }}'
+ - '{{ init_instance_2 }}'
+
+ # pause to allow completion of instance replacement
+ - name: Pause for 30 seconds
+ wait_for:
+ timeout: 30
+ - ec2_asg_info:
+ name: '{{ resource_prefix }}-asg-detach-test'
+ register: asg_info_replaced
+ - set_fact:
+ instance_replace_1: '{{ asg_info_replaced.results[0].instances[0].instance_id
+ }}'
+ instance_replace_2: '{{ asg_info_replaced.results[0].instances[1].instance_id
+ }}'
+ instance_replace_3: '{{ asg_info_replaced.results[0].instances[2].instance_id
+ }}'
+ - set_fact:
+ asg_instance_detach_replace: "{{ asg_info_replaced.results[0].instances | map(attribute='instance_id')\
+ \ | list }}"
+ - name: Gather information about recently detached instances
+ amazon.aws.ec2_instance_info:
+ instance_ids:
+ - '{{ init_instance_1 }}'
+ - '{{ init_instance_2 }}'
+ register: detached_instances_info
+ - assert:
+ that:
+ - asg_info_replaced.results[0].desired_capacity == 3
+ - asg_info_replaced.results[0].instances | length == 3
+ - "'{{ init_instance_1 }}' not in {{ asg_instance_detach_replace }}"
+ - "'{{ init_instance_2 }}' not in {{ asg_instance_detach_replace }}"
+ - "'{{ detached_instances_info.instances[0].state.name }}' == 'running'"
+ - "'{{ detached_instances_info.instances[1].state.name }}' == 'running'"
+
+ #----------------------------------------------------------------------
+
+ # detach 2 instances from the asg and reduce the desired capacity from 3 to 1
+ - name: detach 2 instances from the asg and reduce the desired capacity from 3 to
+ 1
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg-detach-test'
+ launch_config_name: '{{ resource_prefix }}-lc-detach-test'
+ health_check_period: 60
+ health_check_type: ELB
+ min_size: 1
+ max_size: 5
+ desired_capacity: 3
+ region: '{{ aws_region }}'
+ decrement_desired_capacity: true
+ detach_instances:
+ - '{{ instance_replace_1 }}'
+ - '{{ instance_replace_2 }}'
+
+ - name: Pause for 30 seconds to allow completion of the above task
+ wait_for:
+ timeout: 30
+ - ec2_asg_info:
+ name: '{{ resource_prefix }}-asg-detach-test'
+ register: asg_info_decrement
+ - set_fact:
+ instance_detach_decrement: '{{ asg_info_decrement.results[0].instances[0].instance_id
+ }}'
+ - set_fact:
+ asg_instance_detach_decrement: "{{ asg_info_decrement.results[0].instances |\
+ \ map(attribute='instance_id') | list }}"
+ - name: Gather information about recently detached instances
+ amazon.aws.ec2_instance_info:
+ instance_ids:
+ - '{{ instance_replace_1 }}'
+ - '{{ instance_replace_2 }}'
+ register: detached_instances_info
+ - assert:
+ that:
+ - asg_info_decrement.results[0].instances | length == 1
+ - asg_info_decrement.results[0].desired_capacity == 1
+ - "'{{ instance_replace_1 }}' not in {{ asg_instance_detach_decrement }}"
+ - "'{{ instance_replace_2 }}' not in {{ asg_instance_detach_decrement }}"
+ - "'{{ detached_instances_info.instances[0].state.name }}' == 'running'"
+ - "'{{ detached_instances_info.instances[1].state.name }}' == 'running'"
+ - "'{{ instance_replace_3 }}' == '{{ instance_detach_decrement }}'"
+
+ #----------------------------------------------------------------------
+
+ always:
+
+ - name: terminate any instances created during this test
+ amazon.aws.ec2_instance:
+ instance_ids:
+ - '{{ item }}'
+ state: absent
+ loop:
+ - '{{ init_instance_1 }}'
+ - '{{ init_instance_2 }}'
+ - '{{ init_instance_3 }}'
+ - '{{ instance_replace_1 }}'
+ - '{{ instance_replace_2 }}'
+ - '{{ instance_replace_3 }}'
+
+ - name: kill asg created in this test - check_mode
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg-detach-test'
+ state: absent
+ register: removed
+ check_mode: true
+ - assert:
+ that:
+ - removed is changed
+ - removed is not failed
+ - '"autoscaling:DeleteAutoScalingGroup" not in removed.resource_actions'
+
+ - name: kill asg created in this test
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg-detach-test'
+ state: absent
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+ - name: kill asg created in this test - check_mode (idempotent)
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg-detach-test'
+ state: absent
+ register: removed
+ check_mode: true
+ - assert:
+ that:
+ - removed is not changed
+ - removed is not failed
+ - '"autoscaling:DeleteAutoScalingGroup" not in removed.resource_actions'
+
+ - name: remove launch config created in this test
+ ec2_lc:
+ name: '{{ resource_prefix }}-lc-detach-test'
+ state: absent
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/main.yml
new file mode 100644
index 000000000..70e23a642
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/main.yml
@@ -0,0 +1,40 @@
+# Beware: most of our tests here are run in parallel.
+# To add new tests you'll need to add a new host to the inventory and a matching
+# '{{ inventory_hostname }}'.yml file in roles/ec2_asg/tasks/
+
+- name: Wrap up all tests and set up AWS credentials
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ aws_config:
+ retries:
+ # Unfortunately AWSRetry doesn't support paginators and boto3's paginators
+ # don't support any configuration of the delay between retries.
+ max_attempts: 20
+ collections:
+ - community.aws
+ block:
+ - debug:
+ msg: "{{ inventory_hostname }} start: {{ lookup('pipe','date') }}"
+ - include_tasks: '{{ inventory_hostname }}.yml'
+ - debug:
+ msg: "{{ inventory_hostname }} finish: {{ lookup('pipe','date') }}"
+ always:
+ - set_fact:
+ _role_complete: true
+ - vars:
+ completed_hosts: '{{ ansible_play_hosts_all | map("extract", hostvars, "_role_complete")
+ | list | select("defined") | list | length }}'
+ hosts_in_play: '{{ ansible_play_hosts_all | length }}'
+ debug:
+ msg: '{{ completed_hosts }} of {{ hosts_in_play }} complete'
+ - include_tasks: env_cleanup.yml
+ vars:
+ completed_hosts: '{{ ansible_play_hosts_all | map("extract", hostvars, "_role_complete")
+ | list | select("defined") | list | length }}'
+ hosts_in_play: '{{ ansible_play_hosts_all | length }}'
+ when:
+ - completed_hosts == hosts_in_play
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/tag_operations.yml b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/tag_operations.yml
new file mode 100644
index 000000000..4f62faa31
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/tag_operations.yml
@@ -0,0 +1,339 @@
+- name: Running AutoScalingGroup Tag operations test
+ block:
+ #----------------------------------------------------------------------
+ - name: create a launch configuration
+ ec2_lc:
+ name: '{{ resource_prefix }}-lc-tag-test'
+ image_id: '{{ ec2_ami_id }}'
+ region: '{{ aws_region }}'
+ instance_type: t2.micro
+ assign_public_ip: yes
+ register: create_lc
+ - name: ensure that lc is created
+ assert:
+ that:
+ - create_lc is changed
+ - create_lc.failed is false
+ - '"autoscaling:CreateLaunchConfiguration" in create_lc.resource_actions'
+
+ #----------------------------------------------------------------------
+ - name: create an AutoScalingGroup to be used for tag_operations test
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg-tag-test'
+ launch_config_name: '{{ resource_prefix }}-lc-tag-test'
+ health_check_period: 60
+ health_check_type: ELB
+ replace_all_instances: yes
+ min_size: 1
+ max_size: 1
+ desired_capacity: 1
+ region: '{{ aws_region }}'
+ register: create_asg
+ - name: ensure that AutoScalingGroup is created
+ assert:
+ that:
+ - create_asg is changed
+ - create_asg.failed is false
+ - '"autoscaling:CreateAutoScalingGroup" in create_asg.resource_actions'
+
+ #----------------------------------------------------------------------
+
+ - name: Get asg info
+ ec2_asg_info:
+ name: '{{ resource_prefix }}-asg-tag-test'
+ register: info_result
+ - assert:
+ that:
+ - info_result.results[0].tags | length == 0
+
+ - name: Tag asg - check_mode
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg-tag-test'
+ tags:
+ - tag_a: value 1
+ propagate_at_launch: no
+ - tag_b: value 2
+ propagate_at_launch: yes
+ register: output
+ check_mode: true
+ - assert:
+ that:
+ - output is changed
+ - output is not failed
+ - '"autoscaling:CreateOrUpdateTags" not in output.resource_actions'
+
+ - name: Tag asg
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg-tag-test'
+ tags:
+ - tag_a: value 1
+ propagate_at_launch: no
+ - tag_b: value 2
+ propagate_at_launch: yes
+ register: output
+ - assert:
+ that:
+ - output.tags | length == 2
+ - output is changed
+
+ - name: Re-Tag asg (different order)
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg-tag-test'
+ tags:
+ - tag_b: value 2
+ propagate_at_launch: yes
+ - tag_a: value 1
+ propagate_at_launch: no
+ register: output
+ - assert:
+ that:
+ - output.tags | length == 2
+ - output is not changed
+
+ - name: Re-Tag asg new tags
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg-tag-test'
+ tags:
+ - tag_c: value 3
+ propagate_at_launch: no
+ purge_tags: true
+ register: output
+ - assert:
+ that:
+ - output.tags | length == 1
+ - output is changed
+
+ - name: Re-Tag asg update propagate_at_launch
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg-tag-test'
+ tags:
+ - tag_c: value 3
+ propagate_at_launch: yes
+ register: output
+ - assert:
+ that:
+ - output.tags | length == 1
+ - output is changed
+
+ - name: Remove all tags
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg-tag-test'
+ tags: []
+ purge_tags: true
+ register: add_empty
+ - name: Get asg info
+ ec2_asg_info:
+ name: '{{ resource_prefix }}-asg-tag-test'
+ register: info_result
+ - set_fact:
+ tag_keys: "{{ info_result.results[0].tags | map(attribute='key') | list }}"
+ - assert:
+ that:
+ - add_empty is changed
+ - info_result.results[0].tags | length == 0
+ - '"autoscaling:CreateOrUpdateTags" not in add_empty.resource_actions'
+ - '"autoscaling:DeleteTags" in add_empty.resource_actions'
+
+ - name: Add 4 new tags - do not purge existing tags
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg-tag-test'
+ tags:
+ - lowercase spaced: hello cruel world
+ propagate_at_launch: no
+ - Title Case: Hello Cruel World
+ propagate_at_launch: yes
+ - CamelCase: SimpleCamelCase
+ propagate_at_launch: yes
+ - snake_case: simple_snake_case
+ propagate_at_launch: no
+ register: add_result
+ - name: Get asg info
+ ec2_asg_info:
+ name: '{{ resource_prefix }}-asg-tag-test'
+ register: info_result
+ - set_fact:
+ tag_keys: "{{ info_result.results[0].tags | map(attribute='key') | list }}"
+ - assert:
+ that:
+ - add_result is changed
+ - info_result.results[0].tags | length == 4
+ - '"lowercase spaced" in tag_keys'
+ - '"Title Case" in tag_keys'
+ - '"CamelCase" in tag_keys'
+ - '"snake_case" in tag_keys'
+ - '"autoscaling:CreateOrUpdateTags" in add_result.resource_actions'
+
+ - name: Add 4 new tags - do not purge existing tags - idempotency
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg-tag-test'
+ tags:
+ - lowercase spaced: hello cruel world
+ propagate_at_launch: no
+ - Title Case: Hello Cruel World
+ propagate_at_launch: yes
+ - CamelCase: SimpleCamelCase
+ propagate_at_launch: yes
+ - snake_case: simple_snake_case
+ propagate_at_launch: no
+ register: add_result
+ - name: Get asg info
+ ec2_asg_info:
+ name: '{{ resource_prefix }}-asg-tag-test'
+ register: info_result
+ - assert:
+ that:
+ - add_result is not changed
+ - info_result.results[0].tags | length == 4
+ - '"autoscaling:CreateOrUpdateTags" not in add_result.resource_actions'
+
+ - name: Add 2 new tags - purge existing tags
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg-tag-test'
+ tags:
+ - tag_a: val_a
+ propagate_at_launch: no
+ - tag_b: val_b
+ propagate_at_launch: yes
+ purge_tags: true
+ register: add_purge_result
+ - name: Get asg info
+ ec2_asg_info:
+ name: '{{ resource_prefix }}-asg-tag-test'
+ register: info_result
+ - set_fact:
+ tag_keys: "{{ info_result.results[0].tags | map(attribute='key') | list }}"
+ - assert:
+ that:
+ - add_purge_result is changed
+ - info_result.results[0].tags | length == 2
+ - '"tag_a" in tag_keys'
+ - '"tag_b" in tag_keys'
+ - '"lowercase spaced" not in tag_keys'
+ - '"Title Case" not in tag_keys'
+ - '"CamelCase" not in tag_keys'
+ - '"snake_case" not in tag_keys'
+ - '"autoscaling:CreateOrUpdateTags" in add_purge_result.resource_actions'
+
+ - name: Re-tag ASG - modify values
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg-tag-test'
+ tags:
+ - tag_a: new_val_a
+ propagate_at_launch: no
+ - tag_b: new_val_b
+ propagate_at_launch: yes
+ register: add_purge_result
+ - name: Get asg info
+ ec2_asg_info:
+ name: '{{ resource_prefix }}-asg-tag-test'
+ register: info_result
+ - set_fact:
+ tag_keys: "{{ info_result.results[0].tags | map(attribute='key') | list }}"
+ - set_fact:
+ tag_values: "{{ info_result.results[0].tags | map(attribute='value') | list\
+ \ }}"
+ - assert:
+ that:
+ - add_purge_result is changed
+ - info_result.results[0].tags | length == 2
+ - '"tag_a" in tag_keys'
+ - '"tag_b" in tag_keys'
+ - '"new_val_a" in tag_values'
+ - '"new_val_b" in tag_values'
+ - '"lowercase spaced" not in tag_keys'
+ - '"Title Case" not in tag_keys'
+ - '"CamelCase" not in tag_keys'
+ - '"snake_case" not in tag_keys'
+ - '"autoscaling:CreateOrUpdateTags" in add_purge_result.resource_actions'
+
+ - name: Add 2 more tags - do not purge existing tags
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg-tag-test'
+ tags:
+ - lowercase spaced: hello cruel world
+ propagate_at_launch: no
+ - Title Case: Hello Cruel World
+ propagate_at_launch: yes
+ register: add_result
+ - name: Get asg info
+ ec2_asg_info:
+ name: '{{ resource_prefix }}-asg-tag-test'
+ register: info_result
+ - set_fact:
+ tag_keys: "{{ info_result.results[0].tags | map(attribute='key') | list }}"
+ - assert:
+ that:
+ - add_result is changed
+ - info_result.results[0].tags | length == 4
+ - '"tag_a" in tag_keys'
+ - '"tag_b" in tag_keys'
+ - '"lowercase spaced" in tag_keys'
+ - '"Title Case" in tag_keys'
+ - '"autoscaling:CreateOrUpdateTags" in add_result.resource_actions'
+
+ - name: Add empty tags with purge set to false to assert that existing tags are
+ retained
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg-tag-test'
+ tags: []
+ purge_tags: false
+ register: add_empty
+ - name: Get asg info
+ ec2_asg_info:
+ name: '{{ resource_prefix }}-asg-tag-test'
+ register: info_result
+ - set_fact:
+ tag_keys: "{{ info_result.results[0].tags | map(attribute='key') | list }}"
+ - assert:
+ that:
+ - add_empty is not changed
+ - info_result.results[0].tags | length == 4
+ - '"tag_a" in tag_keys'
+ - '"tag_b" in tag_keys'
+ - '"lowercase spaced" in tag_keys'
+ - '"Title Case" in tag_keys'
+ - '"autoscaling:CreateOrUpdateTags" not in add_empty.resource_actions'
+
+ - name: Add empty tags with purge set to true to assert that existing tags are removed
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg-tag-test'
+ tags: []
+ purge_tags: true
+ register: add_empty
+ - name: Get asg info
+ ec2_asg_info:
+ name: '{{ resource_prefix }}-asg-tag-test'
+ register: info_result
+ - set_fact:
+ tag_keys: "{{ info_result.results[0].tags | map(attribute='key') | list }}"
+ - assert:
+ that:
+ - add_empty is changed
+ - info_result.results[0].tags | length == 0
+ - '"tag_a" not in tag_keys'
+ - '"tag_b" not in tag_keys'
+ - '"lowercase spaced" not in tag_keys'
+ - '"Title Case" not in tag_keys'
+ - '"autoscaling:CreateOrUpdateTags" not in add_empty.resource_actions'
+ - '"autoscaling:DeleteTags" in add_empty.resource_actions'
+
+ #----------------------------------------------------------------------
+
+ always:
+
+ - name: kill asg created in this test
+ ec2_asg:
+ name: '{{ resource_prefix }}-asg-tag-test'
+ state: absent
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+ - name: remove launch config created in this test
+ ec2_lc:
+ name: '{{ resource_prefix }}-lc-tag-test'
+ state: absent
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/runme.sh b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/runme.sh
new file mode 100755
index 000000000..aa324772b
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/runme.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+#
+# Beware: most of our tests here are run in parallel.
+# To add new tests you'll need to add a new host to the inventory and a matching
+# '{{ inventory_hostname }}'.yml file in roles/ec2_asg/tasks/
+
+
+set -eux
+
+export ANSIBLE_ROLES_PATH=../
+
+ansible-playbook main.yml -i inventory "$@"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/aliases b/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/aliases
new file mode 100644
index 000000000..4ef4b2067
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/aliases
@@ -0,0 +1 @@
+cloud/aws
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/main.yml
new file mode 100644
index 000000000..2fe745f07
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/main.yml
@@ -0,0 +1,5 @@
+- hosts: localhost
+ connection: local
+ environment: "{{ ansible_test.environment }}"
+ tasks:
+ - include_tasks: 'tasks/main.yml'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/tasks/main.yml
new file mode 100644
index 000000000..0787ea121
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/tasks/main.yml
@@ -0,0 +1,193 @@
+---
+- module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key | default(omit) }}'
+ aws_secret_key: '{{ aws_secret_key | default(omit) }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region | default(omit) }}'
+
+ block:
+ - name: 'List available AZs in current Region'
+ aws_az_info:
+ register: region_azs
+
+ - name: check task return attributes
+ vars:
+ first_az: '{{ region_azs.availability_zones[0] }}'
+ assert:
+ that:
+ - region_azs is successful
+ - '"availability_zones" in region_azs'
+ - '"group_name" in first_az'
+ - '"messages" in first_az'
+ - '"network_border_group" in first_az'
+ - '"opt_in_status" in first_az'
+ - '"region_name" in first_az'
+ - '"state" in first_az'
+ - '"zone_id" in first_az'
+ - '"zone_name" in first_az'
+ # botocore >= 1.17.18
+ #- '"zone_type" in first_az'
+
+ - name: 'List available AZs in current Region - check_mode'
+ aws_az_info:
+ check_mode: yes
+ register: check_azs
+
+ - name: check task return attributes
+ vars:
+ first_az: '{{ check_azs.availability_zones[0] }}'
+ assert:
+ that:
+ - check_azs is successful
+ - '"availability_zones" in check_azs'
+ - '"group_name" in first_az'
+ - '"messages" in first_az'
+ - '"network_border_group" in first_az'
+ - '"opt_in_status" in first_az'
+ - '"region_name" in first_az'
+ - '"state" in first_az'
+ - '"zone_id" in first_az'
+ - '"zone_name" in first_az'
+ # botocore >= 1.17.18
+ #- '"zone_type" in first_az'
+
+
+ # Be specific - aws_region isn't guaranteed to be any specific value
+ - name: 'List Available AZs in us-east-1'
+ aws_az_info:
+ region: 'us-east-1'
+ register: us_east_1
+
+ - name: 'Check that an AZ from us-east-1 has valid looking attributes'
+ vars:
+ first_az: '{{ us_east_1.availability_zones[0] }}'
+ assert:
+ that:
+ - us_east_1 is successful
+ - '"availability_zones" in us_east_1'
+ - '"group_name" in first_az'
+ - '"messages" in first_az'
+ - '"network_border_group" in first_az'
+ - '"opt_in_status" in first_az'
+ - '"region_name" in first_az'
+ - '"state" in first_az'
+ - '"zone_id" in first_az'
+ - '"zone_name" in first_az'
+ # botocore >= 1.17.18
+ #- '"zone_type" in first_az'
+ - first_az.group_name.startswith('us-east-1')
+ - first_az.network_border_group.startswith('us-east-1')
+ - first_az.region_name == 'us-east-1'
+ - first_az.zone_id.startswith('use1-az')
+ - not first_az.zone_id == "use1-az"
+ - first_az.zone_name.startswith('us-east-1')
+ - not first_az.zone_name == 'us-east-1'
+ # botocore >= 1.17.18
+ #- first_az.zone_type == 'availability-zone'
+
+ - name: 'Filter Available AZs in us-west-2 using - ("zone-name")'
+ aws_az_info:
+ region: 'us-west-2'
+ filters:
+ zone-name: 'us-west-2c'
+ register: us_west_2
+
+ - name: 'Check that an AZ from us-west-2 has attributes we expect'
+ vars:
+ first_az: '{{ us_west_2.availability_zones[0] }}'
+ assert:
+ that:
+ - us_west_2 is successful
+ - '"availability_zones" in us_west_2'
+ - us_west_2.availability_zones | length == 1
+ - '"group_name" in first_az'
+ - '"messages" in first_az'
+ - '"network_border_group" in first_az'
+ - '"opt_in_status" in first_az'
+ - '"region_name" in first_az'
+ - '"state" in first_az'
+ - '"zone_id" in first_az'
+ - '"zone_name" in first_az'
+ # botocore >= 1.17.18
+ #- '"zone_type" in first_az'
+ - first_az.group_name == 'us-west-2'
+ - first_az.network_border_group == 'us-west-2'
+ - first_az.region_name == 'us-west-2'
+ # AZs are mapped to the 'real' AZs on a per-account basis
+ - first_az.zone_id.startswith('usw2-az')
+ - not first_az.zone_id == 'usw2-az'
+ - first_az.zone_name == 'us-west-2c'
+ # botocore >= 1.17.18
+ #- first_az.zone_type == 'availability-zone'
+
+ - name: 'Filter Available AZs in eu-central-1 using _ ("zone_name")'
+ aws_az_info:
+ region: 'eu-central-1'
+ filters:
+ zone_name: 'eu-central-1b'
+ register: eu_central_1
+
+ - name: 'Check that eu-central-1b has the attributes we expect'
+ vars:
+ first_az: '{{ eu_central_1.availability_zones[0] }}'
+ assert:
+ that:
+ - eu_central_1 is successful
+ - '"availability_zones" in eu_central_1'
+ - eu_central_1.availability_zones | length == 1
+ - '"group_name" in first_az'
+ - '"messages" in first_az'
+ - '"network_border_group" in first_az'
+ - '"opt_in_status" in first_az'
+ - '"region_name" in first_az'
+ - '"state" in first_az'
+ - '"zone_id" in first_az'
+ - '"zone_name" in first_az'
+ # botocore >= 1.17.18
+ #- '"zone_type" in first_az'
+ - first_az.group_name == 'eu-central-1'
+ - first_az.network_border_group == 'eu-central-1'
+ - first_az.region_name == 'eu-central-1'
+ # AZs are mapped to the 'real' AZs on a per-account basis
+ - first_az.zone_id.startswith('euc1-az')
+ - not first_az.zone_id == "euc1-az"
+ - first_az.zone_name == 'eu-central-1b'
+ # botocore >= 1.17.18
+ #- first_az.zone_type == 'availability-zone'
+
+ - name: 'Filter Available AZs in eu-west-2 using _ and - ("zone_name" and "zone-name") : _ wins '
+ aws_az_info:
+ region: 'eu-west-2'
+ filters:
+ zone-name: 'eu-west-2a'
+ zone_name: 'eu-west-2c'
+ register: eu_west_2
+
+ - name: 'Check that we get the AZ specified by zone_name rather than zone-name'
+ vars:
+ first_az: '{{ eu_west_2.availability_zones[0] }}'
+ assert:
+ that:
+ - eu_west_2 is successful
+ - '"availability_zones" in eu_west_2'
+ - eu_west_2.availability_zones | length == 1
+ - '"group_name" in first_az'
+ - '"messages" in first_az'
+ - '"network_border_group" in first_az'
+ - '"opt_in_status" in first_az'
+ - '"region_name" in first_az'
+ - '"state" in first_az'
+ - '"zone_id" in first_az'
+ - '"zone_name" in first_az'
+ # botocore >= 1.17.18
+ #- '"zone_type" in first_az'
+ - first_az.group_name == 'eu-west-2'
+ - first_az.network_border_group == 'eu-west-2'
+ - first_az.region_name == 'eu-west-2'
+ # AZs are mapped to the 'real' AZs on a per-account basis
+ - first_az.zone_id.startswith('euw2-az')
+ - not first_az.zone_id == "euw2-az"
+ - first_az.zone_name == 'eu-west-2c'
+ # botocore >= 1.17.18
+ #- first_az.zone_type == 'availability-zone'
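
The filters used above are passed straight through to the EC2 DescribeAvailabilityZones API, so its other documented filter names work the same way. As an illustrative sketch (not part of this patch), filtering on zone-id selects the same physical zone regardless of the per-account zone-name mapping noted in the assertions above:

    - name: Filter available AZs by zone ID (illustrative only)
      aws_az_info:
        region: us-east-1
        filters:
          zone-id: use1-az1
      register: by_zone_id

    - name: Show which zone name the ID maps to in this account
      debug:
        msg: "{{ by_zone_id.availability_zones | map(attribute='zone_name') | list }}"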
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/aliases b/ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/aliases
new file mode 100644
index 000000000..4ef4b2067
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/aliases
@@ -0,0 +1 @@
+cloud/aws
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/tasks/main.yaml b/ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/tasks/main.yaml
new file mode 100644
index 000000000..c40d0f11b
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/tasks/main.yaml
@@ -0,0 +1,18 @@
+- module_defaults:
+ group/aws:
+ region: "{{ aws_region }}"
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ block:
+ - name: retrieve caller facts
+ aws_caller_info:
+ register: result
+
+ - name: assert correct keys are returned
+ assert:
+ that:
+ - result.account is not none
+ - result.arn is not none
+ - result.user_id is not none
+ - result.account_alias is not none
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/aliases b/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/aliases
new file mode 100644
index 000000000..4ef4b2067
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/aliases
@@ -0,0 +1 @@
+cloud/aws
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/inventory b/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/inventory
new file mode 100644
index 000000000..5093e8582
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/inventory
@@ -0,0 +1,6 @@
+[tests]
+localhost
+
+[all:vars]
+ansible_connection=local
+ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/main.yml
new file mode 100644
index 000000000..b3c3fa155
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/main.yml
@@ -0,0 +1,35 @@
+- hosts: localhost
+ gather_facts: no
+ collections:
+ - amazon.aws
+ module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ tasks:
+ - ec2_instance_info:
+ register: ec2_info
+
+ - assert:
+ that:
+ - '"resource_actions" in ec2_info'
+ - '"ec2:DescribeInstances" in ec2_info.resource_actions'
+
+ - aws_az_info:
+ register: az_info
+
+ - assert:
+ that:
+ - '"resource_actions" in az_info'
+ - '"ec2:DescribeAvailabilityZones" in az_info.resource_actions'
+
+ - aws_caller_info:
+ register: caller_info
+
+ - assert:
+ that:
+ - '"resource_actions" in caller_info'
+ - '"sts:GetCallerIdentity" in caller_info.resource_actions'
+ - '"iam:ListAccountAliases" in caller_info.resource_actions'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/runme.sh b/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/runme.sh
new file mode 100755
index 000000000..a2c41e1f5
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/runme.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+
+set -eux
+
+export ANSIBLE_CALLBACK_WHITELIST="aws_resource_actions"
+
+OUTFILE="callback_aws_resource_actions.out"
+trap 'rm -rvf "${OUTFILE}" "${OUTFILE}.actions"' EXIT
+
+# Tests that the resource_actions are added to each task
+ansible-playbook main.yml -i localhost "$@" | tee "${OUTFILE}"
+
+# There should be a summary at the end of the run with the actions performed:
+# AWS ACTIONS: ['ec2:DescribeAvailabilityZones', 'ec2:DescribeInstances', 'iam:ListAccountAliases', 'sts:GetCallerIdentity']
+grep -E "AWS ACTIONS: \[" "${OUTFILE}" > "${OUTFILE}.actions"
+for action in 'ec2:DescribeAvailabilityZones' 'ec2:DescribeInstances' 'iam:ListAccountAliases' 'sts:GetCallerIdentity'
+do
+ grep "${action}" "${OUTFILE}.actions"
+done
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/aliases b/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/aliases
new file mode 100644
index 000000000..d393681d6
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+cloudformation_info
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/defaults/main.yml
new file mode 100644
index 000000000..2f2a70c55
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/defaults/main.yml
@@ -0,0 +1,8 @@
+stack_name: "{{ resource_prefix }}"
+
+availability_zone: '{{ ec2_availability_zone_names[0] }}'
+
+vpc_name: '{{ resource_prefix }}-vpc'
+vpc_seed: '{{ resource_prefix }}'
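+# Note: the random filter is deterministic for a fixed seed, so re-runs with
+# the same resource_prefix derive the same 10.x.0.0/16 network (and matching
+# subnet), while different prefixes usually map to different ranges.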
+vpc_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.0.0/16'
+subnet_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.32.0/24'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/files/cf_template.json b/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/files/cf_template.json
new file mode 100644
index 000000000..ff4c5693b
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/files/cf_template.json
@@ -0,0 +1,37 @@
+{
+ "AWSTemplateFormatVersion" : "2010-09-09",
+
+ "Description" : "Create an Amazon EC2 instance.",
+
+ "Parameters" : {
+ "InstanceType" : {
+ "Description" : "EC2 instance type",
+ "Type" : "String",
+ "Default" : "t3.nano",
+ "AllowedValues" : [ "t3.micro", "t3.nano"]
+ },
+ "ImageId" : {
+ "Type" : "String"
+ },
+ "SubnetId" : {
+ "Type" : "String"
+ }
+ },
+
+ "Resources" : {
+ "EC2Instance" : {
+ "Type" : "AWS::EC2::Instance",
+ "Properties" : {
+ "InstanceType" : { "Ref" : "InstanceType" },
+ "ImageId" : { "Ref" : "ImageId" },
+ "SubnetId": { "Ref" : "SubnetId" }
+ }
+ }
+ },
+
+ "Outputs" : {
+ "InstanceId" : {
+ "Value" : { "Ref" : "EC2Instance" }
+ }
+ }
+}
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/files/update_policy.json b/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/files/update_policy.json
new file mode 100644
index 000000000..6a3513825
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/files/update_policy.json
@@ -0,0 +1,10 @@
+{
+ "Statement" : [
+ {
+ "Effect" : "Allow",
+ "Action" : "Update:*",
+ "Principal": "*",
+ "Resource" : "*"
+ }
+ ]
+}
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/meta/main.yml
new file mode 100644
index 000000000..2bff8543a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+- role: setup_ec2_facts
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/tasks/main.yml
new file mode 100644
index 000000000..b9f174137
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/tasks/main.yml
@@ -0,0 +1,491 @@
+---
+- module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key | default(omit) }}'
+ aws_secret_key: '{{ aws_secret_key | default(omit) }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region | default(omit) }}'
+
+ block:
+
+ # ==== Env setup ==========================================================
+
+ - name: Create a test VPC
+ ec2_vpc_net:
+ name: "{{ vpc_name }}"
+ cidr_block: "{{ vpc_cidr }}"
+ tags:
+ Name: Cloudformation testing
+ register: testing_vpc
+
+ - name: Create a test subnet
+ ec2_vpc_subnet:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: "{{ subnet_cidr }}"
+ az: "{{ availability_zone }}"
+ register: testing_subnet
+
+ # ==== Cloudformation tests ===============================================
+
+ # 1. Basic stack creation (check mode, actual run and idempotency)
+ # 2. Tags
+ # 3. cloudformation_info tests (basic + all_facts)
+ # 4. termination_protection
+ # 5. create_changeset + changeset_name
+
+ # There is still scope to add tests for -
+ # 1. capabilities (a commented-out sketch of such a test follows below)
+ # 2. stack_policy
+ # 3. on_create_failure (covered in unit tests)
+ # 4. Passing in a role
+ # 5. nested stacks?
+
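+ # A minimal, commented-out sketch (editorial, not part of the upstream suite)
+ # of what a capabilities test could look like. It assumes a hypothetical
+ # 'cf_iam_template.json' template that declares an AWS::IAM::Role and
+ # therefore needs CAPABILITY_NAMED_IAM:
+ #
+ # - name: create a stack that declares IAM resources (hypothetical)
+ #   cloudformation:
+ #     stack_name: "{{ stack_name }}-iam"
+ #     template_body: "{{ lookup('file', 'cf_iam_template.json') }}"
+ #     capabilities:
+ #       - CAPABILITY_NAMED_IAM
+ #   register: cf_iam_stack
+ #
+ # - name: check the stack was created
+ #   assert:
+ #     that:
+ #       - cf_iam_stack.changed
+ #       - "'output' in cf_iam_stack and 'Stack CREATE complete' in cf_iam_stack.output"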
+
+ - name: create a cloudformation stack (check mode)
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ template_body: "{{ lookup('file','cf_template.json') }}"
+ template_parameters:
+ InstanceType: "t3.nano"
+ ImageId: "{{ ec2_ami_id }}"
+ SubnetId: "{{ testing_subnet.subnet.id }}"
+ tags:
+ Stack: "{{ stack_name }}"
+ test: "{{ resource_prefix }}"
+ register: cf_stack
+ check_mode: yes
+
+ - name: check task return attributes
+ assert:
+ that:
+ - cf_stack.changed
+ - "'msg' in cf_stack and 'New stack would be created' in cf_stack.msg"
+
+ - name: create a cloudformation stack
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ template_body: "{{ lookup('file','cf_template.json') }}"
+ template_parameters:
+ InstanceType: "t3.nano"
+ ImageId: "{{ ec2_ami_id }}"
+ SubnetId: "{{ testing_subnet.subnet.id }}"
+ tags:
+ Stack: "{{ stack_name }}"
+ test: "{{ resource_prefix }}"
+ register: cf_stack
+
+ - name: check task return attributes
+ assert:
+ that:
+ - cf_stack.changed
+ - "'events' in cf_stack"
+ - "'output' in cf_stack and 'Stack CREATE complete' in cf_stack.output"
+ - "'stack_outputs' in cf_stack and 'InstanceId' in cf_stack.stack_outputs"
+ - "'stack_resources' in cf_stack"
+
+ - name: create a cloudformation stack (check mode) (idempotent)
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ template_body: "{{ lookup('file','cf_template.json') }}"
+ template_parameters:
+ InstanceType: "t3.nano"
+ ImageId: "{{ ec2_ami_id }}"
+ SubnetId: "{{ testing_subnet.subnet.id }}"
+ tags:
+ Stack: "{{ stack_name }}"
+ test: "{{ resource_prefix }}"
+ register: cf_stack
+ check_mode: yes
+
+ - name: check task return attributes
+ assert:
+ that:
+ - not cf_stack.changed
+
+ - name: create a cloudformation stack (idempotent)
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ template_body: "{{ lookup('file','cf_template.json') }}"
+ template_parameters:
+ InstanceType: "t3.nano"
+ ImageId: "{{ ec2_ami_id }}"
+ SubnetId: "{{ testing_subnet.subnet.id }}"
+ tags:
+ Stack: "{{ stack_name }}"
+ test: "{{ resource_prefix }}"
+ register: cf_stack
+
+ - name: check task return attributes
+ assert:
+ that:
+ - not cf_stack.changed
+ - "'output' in cf_stack and 'Stack is already up-to-date.' in cf_stack.output"
+ - "'stack_outputs' in cf_stack and 'InstanceId' in cf_stack.stack_outputs"
+ - "'stack_resources' in cf_stack"
+
+ - name: get all stacks details
+ cloudformation_info:
+ register: all_stacks_info
+
+ - name: assert all stacks info
+ assert:
+ that:
+ - all_stacks_info | length > 0
+
+ - name: get stack details
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: stack_info
+
+ - name: assert stack info
+ assert:
+ that:
+ - "'cloudformation' in stack_info"
+ - "stack_info.cloudformation | length == 1"
+ - "stack_name in stack_info.cloudformation"
+ - "'stack_description' in stack_info.cloudformation[stack_name]"
+ - "'stack_outputs' in stack_info.cloudformation[stack_name]"
+ - "'stack_parameters' in stack_info.cloudformation[stack_name]"
+ - "'stack_tags' in stack_info.cloudformation[stack_name]"
+ - "stack_info.cloudformation[stack_name].stack_tags.Stack == stack_name"
+
+ - name: get stack details (checkmode)
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: stack_info
+ check_mode: yes
+
+ - name: assert stack info
+ assert:
+ that:
+ - "'cloudformation' in stack_info"
+ - "stack_info.cloudformation | length == 1"
+ - "stack_name in stack_info.cloudformation"
+ - "'stack_description' in stack_info.cloudformation[stack_name]"
+ - "'stack_outputs' in stack_info.cloudformation[stack_name]"
+ - "'stack_parameters' in stack_info.cloudformation[stack_name]"
+ - "'stack_tags' in stack_info.cloudformation[stack_name]"
+ - "stack_info.cloudformation[stack_name].stack_tags.Stack == stack_name"
+
+ - name: get stack details (all_facts)
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ all_facts: yes
+ register: stack_info
+
+ - name: assert stack info
+ assert:
+ that:
+ - "'stack_events' in stack_info.cloudformation[stack_name]"
+ - "'stack_policy' in stack_info.cloudformation[stack_name]"
+ - "'stack_resource_list' in stack_info.cloudformation[stack_name]"
+ - "'stack_resources' in stack_info.cloudformation[stack_name]"
+ - "'stack_template' in stack_info.cloudformation[stack_name]"
+
+ - name: get stack details (all_facts) (checkmode)
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ all_facts: yes
+ register: stack_info
+ check_mode: yes
+
+ - name: assert stack info
+ assert:
+ that:
+ - "'stack_events' in stack_info.cloudformation[stack_name]"
+ - "'stack_policy' in stack_info.cloudformation[stack_name]"
+ - "'stack_resource_list' in stack_info.cloudformation[stack_name]"
+ - "'stack_resources' in stack_info.cloudformation[stack_name]"
+ - "'stack_template' in stack_info.cloudformation[stack_name]"
+
+ # ==== Cloudformation tests (create changeset) ============================
+
+ # try to create a changeset by changing instance type
+ - name: create a changeset
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ create_changeset: yes
+ changeset_name: "test-changeset"
+ template_body: "{{ lookup('file','cf_template.json') }}"
+ template_parameters:
+ InstanceType: "t3.micro"
+ ImageId: "{{ ec2_ami_id }}"
+ SubnetId: "{{ testing_subnet.subnet.id }}"
+ tags:
+ Stack: "{{ stack_name }}"
+ test: "{{ resource_prefix }}"
+ register: create_changeset_result
+
+ - name: assert changeset created
+ assert:
+ that:
+ - "create_changeset_result.changed"
+ - "'change_set_id' in create_changeset_result"
+ - "'Stack CREATE_CHANGESET complete' in create_changeset_result.output"
+
+ - name: get stack details with changesets
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ stack_change_sets: True
+ register: stack_info
+
+ - name: assert changesets in info
+ assert:
+ that:
+ - "'stack_change_sets' in stack_info.cloudformation[stack_name]"
+
+ - name: get stack details with changesets (checkmode)
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ stack_change_sets: True
+ register: stack_info
+ check_mode: yes
+
+ - name: assert changesets in info
+ assert:
+ that:
+ - "'stack_change_sets' in stack_info.cloudformation[stack_name]"
+
+ # try to create an empty changeset by passing in unchanged template
+ - name: create a changeset
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ create_changeset: yes
+ template_body: "{{ lookup('file','cf_template.json') }}"
+ template_parameters:
+ InstanceType: "t3.nano"
+ ImageId: "{{ ec2_ami_id }}"
+ SubnetId: "{{ testing_subnet.subnet.id }}"
+ tags:
+ Stack: "{{ stack_name }}"
+ test: "{{ resource_prefix }}"
+ register: create_changeset_result
+
+ - name: assert changeset created
+ assert:
+ that:
+ - "not create_changeset_result.changed"
+ - "'The created Change Set did not contain any changes to this stack and was deleted.' in create_changeset_result.output"
+
+ # ==== Cloudformation tests (termination_protection) ======================
+
+ - name: set termination protection to true
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ termination_protection: yes
+ template_body: "{{ lookup('file','cf_template.json') }}"
+ template_parameters:
+ InstanceType: "t3.nano"
+ ImageId: "{{ ec2_ami_id }}"
+ SubnetId: "{{ testing_subnet.subnet.id }}"
+ tags:
+ Stack: "{{ stack_name }}"
+ test: "{{ resource_prefix }}"
+ register: cf_stack
+
+# This fails - #65592
+# - name: check task return attributes
+# assert:
+# that:
+# - cf_stack.changed
+
+ - name: get stack details
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: stack_info
+
+ - name: assert stack info
+ assert:
+ that:
+ - "stack_info.cloudformation[stack_name].stack_description.enable_termination_protection"
+
+ - name: get stack details (checkmode)
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: stack_info
+ check_mode: yes
+
+ - name: assert stack info
+ assert:
+ that:
+ - "stack_info.cloudformation[stack_name].stack_description.enable_termination_protection"
+
+ - name: set termination protection to false
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ termination_protection: no
+ template_body: "{{ lookup('file','cf_template.json') }}"
+ template_parameters:
+ InstanceType: "t3.nano"
+ ImageId: "{{ ec2_ami_id }}"
+ SubnetId: "{{ testing_subnet.subnet.id }}"
+ tags:
+ Stack: "{{ stack_name }}"
+ test: "{{ resource_prefix }}"
+ register: cf_stack
+
+# This fails - #65592
+# - name: check task return attributes
+# assert:
+# that:
+# - cf_stack.changed
+
+ - name: get stack details
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: stack_info
+
+ - name: assert stack info
+ assert:
+ that:
+ - "not stack_info.cloudformation[stack_name].stack_description.enable_termination_protection"
+
+ - name: get stack details (checkmode)
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: stack_info
+ check_mode: yes
+
+ - name: assert stack info
+ assert:
+ that:
+ - "not stack_info.cloudformation[stack_name].stack_description.enable_termination_protection"
+
+ # ==== Cloudformation tests (update_policy) ======================
+
+ - name: set a stack policy with json body
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ stack_policy_body: "{{ lookup('file','update_policy.json') }}"
+ template_body: "{{ lookup('file','cf_template.json') }}"
+ template_parameters:
+ InstanceType: "t3.nano"
+ ImageId: "{{ ec2_ami_id }}"
+ SubnetId: "{{ testing_subnet.subnet.id }}"
+ tags:
+ Stack: "{{ stack_name }}"
+ test: "{{ resource_prefix }}"
+ register: cf_stack
+
+ - name: get stack details
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: stack_info
+
+ - name: set a stack policy on update
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ stack_policy_on_update_body: "{{ lookup('file','update_policy.json') }}"
+ template_body: "{{ lookup('file','cf_template.json') }}"
+ template_parameters:
+ InstanceType: "t3.nano"
+ ImageId: "{{ ec2_ami_id }}"
+ SubnetId: "{{ testing_subnet.subnet.id }}"
+ tags:
+ Stack: "{{ stack_name }}"
+ test: "{{ resource_prefix }}"
+ register: cf_stack
+
+ - name: get stack details
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: stack_info
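+ # The two stack-policy tasks above only register stack_info without
+ # asserting on it. A hedged sketch (not in the upstream suite) of a
+ # follow-up check, assuming the applied policy is returned under
+ # 'stack_policy' when all_facts is requested (as asserted earlier):
+ #
+ # - name: get stack details (all_facts) to inspect the stack policy
+ #   cloudformation_info:
+ #     stack_name: "{{ stack_name }}"
+ #     all_facts: yes
+ #   register: stack_policy_info
+ #
+ # - name: assert the Update:* statement is present in the applied policy
+ #   assert:
+ #     that:
+ #       - "'Update:*' in (stack_policy_info.cloudformation[stack_name].stack_policy | to_json)"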
+
+ # ==== Cloudformation tests (delete stack tests) ==========================
+
+ - name: delete cloudformation stack (check mode)
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ state: absent
+ check_mode: yes
+ register: cf_stack
+
+ - name: check task return attributes
+ assert:
+ that:
+ - cf_stack.changed
+ - "'msg' in cf_stack and 'Stack would be deleted' in cf_stack.msg"
+
+ - name: delete cloudformation stack
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ state: absent
+ register: cf_stack
+
+ - name: check task return attributes
+ assert:
+ that:
+ - cf_stack.changed
+ - "'output' in cf_stack and 'Stack Deleted' in cf_stack.output"
+
+ - name: delete cloudformation stack (check mode) (idempotent)
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ state: absent
+ check_mode: yes
+ register: cf_stack
+
+ - name: check task return attributes
+ assert:
+ that:
+ - not cf_stack.changed
+ - "'msg' in cf_stack"
+ - >-
+ "Stack doesn't exist" in cf_stack.msg
+
+ - name: delete cloudformation stack (idempotent)
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ state: absent
+ register: cf_stack
+
+ - name: check task return attributes
+ assert:
+ that:
+ - not cf_stack.changed
+ - "'output' in cf_stack and 'Stack not found.' in cf_stack.output"
+
+ - name: get stack details
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: stack_info
+
+ - name: assert stack info
+ assert:
+ that:
+ - "not stack_info.cloudformation"
+
+ - name: get stack details (checkmode)
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: stack_info
+ check_mode: yes
+
+ - name: assert stack info
+ assert:
+ that:
+ - "not stack_info.cloudformation"
+
+ # ==== Cleanup ============================================================
+
+ always:
+
+ - name: delete stack
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ state: absent
+ ignore_errors: yes
+
+ - name: Delete test subnet
+ ec2_vpc_subnet:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: "{{ subnet_cidr }}"
+ state: absent
+ ignore_errors: yes
+
+ - name: Delete test VPC
+ ec2_vpc_net:
+ name: "{{ vpc_name }}"
+ cidr_block: "{{ vpc_cidr }}"
+ state: absent
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/aliases b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/aliases
new file mode 100644
index 000000000..3cbc2a485
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/aliases
@@ -0,0 +1,2 @@
+# reason: missing-policy
+unsupported
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/defaults/main.yml
new file mode 100644
index 000000000..2174b31ae
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/defaults/main.yml
@@ -0,0 +1,8 @@
+cloudtrail_name: '{{ resource_prefix }}-cloudtrail'
+s3_bucket_name: '{{ resource_prefix }}-cloudtrail-bucket'
+kms_alias: '{{ resource_prefix }}-cloudtrail'
+sns_topic: '{{ resource_prefix }}-cloudtrail-notifications'
+cloudtrail_prefix: 'ansible-test-prefix'
+cloudwatch_log_group: '{{ resource_prefix }}-cloudtrail'
+cloudwatch_role: '{{ resource_prefix }}-cloudtrail'
+cloudwatch_no_kms_role: '{{ resource_prefix }}-cloudtrail2'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/main.yml
new file mode 100644
index 000000000..b20eb2ad4
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/main.yml
@@ -0,0 +1,6 @@
+---
+- hosts: localhost
+ gather_facts: no
+ #serial: 10
+ roles:
+ - cloudtrail
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/runme.sh b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/runme.sh
new file mode 100755
index 000000000..14d1958ff
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/runme.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+#
+# Wrapper for the cloudtrail tests: generates a short random resource prefix,
+# writes it (together with the region and botocore debug settings) to a JSON
+# config file, and runs main.yml against the local inventory with that config.
+
+
+set -eux
+
+export ANSIBLE_ROLES_PATH=../
+tiny_prefix="$(uuidgen -r|cut -d- -f1)"
+
+# shellcheck disable=SC2016,SC2086
+echo '
+{
+"ansible_test": {
+ "environment": {
+ "ANSIBLE_DEBUG_BOTOCORE_LOGS": "True"
+ },
+ "module_defaults": null
+},
+"resource_prefix": "'${tiny_prefix}'",
+"tiny_prefix": "'${tiny_prefix}'",
+"aws_region": "us-east-2"
+}' > _config-file.json
+
+ansible-playbook main.yml -i inventory "$@" -e @_config-file.json
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/tasks/main.yml
new file mode 100644
index 000000000..e35136d5d
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/tasks/main.yml
@@ -0,0 +1,1595 @@
+---
+# General Tests:
+# - s3_bucket_name required when state is 'present'
+# - Creation / Deletion
+# - Enable/Disable logging
+# - Enable/Disable log file validation option
+# - Manipulation of Global Event logging option
+# - Manipulation of Multi-Region logging option
+# - Manipulation of S3 bucket option
+# - Manipulation of Encryption option
+# - Manipulation of SNS options
+# - Manipulation of CloudWatch Log group options
+# - Manipulation of Tags
+#
+# Notes:
+# - results include the updates, even when check_mode is true
+# - Poor handling of disable global + enable multi-region
+# botocore.errorfactory.InvalidParameterCombinationException: An error
+# occurred (InvalidParameterCombinationException) when calling the
+# UpdateTrail operation: Multi-Region trail must include global service
+# events.
+# - Using blank string for KMS ID doesn't remove encryption
+# - Using blank string for SNS Topic doesn't remove it
+# - Using blank string for CloudWatch Log Group / Role doesn't remove them
+#
+# Possible Bugs:
+
+- module_defaults:
+ # Add this as a default because we (almost) always need it
+ amazon.aws.cloudtrail:
+ s3_bucket_name: '{{ s3_bucket_name }}'
+ region: '{{ aws_region }}'
+ collections:
+ - amazon.aws
+ block:
+
+ # ============================================================
+ # Argument Tests
+ # ============================================================
+ - name: 'S3 Bucket required when state is "present"'
+ module_defaults: { cloudtrail: {} }
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ register: output
+ ignore_errors: yes
+ - assert:
+ that:
+ - output is failed
+
+ - name: 'CloudWatch cloudwatch_logs_log_group_arn required when cloudwatch_logs_role_arn passed'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ cloudwatch_logs_role_arn: 'SomeValue'
+ register: output
+ ignore_errors: yes
+ - assert:
+ that:
+ - output is failed
+ - '"parameters are required together" in output.msg'
+ - '"cloudwatch_logs_log_group_arn" in output.msg'
+
+ - name: 'CloudWatch cloudwatch_logs_role_arn required when cloudwatch_logs_log_group_arn passed'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ cloudwatch_logs_log_group_arn: 'SomeValue'
+ register: output
+ ignore_errors: yes
+ - assert:
+ that:
+ - output is failed
+ - '"parameters are required together" in output.msg'
+ - '"cloudwatch_logs_role_arn" in output.msg'
+
+ #- name: 'Global Logging must be enabled when enabling Multi-region'
+ # cloudtrail:
+ # state: present
+ # name: '{{ cloudtrail_name }}'
+ # include_global_events: no
+ # is_multi_region_trail: yes
+ # register: output
+ # ignore_errors: yes
+ #- assert:
+ # that:
+ # - output is failed
+
+ # ============================================================
+ # Preparation
+ # ============================================================
+ - name: 'Retrieve caller facts'
+ aws_caller_info: {}
+ register: aws_caller_info
+
+ - name: 'Create S3 bucket'
+ vars:
+ bucket_name: '{{ s3_bucket_name }}'
+ s3_bucket:
+ state: present
+ name: '{{ bucket_name }}'
+ policy: '{{ lookup("template", "s3-policy.j2") }}'
+ - name: 'Create second S3 bucket'
+ vars:
+ bucket_name: '{{ s3_bucket_name }}-2'
+ s3_bucket:
+ state: present
+ name: '{{ bucket_name }}'
+ policy: '{{ lookup("template", "s3-policy.j2") }}'
+
+ - name: 'Create SNS Topic'
+ vars:
+ sns_topic_name: '{{ sns_topic }}'
+ sns_topic:
+ state: present
+ name: '{{ sns_topic_name }}'
+ display_name: 'Used for testing SNS/CloudWatch integration'
+ policy: "{{ lookup('template', 'sns-policy.j2') | to_json }}"
+ register: output_sns_topic
+ - name: 'Create second SNS Topic'
+ vars:
+ sns_topic_name: '{{ sns_topic }}-2'
+ sns_topic:
+ state: present
+ name: '{{ sns_topic_name }}'
+ display_name: 'Used for testing SNS/CloudWatch integration'
+ policy: "{{ lookup('template', 'sns-policy.j2') | to_json }}"
+
+ - name: 'Create KMS Key'
+ aws_kms:
+ state: present
+ alias: '{{ kms_alias }}'
+ enabled: yes
+ policy: "{{ lookup('template', 'kms-policy.j2') | to_json }}"
+ register: kms_key
+ - name: 'Create second KMS Key'
+ aws_kms:
+ state: present
+ alias: '{{ kms_alias }}-2'
+ enabled: yes
+ policy: "{{ lookup('template', 'kms-policy.j2') | to_json }}"
+ register: kms_key2
+
+ - name: 'Create CloudWatch IAM Role'
+ iam_role:
+ state: present
+ name: '{{ cloudwatch_role }}'
+ assume_role_policy_document: "{{ lookup('template', 'cloudwatch-assume-policy.j2') }}"
+ register: output_cloudwatch_role
+ - name: 'Create CloudWatch Log Group'
+ cloudwatchlogs_log_group:
+ state: present
+ log_group_name: '{{ cloudwatch_log_group }}'
+ retention: 1
+ register: output_cloudwatch_log_group
+ - name: 'Create second CloudWatch Log Group'
+ cloudwatchlogs_log_group:
+ state: present
+ log_group_name: '{{ cloudwatch_log_group }}-2'
+ retention: 1
+ register: output_cloudwatch_log_group2
+ - name: 'Add inline policy to CloudWatch Role'
+ iam_policy:
+ state: present
+ iam_type: role
+ iam_name: '{{ cloudwatch_role }}'
+ policy_name: 'CloudWatch'
+ policy_json: "{{ lookup('template', 'cloudwatch-policy.j2') | to_json }}"
+
+ - name: 'Create CloudWatch IAM Role with no kms permissions'
+ iam_role:
+ state: present
+ name: '{{ cloudwatch_no_kms_role }}'
+ assume_role_policy_document: "{{ lookup('template', 'cloudtrail-no-kms-assume-policy.j2') }}"
+ managed_policies:
+ - "arn:aws:iam::aws:policy/AWSCloudTrail_FullAccess"
+ register: output_cloudwatch_no_kms_role
+
+ - name: pause to ensure role exists before attaching policy
+ pause:
+ seconds: 15
+
+ - name: 'Add inline policy to the no-kms CloudWatch Role'
+ iam_policy:
+ state: present
+ iam_type: role
+ iam_name: '{{ cloudwatch_no_kms_role }}'
+ policy_name: 'CloudWatchNokms'
+ policy_json: "{{ lookup('template', 'cloudtrail-no-kms-policy.j2') }}"
+
+ # ============================================================
+ # Tests
+ # ============================================================
+
+ - name: 'Create a trail (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Create a trail'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.exists == True
+ - output.trail.name == cloudtrail_name
+
+ - name: 'No-op update to trail'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.exists == True
+ # Check everything is what we expect before we start making changes
+ - output.trail.name == cloudtrail_name
+ - output.trail.home_region == aws_region
+ - output.trail.include_global_service_events == True
+ - output.trail.is_multi_region_trail == False
+ - output.trail.is_logging == True
+ - output.trail.log_file_validation_enabled == False
+ - output.trail.s3_bucket_name == s3_bucket_name
+ - output.trail.s3_key_prefix is none
+ - output.trail.kms_key_id is none
+ - output.trail.sns_topic_arn is none
+ - output.trail.sns_topic_name is none
+ - output.trail.tags | length == 0
+
+ - name: 'Get the trail info'
+ cloudtrail_info:
+ register: info
+
+ - name: 'Find the test trail in the CloudTrail info and record its ARN'
+ set_fact:
+ trail_present: true
+ trail_arn: '{{ item.resource_id }}'
+ when: item.name == cloudtrail_name
+ loop: "{{ info.trail_list }}"
+
+ - name: 'Assert that the trail name is present in the info'
+ assert:
+ that:
+ - trail_present is defined
+ - trail_present == True
+
+ # ============================================================
+
+ - name: 'Set S3 prefix (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ s3_key_prefix: '{{ cloudtrail_prefix }}'
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Set S3 prefix'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ s3_key_prefix: '{{ cloudtrail_prefix }}'
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.s3_key_prefix == cloudtrail_prefix
+
+ - name: 'Set S3 prefix (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ s3_key_prefix: '{{ cloudtrail_prefix }}'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.s3_key_prefix == cloudtrail_prefix
+
+ - name: 'No-op update to trail'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.s3_key_prefix == cloudtrail_prefix
+
+ - name: 'Get the trail info'
+ cloudtrail_info:
+ trail_names:
+ - '{{ trail_arn }}'
+ register: info
+
+ - name: 'Assert that the s3_key_prefix is correct'
+ assert:
+ that:
+ - info.trail_list[0].s3_key_prefix == cloudtrail_prefix
+
+ - name: 'Update S3 prefix (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ s3_key_prefix: '{{ cloudtrail_prefix }}-2'
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Update S3 prefix'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ s3_key_prefix: '{{ cloudtrail_prefix }}-2'
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+ - 'output.trail.s3_key_prefix == "{{ cloudtrail_prefix }}-2"'
+
+ - name: 'Update S3 prefix (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ s3_key_prefix: '{{ cloudtrail_prefix }}-2'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - 'output.trail.s3_key_prefix == "{{ cloudtrail_prefix }}-2"'
+
+ - name: 'Get the trail info after updating S3 prefix'
+ cloudtrail_info:
+ trail_names:
+ - '{{ trail_arn }}'
+ register: info
+
+ - name: 'Assert that the s3_key_prefix is correct'
+ assert:
+ that:
+ - 'info.trail_list[0].s3_key_prefix == "{{ cloudtrail_prefix }}-2"'
+
+ - name: 'Remove S3 prefix (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ s3_key_prefix: '/'
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Remove S3 prefix'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ s3_key_prefix: '/'
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.s3_key_prefix is none
+
+ - name: 'Remove S3 prefix (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ s3_key_prefix: '/'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.s3_key_prefix is none
+
+ - name: 'Get the trail info after removing S3 prefix'
+ cloudtrail_info:
+ trail_names:
+ - '{{ trail_arn }}'
+ register: info
+
+ - name: 'Assert that the s3_key_prefix is no longer returned'
+ assert:
+ that:
+ - info.trail_list[0].s3_key_prefix is not defined
+
+ # ============================================================
+
+ - include_tasks: 'tagging.yml'
+
+ # ============================================================
+
+ - name: 'Set SNS Topic (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ sns_topic_name: '{{ sns_topic }}'
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Set SNS Topic'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ sns_topic_name: '{{ sns_topic }}'
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.sns_topic_name == sns_topic
+
+ - name: 'Set SNS Topic (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ sns_topic_name: '{{ sns_topic }}'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.sns_topic_name == sns_topic
+
+ - name: 'No-op update to trail'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.sns_topic_name == sns_topic
+
+ - name: 'Get the trail info with SNS topic'
+ cloudtrail_info:
+ trail_names:
+ - '{{ trail_arn }}'
+ register: info
+
+ - name: 'Assert that the sns_topic is correctly set'
+ assert:
+ that:
+ - info.trail_list[0].sns_topic_name == sns_topic
+
+ - name: 'Update SNS Topic (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ sns_topic_name: '{{ sns_topic }}-2'
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Update SNS Topic'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ sns_topic_name: '{{ sns_topic }}-2'
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+ - 'output.trail.sns_topic_name == "{{ sns_topic }}-2"'
+
+ - name: 'Update SNS Topic (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ sns_topic_name: '{{ sns_topic }}-2'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - 'output.trail.sns_topic_name == "{{ sns_topic }}-2"'
+
+ - name: 'Get the trail info with SNS topic after update'
+ cloudtrail_info:
+ trail_names:
+ - '{{ trail_arn }}'
+ register: info
+
+ - name: 'Assert that the sns_topic is correctly set'
+ assert:
+ that:
+ - 'info.trail_list[0].sns_topic_name == "{{ sns_topic }}-2"'
+
+ #- name: 'Remove SNS Topic (CHECK MODE)'
+ # cloudtrail:
+ # state: present
+ # name: '{{ cloudtrail_name }}'
+ # sns_topic_name: ''
+ # register: output
+ # check_mode: yes
+ #- assert:
+ # that:
+ # - output is changed
+
+ #- name: 'Remove SNS Topic'
+ # cloudtrail:
+ # state: present
+ # name: '{{ cloudtrail_name }}'
+ # sns_topic_name: ''
+ # register: output
+ #- assert:
+ # that:
+ # - output is changed
+ # - output.trail.name == cloudtrail_name
+ # - output.trail.sns_topic_name is none
+
+ #- name: 'Remove SNS Topic (no change)'
+ # cloudtrail:
+ # state: present
+ # name: '{{ cloudtrail_name }}'
+ # sns_topic_name: ''
+ # register: output
+ #- assert:
+ # that:
+ # - output is not changed
+ # - output.trail.name == cloudtrail_name
+ # - output.trail.sns_topic_name is none
+
+
+ # ============================================================
+
+ - name: 'Set CloudWatch Log Group (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group.arn }}'
+ cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}'
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Set CloudWatch Log Group'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group.arn }}'
+ cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}'
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group.arn
+ - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn
+
+ - name: 'Set CloudWatch Log Group (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group.arn }}'
+ cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group.arn
+ - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn
+
+ - name: 'No-op update to trail'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group.arn
+ - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn
+
+ - name: 'Get the trail info with CloudWatch Log Group'
+ cloudtrail_info:
+ trail_names:
+ - '{{ trail_arn }}'
+ register: info
+
+ - name: 'Assert that the cloud watch log group is correctly set'
+ assert:
+ that:
+ - info.trail_list[0].cloud_watch_logs_log_group_arn == output_cloudwatch_log_group.arn
+ - info.trail_list[0].cloud_watch_logs_role_arn == output_cloudwatch_role.arn
+
+ - name: 'Update CloudWatch Log Group (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group2.arn }}'
+ cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}'
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+ - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group2.arn
+ - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn
+
+ - name: 'Update CloudWatch Log Group'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group2.arn }}'
+ cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}'
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group2.arn
+ - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn
+
+ - name: 'Update CloudWatch Log Group (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group2.arn }}'
+ cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group2.arn
+ - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn
+
+ - name: 'Get the trail info with CloudWatch Log Group after update'
+ cloudtrail_info:
+ trail_names:
+ - '{{ trail_arn }}'
+ register: info
+
+ - name: 'Assert that the cloud watch log group is correctly set after update'
+ assert:
+ that:
+ - info.trail_list[0].cloud_watch_logs_log_group_arn == output_cloudwatch_log_group2.arn
+ - info.trail_list[0].cloud_watch_logs_role_arn == output_cloudwatch_role.arn
+
+ #- name: 'Remove CloudWatch Log Group (CHECK MODE)'
+ # cloudtrail:
+ # state: present
+ # name: '{{ cloudtrail_name }}'
+ # cloudwatch_logs_log_group_arn: ''
+ # cloudwatch_logs_role_arn: ''
+ # register: output
+ # check_mode: yes
+ #- assert:
+ # that:
+ # - output is changed
+ # - output.trail.name == cloudtrail_name
+ # - output.trail.cloud_watch_logs_log_group_arn is none
+ # - output.trail.cloud_watch_logs_role_arn is none
+
+ #- name: 'Remove CloudWatch Log Group'
+ # cloudtrail:
+ # state: present
+ # name: '{{ cloudtrail_name }}'
+ # cloudwatch_logs_log_group_arn: ''
+ # cloudwatch_logs_role_arn: ''
+ # register: output
+ #- assert:
+ # that:
+ # - output is changed
+ # - output.trail.name == cloudtrail_name
+ # - output.trail.cloud_watch_logs_log_group_arn is none
+ # - output.trail.cloud_watch_logs_role_arn is none
+
+ #- name: 'Remove CloudWatch Log Group (no change)'
+ # cloudtrail:
+ # state: present
+ # name: '{{ cloudtrail_name }}'
+ # cloudwatch_logs_log_group_arn: ''
+ # cloudwatch_logs_role_arn: ''
+ # register: output
+ #- assert:
+ # that:
+ # - output is not changed
+ # - output.trail.name == cloudtrail_name
+ # - output.trail.cloud_watch_logs_log_group_arn is none
+ # - output.trail.cloud_watch_logs_role_arn is none
+
+ # ============================================================
+
+ - name: 'Update S3 bucket (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ s3_bucket_name: '{{ s3_bucket_name }}-2'
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Update S3 bucket'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ s3_bucket_name: '{{ s3_bucket_name }}-2'
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+ - 'output.trail.s3_bucket_name == "{{ s3_bucket_name }}-2"'
+
+ - name: 'Update S3 bucket (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ s3_bucket_name: '{{ s3_bucket_name }}-2'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - 'output.trail.s3_bucket_name == "{{ s3_bucket_name }}-2"'
+
+ - name: 'Get the trail info with S3 bucket name'
+ cloudtrail_info:
+ trail_names:
+ - '{{ trail_arn }}'
+ register: info
+
+ - name: 'Assert that the S3 Bucket name is correctly set'
+ assert:
+ that:
+ - 'info.trail_list[0].s3_bucket_name == "{{ s3_bucket_name }}-2"'
+
+ - name: 'Reset S3 bucket'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ register: output
+ - assert:
+ that:
+ - output.trail.name == cloudtrail_name
+ - output.trail.s3_bucket_name == s3_bucket_name
+
+ # ============================================================
+
+ - name: 'Disable logging (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ enable_logging: no
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Disable logging'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ enable_logging: no
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.is_logging == False
+
+ - name: 'Disable logging (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ enable_logging: no
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.is_logging == False
+
+ - name: 'Get the trail info to check the logging state'
+ cloudtrail_info:
+ trail_names:
+ - '{{ trail_arn }}'
+ register: info
+
+ - name: 'Assert that the logging state is correctly set'
+ assert:
+ that:
+ - info.trail_list[0].is_logging == False
+
+ # Ansible Documentation lists logging as explicitly defaulting to enabled
+
+ - name: 'Enable logging (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ enable_logging: yes
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Enable logging'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ enable_logging: yes
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.is_logging == True
+
+ - name: 'Enable logging (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ enable_logging: yes
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.is_logging == True
+
+ - name: 'Get the trail info to check the logging state'
+ cloudtrail_info:
+ trail_names:
+ - '{{ trail_arn }}'
+ register: info
+
+ - name: 'Assert that the logging state is correctly set'
+ assert:
+ that:
+ - info.trail_list[0].is_logging == True
+
+ # ============================================================
+
+ - name: 'Disable global logging (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ include_global_events: no
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Disable global logging'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ include_global_events: no
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.include_global_service_events == False
+
+ - name: 'Disable global logging (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ include_global_events: no
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.include_global_service_events == False
+
+ - name: 'Get the trail info to check the global logging state'
+ cloudtrail_info:
+ trail_names:
+ - '{{ trail_arn }}'
+ register: info
+
+ - name: 'Assert that the global logging state is correctly set'
+ assert:
+ that:
+ - info.trail_list[0].include_global_service_events == False
+
+ # Ansible Documentation lists Global-logging as explicitly defaulting to enabled
+
+ - name: 'Enable global logging (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ include_global_events: yes
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Enable global logging'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ include_global_events: yes
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.include_global_service_events == True
+
+ - name: 'Enable global logging (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ include_global_events: yes
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.include_global_service_events == True
+
+ - name: 'Get the trail info to check the global logging state (default)'
+ cloudtrail_info:
+ trail_names:
+ - '{{ trail_arn }}'
+ register: info
+
+ - name: 'Assert that the global logging state is correctly set (default)'
+ assert:
+ that:
+ - info.trail_list[0].include_global_service_events == True
+
+ # ============================================================
+
+ - name: 'Enable multi-region logging (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ is_multi_region_trail: yes
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Enable multi-region logging'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ is_multi_region_trail: yes
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.is_multi_region_trail == True
+
+ - name: 'Enable multi-region logging (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ is_multi_region_trail: yes
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.is_multi_region_trail == True
+
+ - name: 'Get the trail info to check the multi-region logging state (default)'
+ cloudtrail_info:
+ trail_names:
+ - '{{ trail_arn }}'
+ register: info
+
+ - name: 'Assert that the multi-region logging state is correctly set'
+ assert:
+ that:
+ - info.trail_list[0].is_multi_region_trail == True
+
+ # Ansible Documentation lists Multi-Region-logging as explicitly defaulting to disabled
+
+ - name: 'Disable multi-region logging (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ is_multi_region_trail: no
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Disable multi-region logging'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ is_multi_region_trail: no
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.is_multi_region_trail == False
+
+ - name: 'Disable multi-region logging (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ is_multi_region_trail: no
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.is_multi_region_trail == False
+
+ - name: 'Get the trail info to check the multi-region logging state (default)'
+ cloudtrail_info:
+ trail_names:
+ - '{{ trail_arn }}'
+ register: info
+
+ - name: 'Assert that the multi-region logging state is correctly set (default)'
+ assert:
+ that:
+ - info.trail_list[0].is_multi_region_trail == False
+
+ # ============================================================
+
+ - name: 'Enable logfile validation (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ enable_log_file_validation: yes
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Enable logfile validation'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ enable_log_file_validation: yes
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.log_file_validation_enabled == True
+
+ - name: 'Enable logfile validation (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ enable_log_file_validation: yes
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.log_file_validation_enabled == True
+
+ - name: 'No-op update to trail'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.log_file_validation_enabled == True
+
+ - name: 'Get the trail info to check the log file validation'
+ cloudtrail_info:
+ trail_names:
+ - '{{ trail_arn }}'
+ register: info
+
+ - name: 'Assert that the log file validation is correctly set'
+ assert:
+ that:
+ - info.trail_list[0].log_file_validation_enabled == True
+
+ - name: 'Disable logfile validation (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ enable_log_file_validation: no
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Disable logfile validation'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ enable_log_file_validation: no
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.log_file_validation_enabled == False
+
+ - name: 'Disable logfile validation (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ enable_log_file_validation: no
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.name == cloudtrail_name
+ - output.trail.log_file_validation_enabled == False
+
+ - name: 'Get the trail info to check the log file validation'
+ cloudtrail_info:
+ trail_names:
+ - '{{ trail_arn }}'
+ register: info
+
+ - name: 'Assert that the log file validation is disabled'
+ assert:
+ that:
+ - info.trail_list[0].log_file_validation_enabled == False
+
+ # ============================================================
+
+ - name: 'Enable logging encryption (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ kms_key_id: '{{ kms_key.key_arn }}'
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Enable logging encryption'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ kms_key_id: '{{ kms_key.key_arn }}'
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.kms_key_id == kms_key.key_arn
+
+ - name: 'Enable logging encryption (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ kms_key_id: '{{ kms_key.key_arn }}'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.kms_key_id == kms_key.key_arn
+
+ - name: 'Enable logging encryption (no change, check mode)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ kms_key_id: '{{ kms_key.key_arn }}'
+ check_mode: yes
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.kms_key_id == kms_key.key_arn
+
+ - name: 'No-op update to trail'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.kms_key_id == kms_key.key_arn
+
+ - name: 'Get the trail info to check the logging encryption'
+ cloudtrail_info:
+ trail_names:
+ - '{{ trail_arn }}'
+ register: info
+
+ - name: 'Assert that the logging encryption is correctly set'
+ assert:
+ that:
+ - info.trail_list[0].kms_key_id == kms_key.key_arn
+
+ - name: 'Update logging encryption key (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ kms_key_id: '{{ kms_key2.key_arn }}'
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Update logging encryption key'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ kms_key_id: '{{ kms_key2.key_arn }}'
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.kms_key_id == kms_key2.key_arn
+
+ - name: 'Update logging encryption key (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ kms_key_id: '{{ kms_key2.key_arn }}'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.kms_key_id == kms_key2.key_arn
+
+ - name: 'Get the trail info to check the logging key encryption'
+ cloudtrail_info:
+ trail_names:
+ - '{{ trail_arn }}'
+ register: info
+
+ - name: 'Assert that the logging key encryption is correctly set'
+ assert:
+ that:
+ - info.trail_list[0].kms_key_id == kms_key2.key_arn
+
+ - name: 'Update logging encryption to alias (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ kms_key_id: 'alias/{{ kms_alias }}'
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Update logging encryption to alias'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ kms_key_id: 'alias/{{ kms_alias }}'
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.trail.kms_key_id == kms_key.key_arn
+
+ - name: 'Update logging encryption to alias (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ kms_key_id: 'alias/{{ kms_alias }}'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.kms_key_id == kms_key.key_arn
+
+ - name: 'Update logging encryption to alias (CHECK MODE, no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ kms_key_id: '{{ kms_key.key_id }}' # Test when using key id
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is not changed
+ - output.trail.kms_key_id == kms_key.key_id
+
+ - debug:
+ msg: '{{ output }}'
+
+ - name: 'Get the trail info to check the logging key encryption after update'
+ cloudtrail_info:
+ trail_names:
+ - '{{ trail_arn }}'
+ register: info
+
+ - name: 'Assert that the logging key encryption is correctly updated'
+ assert:
+ that:
+ - kms_key.key_id in info.trail_list[0].kms_key_id
+
+ # Assume a role that is explicitly denied access to KMS
+
+ - name: 'Assume the role without KMS permissions'
+ community.aws.sts_assume_role:
+ role_arn: '{{ output_cloudwatch_no_kms_role.arn }}'
+ role_session_name: "cloudtrailNoKms"
+ region: '{{ aws_region }}'
+ register: noKms_assumed_role
+
+ - name: 'Enable logging encryption w/ alias (no change, no kms permissions, check mode)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ kms_key_id: 'alias/{{ kms_alias }}'
+ aws_access_key: "{{ noKms_assumed_role.sts_creds.access_key }}"
+ aws_secret_key: "{{ noKms_assumed_role.sts_creds.secret_key }}"
+ security_token: "{{ noKms_assumed_role.sts_creds.session_token }}"
+ check_mode: yes
+ register: output
+ - assert:
+ that:
+ - output is changed
+ # In check_mode, with no KMS permissions and kms_key_id given as an alias
+ # rather than a key ARN, the result is always reported as changed.
+
+ - name: 'Disable logging encryption (CHECK MODE)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ kms_key_id: ''
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Disable logging encryption'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ kms_key_id: ''
+ register: output
+ - assert:
+ that:
+ - output.trail.kms_key_id == ""
+ - output is changed
+
+ - name: 'Disable logging encryption (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ kms_key_id: ''
+ register: output
+ - assert:
+ that:
+ - output.trail.kms_key_id == ""
+ - output is not changed
+
+ # ============================================================
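+ # Deletion tests: the first task overrides the block-level module_defaults
+ # with an empty dict so that no bucket_name is supplied, the trail is then
+ # deleted for real, and a final run against the now-missing trail checks
+ # idempotency.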
+
+ - name: 'Delete a trail without providing bucket_name (CHECK MODE)'
+ module_defaults: { cloudtrail: {} }
+ cloudtrail:
+ state: absent
+ name: '{{ cloudtrail_name }}'
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Delete a trail while providing bucket_name (CHECK MODE)'
+ cloudtrail:
+ state: absent
+ name: '{{ cloudtrail_name }}'
+ register: output
+ check_mode: yes
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Delete a trail'
+ cloudtrail:
+ state: absent
+ name: '{{ cloudtrail_name }}'
+ register: output
+ - assert:
+ that:
+ - output is changed
+ - output.exists == False
+
+ - name: 'Delete a non-existent trail'
+ cloudtrail:
+ state: absent
+ name: '{{ cloudtrail_name }}'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.exists == False
+
+ # ============================================================
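+ # Recreate the trail with every supported option in a single task, rerun it
+ # to confirm idempotency, and cross-check the result against cloudtrail_info.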
+
+ - name: 'Test creation of a complex Trail (all features)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ s3_key_prefix: '{{ cloudtrail_prefix }}'
+ sns_topic_name: '{{ sns_topic }}'
+ cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group.arn }}'
+ cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}'
+ is_multi_region_trail: yes
+ include_global_events: yes
+ enable_log_file_validation: yes
+ kms_key_id: '{{ kms_key.key_arn }}'
+ register: output
+ - assert:
+ that:
+ - output is changed
+ #- output.exists == True
+ - output.trail.name == cloudtrail_name
+ - output.trail.home_region == aws_region
+ - output.trail.include_global_service_events == True
+ - output.trail.is_multi_region_trail == True
+ - output.trail.is_logging == True
+ - output.trail.log_file_validation_enabled == True
+ - output.trail.s3_bucket_name == s3_bucket_name
+ - output.trail.s3_key_prefix == cloudtrail_prefix
+ - output.trail.kms_key_id == kms_key.key_arn
+ - output.trail.sns_topic_arn == output_sns_topic.sns_arn
+ - output.trail.sns_topic_name == sns_topic
+ - output.trail.tags | length == 0
+
+ - name: 'Test creation of a complex Trail (no change)'
+ cloudtrail:
+ state: present
+ name: '{{ cloudtrail_name }}'
+ s3_key_prefix: '{{ cloudtrail_prefix }}'
+ sns_topic_name: '{{ sns_topic }}'
+ cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group.arn }}'
+ cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}'
+ is_multi_region_trail: yes
+ include_global_events: yes
+ enable_log_file_validation: yes
+ kms_key_id: '{{ kms_key.key_arn }}'
+ register: output
+ - assert:
+ that:
+ - output is not changed
+ - output.exists == True
+ - output.trail.name == cloudtrail_name
+ - output.trail.home_region == aws_region
+ - output.trail.include_global_service_events == True
+ - output.trail.is_multi_region_trail == True
+ - output.trail.is_logging == True
+ - output.trail.log_file_validation_enabled == True
+ - output.trail.s3_bucket_name == s3_bucket_name
+ - output.trail.s3_key_prefix == cloudtrail_prefix
+ - output.trail.kms_key_id == kms_key.key_arn
+ - output.trail.sns_topic_arn == output_sns_topic.sns_arn
+ - output.trail.sns_topic_name == sns_topic
+ - output.trail.tags | length == 0
+
+ - name: 'Get the trail info of the created trail'
+ cloudtrail_info:
+ trail_names:
+ - '{{ trail_arn }}'
+ register: info
+
+ - name: 'Assert that the logging key encryption is correctly updated'
+ assert:
+ that:
+ - info.trail_list[0].name == cloudtrail_name
+ - info.trail_list[0].home_region == aws_region
+ - info.trail_list[0].include_global_service_events == True
+ - info.trail_list[0].is_multi_region_trail == True
+ - info.trail_list[0].is_logging == True
+ - info.trail_list[0].log_file_validation_enabled == True
+ - info.trail_list[0].s3_bucket_name == s3_bucket_name
+ - info.trail_list[0].s3_key_prefix == cloudtrail_prefix
+ - info.trail_list[0].kms_key_id == kms_key.key_arn
+ - info.trail_list[0].sns_topic_arn == output_sns_topic.sns_arn
+ - info.trail_list[0].sns_topic_name == sns_topic
+ - info.trail_list[0].tags | length == 0
+
+ always:
+ # ============================================================
+ # Cleanup
+ # ============================================================
+ - name: 'Delete test trail'
+ cloudtrail:
+ state: absent
+ name: '{{ cloudtrail_name }}'
+ ignore_errors: yes
+ - name: 'Delete S3 bucket'
+ s3_bucket:
+ state: absent
+ name: '{{ s3_bucket_name }}'
+ force: yes
+ ignore_errors: yes
+ - name: 'Delete second S3 bucket'
+ s3_bucket:
+ state: absent
+ name: '{{ s3_bucket_name }}-2'
+ force: yes
+ ignore_errors: yes
+ - name: 'Delete KMS Key'
+ aws_kms:
+ state: absent
+ alias: '{{ kms_alias }}'
+ ignore_errors: yes
+ - name: 'Delete second KMS Key'
+ aws_kms:
+ state: absent
+ alias: '{{ kms_alias }}-2'
+ ignore_errors: yes
+ - name: 'Delete SNS Topic'
+ sns_topic:
+ state: absent
+ name: '{{ sns_topic }}'
+ ignore_errors: yes
+ - name: 'Delete second SNS Topic'
+ sns_topic:
+ state: absent
+ name: '{{ sns_topic }}-2'
+ ignore_errors: yes
+ - name: 'Delete CloudWatch Log Group'
+ cloudwatchlogs_log_group:
+ state: absent
+ log_group_name: '{{ cloudwatch_log_group }}'
+ ignore_errors: yes
+ - name: 'Delete second CloudWatch Log Group'
+ cloudwatchlogs_log_group:
+ state: absent
+ log_group_name: '{{ cloudwatch_log_group }}-2'
+ ignore_errors: yes
+ - name: 'Remove inline policy from CloudWatch Role'
+ iam_policy:
+ state: absent
+ iam_type: role
+ iam_name: '{{ cloudwatch_role }}'
+ policy_name: 'CloudWatch'
+ ignore_errors: yes
+ - name: 'Delete CloudWatch IAM Role'
+ iam_role:
+ state: absent
+ name: '{{ cloudwatch_role }}'
+ ignore_errors: yes
+ - name: 'Remove inline policy from CloudWatch No KMS Role'
+ iam_policy:
+ state: absent
+ iam_type: role
+ iam_name: '{{ cloudwatch_no_kms_role }}'
+ policy_name: 'CloudWatchNokms'
+ ignore_errors: yes
+ - name: 'Delete CloudWatch No KMS IAM Role'
+ iam_role:
+ state: absent
+ name: '{{ cloudwatch_no_kms_role }}'
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/tasks/tagging.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/tasks/tagging.yml
new file mode 100644
index 000000000..df537c67e
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/tasks/tagging.yml
@@ -0,0 +1,252 @@
+- name: Tests relating to tagging cloudtrails
+ vars:
+ first_tags:
+ 'Key with Spaces': Value with spaces
+ CamelCaseKey: CamelCaseValue
+ pascalCaseKey: pascalCaseValue
+ snake_case_key: snake_case_value
+ second_tags:
+ 'New Key with Spaces': Value with spaces
+ NewCamelCaseKey: CamelCaseValue
+ newPascalCaseKey: pascalCaseValue
+ new_snake_case_key: snake_case_value
+ third_tags:
+ 'Key with Spaces': Value with spaces
+ CamelCaseKey: CamelCaseValue
+ pascalCaseKey: pascalCaseValue
+ snake_case_key: snake_case_value
+ 'New Key with Spaces': Updated Value with spaces
+ final_tags:
+ 'Key with Spaces': Value with spaces
+ CamelCaseKey: CamelCaseValue
+ pascalCaseKey: pascalCaseValue
+ snake_case_key: snake_case_value
+ 'New Key with Spaces': Updated Value with spaces
+ NewCamelCaseKey: CamelCaseValue
+ newPascalCaseKey: pascalCaseValue
+ new_snake_case_key: snake_case_value
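+ # Tag progression used below: first_tags is applied with purge_tags enabled,
+ # second_tags then replaces it entirely, third_tags is layered on top without
+ # purging, and final_tags is the expected merge after that non-purging update.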
+ # Mandatory settings
+ module_defaults:
+ amazon.aws.cloudtrail:
+ name: '{{ cloudtrail_name }}'
+ s3_bucket_name: '{{ s3_bucket_name }}'
+ state: present
+# community.aws.cloudtrail_info:
+# name: '{{ cloudtrail_name }}'
+ block:
+
+ ###
+
+ - name: test adding tags to cloudtrail (check mode)
+ cloudtrail:
+ tags: '{{ first_tags }}'
+ purge_tags: True
+ register: update_result
+ check_mode: yes
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is changed
+
+ - name: test adding tags to cloudtrail
+ cloudtrail:
+ tags: '{{ first_tags }}'
+ purge_tags: True
+ register: update_result
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is changed
+ - update_result.trail.tags == first_tags
+
+ - name: test adding tags to cloudtrail - idempotency (check mode)
+ cloudtrail:
+ tags: '{{ first_tags }}'
+ purge_tags: True
+ register: update_result
+ check_mode: yes
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is not changed
+
+ - name: test adding tags to cloudtrail - idempotency
+ cloudtrail:
+ tags: '{{ first_tags }}'
+ purge_tags: True
+ register: update_result
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is not changed
+ - update_result.trail.tags == first_tags
+
+ ###
+
+ - name: test updating tags with purge on cloudtrail (check mode)
+ cloudtrail:
+ tags: '{{ second_tags }}'
+ purge_tags: True
+ register: update_result
+ check_mode: yes
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is changed
+
+ - name: test updating tags with purge on cloudtrail
+ cloudtrail:
+ tags: '{{ second_tags }}'
+ purge_tags: True
+ register: update_result
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is changed
+ - update_result.trail.tags == second_tags
+
+ - name: test updating tags with purge on cloudtrail - idempotency (check mode)
+ cloudtrail:
+ tags: '{{ second_tags }}'
+ purge_tags: True
+ register: update_result
+ check_mode: yes
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is not changed
+
+ - name: test updating tags with purge on cloudtrail - idempotency
+ cloudtrail:
+ tags: '{{ second_tags }}'
+ purge_tags: True
+ register: update_result
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is not changed
+ - update_result.trail.tags == second_tags
+
+ ###
+
+ - name: test updating tags without purge on cloudtrail (check mode)
+ cloudtrail:
+ tags: '{{ third_tags }}'
+ purge_tags: False
+ register: update_result
+ check_mode: yes
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is changed
+
+ - name: test updating tags without purge on cloudtrail
+ cloudtrail:
+ tags: '{{ third_tags }}'
+ purge_tags: False
+ register: update_result
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is changed
+ - update_result.trail.tags == final_tags
+
+ - name: test updating tags without purge on cloudtrail - idempotency (check mode)
+ cloudtrail:
+ tags: '{{ third_tags }}'
+ purge_tags: False
+ register: update_result
+ check_mode: yes
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is not changed
+
+ - name: test updating tags without purge on cloudtrail - idempotency
+ cloudtrail:
+ tags: '{{ third_tags }}'
+ purge_tags: False
+ register: update_result
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is not changed
+ - update_result.trail.tags == final_tags
+
+# ###
+#
+# - name: test that cloudtrail_info returns the tags
+# cloudtrail_info:
+# register: tag_info
+# - name: assert tags present
+# assert:
+# that:
+# - tag_info.trail.tags == final_tags
+#
+# ###
+
+ - name: test no tags param cloudtrail (check mode)
+ cloudtrail: {}
+ register: update_result
+ check_mode: yes
+ - name: assert no change
+ assert:
+ that:
+ - update_result is not changed
+ - update_result.trail.tags == final_tags
+
+
+ - name: test no tags param cloudtrail
+ cloudtrail: {}
+ register: update_result
+ - name: assert no change
+ assert:
+ that:
+ - update_result is not changed
+ - update_result.trail.tags == final_tags
+
+ ###
+
+ - name: test removing tags from cloudtrail (check mode)
+ cloudtrail:
+ tags: {}
+ purge_tags: True
+ register: update_result
+ check_mode: yes
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is changed
+
+ - name: test removing tags from cloudtrail
+ cloudtrail:
+ tags: {}
+ purge_tags: True
+ register: update_result
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is changed
+ - update_result.trail.tags == {}
+
+ - name: test removing tags from cloudtrail - idempotency (check mode)
+ cloudtrail:
+ tags: {}
+ purge_tags: True
+ register: update_result
+ check_mode: yes
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is not changed
+
+ - name: test removing tags from cloudtrail - idempotency
+ cloudtrail:
+ tags: {}
+ purge_tags: True
+ register: update_result
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is not changed
+ - update_result.trail.tags == {}
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/cloudtrail-no-kms-assume-policy.j2 b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/cloudtrail-no-kms-assume-policy.j2
new file mode 100644
index 000000000..f3bfd14ec
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/cloudtrail-no-kms-assume-policy.j2
@@ -0,0 +1,11 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "AssumeRole",
+ "Effect": "Allow",
+ "Principal": { "AWS": "arn:aws:iam::{{ aws_caller_info.account }}:root" },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/cloudtrail-no-kms-policy.j2 b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/cloudtrail-no-kms-policy.j2
new file mode 100644
index 000000000..d85b650b7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/cloudtrail-no-kms-policy.j2
@@ -0,0 +1,11 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "kmsDeny",
+ "Effect": "Deny",
+ "Action": [ "kms:*" ],
+ "Resource": [ "*" ]
+ }
+ ]
+}
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/cloudwatch-assume-policy.j2 b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/cloudwatch-assume-policy.j2
new file mode 100644
index 000000000..6d7fb7b88
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/cloudwatch-assume-policy.j2
@@ -0,0 +1,13 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "AssumeFromCloudTrails",
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "cloudtrail.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/cloudwatch-policy.j2 b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/cloudwatch-policy.j2
new file mode 100644
index 000000000..8f354a702
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/cloudwatch-policy.j2
@@ -0,0 +1,17 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "CloudTrail2CloudWatch",
+ "Effect": "Allow",
+ "Action": [
+ "logs:CreateLogStream",
+ "logs:PutLogEvents"
+ ],
+ "Resource": [
+ "arn:aws:logs:{{ aws_region }}:{{ aws_caller_info.account }}:log-group:{{ cloudwatch_log_group }}:log-stream:*",
+ "arn:aws:logs:{{ aws_region }}:{{ aws_caller_info.account }}:log-group:{{ cloudwatch_log_group }}-2:log-stream:*"
+ ]
+ }
+ ]
+}
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/kms-policy.j2 b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/kms-policy.j2
new file mode 100644
index 000000000..35730f1d2
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/kms-policy.j2
@@ -0,0 +1,34 @@
+{
+ "Version": "2012-10-17",
+ "Id": "CloudTrailPolicy",
+ "Statement": [
+ {
+ "Sid": "EncryptLogs",
+ "Effect": "Allow",
+ "Principal": { "Service": "cloudtrail.amazonaws.com" },
+ "Action": "kms:GenerateDataKey*",
+ "Resource": "*",
+ "Condition": {
+ "StringLike": {
+ "kms:EncryptionContext:aws:cloudtrail:arn": [
+ "arn:aws:cloudtrail:*:{{ aws_caller_info.account }}:trail/{{ resource_prefix }}*"
+ ]
+ }
+ }
+ },
+ {
+ "Sid": "DescribeKey",
+ "Effect": "Allow",
+ "Principal": { "Service": "cloudtrail.amazonaws.com" },
+ "Action": "kms:DescribeKey",
+ "Resource": "*"
+ },
+ {
+ "Sid": "AnsibleTestManage",
+ "Effect": "Allow",
+ "Principal": { "AWS": "{{ aws_caller_info.arn }}" },
+ "Action": "*",
+ "Resource": "*"
+ }
+ ]
+}
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/s3-policy.j2 b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/s3-policy.j2
new file mode 100644
index 000000000..78c056e30
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/s3-policy.j2
@@ -0,0 +1,34 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "CloudTrailCheckAcl",
+ "Effect": "Allow",
+ "Principal": { "Service": "cloudtrail.amazonaws.com" },
+ "Action": "s3:GetBucketAcl",
+ "Resource": "arn:aws:s3:::{{ bucket_name }}",
+ },
+ {
+ "Sid": "CloudTrailWriteLogs",
+ "Effect": "Allow",
+ "Principal": { "Service": "cloudtrail.amazonaws.com" },
+ "Action": "s3:PutObject",
+ "Resource": [
+ "arn:aws:s3:::{{ bucket_name }}/AWSLogs/{{ aws_caller_info.account }}/*",
+ "arn:aws:s3:::{{ bucket_name }}/{{ cloudtrail_prefix }}*/AWSLogs/{{ aws_caller_info.account }}/*"
+ ],
+ "Condition": {
+ "StringEquals": {
+ "s3:x-amz-acl": "bucket-owner-full-control"
+ }
+ }
+ },
+ {
+ "Sid": "AnsibleTestManage",
+ "Effect": "Allow",
+ "Principal": { "AWS": "{{ aws_caller_info.arn }}" },
+ "Action": "*",
+ "Resource": "arn:aws:s3:::{{ bucket_name }}"
+ }
+ ]
+}
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/sns-policy.j2 b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/sns-policy.j2
new file mode 100644
index 000000000..3c267b800
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/templates/sns-policy.j2
@@ -0,0 +1,34 @@
+{
+ "Version": "2008-10-17",
+ "Id": "AnsibleSNSTesting",
+ "Statement": [
+ {
+ "Sid": "CloudTrailSNSPolicy",
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "cloudtrail.amazonaws.com"
+ },
+ "Action": "sns:Publish",
+ "Resource": "arn:aws:sns:{{ aws_region }}:{{ aws_caller_info.account }}:{{ sns_topic_name }}"
+ },
+ {
+ "Sid": "AnsibleTestManage",
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": "{{ aws_caller_info.arn }}"
+ },
+ "Action": [
+ "sns:Subscribe",
+ "sns:ListSubscriptionsByTopic",
+ "sns:DeleteTopic",
+ "sns:GetTopicAttributes",
+ "sns:Publish",
+ "sns:RemovePermission",
+ "sns:AddPermission",
+ "sns:Receive",
+ "sns:SetTopicAttributes"
+ ],
+ "Resource": "arn:aws:sns:{{ aws_region }}:{{ aws_caller_info.account }}:{{ sns_topic_name }}"
+ }
+ ]
+}
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/aliases b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/aliases
new file mode 100644
index 000000000..4ef4b2067
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/aliases
@@ -0,0 +1 @@
+cloud/aws
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/defaults/main.yml
new file mode 100644
index 000000000..f65410b95
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/defaults/main.yml
@@ -0,0 +1,4 @@
+# defaults file for the cloudwatch_metric_alarm integration tests
+ec2_instance_name: '{{ resource_prefix }}-node'
+ec2_instance_owner: integration-run-{{ resource_prefix }}
+alarm_prefix: ansible-test
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/meta/main.yml
new file mode 100644
index 000000000..1d40168d0
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+- setup_ec2_facts
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/env_cleanup.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/env_cleanup.yml
new file mode 100644
index 000000000..104f57984
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/env_cleanup.yml
@@ -0,0 +1,94 @@
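+# Teardown of the shared VPC test environment. Each task retries and ignores
+# errors because dependent resources (instances, ENIs, the internet gateway)
+# can take time to release before the VPC itself can be removed.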
+- name: remove any instances in the test VPC
+ ec2_instance:
+ filters:
+ vpc_id: '{{ testing_vpc.vpc.id }}'
+ state: absent
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+- name: describe ENIs left in the test VPC
+ ec2_eni_info:
+ filters:
+ vpc-id: '{{ testing_vpc.vpc.id }}'
+ register: enis
+
+- name: delete all ENIs
+ ec2_eni:
+ eni_id: '{{ item.id }}'
+ state: absent
+ register: removed
+ until: removed is not failed
+ with_items: '{{ enis.network_interfaces }}'
+ ignore_errors: yes
+ retries: 10
+
+- name: remove the security group
+ ec2_group:
+ name: '{{ resource_prefix }}-sg'
+ description: a security group for ansible tests
+ vpc_id: '{{ testing_vpc.vpc.id }}'
+ state: absent
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+- name: remove routing rules
+ ec2_vpc_route_table:
+ state: absent
+ vpc_id: '{{ testing_vpc.vpc.id }}'
+ tags:
+ created: '{{ resource_prefix }}-route'
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: '{{ igw.gateway_id }}'
+ subnets:
+ - '{{ testing_subnet_a.subnet.id }}'
+ - '{{ testing_subnet_b.subnet.id }}'
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+- name: remove internet gateway
+ ec2_vpc_igw:
+ vpc_id: '{{ testing_vpc.vpc.id }}'
+ state: absent
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+- name: remove subnet A
+ ec2_vpc_subnet:
+ state: absent
+ vpc_id: '{{ testing_vpc.vpc.id }}'
+ cidr: 10.22.32.0/24
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+- name: remove subnet B
+ ec2_vpc_subnet:
+ state: absent
+ vpc_id: '{{ testing_vpc.vpc.id }}'
+ cidr: 10.22.33.0/24
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+- name: remove the VPC
+ ec2_vpc_net:
+ name: '{{ resource_prefix }}-vpc'
+ cidr_block: 10.22.32.0/23
+ state: absent
+ tags:
+ Name: Ansible Testing VPC
+ tenancy: default
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/env_setup.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/env_setup.yml
new file mode 100644
index 000000000..2153d876a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/env_setup.yml
@@ -0,0 +1,62 @@
+- name: Create VPC for use in testing
+ ec2_vpc_net:
+ name: '{{ resource_prefix }}-vpc'
+ cidr_block: 10.22.32.0/23
+ tags:
+ Name: Ansible ec2_instance Testing VPC
+ tenancy: default
+ register: testing_vpc
+
+- name: Create internet gateway for use in testing
+ ec2_vpc_igw:
+ vpc_id: '{{ testing_vpc.vpc.id }}'
+ state: present
+ register: igw
+
+- name: Create default subnet in zone A
+ ec2_vpc_subnet:
+ state: present
+ vpc_id: '{{ testing_vpc.vpc.id }}'
+ cidr: 10.22.32.0/24
+ az: '{{ aws_region }}a'
+ resource_tags:
+ Name: '{{ resource_prefix }}-subnet-a'
+ register: testing_subnet_a
+
+- name: Create secondary subnet in zone B
+ ec2_vpc_subnet:
+ state: present
+ vpc_id: '{{ testing_vpc.vpc.id }}'
+ cidr: 10.22.33.0/24
+ az: '{{ aws_region }}b'
+ resource_tags:
+ Name: '{{ resource_prefix }}-subnet-b'
+ register: testing_subnet_b
+
+- name: create routing rules
+ ec2_vpc_route_table:
+ vpc_id: '{{ testing_vpc.vpc.id }}'
+ tags:
+ created: '{{ resource_prefix }}-route'
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: '{{ igw.gateway_id }}'
+ subnets:
+ - '{{ testing_subnet_a.subnet.id }}'
+ - '{{ testing_subnet_b.subnet.id }}'
+
+- name: create a security group in the test VPC
+ ec2_group:
+ name: '{{ resource_prefix }}-sg'
+ description: a security group for ansible tests
+ vpc_id: '{{ testing_vpc.vpc.id }}'
+ rules:
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ register: sg
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/main.yml
new file mode 100644
index 000000000..d3f522c97
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/main.yml
@@ -0,0 +1,518 @@
+- name: run ec2_metric_alarm tests
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+ - set_fact:
+ alarm_full_name: '{{ alarm_prefix }}-{{ resource_prefix }}-cpu-low'
+
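+ # The suite creates a test instance to attach alarms to, then walks through
+ # check-mode and real creation, idempotent re-runs, an in-place update,
+ # deletion, an alarm without a unit, an alarm defined via the metrics
+ # (metric math) syntax, and a final case confirming that metrics is mutually
+ # exclusive with the single-metric parameters.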
+ - name: set up environment for testing.
+ include_tasks: env_setup.yml
+
+ - name: get info on alarms
+ amazon.aws.cloudwatch_metric_alarm_info:
+ alarm_names:
+ - "{{ alarm_full_name }}"
+ register: alarm_info_query
+
+ - name: Make instance in a default subnet of the VPC
+ ec2_instance:
+ name: '{{ resource_prefix }}-test-default-vpc'
+ image_id: '{{ ec2_ami_id }}'
+ tags:
+ TestId: '{{ resource_prefix }}'
+ security_groups: '{{ sg.group_id }}'
+ vpc_subnet_id: '{{ testing_subnet_a.subnet.id }}'
+ instance_type: t2.micro
+ wait: true
+ register: ec2_instance_results
+
+ - name: ensure alarm doesn't exist for a clean test
+ ec2_metric_alarm:
+ state: absent
+ name: '{{ alarm_full_name }}'
+
+ - name: create ec2 metric alarm on ec2 instance (check mode)
+ ec2_metric_alarm:
+ dimensions:
+ InstanceId: '{{ ec2_instance_results.instances[0].instance_id }}'
+ state: present
+ name: '{{ alarm_full_name }}'
+ metric: CPUUtilization
+ namespace: AWS/EC2
+ treat_missing_data: missing
+ statistic: Average
+ comparison: LessThanOrEqualToThreshold
+ threshold: 5.0
+ period: 300
+ evaluation_periods: 3
+ unit: Percent
+ description: This will alarm when an instance's cpu usage average is lower than
+ 5% for 15 minutes
+ check_mode: true
+ register: ec2_instance_metric_alarm_check
+
+ - name: get info on alarms
+ amazon.aws.cloudwatch_metric_alarm_info:
+ alarm_names:
+ - "{{ alarm_full_name }}"
+ register: alarm_info_check
+
+ - name: "verify that an alarm was not created in check mode"
+ assert:
+ that:
+ - 'ec2_instance_metric_alarm_check.changed'
+ - 'not ec2_instance_metric_alarm_check.alarm_arn'
+ - 'alarm_info_check.metric_alarms | length == 0'
+
+ - name: create ec2 metric alarm on ec2 instance
+ ec2_metric_alarm:
+ dimensions:
+ InstanceId: '{{ ec2_instance_results.instances[0].instance_id }}'
+ state: present
+ name: '{{ alarm_full_name }}'
+ metric: CPUUtilization
+ namespace: AWS/EC2
+ treat_missing_data: missing
+ statistic: Average
+ comparison: LessThanOrEqualToThreshold
+ threshold: 5.0
+ period: 300
+ evaluation_periods: 3
+ unit: Percent
+ description: This will alarm when an instance's cpu usage average is lower than
+ 5% for 15 minutes
+ register: ec2_instance_metric_alarm
+
+ - name: get info on alarms
+ amazon.aws.cloudwatch_metric_alarm_info:
+ alarm_names:
+ - "{{ alarm_full_name }}"
+ register: alarm_info
+
+ - name: "verify that an alarm was created"
+ assert:
+ that:
+ - 'ec2_instance_metric_alarm.changed'
+ - 'ec2_instance_metric_alarm.alarm_arn'
+ - 'ec2_instance_metric_alarm.statistic == alarm_info.metric_alarms[0].statistic'
+ - 'ec2_instance_metric_alarm.name == alarm_info.metric_alarms[0].alarm_name'
+ - 'ec2_instance_metric_alarm.metric == alarm_info.metric_alarms[0].metric_name'
+ - 'ec2_instance_metric_alarm.namespace == alarm_info.metric_alarms[0].namespace'
+ - 'ec2_instance_metric_alarm.comparison == alarm_info.metric_alarms[0].comparison_operator'
+ - 'ec2_instance_metric_alarm.threshold == alarm_info.metric_alarms[0].threshold'
+ - 'ec2_instance_metric_alarm.period == alarm_info.metric_alarms[0].period'
+ - 'ec2_instance_metric_alarm.unit == alarm_info.metric_alarms[0].unit'
+ - 'ec2_instance_metric_alarm.evaluation_periods == alarm_info.metric_alarms[0].evaluation_periods'
+ - 'ec2_instance_metric_alarm.description == alarm_info.metric_alarms[0].alarm_description'
+ - 'ec2_instance_metric_alarm.treat_missing_data == alarm_info.metric_alarms[0].treat_missing_data'
+
+ - name: create ec2 metric alarm on ec2 instance (idempotent) (check mode)
+ ec2_metric_alarm:
+ dimensions:
+ InstanceId: '{{ ec2_instance_results.instances[0].instance_id }}'
+ state: present
+ name: '{{ alarm_full_name }}'
+ metric: CPUUtilization
+ namespace: AWS/EC2
+ treat_missing_data: missing
+ statistic: Average
+ comparison: LessThanOrEqualToThreshold
+ threshold: 5.0
+ period: 300
+ evaluation_periods: 3
+ unit: Percent
+ description: This will alarm when an instance's cpu usage average is lower than
+ 5% for 15 minutes
+ check_mode: true
+ register: ec2_instance_metric_alarm_idempotent_check
+
+ - name: get info on alarms
+ amazon.aws.cloudwatch_metric_alarm_info:
+ alarm_names:
+ - "{{ alarm_full_name }}"
+ register: alarm_info_idempotent_check
+
+ - name: "Verify alarm does not register as changed after update in check mode"
+ assert:
+ that:
+ - not ec2_instance_metric_alarm_idempotent_check.changed
+
+ - name: "Verify alarm did not change after updating in check mode"
+ assert:
+ that:
+ - "alarm_info.metric_alarms[0]['{{item}}'] == alarm_info_idempotent_check.metric_alarms[0]['{{ item }}']"
+ with_items:
+ - alarm_arn
+ - statistic
+ - alarm_name
+ - metric_name
+ - namespace
+ - comparison_operator
+ - threshold
+ - period
+ - unit
+ - evaluation_periods
+ - alarm_description
+ - treat_missing_data
+
+ - name: create ec2 metric alarm on ec2 instance (idempotent)
+ ec2_metric_alarm:
+ dimensions:
+ InstanceId: '{{ ec2_instance_results.instances[0].instance_id }}'
+ state: present
+ name: '{{ alarm_full_name }}'
+ metric: CPUUtilization
+ namespace: AWS/EC2
+ treat_missing_data: missing
+ statistic: Average
+ comparison: LessThanOrEqualToThreshold
+ threshold: 5.0
+ period: 300
+ evaluation_periods: 3
+ unit: Percent
+ description: This will alarm when an instance's cpu usage average is lower than
+ 5% for 15 minutes
+ register: ec2_instance_metric_alarm_idempotent
+
+ - name: get info on alarms
+ amazon.aws.cloudwatch_metric_alarm_info:
+ alarm_names:
+ - "{{ alarm_full_name }}"
+ register: alarm_info_idempotent
+
+ - name: "Verify alarm does not register as changed after idempotent update"
+ assert:
+ that:
+ - not ec2_instance_metric_alarm_idempotent.changed
+
+ - name: "Verify alarm did not change after idempotent update"
+ assert:
+ that:
+ - "alarm_info.metric_alarms[0]['{{ item }}'] == alarm_info_idempotent.metric_alarms[0]['{{ item }}']"
+ with_items:
+ - alarm_arn
+ - statistic
+ - alarm_name
+ - metric_name
+ - namespace
+ - comparison_operator
+ - threshold
+ - period
+ - unit
+ - evaluation_periods
+ - alarm_description
+ - treat_missing_data
+
+ - name: update alarm (check mode)
+ ec2_metric_alarm:
+ dimensions:
+ InstanceId: '{{ ec2_instance_results.instances[0].instance_id }}'
+ state: present
+ name: '{{ alarm_full_name }}'
+ metric: CPUUtilization
+ namespace: AWS/EC2
+ statistic: Average
+ comparison: LessThanOrEqualToThreshold
+ threshold: 5.0
+ period: 60
+ evaluation_periods: 3
+ unit: Percent
+ description: This will alarm when an instance's cpu usage average is lower than
+ 5% for 3 minutes
+ check_mode: true
+ register: ec2_instance_metric_alarm_update_check
+
+ - name: verify that alarm registers as updated in check mode
+ assert:
+ that:
+ - ec2_instance_metric_alarm_update_check.changed
+
+ - name: verify that properties were not changed in check mode
+ assert:
+ that:
+ - ec2_instance_metric_alarm_update_check.changed
+ - 'ec2_instance_metric_alarm_update_check.period == alarm_info.metric_alarms[0].period' # Period of actual alarm should not change
+ - 'ec2_instance_metric_alarm_update_check.alarm_arn == ec2_instance_metric_alarm.alarm_arn'
+ - 'ec2_instance_metric_alarm_update_check.statistic == alarm_info.metric_alarms[0].statistic'
+ - 'ec2_instance_metric_alarm_update_check.name == alarm_info.metric_alarms[0].alarm_name'
+ - 'ec2_instance_metric_alarm_update_check.metric == alarm_info.metric_alarms[0].metric_name'
+ - 'ec2_instance_metric_alarm_update_check.namespace == alarm_info.metric_alarms[0].namespace'
+ - 'ec2_instance_metric_alarm_update_check.statistic == alarm_info.metric_alarms[0].statistic'
+ - 'ec2_instance_metric_alarm_update_check.comparison == alarm_info.metric_alarms[0].comparison_operator'
+ - 'ec2_instance_metric_alarm_update_check.threshold == alarm_info.metric_alarms[0].threshold'
+ - 'ec2_instance_metric_alarm_update_check.unit == alarm_info.metric_alarms[0].unit'
+ - 'ec2_instance_metric_alarm_update_check.evaluation_periods == alarm_info.metric_alarms[0].evaluation_periods'
+ - 'ec2_instance_metric_alarm_update_check.treat_missing_data == alarm_info.metric_alarms[0].treat_missing_data'
+
+ - name: update alarm
+ ec2_metric_alarm:
+ dimensions:
+ InstanceId: '{{ ec2_instance_results.instances[0].instance_id }}'
+ state: present
+ name: '{{ alarm_full_name }}'
+ metric: CPUUtilization
+ namespace: AWS/EC2
+ statistic: Average
+ comparison: LessThanOrEqualToThreshold
+ threshold: 5.0
+ period: 60
+ evaluation_periods: 3
+ unit: Percent
+ description: This will alarm when an instance's cpu usage average is lower than
+ 5% for 3 minutes
+ register: ec2_instance_metric_alarm_update
+
+ - name: verify that alarm registers as updated
+ assert:
+ that:
+ - ec2_instance_metric_alarm_update.changed
+
+ - name: verify that properties were changed
+ assert:
+ that:
+ - ec2_instance_metric_alarm_update.changed
+ - ec2_instance_metric_alarm_update.period == 60 # Period should be 60, not matching old value
+ - ec2_instance_metric_alarm_update.alarm_arn == ec2_instance_metric_alarm.alarm_arn
+ - 'ec2_instance_metric_alarm_update.statistic == alarm_info.metric_alarms[0].statistic'
+ - 'ec2_instance_metric_alarm_update.name == alarm_info.metric_alarms[0].alarm_name'
+ - 'ec2_instance_metric_alarm_update.metric == alarm_info.metric_alarms[0].metric_name'
+ - 'ec2_instance_metric_alarm_update.namespace == alarm_info.metric_alarms[0].namespace'
+ - 'ec2_instance_metric_alarm_update.statistic == alarm_info.metric_alarms[0].statistic'
+ - 'ec2_instance_metric_alarm_update.comparison == alarm_info.metric_alarms[0].comparison_operator'
+ - 'ec2_instance_metric_alarm_update.threshold == alarm_info.metric_alarms[0].threshold'
+ - 'ec2_instance_metric_alarm_update.unit == alarm_info.metric_alarms[0].unit'
+ - 'ec2_instance_metric_alarm_update.evaluation_periods == alarm_info.metric_alarms[0].evaluation_periods'
+ - 'ec2_instance_metric_alarm_update.treat_missing_data == alarm_info.metric_alarms[0].treat_missing_data'
+
+ - name: try to remove the alarm (check mode)
+ ec2_metric_alarm:
+ state: absent
+ name: '{{ alarm_full_name }}'
+ check_mode: true
+ register: ec2_instance_metric_alarm_deletion_check
+
+ - name: Verify that the alarm reports deleted/changed
+ assert:
+ that:
+ - ec2_instance_metric_alarm_deletion_check.changed
+
+ - name: get info on alarms
+ amazon.aws.cloudwatch_metric_alarm_info:
+ alarm_names:
+ - "{{ alarm_full_name }}"
+ register: alarm_info_query_check
+
+ - name: Verify that the alarm was not deleted in check mode
+ assert:
+ that:
+ - 'alarm_info_query_check.metric_alarms | length > 0'
+
+ - name: try to remove the alarm
+ ec2_metric_alarm:
+ state: absent
+ name: '{{ alarm_full_name }}'
+ register: ec2_instance_metric_alarm_deletion
+
+ - name: Verify that the alarm reports deleted/changed
+ assert:
+ that:
+ - ec2_instance_metric_alarm_deletion.changed
+
+ - name: get info on alarms
+ amazon.aws.cloudwatch_metric_alarm_info:
+ alarm_names:
+ - "{{ alarm_full_name }}"
+ register: alarm_info
+
+ - name: Verify that the alarm was deleted
+ assert:
+ that:
+ - 'alarm_info.metric_alarms | length == 0'
+
+ - name: create ec2 metric alarm with no unit on ec2 instance
+ ec2_metric_alarm:
+ dimensions:
+ InstanceId: '{{ ec2_instance_results.instances[0].instance_id }}'
+ state: present
+ name: '{{ alarm_full_name }}'
+ metric: CPUUtilization
+ namespace: AWS/EC2
+ treat_missing_data: missing
+ statistic: Average
+ comparison: LessThanOrEqualToThreshold
+ threshold: 5.0
+ period: 300
+ evaluation_periods: 3
+ description: This will alarm when an instance's cpu usage average is lower than
+ 5% for 15 minutes
+ register: ec2_instance_metric_alarm_no_unit
+
+ - name: get info on alarms
+ amazon.aws.cloudwatch_metric_alarm_info:
+ alarm_names:
+ - "{{ alarm_full_name }}"
+ register: alarm_info_no_unit
+
+ - name: verify that an alarm was created
+ assert:
+ that:
+ - ec2_instance_metric_alarm_no_unit.changed
+ - ec2_instance_metric_alarm_no_unit.alarm_arn
+ - 'ec2_instance_metric_alarm_no_unit.statistic == alarm_info_no_unit.metric_alarms[0].statistic'
+ - 'ec2_instance_metric_alarm_no_unit.name == alarm_info_no_unit.metric_alarms[0].alarm_name'
+ - 'ec2_instance_metric_alarm_no_unit.metric == alarm_info_no_unit.metric_alarms[0].metric_name'
+ - 'ec2_instance_metric_alarm_no_unit.namespace == alarm_info_no_unit.metric_alarms[0].namespace'
+ - 'ec2_instance_metric_alarm_no_unit.comparison == alarm_info_no_unit.metric_alarms[0].comparison_operator'
+ - 'ec2_instance_metric_alarm_no_unit.threshold == alarm_info_no_unit.metric_alarms[0].threshold'
+ - 'ec2_instance_metric_alarm_no_unit.period == alarm_info_no_unit.metric_alarms[0].period'
+ - 'alarm_info_no_unit.metric_alarms[0].unit is not defined'
+ - 'ec2_instance_metric_alarm_no_unit.evaluation_periods == alarm_info_no_unit.metric_alarms[0].evaluation_periods'
+ - 'ec2_instance_metric_alarm_no_unit.description == alarm_info_no_unit.metric_alarms[0].alarm_description'
+ - 'ec2_instance_metric_alarm_no_unit.treat_missing_data == alarm_info_no_unit.metric_alarms[0].treat_missing_data'
+
+ - name: try to remove the alarm
+ ec2_metric_alarm:
+ state: absent
+ name: '{{ alarm_full_name }}'
+ register: ec2_instance_metric_alarm_deletion
+
+ - name: Verify that the alarm reports deleted/changed
+ assert:
+ that:
+ - ec2_instance_metric_alarm_deletion.changed
+
+ - name: get info on alarms
+ amazon.aws.cloudwatch_metric_alarm_info:
+ alarm_names:
+ - "{{ alarm_full_name }}"
+ register: alarm_info
+
+ - name: Verify that the alarm was deleted
+ assert:
+ that:
+ - 'alarm_info.metric_alarms | length == 0'
+
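+ # The metrics parameter describes the alarm as a list of metric data queries
+ # (metric math form) rather than the single metric/statistic parameters used
+ # above.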
+ - name: create ec2 metric alarm with metrics
+ ec2_metric_alarm:
+ state: present
+ name: '{{ alarm_full_name }}'
+ treat_missing_data: missing
+ comparison: LessThanOrEqualToThreshold
+ threshold: 5.0
+ evaluation_periods: 3
+ description: This will alarm when an instance's cpu usage average is lower than
+ 5% for 15 minutes
+ metrics:
+ - id: cpu
+ metric_stat:
+ metric:
+ dimensions:
+ - name: "InstanceId"
+ value: "{{ ec2_instance_results.instances[0].instance_id }}"
+ metric_name: "CPUUtilization"
+ namespace: "AWS/EC2"
+ period: 300
+ stat: "Average"
+ unit: "Percent"
+ return_data: true
+ register: ec2_instance_metric_alarm_metrics
+
+ - name: get info on alarms
+ amazon.aws.cloudwatch_metric_alarm_info:
+ alarm_names:
+ - "{{ alarm_full_name }}"
+ register: alarm_info_metrics
+
+ - name: verify that an alarm was created
+ assert:
+ that:
+ - ec2_instance_metric_alarm_metrics.changed
+ - ec2_instance_metric_alarm_metrics.alarm_arn
+ - 'ec2_instance_metric_alarm_metrics.metrics[0].metric_stat.stat == alarm_info_metrics.metric_alarms[0].metrics[0].metric_stat.stat'
+ - 'ec2_instance_metric_alarm_metrics.metrics[0].metric_stat.metric.namespace == alarm_info_metrics.metric_alarms[0].metrics[0].metric_stat.metric.namespace'
+ - 'ec2_instance_metric_alarm_metrics.metrics[0].metric_stat.metric.metric_name == alarm_info_metrics.metric_alarms[0].metrics[0].metric_stat.metric.metric_name'
+ - 'ec2_instance_metric_alarm_metrics.metrics[0].metric_stat.metric.dimensions[0].name == alarm_info_metrics.metric_alarms[0].metrics[0].metric_stat.metric.dimensions[0].name'
+ - 'ec2_instance_metric_alarm_metrics.metrics[0].metric_stat.metric.dimensions[0].value == alarm_info_metrics.metric_alarms[0].metrics[0].metric_stat.metric.dimensions[0].value'
+ - 'ec2_instance_metric_alarm_metrics.metrics[0].id == alarm_info_metrics.metric_alarms[0].metrics[0].id'
+ - 'ec2_instance_metric_alarm_metrics.metrics[0].metric_stat.period == alarm_info_metrics.metric_alarms[0].metrics[0].metric_stat.period'
+ - 'ec2_instance_metric_alarm_metrics.metrics[0].metric_stat.unit == alarm_info_metrics.metric_alarms[0].metrics[0].metric_stat.unit'
+ - 'ec2_instance_metric_alarm_metrics.metrics[0].return_data == alarm_info_metrics.metric_alarms[0].metrics[0].return_data'
+
+
+ - name: try to remove the alarm
+ ec2_metric_alarm:
+ state: absent
+ name: '{{ alarm_full_name }}'
+ register: ec2_instance_metric_alarm_deletion_no_unit
+
+ - name: Verify that the alarm reports deleted/changed
+ assert:
+ that:
+ - ec2_instance_metric_alarm_deletion_no_unit.changed
+
+ - name: get info on alarms
+ amazon.aws.cloudwatch_metric_alarm_info:
+ alarm_names:
+ - "{{ alarm_full_name }}"
+ register: alarm_info_no_unit
+
+ - name: Verify that the alarm was deleted
+ assert:
+ that:
+ - 'alarm_info_no_unit.metric_alarms | length == 0'
+
+ - name: create ec2 metric alarm by providing mutually exclusive values
+ ec2_metric_alarm:
+ dimensions:
+ InstanceId: '{{ ec2_instance_results.instances[0].instance_id }}'
+ state: present
+ name: '{{ alarm_full_name }}'
+ metric: CPUUtilization
+ namespace: AWS/EC2
+ treat_missing_data: missing
+ statistic: Average
+ comparison: LessThanOrEqualToThreshold
+ threshold: 5.0
+ period: 300
+ evaluation_periods: 3
+ description: This will alarm when an instance's cpu usage average is lower than
+ 5% for 15 minutes
+ metrics:
+ - id: cpu
+ metric_stat:
+ metric:
+ dimensions:
+ - name: "InstanceId"
+ value: "{{ ec2_instance_results.instances[0].instance_id }}"
+ metric_name: "CPUUtilization"
+ namespace: "AWS/EC2"
+ period: 300
+ stat: "Average"
+ unit: "Percent"
+ return_data: true
+ register: ec2_instance_metric_mutually_exclusive
+ ignore_errors: true
+
+ - assert:
+ that:
+ - ec2_instance_metric_mutually_exclusive.failed
+ - '"parameters are mutually exclusive" in ec2_instance_metric_mutually_exclusive.msg'
+
+ always:
+ - name: try to delete the alarm
+ ec2_metric_alarm:
+ state: absent
+ name: '{{ alarm_full_name }}'
+ ignore_errors: true
+
+ - name: try to terminate the ec2 instance
+ ec2_instance:
+ instance_ids: '{{ ec2_instance_results.instances[0].instance_id }}'
+ state: terminated
+ ignore_errors: true
+
+ - include_tasks: env_cleanup.yml
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/aliases b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/aliases
new file mode 100644
index 000000000..4ef4b2067
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/aliases
@@ -0,0 +1 @@
+cloud/aws
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/defaults/main.yml
new file mode 100644
index 000000000..3b6964ade
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/defaults/main.yml
@@ -0,0 +1,9 @@
+---
+name_pattern: "cloudwatch_event_rule-{{ tiny_prefix }}"
+
+test_event_names:
+ - "{{ name_pattern }}-1"
+ - "{{ name_pattern }}-2"
+
+input_transformer_event_name: "{{ name_pattern }}-3"
+input_event_name: "{{ name_pattern }}-4"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/tasks/main.yml
new file mode 100644
index 000000000..0047831a7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/tasks/main.yml
@@ -0,0 +1,96 @@
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+
+ block:
+ - name: Create SNS topic
+ sns_topic:
+ name: "TestSNSTopic"
+ state: present
+ display_name: "Test SNS Topic"
+ register: sns_topic_output
+
+ - name: Create classic cloudwatch event rules
+ cloudwatchevent_rule:
+ name: "{{ item }}"
+ description: "Rule for {{ item }}"
+ state: present
+ schedule_expression: "cron(0 20 * * ? *)"
+ targets:
+ - id: "{{ sns_topic_output.sns_topic.name }}"
+ arn: "{{ sns_topic_output.sns_topic.topic_arn }}"
+ register: event_rules_classic_output
+ loop: "{{ test_event_names }}"
+
+ - name: Assert that classic event rules were created
+ assert:
+ that:
+ - event_rules_classic_output.changed
+ - event_rules_classic_output.msg == "All items completed"
+
+ - name: Create cloudwatch event rule with input transformer
+ cloudwatchevent_rule:
+ name: "{{ input_transformer_event_name }}"
+ description: "Event rule with input transformer configuration"
+ state: present
+ event_pattern: '{"source":["aws.ec2"],"detail-type":["EC2 Instance State-change Notification"],"detail":{"state":["pending"]}}'
+ targets:
+ - id: "{{ sns_topic_output.sns_topic.name }}"
+ arn: "{{ sns_topic_output.sns_topic.topic_arn }}"
+ input_transformer:
+ input_paths_map:
+ instance: "$.detail.instance-id"
+ state: "$.detail.state"
+ input_template: "<instance> is in state <state>"
+ register: event_rule_input_transformer_output
+
+ - name: Assert that input transformer event rule was created
+ assert:
+ that:
+ - event_rule_input_transformer_output.changed
+
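+ # The next rule exercises static target inputs: one target receives a plain
+ # string and a second receives a dict, which the module is expected to
+ # serialise to JSON before handing it to the target.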
+ - name: Create cloudwatch event rule with inputs
+ cloudwatchevent_rule:
+ name: "{{ input_event_name }}"
+ description: "Event rule with input configuration"
+ state: present
+ event_pattern: '{"source":["aws.ec2"],"detail-type":["EC2 Instance State-change Notification"],"detail":{"state":["pending"]}}'
+ targets:
+ - id: "{{ sns_topic_output.sns_topic.name }}"
+ arn: "{{ sns_topic_output.sns_topic.topic_arn }}"
+ input: 'Hello World'
+ - id: "{{ sns_topic_output.sns_topic.name }}2"
+ arn: "{{ sns_topic_output.sns_topic.topic_arn }}"
+ input:
+ start: 'Hello World'
+ end: 'Goodbye oh cruel World'
+ register: event_rule_input_output
+
+ - name: Assert that input event rule was created
+ assert:
+ that:
+ - event_rule_input_output.changed
+
+ always:
+
+ - name: Delete classic CloudWatch event rules
+ cloudwatchevent_rule:
+ name: "{{ item }}"
+ state: absent
+ loop: "{{ test_event_names }}"
+
+ - name: Delete input transformer CloudWatch event rules
+ cloudwatchevent_rule:
+ name: "{{ item }}"
+ state: absent
+ loop:
+ - "{{ input_transformer_event_name }}"
+ - "{{ input_event_name }}"
+
+ - name: Delete SNS topic
+ sns_topic:
+ name: "TestSNSTopic"
+ state: absent
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/aliases b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/aliases
new file mode 100644
index 000000000..f289eb392
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/aliases
@@ -0,0 +1,5 @@
+cloud/aws
+
+cloudwatchlogs_log_group
+cloudwatchlogs_log_group_info
+cloudwatchlogs_log_group_metric_filter
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/defaults/main.yml
new file mode 100644
index 000000000..178ae143f
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/defaults/main.yml
@@ -0,0 +1,2 @@
+log_group_name: '{{ resource_prefix }}/integrationtest'
+filter_name: '{{ resource_prefix }}/AnsibleTest'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/cloudwatchlogs_tests.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/cloudwatchlogs_tests.yml
new file mode 100644
index 000000000..00545385a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/cloudwatchlogs_tests.yml
@@ -0,0 +1,151 @@
+# Tests for changes to the cloudwatchlogs_log_group and cloudwatchlogs_log_group_metric_filter
+
+- block:
+
+ - name: create cloudwatch log group for integration test
+ cloudwatchlogs_log_group:
+ state: present
+ log_group_name: '{{ log_group_name }}'
+ retention: 1
+
+ - name: check_mode set metric filter on '{{ log_group_name }}'
+ cloudwatchlogs_log_group_metric_filter:
+ log_group_name: '{{ log_group_name }}'
+ filter_name: '{{ filter_name }}'
+ filter_pattern: '{ ($.value = *) && ($.hostname = "box")}'
+ state: present
+ metric_transformation:
+ metric_name: box_free_space
+ metric_namespace: fluentd_metrics
+ metric_value: $.value
+ check_mode: yes
+ register: out
+
+ - name: check_mode state must be changed
+ assert:
+ that:
+ - out is changed
+ - out.metric_filters | count == 1
+
+ - name: set metric filter on '{{ log_group_name }}'
+ cloudwatchlogs_log_group_metric_filter:
+ log_group_name: '{{ log_group_name }}'
+ filter_name: '{{ filter_name }}'
+ filter_pattern: '{ ($.value = *) && ($.hostname = "box")}'
+ state: present
+ metric_transformation:
+ metric_name: box_free_space
+ metric_namespace: fluentd_metrics
+ metric_value: $.value
+ register: out
+
+ - name: create metric filter
+ assert:
+ that:
+ - out is changed
+ - out.metric_filters | count == 1
+
+ - name: re-set metric filter on '{{ log_group_name }}'
+ cloudwatchlogs_log_group_metric_filter:
+ log_group_name: '{{ log_group_name }}'
+ filter_name: '{{ filter_name }}'
+ filter_pattern: '{ ($.value = *) && ($.hostname = "box")}'
+ state: present
+ metric_transformation:
+ metric_name: box_free_space
+ metric_namespace: fluentd_metrics
+ metric_value: $.value
+ register: out
+
+ - name: metric filter must not change
+ assert:
+ that:
+ - out is not changed
+
+ - name: update metric transformation on '{{ log_group_name }}'
+ cloudwatchlogs_log_group_metric_filter:
+ log_group_name: '{{ log_group_name }}'
+ filter_name: '{{ filter_name }}'
+ filter_pattern: '{ ($.value = *) && ($.hostname = "box")}'
+ state: present
+ metric_transformation:
+ metric_name: box_free_space
+ metric_namespace: made_with_ansible
+ metric_value: $.value
+ default_value: 3.1415
+ register: out
+
+ - name: update metric filter
+ assert:
+ that:
+ - out is changed
+ - out.metric_filters[0].metric_namespace == "made_with_ansible"
+ - out.metric_filters[0].default_value == 3.1415
+
+ - name: update filter_pattern on '{{ log_group_name }}'
+ cloudwatchlogs_log_group_metric_filter:
+ log_group_name: '{{ log_group_name }}'
+ filter_name: '{{ filter_name }}'
+ filter_pattern: '{ ($.value = *) && ($.hostname = "ansible")}'
+ state: present
+ metric_transformation:
+ metric_name: box_free_space
+ metric_namespace: made_with_ansible
+ metric_value: $.value
+ register: out
+
+ - name: update metric filter
+ assert:
+ that:
+ - out is changed
+ - out.metric_filters[0].metric_namespace == "made_with_ansible"
+
+ - name: checkmode delete metric filter on '{{ log_group_name }}'
+ cloudwatchlogs_log_group_metric_filter:
+ log_group_name: '{{ log_group_name }}'
+ filter_name: '{{ filter_name }}'
+ state: absent
+ check_mode: yes
+ register: out
+
+ - name: check_mode state must be changed
+ assert:
+ that:
+ - out is changed
+
+ - name: delete metric filter on '{{ log_group_name }}'
+ cloudwatchlogs_log_group_metric_filter:
+ log_group_name: '{{ log_group_name }}'
+ filter_name: '{{ filter_name }}'
+ state: absent
+ register: out
+
+ - name: delete metric filter
+ assert:
+ that:
+ - out is changed
+
+ - name: delete metric filter on '{{ log_group_name }}' which does not exist
+ cloudwatchlogs_log_group_metric_filter:
+ log_group_name: '{{ log_group_name }}'
+ filter_name: '{{ filter_name }}'
+ state: absent
+ register: out
+
+ - name: assert metric filter deletion is idempotent
+ assert:
+ that:
+ - out is not changed
+
+ always:
+ - name: delete metric filter
+ cloudwatchlogs_log_group_metric_filter:
+ log_group_name: '{{ log_group_name }}'
+ filter_name: '{{ filter_name }}'
+ state: absent
+
+ - name: delete cloudwatch log group for integration test
+ cloudwatchlogs_log_group:
+ state: absent
+ log_group_name: '{{ log_group_name }}'
+ ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/create-delete-tags.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/create-delete-tags.yml
new file mode 100644
index 000000000..b6f1da59e
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/create-delete-tags.yml
@@ -0,0 +1,444 @@
+# Tests relating to create/delete and set tags on cloudwatchlogs_log_group
+
+- name: Tests relating to setting tags on cloudwatchlogs_log_group
+ vars:
+ first_tags:
+ Key with Spaces: Value with spaces
+ CamelCaseKey: CamelCaseValue
+ pascalCaseKey: pascalCaseValue
+ snake_case_key: snake_case_value
+ second_tags:
+ New Key with Spaces: Value with spaces
+ NewCamelCaseKey: CamelCaseValue
+ newPascalCaseKey: pascalCaseValue
+ new_snake_case_key: snake_case_value
+ third_tags:
+ Key with Spaces: Value with spaces
+ CamelCaseKey: CamelCaseValue
+ pascalCaseKey: pascalCaseValue
+ snake_case_key: snake_case_value
+ New Key with Spaces: Updated Value with spaces
+ final_tags:
+ Key with Spaces: Value with spaces
+ CamelCaseKey: CamelCaseValue
+ pascalCaseKey: pascalCaseValue
+ snake_case_key: snake_case_value
+ New Key with Spaces: Updated Value with spaces
+ NewCamelCaseKey: CamelCaseValue
+ newPascalCaseKey: pascalCaseValue
+ new_snake_case_key: snake_case_value
+ # Mandatory settings
+ module_defaults:
+ amazon.aws.cloudwatchlogs_log_group:
+ state: present
+ log_group_name: '{{ log_group_name }}'
+ amazon.aws.cloudwatchlogs_log_group_info:
+ log_group_name: '{{ log_group_name }}'
+ block:
+
+ - name: create cloudwatch log group for integration test (check_mode)
+ cloudwatchlogs_log_group:
+ state: present
+ log_group_name: '{{ log_group_name }}'
+ retention: 1
+ tags:
+ CamelCase: Value
+ snake_case: value
+ check_mode: true
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - '"log_groups" not in result'
+ - '"logs:CreateLogGroup" not in result.resource_actions'
+
+ - name: create cloudwatch log group for integration test
+ cloudwatchlogs_log_group:
+ state: present
+ log_group_name: '{{ log_group_name }}'
+ retention: 1
+ tags:
+ CamelCase: Value
+ snake_case: value
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - '"log_groups" in result'
+ - result.log_groups | length == 1
+ - '"log_group_name" in log_group'
+ - '"creation_time" in log_group'
+ - '"retention_in_days" in log_group'
+ - '"metric_filter_count" in log_group'
+ - '"arn" in log_group'
+ - '"stored_bytes" in log_group'
+ # - '"kms_key_id" in log_group'
+ # pre-4.0.0 upgrade compatibility
+ - '"log_group_name" in result'
+ - '"creation_time" in result'
+ - '"retention_in_days" in result'
+ - '"metric_filter_count" in result'
+ - '"arn" in result'
+ - '"stored_bytes" in result'
+ # - '"kms_key_id" in result'
+ - '"CamelCase" in log_group.tags'
+ - '"snake_case" in log_group.tags'
+ vars:
+ log_group: '{{ result.log_groups[0] }}'
+
+ - name: create cloudwatch log group for integration test (check_mode - idempotent)
+ cloudwatchlogs_log_group:
+ state: present
+ log_group_name: '{{ log_group_name }}'
+ retention: 1
+ check_mode: true
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+ - '"log_groups" in result'
+ - result.log_groups | length == 1
+
+ - name: create cloudwatch log group for integration test (idempotent)
+ cloudwatchlogs_log_group:
+ state: present
+ log_group_name: '{{ log_group_name }}'
+ retention: 1
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+ - '"log_groups" in result'
+ - result.log_groups | length == 1
+ vars:
+ log_group: '{{ result.log_groups[0] }}'
+
+ - name: describe all log groups
+ cloudwatchlogs_log_group_info: {}
+ register: result
+
+ - assert:
+ that:
+ - '"log_groups" in result'
+ - result.log_groups | length >= 1
+
+ - name: describe log group
+ cloudwatchlogs_log_group_info:
+ log_group_name: '{{ log_group_name }}'
+ register: result
+
+ - assert:
+ that:
+ - '"log_groups" in result'
+ - result.log_groups | length == 1
+ - '"log_group_name" in log_group'
+ - '"creation_time" in log_group'
+ - '"retention_in_days" in log_group'
+ - '"metric_filter_count" in log_group'
+ - '"arn" in log_group'
+ - '"stored_bytes" in log_group'
+ # - '"kms_key_id" in log_group'
+ - '"tags" in log_group'
+ vars:
+ log_group: '{{ result.log_groups[0] }}'
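+ # The module_defaults at the top of this block already supply state and
+ # log_group_name, so the tag tasks below only need to pass tags/purge_tags.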
+ - name: test adding tags to cloudwatchlogs_log_group (check_mode)
+ cloudwatchlogs_log_group:
+ tags: '{{ first_tags }}'
+ purge_tags: true
+ check_mode: true
+ register: update_result
+
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is changed
+ - '"logs:UntagLogGroup" not in update_result'
+ - '"logs:TagLogGroup" not in update_result'
+
+ - name: test adding tags to cloudwatchlogs_log_group
+ cloudwatchlogs_log_group:
+ tags: '{{ first_tags }}'
+ purge_tags: true
+ register: update_result
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is changed
+ - update_result.log_groups[0].tags == first_tags
+
+ - name: test adding tags to cloudwatchlogs_log_group - idempotency (check mode)
+ cloudwatchlogs_log_group:
+ tags: '{{ first_tags }}'
+ purge_tags: true
+ check_mode: true
+ register: update_result
+
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is not changed
+ - '"logs:UntagLogGroup" not in update_result'
+ - '"logs:TagLogGroup" not in update_result'
+
+ - name: test adding tags to cloudwatchlogs_log_group - idempotency
+ cloudwatchlogs_log_group:
+ tags: '{{ first_tags }}'
+ purge_tags: true
+ register: update_result
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is not changed
+ - update_result.log_groups[0].tags == first_tags
+
+ ###
+
+ - name: test updating tags with purge on cloudwatchlogs_log_group (check mode)
+ cloudwatchlogs_log_group:
+ tags: '{{ second_tags }}'
+ purge_tags: true
+ check_mode: true
+ register: update_result
+
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is changed
+ - '"logs:UntagLogGroup" not in update_result'
+ - '"logs:TagLogGroup" not in update_result'
+
+ - name: test updating tags with purge on cloudwatchlogs_log_group
+ cloudwatchlogs_log_group:
+ tags: '{{ second_tags }}'
+ purge_tags: true
+ register: update_result
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is changed
+ - update_result.log_groups[0].tags == second_tags
+
+ - name: test updating tags with purge on cloudwatchlogs_log_group - idempotency
+ (check mode)
+ cloudwatchlogs_log_group:
+ tags: '{{ second_tags }}'
+ purge_tags: true
+ check_mode: true
+ register: update_result
+
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is not changed
+ - '"logs:UntagLogGroup" not in update_result'
+ - '"logs:TagLogGroup" not in update_result'
+
+ - name: test updating tags with purge on cloudwatchlogs_log_group - idempotency
+ cloudwatchlogs_log_group:
+ tags: '{{ second_tags }}'
+ purge_tags: true
+ register: update_result
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is not changed
+ - update_result.log_groups[0].tags == second_tags
+
+ ###
+
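+ # With purge_tags: false the new tags are merged into the existing ones instead of
+ # replacing them, which is why the results below are compared against final_tags.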
+ - name: test updating tags without purge on cloudwatchlogs_log_group (check mode)
+ cloudwatchlogs_log_group:
+ tags: '{{ third_tags }}'
+ purge_tags: false
+ check_mode: true
+ register: update_result
+
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is changed
+ - '"logs:UntagLogGroup" not in update_result'
+ - '"logs:TagLogGroup" not in update_result'
+
+ - name: test updating tags without purge on cloudwatchlogs_log_group
+ cloudwatchlogs_log_group:
+ tags: '{{ third_tags }}'
+ purge_tags: false
+ register: update_result
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is changed
+ - update_result.log_groups[0].tags == final_tags
+
+ - name: test updating tags without purge on cloudwatchlogs_log_group - idempotency
+ (check mode)
+ cloudwatchlogs_log_group:
+ tags: '{{ third_tags }}'
+ purge_tags: false
+ check_mode: true
+ register: update_result
+
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is not changed
+ - '"logs:UntagLogGroup" not in update_result'
+ - '"logs:TagLogGroup" not in update_result'
+
+ - name: test updating tags without purge on cloudwatchlogs_log_group - idempotency
+ cloudwatchlogs_log_group:
+ tags: '{{ third_tags }}'
+ purge_tags: false
+ register: update_result
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is not changed
+ - update_result.log_groups[0].tags == final_tags
+
+ ###
+
+ - name: test that cloudwatchlogs_log_group_info returns the tags
+ cloudwatchlogs_log_group_info:
+ register: tag_info
+ - name: assert tags present
+ assert:
+ that:
+ - tag_info.log_groups | length == 1
+ - tag_info.log_groups[0].tags == final_tags
+
+ ###
+
+ - name: test no tags param cloudwatchlogs_log_group (check mode)
+ cloudwatchlogs_log_group: {}
+ check_mode: true
+ register: update_result
+
+ - name: assert no change
+ assert:
+ that:
+ - update_result is not changed
+ - update_result.log_groups[0].tags == final_tags
+
+ - name: test no tags param cloudwatchlogs_log_group
+ cloudwatchlogs_log_group: {}
+ register: update_result
+ - name: assert no change
+ assert:
+ that:
+ - update_result is not changed
+ - update_result.log_groups[0].tags == final_tags
+
+ ###
+
+ - name: test removing tags from cloudwatchlogs_log_group (check mode)
+ cloudwatchlogs_log_group:
+ tags: {}
+ purge_tags: true
+ check_mode: true
+ register: update_result
+
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is changed
+
+ - name: test removing tags from cloudwatchlogs_log_group
+ cloudwatchlogs_log_group:
+ tags: {}
+ purge_tags: true
+ register: update_result
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is changed
+ - update_result.log_groups[0].tags == {}
+
+ - name: test removing tags from cloudwatchlogs_log_group - idempotency (check mode)
+ cloudwatchlogs_log_group:
+ tags: {}
+ purge_tags: true
+ check_mode: true
+ register: update_result
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is not changed
+
+ - name: test removing tags from cloudwatchlogs_log_group - idempotency
+ cloudwatchlogs_log_group:
+ tags: {}
+ purge_tags: true
+ register: update_result
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is not changed
+ - update_result.log_groups[0].tags == {}
+
+ - name: delete cloudwatch log group for integration test (check_mode)
+ cloudwatchlogs_log_group:
+ state: absent
+ log_group_name: '{{ log_group_name }}'
+ check_mode: true
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - '"logs:DeleteLogGroup" not in result.resource_actions'
+
+ - name: delete cloudwatch log group for integration test
+ cloudwatchlogs_log_group:
+ state: absent
+ log_group_name: '{{ log_group_name }}'
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: delete cloudwatch log group for integration test (check_mode - idempotent)
+ cloudwatchlogs_log_group:
+ state: absent
+ log_group_name: '{{ log_group_name }}'
+ check_mode: true
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+ - '"logs:DeleteLogGroup" not in result.resource_actions'
+
+ - name: delete cloudwatch log group for integration test (idempotent)
+ cloudwatchlogs_log_group:
+ state: absent
+ log_group_name: '{{ log_group_name }}'
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: describe missing log group
+ cloudwatchlogs_log_group_info:
+ log_group_name: '{{ log_group_name }}'
+ register: result
+
+ - assert:
+ that:
+ - '"log_groups" in result'
+ - result.log_groups | length == 0
+
+ always:
+
+ - name: delete cloudwatch log group for integration test
+ cloudwatchlogs_log_group:
+ state: absent
+ log_group_name: '{{ log_group_name }}'
+ ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/main.yml
new file mode 100644
index 000000000..e5e0f072b
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/main.yml
@@ -0,0 +1,16 @@
+# Tests for cloudwatchlogs_log_group, cloudwatchlogs_log_group_info, and cloudwatchlogs_log_group_metric_filter modules
+
+- module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+
+ block:
+
+ - name: Run tests covering the cloudwatchlogs_log_group and cloudwatchlogs_log_group_metric_filter modules
+ include_tasks: cloudwatchlogs_tests.yml
+
+ - name: Run tests relating to create/delete and set tags on cloudwatchlogs_log_group
+ include_tasks: create-delete-tags.yml
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/aliases
new file mode 100644
index 000000000..9b0b03cbf
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/aliases
@@ -0,0 +1,5 @@
+# duration: 15
+slow
+
+cloud/aws
+ec2_ami_info
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/defaults/main.yml
new file mode 100644
index 000000000..8dd565191
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/defaults/main.yml
@@ -0,0 +1,11 @@
+---
+availability_zone: '{{ ec2_availability_zone_names[0] }}'
+
+# defaults file for test_ec2_ami
+ec2_ami_name: '{{resource_prefix}}'
+ec2_ami_description: 'Created by ansible integration tests'
+
+ec2_ami_image: '{{ ec2_ami_id }}'
+
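+# The CIDRs are seeded with resource_prefix so that concurrent CI runs are unlikely
+# to pick overlapping ranges while staying stable within a single run.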
+vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/16'
+subnet_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.1.0/24'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/meta/main.yml
new file mode 100644
index 000000000..3dc000aba
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/meta/main.yml
@@ -0,0 +1,5 @@
+dependencies:
+ - setup_ec2_facts
+ - role: setup_botocore_pip
+ vars:
+ botocore_version: '1.26.0' \ No newline at end of file
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/tasks/main.yml
new file mode 100644
index 000000000..3bfbcbf13
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/tasks/main.yml
@@ -0,0 +1,786 @@
+---
+# Test suite for ec2_ami
+- module_defaults:
+ group/aws:
+ aws_region: '{{ aws_region }}'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ collections:
+ - amazon.aws
+ block:
+
+ # AWS CLI is needed until there's a module to get instance uefi data
+ - name: Install AWS CLI
+ pip:
+ name: awscli==1.25.83
+ state: present
+
+ # ============================================================
+
+ # SETUP: vpc, ec2 key pair, subnet, security group, ec2 instance, snapshot
+ - name: create a VPC to work in
+ ec2_vpc_net:
+ cidr_block: '{{ vpc_cidr }}'
+ state: present
+ name: '{{ ec2_ami_name }}_setup'
+ resource_tags:
+ Name: '{{ ec2_ami_name }}_setup'
+ register: setup_vpc
+
+ - name: create a key pair to use for creating an ec2 instance
+ ec2_key:
+ name: '{{ ec2_ami_name }}_setup'
+ state: present
+ register: setup_key
+
+ - name: create a subnet to use for creating an ec2 instance
+ ec2_vpc_subnet:
+ az: '{{ availability_zone }}'
+ tags: '{{ ec2_ami_name }}_setup'
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ cidr: '{{ subnet_cidr }}'
+ state: present
+ resource_tags:
+ Name: '{{ ec2_ami_name }}_setup'
+ register: setup_subnet
+
+ - name: create a security group to use for creating an ec2 instance
+ ec2_group:
+ name: '{{ ec2_ami_name }}_setup'
+ description: 'created by Ansible integration tests'
+ state: present
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ register: setup_sg
+
+ - name: provision ec2 instance to create an image
+ ec2_instance:
+ state: running
+ key_name: '{{ setup_key.key.name }}'
+ instance_type: t2.micro
+ image_id: '{{ ec2_ami_id }}'
+ tags:
+ '{{ec2_ami_name}}_instance_setup': 'integration_tests'
+ security_group: '{{ setup_sg.group_id }}'
+ vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
+ volumes:
+ - device_name: /dev/sdc
+ virtual_name: ephemeral1
+ wait: yes
+ register: setup_instance
+
+ - name: Store EC2 Instance ID
+ set_fact:
+ ec2_instance_id: '{{ setup_instance.instances[0].instance_id }}'
+
+ - name: take a snapshot of the instance to create an image
+ ec2_snapshot:
+ instance_id: '{{ ec2_instance_id }}'
+ device_name: '{{ ec2_ami_root_disk }}'
+ state: present
+ register: setup_snapshot
+
+ # note: the current CI supported instance types (t2, t3, m1) do not support uefi boot mode + tpm_support
+ # disabling the task as aws documentation states that support for t3 will be coming soon
+ # - name: get instance UEFI data
+ # command: aws ec2 get-instance-uefi-data --instance-id {{ ec2_instance_id }} --region {{ aws_region }}
+ # environment:
+ # AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
+ # AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
+ # AWS_SESSION_TOKEN: "{{ security_token | default('') }}"
+ # AWS_DEFAULT_REGION: "{{ aws_region }}"
+ # register: instance_uefi_data_output
+
+ # - name: Convert it to an object
+ # set_fact:
+ # instance_uefi_data: "{{ instance_uefi_data_output.stdout | from_json }}"
+
+ # ============================================================
+
+ - name: test clean failure if not providing image_id or name with state=present
+ ec2_ami:
+ instance_id: '{{ ec2_instance_id }}'
+ state: present
+ description: '{{ ec2_ami_description }}'
+ tags:
+ Name: '{{ ec2_ami_name }}_ami'
+ wait: yes
+ root_device_name: '{{ ec2_ami_root_disk }}'
+ register: result
+ ignore_errors: yes
+
+ - name: assert error message is helpful
+ assert:
+ that:
+ - result.failed
+ - "result.msg == 'one of the following is required: name, image_id'"
+
+ # ============================================================
+
+ - name: create an image from the instance (check mode)
+ ec2_ami:
+ instance_id: '{{ ec2_instance_id }}'
+ state: present
+ name: '{{ ec2_ami_name }}_ami'
+ description: '{{ ec2_ami_description }}'
+ tags:
+ Name: '{{ ec2_ami_name }}_ami'
+ wait: yes
+ root_device_name: '{{ ec2_ami_root_disk }}'
+ check_mode: true
+ register: check_mode_result
+
+ - name: assert that check_mode result is changed
+ assert:
+ that:
+ - check_mode_result is changed
+
+ - name: create an image from the instance
+ ec2_ami:
+ instance_id: '{{ ec2_instance_id }}'
+ state: present
+ name: '{{ ec2_ami_name }}_ami'
+ description: '{{ ec2_ami_description }}'
+ tags:
+ Name: '{{ ec2_ami_name }}_ami'
+ wait: yes
+ root_device_name: '{{ ec2_ami_root_disk }}'
+ register: result
+
+ - name: set image id fact for deletion later
+ set_fact:
+ ec2_ami_image_id: "{{ result.image_id }}"
+
+ - name: assert that image has been created
+ assert:
+ that:
+ - "result.changed"
+ - "result.image_id.startswith('ami-')"
+ - "'Name' in result.tags and result.tags.Name == ec2_ami_name + '_ami'"
+
+ - name: get related snapshot info and ensure the tags have been propagated
+ ec2_snapshot_info:
+ snapshot_ids:
+ - "{{ result.block_device_mapping[ec2_ami_root_disk].snapshot_id }}"
+ register: snapshot_result
+
+ - name: ensure the tags have been propagated to the snapshot
+ assert:
+ that:
+ - "'tags' in snapshot_result.snapshots[0]"
+ - "'Name' in snapshot_result.snapshots[0].tags and snapshot_result.snapshots[0].tags.Name == ec2_ami_name + '_ami'"
+
+ # ============================================================
+
+ - name: create an image from the instance with attached devices with no_device true (check mode)
+ ec2_ami:
+ name: '{{ ec2_ami_name }}_no_device_true_ami'
+ instance_id: '{{ ec2_instance_id }}'
+ device_mapping:
+ - device_name: /dev/sda1
+ volume_size: 10
+ delete_on_termination: true
+ volume_type: gp2
+ - device_name: /dev/sdf
+ no_device: yes
+ state: present
+ wait: yes
+ root_device_name: '{{ ec2_ami_root_disk }}'
+ check_mode: true
+ register: check_mode_result
+
+ - name: assert that check_mode result is changed
+ assert:
+ that:
+ - check_mode_result is changed
+
+ - name: create an image from the instance with attached devices with no_device true
+ ec2_ami:
+ name: '{{ ec2_ami_name }}_no_device_true_ami'
+ instance_id: '{{ ec2_instance_id }}'
+ device_mapping:
+ - device_name: /dev/sda1
+ volume_size: 10
+ delete_on_termination: true
+ volume_type: gp2
+ - device_name: /dev/sdf
+ no_device: yes
+ state: present
+ wait: yes
+ root_device_name: '{{ ec2_ami_root_disk }}'
+ register: result_no_device_true
+
+ - name: set image id fact for deletion later
+ set_fact:
+ ec2_ami_no_device_true_image_id: "{{ result_no_device_true.image_id }}"
+
+ - name: assert that the image with no_device option set to yes has been created
+ assert:
+ that:
+ - "result_no_device_true.changed"
+ - "'/dev/sdf' not in result_no_device_true.block_device_mapping"
+
+ - name: create an image from the instance with attached devices with no_device false
+ ec2_ami:
+ name: '{{ ec2_ami_name }}_no_device_false_ami'
+ instance_id: '{{ ec2_instance_id }}'
+ device_mapping:
+ - device_name: /dev/sda1
+ volume_size: 10
+ delete_on_termination: true
+ volume_type: gp2
+ no_device: no
+ state: present
+ wait: yes
+ root_device_name: '{{ ec2_ami_root_disk }}'
+ register: result_no_device_false
+
+ - name: set image id fact for deletion later
+ set_fact:
+ ec2_ami_no_device_false_image_id: "{{ result_no_device_false.image_id }}"
+
+ - name: assert that the image with no_device option set to no has been created
+ assert:
+ that:
+ - "result_no_device_false.changed"
+ - "'/dev/sda1' in result_no_device_false.block_device_mapping"
+
+ # ============================================================
+
+ - name: gather facts about the image created
+ ec2_ami_info:
+ image_ids: '{{ ec2_ami_image_id }}'
+ register: ami_facts_result
+ ignore_errors: true
+
+ - name: assert that the right image was found
+ assert:
+ that:
+ - "ami_facts_result.images[0].image_id == ec2_ami_image_id"
+
+ # ec2_ami_info tests to verify that filtering works correctly.
+ # ============================================================
+
+ - name: gather info about the image
+ ec2_ami_info:
+ image_ids: '{{ ec2_region_images[ec2_region] }}'
+ register: ami_info_result
+ ignore_errors: true
+
+ - name: assert that the right image was found
+ assert:
+ that:
+ - "ami_info_result.images[0].image_id == '{{ ec2_region_images[ec2_region] }}'"
+
+ # ============================================================
+
+ - name: gather info about the image using boolean filter
+ ec2_ami_info:
+ image_ids: '{{ ec2_region_images[ec2_region] }}'
+ filters:
+ is-public: true
+ register: ami_info_result
+ ignore_errors: true
+
+ - name: assert that the right image was found
+ assert:
+ that:
+ - "ami_info_result.images[0].image_id == '{{ ec2_region_images[ec2_region] }}'"
+
+ # ============================================================
+
+ - name: gather info about the image using integer filter
+ ec2_ami_info:
+ image_ids: '{{ ec2_region_images[ec2_region] }}'
+ filters:
+ # Amazon owned
+ owner-id: 137112412989
+ register: ami_info_result
+ ignore_errors: true
+
+ - name: assert that the right image was found
+ assert:
+ that:
+ - "ami_info_result.images[0].image_id == '{{ ec2_region_images[ec2_region] }}'"
+
+ # ============================================================
+
+ - name: gather info about the image using string filter
+ ec2_ami_info:
+ image_ids: '{{ ec2_region_images[ec2_region] }}'
+ filters:
+ name: 'amzn-ami-hvm-2017.09.0.20170930-x86_64-gp2'
+ register: ami_info_result
+ ignore_errors: true
+
+ - name: assert that the right image was found
+ assert:
+ that:
+ - "ami_info_result.images[0].image_id == '{{ ec2_region_images[ec2_region] }}'"
+
+ # ec2_ami_info filtering tests end
+ # ============================================================
+
+ - name: delete the image (check mode)
+ ec2_ami:
+ instance_id: '{{ ec2_instance_id }}'
+ state: absent
+ delete_snapshot: yes
+ name: '{{ ec2_ami_name }}_ami'
+ description: '{{ ec2_ami_description }}'
+ image_id: '{{ result.image_id }}'
+ tags:
+ Name: '{{ ec2_ami_name }}_ami'
+ wait: yes
+ ignore_errors: true
+ check_mode: true
+ register: check_mode_result
+
+ - name: assert that check_mode result is changed
+ assert:
+ that:
+ - check_mode_result is changed
+
+ - name: delete the image
+ ec2_ami:
+ instance_id: '{{ ec2_instance_id }}'
+ state: absent
+ delete_snapshot: yes
+ name: '{{ ec2_ami_name }}_ami'
+ description: '{{ ec2_ami_description }}'
+ image_id: '{{ result.image_id }}'
+ tags:
+ Name: '{{ ec2_ami_name }}_ami'
+ wait: yes
+ ignore_errors: true
+ register: result
+
+ - name: assert that the image has been deleted
+ assert:
+ that:
+ - "result.changed"
+ - "'image_id' not in result"
+ - "result.snapshots_deleted"
+
+ # ==============================================================
+
+ - name: test removing an ami if no image ID is provided (expected failed=true)
+ ec2_ami:
+ state: absent
+ register: result
+ ignore_errors: yes
+
+ - name: assert that an image ID is required
+ assert:
+ that:
+ - "result.failed"
+ - "result.msg == 'state is absent but all of the following are missing: image_id'"
+
+ # ============================================================
+
+ - name: create an image from the snapshot
+ ec2_ami:
+ name: '{{ ec2_ami_name }}_ami'
+ description: '{{ ec2_ami_description }}'
+ state: present
+ launch_permissions:
+ user_ids: []
+ tags:
+ Name: '{{ ec2_ami_name }}_ami'
+ root_device_name: '{{ ec2_ami_root_disk }}'
+ device_mapping:
+ - device_name: '{{ ec2_ami_root_disk }}'
+ volume_type: gp2
+ size: 8
+ delete_on_termination: true
+ snapshot_id: '{{ setup_snapshot.snapshot_id }}'
+ register: result
+ ignore_errors: true
+
+ - name: set image id fact for deletion later
+ set_fact:
+ ec2_ami_image_id: "{{ result.image_id }}"
+ ec2_ami_snapshot: "{{ result.block_device_mapping[ec2_ami_root_disk].snapshot_id }}"
+
+ - name: assert a new ami has been created
+ assert:
+ that:
+ - "result.changed"
+ - "result.image_id.startswith('ami-')"
+
+ # ============================================================
+
+ - name: test default launch permissions idempotence (check mode)
+ ec2_ami:
+ description: '{{ ec2_ami_description }}'
+ state: present
+ name: '{{ ec2_ami_name }}_ami'
+ tags:
+ Name: '{{ ec2_ami_name }}_ami'
+ root_device_name: '{{ ec2_ami_root_disk }}'
+ image_id: '{{ result.image_id }}'
+ launch_permissions:
+ user_ids: []
+ device_mapping:
+ - device_name: '{{ ec2_ami_root_disk }}'
+ volume_type: gp2
+ size: 8
+ delete_on_termination: true
+ snapshot_id: '{{ setup_snapshot.snapshot_id }}'
+ check_mode: true
+ register: check_mode_result
+
+ - name: assert that check_mode result is not changed
+ assert:
+ that:
+ - check_mode_result is not changed
+
+ - name: test default launch permissions idempotence
+ ec2_ami:
+ description: '{{ ec2_ami_description }}'
+ state: present
+ name: '{{ ec2_ami_name }}_ami'
+ tags:
+ Name: '{{ ec2_ami_name }}_ami'
+ root_device_name: '{{ ec2_ami_root_disk }}'
+ image_id: '{{ result.image_id }}'
+ launch_permissions:
+ user_ids: []
+ device_mapping:
+ - device_name: '{{ ec2_ami_root_disk }}'
+ volume_type: gp2
+ size: 8
+ delete_on_termination: true
+ snapshot_id: '{{ setup_snapshot.snapshot_id }}'
+ register: result
+
+ - name: assert a new ami has not been created
+ assert:
+ that:
+ - "not result.changed"
+ - "result.image_id.startswith('ami-')"
+
+ # ============================================================
+
+ - name: add a tag to the AMI
+ ec2_ami:
+ state: present
+ description: '{{ ec2_ami_description }}'
+ image_id: '{{ result.image_id }}'
+ name: '{{ ec2_ami_name }}_ami'
+ tags:
+ New: Tag
+ purge_tags: no
+ register: result
+
+ - name: assert a tag was added
+ assert:
+ that:
+ - "'Name' in result.tags and result.tags.Name == ec2_ami_name + '_ami'"
+ - "'New' in result.tags and result.tags.New == 'Tag'"
+
+ - name: use purge_tags to remove a tag from the AMI
+ ec2_ami:
+ state: present
+ description: '{{ ec2_ami_description }}'
+ image_id: '{{ result.image_id }}'
+ name: '{{ ec2_ami_name }}_ami'
+ tags:
+ New: Tag
+ register: result
+
+ - name: assert a tag was removed
+ assert:
+ that:
+ - "'Name' not in result.tags"
+ - "'New' in result.tags and result.tags.New == 'Tag'"
+
+ # ============================================================
+
+ - name: update AMI launch permissions (check mode)
+ ec2_ami:
+ state: present
+ image_id: '{{ result.image_id }}'
+ description: '{{ ec2_ami_description }}'
+ tags:
+ Name: '{{ ec2_ami_name }}_ami'
+ launch_permissions:
+ group_names: ['all']
+ check_mode: true
+ register: check_mode_result
+
+ - name: assert that check_mode result is changed
+ assert:
+ that:
+ - check_mode_result is changed
+
+ - name: update AMI launch permissions
+ ec2_ami:
+ state: present
+ image_id: '{{ result.image_id }}'
+ description: '{{ ec2_ami_description }}'
+ tags:
+ Name: '{{ ec2_ami_name }}_ami'
+ launch_permissions:
+ group_names: ['all']
+ register: result
+
+ - name: assert launch permissions were updated
+ assert:
+ that:
+ - "result.changed"
+
+ # ============================================================
+
+ - name: modify the AMI description (check mode)
+ ec2_ami:
+ state: present
+ image_id: '{{ result.image_id }}'
+ name: '{{ ec2_ami_name }}_ami'
+ description: '{{ ec2_ami_description }}CHANGED'
+ tags:
+ Name: '{{ ec2_ami_name }}_ami'
+ launch_permissions:
+ group_names: ['all']
+ check_mode: true
+ register: check_mode_result
+
+ - name: assert that check_mode result is changed
+ assert:
+ that:
+ - check_mode_result is changed
+
+ - name: modify the AMI description
+ ec2_ami:
+ state: present
+ image_id: '{{ result.image_id }}'
+ name: '{{ ec2_ami_name }}_ami'
+ description: '{{ ec2_ami_description }}CHANGED'
+ tags:
+ Name: '{{ ec2_ami_name }}_ami'
+ launch_permissions:
+ group_names: ['all']
+ register: result
+
+ - name: assert the description changed
+ assert:
+ that:
+ - "result.changed"
+
+ # ============================================================
+
+ - name: remove public launch permissions
+ ec2_ami:
+ state: present
+ image_id: '{{ result.image_id }}'
+ name: '{{ ec2_ami_name }}_ami'
+ tags:
+ Name: '{{ ec2_ami_name }}_ami'
+ launch_permissions:
+ group_names: []
+ register: result
+
+ - name: assert launch permissions were updated
+ assert:
+ that:
+ - "result.changed"
+
+ # ============================================================
+
+ - name: delete ami without deleting the snapshot (default is not to delete)
+ ec2_ami:
+ instance_id: '{{ ec2_instance_id }}'
+ state: absent
+ name: '{{ ec2_ami_name }}_ami'
+ image_id: '{{ ec2_ami_image_id }}'
+ tags:
+ Name: '{{ ec2_ami_name }}_ami'
+ wait: yes
+ ignore_errors: true
+ register: result
+
+ - name: assert that the image has been deleted
+ assert:
+ that:
+ - "result.changed"
+ - "'image_id' not in result"
+
+ - name: ensure the snapshot still exists
+ ec2_snapshot_info:
+ snapshot_ids:
+ - '{{ ec2_ami_snapshot }}'
+ register: snapshot_result
+
+ - name: assert the snapshot wasn't deleted
+ assert:
+ that:
+ - "snapshot_result.snapshots[0].snapshot_id == ec2_ami_snapshot"
+
+ - name: delete ami for a second time (check mode)
+ ec2_ami:
+ instance_id: '{{ ec2_instance_id }}'
+ state: absent
+ name: '{{ ec2_ami_name }}_ami'
+ image_id: '{{ ec2_ami_image_id }}'
+ tags:
+ Name: '{{ ec2_ami_name }}_ami'
+ wait: yes
+ check_mode: true
+ register: check_mode_result
+
+ - name: assert that check_mode result is not changed
+ assert:
+ that:
+ - check_mode_result is not changed
+
+ - name: delete ami for a second time
+ ec2_ami:
+ instance_id: '{{ ec2_instance_id }}'
+ state: absent
+ name: '{{ ec2_ami_name }}_ami'
+ image_id: '{{ ec2_ami_image_id }}'
+ tags:
+ Name: '{{ ec2_ami_name }}_ami'
+ wait: yes
+ register: result
+
+ - name: assert that image does not exist
+ assert:
+ that:
+ - not result.changed
+ - not result.failed
+
+ # ============================================================
+
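+ # boot_mode and tpm_support require a newer botocore, so the following tasks run
+ # under the virtualenv interpreter provided by the setup_botocore_pip role
+ # (see meta/main.yml, botocore 1.26.0).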
+ - name: create an image from the snapshot with boot_mode and tpm_support
+ ec2_ami:
+ name: '{{ ec2_ami_name }}_ami-boot-tpm'
+ description: '{{ ec2_ami_description }}'
+ state: present
+ boot_mode: uefi
+ tpm_support: v2.0
+ launch_permissions:
+ user_ids: []
+ tags:
+ Name: '{{ ec2_ami_name }}_ami-boot-tpm'
+ root_device_name: '{{ ec2_ami_root_disk }}'
+ device_mapping:
+ - device_name: '{{ ec2_ami_root_disk }}'
+ volume_type: gp2
+ size: 8
+ delete_on_termination: true
+ snapshot_id: '{{ setup_snapshot.snapshot_id }}'
+ register: result
+ ignore_errors: true
+ vars:
+ ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
+
+ - name: set image id fact for deletion later
+ set_fact:
+ ec2_ami_image_id_boot_tpm: "{{ result.image_id }}"
+ ec2_ami_snapshot_boot_tpm: "{{ result.block_device_mapping[ec2_ami_root_disk].snapshot_id }}"
+
+ - name: gather facts about the image created
+ ec2_ami_info:
+ image_ids: '{{ ec2_ami_image_id_boot_tpm }}'
+ register: ami_facts_result_boot_tpm
+ ignore_errors: true
+ vars:
+ ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
+
+ - name: assert that new ami has been created with desired options
+ assert:
+ that:
+ - "result.changed"
+ - "result.image_id.startswith('ami-')"
+ - ami_facts_result_boot_tpm.images[0].image_id | length != 0
+ - ami_facts_result_boot_tpm.images[0].boot_mode == 'uefi'
+ - ami_facts_result_boot_tpm.images[0].tpm_support == 'v2.0'
+
+ # ============================================================
+
+ always:
+
+ # ============================================================
+
+ # TEAR DOWN: snapshot, ec2 instance, ec2 key pair, security group, vpc
+ - name: Announce teardown start
+ debug:
+ msg: "***** TESTING COMPLETE. COMMENCE TEARDOWN *****"
+
+ - name: delete ami
+ ec2_ami:
+ state: absent
+ image_id: "{{ ec2_ami_image_id_boot_tpm }}"
+ wait: yes
+ ignore_errors: yes
+
+ - name: delete ami
+ ec2_ami:
+ state: absent
+ image_id: "{{ ec2_ami_image_id }}"
+ name: '{{ ec2_ami_name }}_ami'
+ wait: yes
+ ignore_errors: yes
+
+ - name: delete ami
+ ec2_ami:
+ state: absent
+ image_id: "{{ ec2_ami_no_device_true_image_id }}"
+ wait: yes
+ ignore_errors: yes
+
+ - name: delete ami
+ ec2_ami:
+ state: absent
+ image_id: "{{ ec2_ami_no_device_false_image_id }}"
+ wait: yes
+ ignore_errors: yes
+
+ - name: remove setup snapshot of ec2 instance
+ ec2_snapshot:
+ state: absent
+ snapshot_id: '{{ setup_snapshot.snapshot_id }}'
+ ignore_errors: yes
+
+ - name: remove setup ec2 instance
+ ec2_instance:
+ state: absent
+ instance_ids:
+ - '{{ ec2_instance_id }}'
+ wait: true
+ ignore_errors: yes
+
+ - name: remove setup keypair
+ ec2_key:
+ name: '{{ec2_ami_name}}_setup'
+ state: absent
+ ignore_errors: yes
+
+ - name: remove setup security group
+ ec2_group:
+ name: '{{ ec2_ami_name }}_setup'
+ description: 'created by Ansible integration tests'
+ state: absent
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ ignore_errors: yes
+
+ - name: remove setup subnet
+ ec2_vpc_subnet:
+ az: '{{ availability_zone }}'
+ tags: '{{ec2_ami_name}}_setup'
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ cidr: '{{ subnet_cidr }}'
+ state: absent
+ resource_tags:
+ Name: '{{ ec2_ami_name }}_setup'
+ ignore_errors: yes
+
+ - name: remove setup VPC
+ ec2_vpc_net:
+ cidr_block: '{{ vpc_cidr }}'
+ state: absent
+ name: '{{ ec2_ami_name }}_setup'
+ resource_tags:
+ Name: '{{ ec2_ami_name }}_setup'
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/vars/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/vars/main.yml
new file mode 100644
index 000000000..dac1fda2e
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/vars/main.yml
@@ -0,0 +1,20 @@
+---
+# vars file for test_ec2_ami
+
+# based on Amazon Linux AMI 2017.09.0 (HVM), SSD Volume Type
+ec2_region_images:
+ us-east-1: ami-8c1be5f6
+ us-east-2: ami-c5062ba0
+ us-west-1: ami-02eada62
+ us-west-2: ami-e689729e
+ ca-central-1: ami-fd55ec99
+ eu-west-1: ami-acd005d5
+ eu-central-1: ami-c7ee5ca8
+ eu-west-2: ami-1a7f6d7e
+ ap-southeast-1: ami-0797ea64
+ ap-southeast-2: ami-8536d6e7
+ ap-northeast-2: ami-9bec36f5
+ ap-northeast-1: ami-2a69be4c
+ ap-south-1: ami-4fc58420
+ sa-east-1: ami-f1344b9d
+ cn-north-1: ami-fba67596
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/aliases
new file mode 100644
index 000000000..78305e989
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/aliases
@@ -0,0 +1,5 @@
+# https://github.com/ansible-collections/community.aws/issues/159
+# unstable
+
+cloud/aws
+ec2_eip_info \ No newline at end of file
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/defaults/main.yml
new file mode 100644
index 000000000..115bcca12
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/defaults/main.yml
@@ -0,0 +1,5 @@
+# VPCs are identified by the CIDR. Don't hard code the CIDR. CI may
+# run multiple copies of the test concurrently.
+vpc_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.0.0/16
+subnet_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.42.0/24
+subnet_az: '{{ ec2_availability_zone_names[0] }}'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/meta/main.yml
new file mode 100644
index 000000000..1d40168d0
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+- setup_ec2_facts
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/tasks/main.yml
new file mode 100644
index 000000000..46f33a399
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/tasks/main.yml
@@ -0,0 +1,1442 @@
+- name: Integration testing for ec2_eip
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ amazon.aws.ec2_eip:
+ in_vpc: true
+
+ block:
+ - name: Get the current caller identity facts
+ aws_caller_info:
+ register: caller_info
+
+ - name: List available AZs
+ aws_az_info:
+ register: region_azs
+
+ - name: Create a VPC
+ ec2_vpc_net:
+ name: '{{ resource_prefix }}-vpc'
+ state: present
+ cidr_block: '{{ vpc_cidr }}'
+ tags:
+ AnsibleEIPTest: Pending
+ AnsibleEIPTestPrefix: '{{ resource_prefix }}'
+ register: vpc_result
+
+ - name: Look for signs of concurrent EIP tests. Pause if they are running or their
+ prefix comes before ours.
+ vars:
+ running_query: vpcs[?tags.AnsibleEIPTest=='Running']
+ pending_query: vpcs[?tags.AnsibleEIPTest=='Pending'].tags.AnsibleEIPTestPrefix
+ ec2_vpc_net_info:
+ filters:
+ tag:AnsibleEIPTest:
+ - Pending
+ - Running
+ register: vpc_info
+ retries: 10
+ delay: 5
+ until:
+ - ( vpc_info.vpcs | map(attribute='tags') | selectattr('AnsibleEIPTest', 'equalto',
+ 'Running') | length == 0 )
+ - ( vpc_info.vpcs | map(attribute='tags') | selectattr('AnsibleEIPTest', 'equalto',
+ 'Pending') | map(attribute='AnsibleEIPTestPrefix') | sort | first == resource_prefix
+ )
+
+ - name: Create subnet
+ ec2_vpc_subnet:
+ cidr: '{{ subnet_cidr }}'
+ az: '{{ subnet_az }}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ state: present
+ register: vpc_subnet_create
+
+ - name: Create internet gateway
+ amazon.aws.ec2_vpc_igw:
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ register: vpc_igw
+
+ - name: Create security group
+ ec2_group:
+ state: present
+ name: '{{ resource_prefix }}-sg'
+ description: a security group for ansible tests
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ rules:
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 0.0.0.0/0
+ register: security_group
+
+ - name: Create instance for attaching
+ ec2_instance:
+ name: '{{ resource_prefix }}-instance'
+ image_id: '{{ ec2_ami_id }}'
+ security_group: '{{ security_group.group_id }}'
+ vpc_subnet_id: '{{ vpc_subnet_create.subnet.id }}'
+ wait: yes
+ state: running
+ register: create_ec2_instance_result
+
+ - name: Create ENI A
+ ec2_eni:
+ subnet_id: '{{ vpc_subnet_create.subnet.id }}'
+ register: eni_create_a
+
+ - name: Create ENI B
+ ec2_eni:
+ subnet_id: '{{ vpc_subnet_create.subnet.id }}'
+ register: eni_create_b
+
+ - name: Make a crude lock
+ ec2_vpc_net:
+ name: '{{ resource_prefix }}-vpc'
+ state: present
+ cidr_block: '{{ vpc_cidr }}'
+ tags:
+ AnsibleEIPTest: Running
+ AnsibleEIPTestPrefix: '{{ resource_prefix }}'
+
+ - name: Get current state of EIPs
+ ec2_eip_info:
+ register: eip_info_start
+
+ - name: Require that there are no free IPs when we start, otherwise we can't test
+ things properly
+ assert:
+ that:
+ - '"addresses" in eip_info_start'
+ - ( eip_info_start.addresses | length ) == ( eip_info_start.addresses | select('match',
+ 'association_id') | length )
+
+ # ------------------------------------------------------------------------------------------
+
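+ # module_defaults sets in_vpc: true for amazon.aws.ec2_eip, so every address
+ # allocated below is a VPC-scoped EIP; the assertions check domain == "vpc".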
+ - name: Allocate a new EIP with no conditions - check_mode
+ ec2_eip:
+ state: present
+ tags:
+ AnsibleEIPTestPrefix: '{{ resource_prefix }}'
+ register: eip
+ check_mode: yes
+
+ - assert:
+ that:
+ - eip is changed
+
+ - name: Allocate a new EIP with no conditions
+ ec2_eip:
+ state: present
+ tags:
+ AnsibleEIPTestPrefix: '{{ resource_prefix }}'
+ register: eip
+
+ - ec2_eip_info:
+ register: eip_info
+ check_mode: yes
+
+ - assert:
+ that:
+ - eip is changed
+ - eip.public_ip is defined and ( eip.public_ip | ansible.utils.ipaddr )
+ - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-")
+ - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length
+ )
+
+ - name: Get EIP info via public ip
+ ec2_eip_info:
+ filters:
+ public-ip: '{{ eip.public_ip }}'
+ register: eip_info
+
+ - assert:
+ that:
+ - '"addresses" in eip_info'
+ - eip_info.addresses | length == 1
+ - eip_info.addresses[0].allocation_id == eip.allocation_id
+ - eip_info.addresses[0].domain == "vpc"
+ - eip_info.addresses[0].public_ip == eip.public_ip
+ - '"AnsibleEIPTestPrefix" in eip_info.addresses[0].tags'
+ - eip_info.addresses[0].tags['AnsibleEIPTestPrefix'] == resource_prefix
+
+ - name: Get EIP info via allocation id
+ ec2_eip_info:
+ filters:
+ allocation-id: '{{ eip.allocation_id }}'
+ register: eip_info
+
+ - assert:
+ that:
+ - '"addresses" in eip_info'
+ - eip_info.addresses | length == 1
+ - eip_info.addresses[0].allocation_id == eip.allocation_id
+ - eip_info.addresses[0].domain == "vpc"
+ - eip_info.addresses[0].public_ip == eip.public_ip
+ - '"AnsibleEIPTestPrefix" in eip_info.addresses[0].tags'
+ - eip_info.addresses[0].tags['AnsibleEIPTestPrefix'] == resource_prefix
+
+ - name: Allocate a new ip (idempotence) - check_mode
+ ec2_eip:
+ state: present
+ public_ip: '{{ eip.public_ip }}'
+ register: eip
+ check_mode: yes
+
+ - assert:
+ that:
+ - eip is not changed
+
+ - name: Allocate a new ip (idempotence)
+ ec2_eip:
+ state: present
+ public_ip: '{{ eip.public_ip }}'
+ register: eip
+
+ - ec2_eip_info:
+ register: eip_info
+
+ - assert:
+ that:
+ - eip is not changed
+ - eip.public_ip is defined and ( eip.public_ip | ansible.utils.ipaddr )
+ - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-")
+ - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length
+ )
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Release EIP - check_mode
+ ec2_eip:
+ state: absent
+ public_ip: '{{ eip.public_ip }}'
+ register: eip_release
+ check_mode: yes
+
+ - assert:
+ that:
+ - eip_release.changed
+
+ - name: Release eip
+ ec2_eip:
+ state: absent
+ public_ip: '{{ eip.public_ip }}'
+ register: eip_release
+
+ - ec2_eip_info:
+ register: eip_info
+
+ - assert:
+ that:
+ - eip_release.changed
+ - not eip_release.disassociated
+ - eip_release.released
+ - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length )
+
+ - name: Release EIP (idempotence) - check_mode
+ ec2_eip:
+ state: absent
+ public_ip: '{{ eip.public_ip }}'
+ register: eip_release
+ check_mode: yes
+
+ - assert:
+ that:
+ - eip_release is not changed
+
+ - name: Release EIP (idempotence)
+ ec2_eip:
+ state: absent
+ public_ip: '{{ eip.public_ip }}'
+ register: eip_release
+
+ - ec2_eip_info:
+ register: eip_info
+
+ - assert:
+ that:
+ - not eip_release.changed
+ - not eip_release.disassociated
+ - not eip_release.released
+ - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length )
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Allocate a new EIP - attempt reusing unallocated ones (none available) -
+ check_mode
+ ec2_eip:
+ state: present
+ reuse_existing_ip_allowed: true
+ register: eip
+ check_mode: yes
+
+ - assert:
+ that:
+ - eip is changed
+
+ - name: Allocate a new EIP - attempt reusing unallocated ones (none available)
+ ec2_eip:
+ state: present
+ reuse_existing_ip_allowed: true
+ register: eip
+
+ - ec2_eip_info:
+ register: eip_info
+
+ - assert:
+ that:
+ - eip is changed
+ - eip.public_ip is defined and ( eip.public_ip | ansible.utils.ipaddr )
+ - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-")
+ - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length
+ )
+
+ - name: Re-Allocate a new EIP - attempt reusing unallocated ones (one available)
+ - check_mode
+ ec2_eip:
+ state: present
+ reuse_existing_ip_allowed: true
+ register: reallocate_eip
+ check_mode: yes
+
+ - assert:
+ that:
+ - reallocate_eip is not changed
+
+ - name: Re-Allocate a new EIP - attempt reusing unallocated ones (one available)
+ ec2_eip:
+ state: present
+ reuse_existing_ip_allowed: true
+ register: reallocate_eip
+
+ - ec2_eip_info:
+ register: eip_info
+
+ - assert:
+ that:
+ - reallocate_eip is not changed
+ - reallocate_eip.public_ip is defined and ( reallocate_eip.public_ip | ansible.utils.ipaddr
+ )
+ - reallocate_eip.allocation_id is defined and reallocate_eip.allocation_id.startswith("eipalloc-")
+ - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length
+ )
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: attempt reusing an existing EIP with a tag (No match available) - check_mode
+ ec2_eip:
+ state: present
+ reuse_existing_ip_allowed: true
+ tag_name: Team
+ register: no_tagged_eip
+ check_mode: yes
+
+ - assert:
+ that:
+ - no_tagged_eip is changed
+
+ - name: attempt reusing an existing EIP with a tag (No match available)
+ ec2_eip:
+ state: present
+ reuse_existing_ip_allowed: true
+ tag_name: Team
+ register: no_tagged_eip
+
+ - ec2_eip_info:
+ register: eip_info
+
+ - assert:
+ that:
+ - no_tagged_eip is changed
+ - no_tagged_eip.public_ip is defined and ( no_tagged_eip.public_ip | ansible.utils.ipaddr
+ )
+ - no_tagged_eip.allocation_id is defined and no_tagged_eip.allocation_id.startswith("eipalloc-")
+ - ( eip_info_start.addresses | length ) + 2 == ( eip_info.addresses | length
+ )
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Tag EIP so we can try matching it
+ ec2_eip:
+ state: present
+ public_ip: '{{ eip.public_ip }}'
+ tags:
+ Team: Frontend
+
+ - name: Attempt reusing an existing EIP with a tag (Match available) - check_mode
+ ec2_eip:
+ state: present
+ reuse_existing_ip_allowed: true
+ tag_name: Team
+ register: reallocate_eip
+ check_mode: yes
+
+ - assert:
+ that:
+ - reallocate_eip is not changed
+
+ - name: Attempt reusing an existing EIP with a tag (Match available)
+ ec2_eip:
+ state: present
+ reuse_existing_ip_allowed: true
+ tag_name: Team
+ register: reallocate_eip
+
+ - ec2_eip_info:
+ register: eip_info
+
+ - assert:
+ that:
+ - reallocate_eip is not changed
+ - reallocate_eip.public_ip is defined and ( reallocate_eip.public_ip | ansible.utils.ipaddr
+ )
+ - reallocate_eip.allocation_id is defined and reallocate_eip.allocation_id.startswith("eipalloc-")
+ - ( eip_info_start.addresses | length ) + 2 == ( eip_info.addresses | length
+ )
+
+ - name: Attempt reusing an existing EIP with a tag and its value (no match available)
+ - check_mode
+ ec2_eip:
+ state: present
+ reuse_existing_ip_allowed: true
+ tag_name: Team
+ tag_value: Backend
+ register: backend_eip
+ check_mode: yes
+
+ - assert:
+ that:
+ - backend_eip is changed
+
+ - name: Attempt reusing an existing EIP with a tag and its value (no match available)
+ ec2_eip:
+ state: present
+ reuse_existing_ip_allowed: true
+ tag_name: Team
+ tag_value: Backend
+ register: backend_eip
+
+ - ec2_eip_info:
+ register: eip_info
+
+ - assert:
+ that:
+ - backend_eip is changed
+ - backend_eip.public_ip is defined and ( backend_eip.public_ip | ansible.utils.ipaddr
+ )
+ - backend_eip.allocation_id is defined and backend_eip.allocation_id.startswith("eipalloc-")
+ - ( eip_info_start.addresses | length ) + 3 == ( eip_info.addresses | length
+ )
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Tag EIP so we can try matching it
+ ec2_eip:
+ state: present
+ public_ip: '{{ eip.public_ip }}'
+ tags:
+ Team: Backend
+
+ - name: Attempt reusing an existing EIP with a tag and its value (match available)
+ - check_mode
+ ec2_eip:
+ state: present
+ reuse_existing_ip_allowed: true
+ tag_name: Team
+ tag_value: Backend
+ register: reallocate_eip
+ check_mode: yes
+
+ - assert:
+ that:
+ - reallocate_eip is not changed
+
+ - name: Attempt reusing an existing EIP with a tag and its value (match available)
+ ec2_eip:
+ state: present
+ reuse_existing_ip_allowed: true
+ tag_name: Team
+ tag_value: Backend
+ register: reallocate_eip
+
+ - ec2_eip_info:
+ register: eip_info
+
+ - assert:
+ that:
+ - reallocate_eip is not changed
+ - reallocate_eip.public_ip is defined and reallocate_eip.public_ip != ""
+ - reallocate_eip.allocation_id is defined and reallocate_eip.allocation_id !=
+ ""
+ - ( eip_info_start.addresses | length ) + 3 == ( eip_info.addresses | length
+ )
+
+ - name: Release backend_eip
+ ec2_eip:
+ state: absent
+ public_ip: '{{ backend_eip.public_ip }}'
+
+ - name: Release no_tagged_eip
+ ec2_eip:
+ state: absent
+ public_ip: '{{ no_tagged_eip.public_ip }}'
+
+ - name: Release eip
+ ec2_eip:
+ state: absent
+ public_ip: '{{ eip.public_ip }}'
+
+ - ec2_eip_info:
+ register: eip_info
+
+ - assert:
+ that:
+ - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length )
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Allocate a new EIP from a pool - check_mode
+ ec2_eip:
+ state: present
+ public_ipv4_pool: amazon
+ register: eip
+ check_mode: yes
+
+ - assert:
+ that:
+ - eip is changed
+
+ - name: Allocate a new EIP from a pool
+ ec2_eip:
+ state: present
+ public_ipv4_pool: amazon
+ register: eip
+
+ - ec2_eip_info:
+ register: eip_info
+
+ - assert:
+ that:
+ - eip is changed
+ - eip.public_ip is defined and ( eip.public_ip | ansible.utils.ipaddr )
+ - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-")
+ - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length
+ )
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Attach EIP to ENI A - check_mode
+ ec2_eip:
+ public_ip: '{{ eip.public_ip }}'
+ device_id: '{{ eni_create_a.interface.id }}'
+ register: associate_eip
+ check_mode: yes
+
+ - assert:
+ that:
+ - associate_eip is changed
+
+ - name: Attach EIP to ENI A
+ ec2_eip:
+ public_ip: '{{ eip.public_ip }}'
+ device_id: '{{ eni_create_a.interface.id }}'
+ register: associate_eip
+
+ - ec2_eip_info:
+ filters:
+ public-ip: '{{ eip.public_ip }}'
+ register: eip_info
+
+ - assert:
+ that:
+ - associate_eip is changed
+ - eip_info.addresses | length == 1
+ - associate_eip.public_ip is defined and eip.public_ip == associate_eip.public_ip
+ - associate_eip.allocation_id is defined and eip.allocation_id == associate_eip.allocation_id
+ - eip_info.addresses[0].allocation_id == eip.allocation_id
+ - eip_info.addresses[0].domain == "vpc"
+ - eip_info.addresses[0].public_ip == eip.public_ip
+ - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-")
+ - eip_info.addresses[0].network_interface_id == eni_create_a.interface.id
+ - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address
+ | ansible.utils.ipaddr )
+ - eip_info.addresses[0].network_interface_owner_id == caller_info.account
+
+ - name: Attach EIP to ENI A (idempotence) - check_mode
+ ec2_eip:
+ public_ip: '{{ eip.public_ip }}'
+ device_id: '{{ eni_create_a.interface.id }}'
+ register: associate_eip
+ check_mode: yes
+
+ - assert:
+ that:
+ - associate_eip is not changed
+
+ - name: Attach EIP to ENI A (idempotence)
+ ec2_eip:
+ public_ip: '{{ eip.public_ip }}'
+ device_id: '{{ eni_create_a.interface.id }}'
+ register: associate_eip
+
+ - ec2_eip_info:
+ filters:
+ public-ip: '{{ eip.public_ip }}'
+ register: eip_info
+
+ - assert:
+ that:
+ - associate_eip is not changed
+ - associate_eip.public_ip is defined and eip.public_ip == associate_eip.public_ip
+ - associate_eip.allocation_id is defined and eip.allocation_id == associate_eip.allocation_id
+ - eip_info.addresses | length == 1
+ - eip_info.addresses[0].allocation_id == eip.allocation_id
+ - eip_info.addresses[0].domain == "vpc"
+ - eip_info.addresses[0].public_ip == eip.public_ip
+ - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-")
+ - eip_info.addresses[0].network_interface_id == eni_create_a.interface.id
+ - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address
+ | ansible.utils.ipaddr )
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Attach EIP to ENI B (should fail, already associated)
+ ec2_eip:
+ public_ip: '{{ eip.public_ip }}'
+ device_id: '{{ eni_create_b.interface.id }}'
+ register: associate_eip
+ ignore_errors: true
+
+ - ec2_eip_info:
+ filters:
+ public-ip: '{{ eip.public_ip }}'
+ register: eip_info
+
+ - assert:
+ that:
+ - associate_eip is failed
+ - eip_info.addresses | length == 1
+ - eip_info.addresses[0].allocation_id == eip.allocation_id
+ - eip_info.addresses[0].domain == "vpc"
+ - eip_info.addresses[0].public_ip == eip.public_ip
+ - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-")
+ - eip_info.addresses[0].network_interface_id == eni_create_a.interface.id
+ - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address
+ | ansible.utils.ipaddr )
+
+ - name: Attach EIP to ENI B - check_mode
+ ec2_eip:
+ public_ip: '{{ eip.public_ip }}'
+ device_id: '{{ eni_create_b.interface.id }}'
+ allow_reassociation: true
+ register: associate_eip
+ check_mode: yes
+
+ - assert:
+ that:
+ - associate_eip is changed
+
+ - name: Attach EIP to ENI B
+ ec2_eip:
+ public_ip: '{{ eip.public_ip }}'
+ device_id: '{{ eni_create_b.interface.id }}'
+ allow_reassociation: true
+ register: associate_eip
+
+ - ec2_eip_info:
+ filters:
+ public-ip: '{{ eip.public_ip }}'
+ register: eip_info
+
+ - assert:
+ that:
+ - associate_eip is changed
+ - associate_eip.public_ip is defined and eip.public_ip == associate_eip.public_ip
+ - associate_eip.allocation_id is defined and eip.allocation_id == associate_eip.allocation_id
+ - eip_info.addresses | length == 1
+ - eip_info.addresses[0].allocation_id == eip.allocation_id
+ - eip_info.addresses[0].domain == "vpc"
+ - eip_info.addresses[0].public_ip == eip.public_ip
+ - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-")
+ - eip_info.addresses[0].network_interface_id == eni_create_b.interface.id
+ - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address
+ | ansible.utils.ipaddr )
+
+ - name: Attach EIP to ENI B (idempotence) - check_mode
+ ec2_eip:
+ public_ip: '{{ eip.public_ip }}'
+ device_id: '{{ eni_create_b.interface.id }}'
+ allow_reassociation: true
+ register: associate_eip
+ check_mode: yes
+
+ - assert:
+ that:
+ - associate_eip is not changed
+
+ - name: Attach EIP to ENI B (idempotence)
+ ec2_eip:
+ public_ip: '{{ eip.public_ip }}'
+ device_id: '{{ eni_create_b.interface.id }}'
+ allow_reassociation: true
+ register: associate_eip
+
+ - ec2_eip_info:
+ filters:
+ public-ip: '{{ eip.public_ip }}'
+ register: eip_info
+
+ - assert:
+ that:
+ - associate_eip is not changed
+ - associate_eip.public_ip is defined and eip.public_ip == associate_eip.public_ip
+ - associate_eip.allocation_id is defined and eip.allocation_id == associate_eip.allocation_id
+ - eip_info.addresses | length == 1
+ - eip_info.addresses[0].allocation_id == eip.allocation_id
+ - eip_info.addresses[0].domain == "vpc"
+ - eip_info.addresses[0].public_ip == eip.public_ip
+ - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-")
+ - eip_info.addresses[0].network_interface_id == eni_create_b.interface.id
+ - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address
+ | ansible.utils.ipaddr )
+
+ # ------------------------------------------------------------------------------------------
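+ # Detaching without release_on_disassociation should disassociate the address but
+ # keep it allocated, so it still shows up in ec2_eip_info.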
+
+ - name: Detach EIP from ENI B, without enabling release on disassociation - check_mode
+ ec2_eip:
+ state: absent
+ public_ip: '{{ eip.public_ip }}'
+ device_id: '{{ eni_create_b.interface.id }}'
+ register: disassociate_eip
+ check_mode: yes
+
+ - assert:
+ that:
+ - disassociate_eip is changed
+
+ - name: Detach EIP from ENI B, without enabling release on disassociation
+ ec2_eip:
+ state: absent
+ public_ip: '{{ eip.public_ip }}'
+ device_id: '{{ eni_create_b.interface.id }}'
+ register: disassociate_eip
+
+ - ec2_eip_info:
+ filters:
+ public-ip: '{{ eip.public_ip }}'
+ register: eip_info
+
+ - assert:
+ that:
+ - disassociate_eip.changed
+ - disassociate_eip.disassociated
+ - not disassociate_eip.released
+ - eip_info.addresses | length == 1
+
+ - name: Detach EIP from ENI B, without enabling release on disassociation (idempotence) - check_mode
+ ec2_eip:
+ state: absent
+ public_ip: '{{ eip.public_ip }}'
+ device_id: '{{ eni_create_b.interface.id }}'
+ register: disassociate_eip
+ check_mode: yes
+
+ - assert:
+ that:
+ - disassociate_eip is not changed
+
+ - name: Detach EIP from ENI B, without enabling release on disassociation (idempotence)
+ ec2_eip:
+ state: absent
+ public_ip: '{{ eip.public_ip }}'
+ device_id: '{{ eni_create_b.interface.id }}'
+ register: disassociate_eip
+
+ - ec2_eip_info:
+ filters:
+ public-ip: '{{ eip.public_ip }}'
+ register: eip_info
+
+ - assert:
+ that:
+ - not disassociate_eip.changed
+ - not disassociate_eip.disassociated
+ - not disassociate_eip.released
+ - eip_info.addresses | length == 1
+
+ # ------------------------------------------------------------------------------------------
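+ # Re-attach the address to ENI A, then detach with release_on_disassociation, which
+ # should both disassociate and release it so ec2_eip_info no longer returns it.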
+
+ - name: Attach EIP to ENI A
+ ec2_eip:
+ public_ip: '{{ eip.public_ip }}'
+ device_id: '{{ eni_create_a.interface.id }}'
+ register: associate_eip
+
+ - name: Detach EIP from ENI A, enabling release on disassociation - check_mode
+ ec2_eip:
+ state: absent
+ public_ip: '{{ eip.public_ip }}'
+ device_id: '{{ eni_create_a.interface.id }}'
+ release_on_disassociation: true
+ register: disassociate_eip
+ check_mode: yes
+
+ - assert:
+ that:
+ - disassociate_eip is changed
+
+ - name: Detach EIP from ENI A, enabling release on disassociation
+ ec2_eip:
+ state: absent
+ public_ip: '{{ eip.public_ip }}'
+ device_id: '{{ eni_create_a.interface.id }}'
+ release_on_disassociation: true
+ register: disassociate_eip
+
+ - ec2_eip_info:
+ filters:
+ public-ip: '{{ eip.public_ip }}'
+ register: eip_info
+
+ - assert:
+ that:
+ - disassociate_eip.changed
+ - disassociate_eip.disassociated
+ - disassociate_eip.released
+ - eip_info.addresses | length == 0
+
+ - name: Detach EIP from ENI A, enabling release on disassociation (idempotence) - check_mode
+ ec2_eip:
+ state: absent
+ public_ip: '{{ eip.public_ip }}'
+ device_id: '{{ eni_create_a.interface.id }}'
+ release_on_disassociation: true
+ register: disassociate_eip
+ check_mode: yes
+
+ - assert:
+ that:
+ - disassociate_eip is not changed
+
+ - name: Detach EIP from ENI A, enabling release on disassociation (idempotence)
+ ec2_eip:
+ state: absent
+ public_ip: '{{ eip.public_ip }}'
+ device_id: '{{ eni_create_a.interface.id }}'
+ release_on_disassociation: true
+ register: disassociate_eip
+
+ - ec2_eip_info:
+ filters:
+ public-ip: '{{ eip.public_ip }}'
+ register: eip_info
+
+ - assert:
+ that:
+ - not disassociate_eip.changed
+ - not disassociate_eip.disassociated
+ - not disassociate_eip.released
+ - eip_info.addresses | length == 0
+
+ # ------------------------------------------------------------------------------------------
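+ # device_id also accepts an EC2 instance ID; the resulting association is verified
+ # below via the instance_id reported by ec2_eip_info.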
+
+ - name: Attach EIP to an EC2 instance - check_mode
+ ec2_eip:
+ device_id: '{{ create_ec2_instance_result.instance_ids[0] }}'
+ state: present
+ release_on_disassociation: yes
+ register: instance_eip
+ check_mode: yes
+
+ - assert:
+ that:
+ - instance_eip is changed
+
+ - name: Attach EIP to an EC2 instance
+ ec2_eip:
+ device_id: '{{ create_ec2_instance_result.instance_ids[0] }}'
+ state: present
+ release_on_disassociation: yes
+ register: instance_eip
+
+ - ec2_eip_info:
+ filters:
+ public-ip: '{{ instance_eip.public_ip }}'
+ register: eip_info
+
+ - assert:
+ that:
+ - instance_eip is changed
+ - eip_info.addresses[0].allocation_id is defined
+ - eip_info.addresses[0].instance_id == create_ec2_instance_result.instance_ids[0]
+
+ - name: Attach EIP to an EC2 instance (idempotence) - check_mode
+ ec2_eip:
+ device_id: '{{ create_ec2_instance_result.instance_ids[0] }}'
+ state: present
+ release_on_disassociation: yes
+ register: instance_eip
+ check_mode: yes
+
+ - assert:
+ that:
+ - instance_eip is not changed
+
+ - name: Attach EIP to an EC2 instance (idempotence)
+ ec2_eip:
+ device_id: '{{ create_ec2_instance_result.instance_ids[0] }}'
+ state: present
+ release_on_disassociation: yes
+ register: instance_eip
+
+ - ec2_eip_info:
+ filters:
+ public-ip: '{{ instance_eip.public_ip }}'
+ register: eip_info
+
+ - assert:
+ that:
+ - instance_eip is not changed
+ - eip_info.addresses[0].allocation_id is defined
+ - eip_info.addresses[0].instance_id == create_ec2_instance_result.instance_ids[0]
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Detach EIP from EC2 instance, without enabling release on disassociation - check_mode
+ ec2_eip:
+ state: absent
+ device_id: '{{ create_ec2_instance_result.instance_ids[0] }}'
+ register: detach_eip
+ check_mode: yes
+
+ - assert:
+ that:
+ - detach_eip is changed
+
+ - name: Detach EIP from EC2 instance, without enabling release on disassociation
+ ec2_eip:
+ state: absent
+ device_id: '{{ create_ec2_instance_result.instance_ids[0] }}'
+ register: detach_eip
+
+ - ec2_eip_info:
+ filters:
+ public-ip: '{{ instance_eip.public_ip }}'
+ register: eip_info
+
+ - assert:
+ that:
+ - detach_eip.changed
+ - detach_eip.disassociated
+ - not detach_eip.released
+ - eip_info.addresses | length == 1
+
+ - name: Detach EIP from EC2 instance, without enabling release on disassociation (idempotence) - check_mode
+ ec2_eip:
+ state: absent
+ device_id: '{{ create_ec2_instance_result.instance_ids[0] }}'
+ register: detach_eip
+ check_mode: yes
+
+ - assert:
+ that:
+ - detach_eip is not changed
+
+ - name: Detach EIP from EC2 instance, without enabling release on disassociation (idempotence)
+ ec2_eip:
+ state: absent
+ device_id: '{{ create_ec2_instance_result.instance_ids[0] }}'
+ register: detach_eip
+
+ - ec2_eip_info:
+ filters:
+ public-ip: '{{ instance_eip.public_ip }}'
+ register: eip_info
+
+ - assert:
+ that:
+ - not detach_eip.changed
+ - not detach_eip.disassociated
+ - not detach_eip.released
+ - eip_info.addresses | length == 1
+
+ - name: Release EIP
+ ec2_eip:
+ state: absent
+ public_ip: '{{ instance_eip.public_ip }}'
+
+ # ------------------------------------------------------------------------------------------
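+ # private_ip_address pins the association to a specific private IP on the instance
+ # (here the instance's primary private address, as reported at creation time).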
+
+ - name: Attach EIP to an EC2 instance with private IP specified - check_mode
+ ec2_eip:
+ device_id: '{{ create_ec2_instance_result.instance_ids[0] }}'
+ private_ip_address: '{{ create_ec2_instance_result.instances[0].private_ip_address }}'
+ state: present
+ release_on_disassociation: yes
+ register: instance_eip
+ check_mode: yes
+
+ - assert:
+ that:
+ - instance_eip is changed
+
+ - name: Attach EIP to an EC2 instance with private IP specified
+ ec2_eip:
+ device_id: '{{ create_ec2_instance_result.instance_ids[0] }}'
+ private_ip_address: '{{ create_ec2_instance_result.instances[0].private_ip_address }}'
+ state: present
+ release_on_disassociation: yes
+ register: instance_eip
+
+ - ec2_eip_info:
+ filters:
+ public-ip: '{{ instance_eip.public_ip }}'
+ register: eip_info
+
+ - assert:
+ that:
+ - instance_eip is changed
+ - eip_info.addresses[0].allocation_id is defined
+ - eip_info.addresses[0].instance_id == create_ec2_instance_result.instance_ids[0]
+
+ - name: Attach EIP to an EC2 instance with private IP specified (idempotence) - check_mode
+ ec2_eip:
+ device_id: '{{ create_ec2_instance_result.instance_ids[0] }}'
+ private_ip_address: '{{ create_ec2_instance_result.instances[0].private_ip_address }}'
+ state: present
+ release_on_disassociation: yes
+ register: instance_eip
+ check_mode: yes
+
+ - assert:
+ that:
+ - instance_eip is not changed
+
+ - name: Attach EIP to an EC2 instance with private IP specified (idempotence)
+ ec2_eip:
+ device_id: '{{ create_ec2_instance_result.instance_ids[0] }}'
+ private_ip_address: '{{ create_ec2_instance_result.instances[0].private_ip_address }}'
+ state: present
+ release_on_disassociation: yes
+ register: instance_eip
+
+ - ec2_eip_info:
+ filters:
+ public-ip: '{{ instance_eip.public_ip }}'
+ register: eip_info
+
+ - assert:
+ that:
+ - instance_eip is not changed
+ - eip_info.addresses[0].allocation_id is defined
+ - eip_info.addresses[0].instance_id == create_ec2_instance_result.instance_ids[0]
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Detach EIP from EC2 instance, enabling release on disassociation - check_mode
+ ec2_eip:
+ state: absent
+ device_id: '{{ create_ec2_instance_result.instance_ids[0] }}'
+ release_on_disassociation: yes
+ register: disassociate_eip
+ check_mode: yes
+
+ - assert:
+ that:
+ - disassociate_eip is changed
+
+ - name: Detach EIP from EC2 instance, enabling release on disassociation
+ ec2_eip:
+ state: absent
+ device_id: '{{ create_ec2_instance_result.instance_ids[0] }}'
+ release_on_disassociation: yes
+ register: disassociate_eip
+
+ - ec2_eip_info:
+ filters:
+ public-ip: '{{ instance_eip.public_ip }}'
+ register: eip_info
+
+ - assert:
+ that:
+ - disassociate_eip.changed
+ - disassociate_eip.disassociated
+ - disassociate_eip.released
+ - eip_info.addresses | length == 0
+
+ - name: Detach EIP from EC2 instance, enabling release on disassociation (idempotence) - check_mode
+ ec2_eip:
+ state: absent
+ device_id: '{{ create_ec2_instance_result.instance_ids[0] }}'
+ release_on_disassociation: yes
+ register: disassociate_eip
+ check_mode: yes
+
+ - assert:
+ that:
+ - disassociate_eip is not changed
+
+ - name: Detach EIP from EC2 instance, enabling release on disassociation (idempotence)
+ ec2_eip:
+ state: absent
+ device_id: '{{ create_ec2_instance_result.instance_ids[0] }}'
+ release_on_disassociation: yes
+ register: disassociate_eip
+
+ - ec2_eip_info:
+ filters:
+ public-ip: '{{ instance_eip.public_ip }}'
+ register: eip_info
+
+ - assert:
+ that:
+ - not disassociate_eip.changed
+ - not disassociate_eip.disassociated
+ - not disassociate_eip.released
+ - eip_info.addresses | length == 0
+
+ # ------------------------------------------------------------------------------------------
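+ # The following tasks allocate a fresh EIP and exercise tagging: tags set via
+ # ec2_eip are verified through ec2_eip_info.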
+
+ - name: Allocate a new EIP
+ ec2_eip:
+ state: present
+ register: eip
+
+ - name: Tag EIP - check_mode
+ ec2_eip:
+ state: present
+ public_ip: '{{ eip.public_ip }}'
+ tags:
+ AnsibleEIPTestPrefix: '{{ resource_prefix }}'
+ another_tag: another Value {{ resource_prefix }}
+ register: tag_eip
+ check_mode: yes
+
+ - assert:
+ that:
+ - tag_eip is changed
+
+ - name: Tag EIP
+ ec2_eip:
+ state: present
+ public_ip: '{{ eip.public_ip }}'
+ tags:
+ AnsibleEIPTestPrefix: '{{ resource_prefix }}'
+ another_tag: another Value {{ resource_prefix }}
+ register: tag_eip
+
+ - ec2_eip_info:
+ filters:
+ public-ip: '{{ eip.public_ip }}'
+ register: eip_info
+
+ - assert:
+ that:
+ - tag_eip is changed
+ - '"AnsibleEIPTestPrefix" in eip_info.addresses[0].tags'
+ - '"another_tag" in eip_info.addresses[0].tags'
+ - eip_info.addresses[0].tags['AnsibleEIPTestPrefix'] == resource_prefix
+ - eip_info.addresses[0].tags['another_tag'] == 'another Value ' + resource_prefix
+ - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length )
+
+ - name: Tag EIP (idempotence) - check_mode
+ ec2_eip:
+ state: present
+ public_ip: '{{ eip.public_ip }}'
+ tags:
+ AnsibleEIPTestPrefix: '{{ resource_prefix }}'
+ another_tag: another Value {{ resource_prefix }}
+ register: tag_eip
+ check_mode: yes
+
+ - assert:
+ that:
+ - tag_eip is not changed
+
+ - name: Tag EIP (idempotence)
+ ec2_eip:
+ state: present
+ public_ip: '{{ eip.public_ip }}'
+ tags:
+ AnsibleEIPTestPrefix: '{{ resource_prefix }}'
+ another_tag: another Value {{ resource_prefix }}
+ register: tag_eip
+
+ - ec2_eip_info:
+ filters:
+ public-ip: '{{ eip.public_ip }}'
+ register: eip_info
+
+ - assert:
+ that:
+ - tag_eip is not changed
+ - '"AnsibleEIPTestPrefix" in eip_info.addresses[0].tags'
+ - '"another_tag" in eip_info.addresses[0].tags'
+ - eip_info.addresses[0].tags['AnsibleEIPTestPrefix'] == resource_prefix
+ - eip_info.addresses[0].tags['another_tag'] == 'another Value ' + resource_prefix
+ - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length )
+
+ # ------------------------------------------------------------------------------------------
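+ # With purge_tags: false the existing tags are kept and only the new tag is added.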
+
+ - name: Add another Tag - check_mode
+ ec2_eip:
+ state: present
+ public_ip: '{{ eip.public_ip }}'
+ tags:
+ third tag: Third tag - {{ resource_prefix }}
+ purge_tags: false
+ register: tag_eip
+ check_mode: yes
+
+ - assert:
+ that:
+ - tag_eip is changed
+
+ - name: Add another Tag
+ ec2_eip:
+ state: present
+ public_ip: '{{ eip.public_ip }}'
+ tags:
+ third tag: Third tag - {{ resource_prefix }}
+ purge_tags: false
+ register: tag_eip
+
+ - ec2_eip_info:
+ filters:
+ public-ip: '{{ eip.public_ip }}'
+ register: eip_info
+
+ - assert:
+ that:
+ - tag_eip is changed
+ - '"AnsibleEIPTestPrefix" in eip_info.addresses[0].tags'
+ - '"another_tag" in eip_info.addresses[0].tags'
+ - '"third tag" in eip_info.addresses[0].tags'
+ - eip_info.addresses[0].tags['AnsibleEIPTestPrefix'] == resource_prefix
+ - eip_info.addresses[0].tags['another_tag'] == 'another Value ' + resource_prefix
+ - eip_info.addresses[0].tags['third tag'] == 'Third tag - ' + resource_prefix
+ - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length )
+
+ - name: Add another Tag (idempotence) - check_mode
+ ec2_eip:
+ state: present
+ public_ip: '{{ eip.public_ip }}'
+ tags:
+ third tag: Third tag - {{ resource_prefix }}
+ purge_tags: false
+ register: tag_eip
+ check_mode: yes
+
+ - assert:
+ that:
+ - tag_eip is not changed
+
+ - name: Add another Tag (idempotence)
+ ec2_eip:
+ state: present
+ public_ip: '{{ eip.public_ip }}'
+ tags:
+ third tag: Third tag - {{ resource_prefix }}
+ purge_tags: false
+ register: tag_eip
+
+ - ec2_eip_info:
+ filters:
+ public-ip: '{{ eip.public_ip }}'
+ register: eip_info
+
+ - assert:
+ that:
+ - tag_eip is not changed
+ - '"AnsibleEIPTestPrefix" in eip_info.addresses[0].tags'
+ - '"another_tag" in eip_info.addresses[0].tags'
+ - '"third tag" in eip_info.addresses[0].tags'
+ - eip_info.addresses[0].tags['AnsibleEIPTestPrefix'] == resource_prefix
+ - eip_info.addresses[0].tags['another_tag'] == 'another Value ' + resource_prefix
+ - eip_info.addresses[0].tags['third tag'] == 'Third tag - ' + resource_prefix
+
+ # ------------------------------------------------------------------------------------------
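+ # With purge_tags: true any tag not listed in the task is removed.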
+
+ - name: Purge tags - check_mode
+ ec2_eip:
+ state: present
+ public_ip: '{{ eip.public_ip }}'
+ tags:
+ third tag: Third tag - {{ resource_prefix }}
+ purge_tags: true
+ register: tag_eip
+ check_mode: yes
+
+ - assert:
+ that:
+ - tag_eip is changed
+
+ - name: Purge tags
+ ec2_eip:
+ state: present
+ public_ip: '{{ eip.public_ip }}'
+ tags:
+ third tag: Third tag - {{ resource_prefix }}
+ purge_tags: true
+ register: tag_eip
+
+ - ec2_eip_info:
+ filters:
+ public-ip: '{{ eip.public_ip }}'
+ register: eip_info
+
+ - assert:
+ that:
+ - tag_eip is changed
+ - '"AnsibleEIPTestPrefix" not in eip_info.addresses[0].tags'
+ - '"another_tag" not in eip_info.addresses[0].tags'
+ - '"third tag" in eip_info.addresses[0].tags'
+ - eip_info.addresses[0].tags['third tag'] == 'Third tag - ' + resource_prefix
+
+ - name: Purge tags (idempotence) - check_mode
+ ec2_eip:
+ state: present
+ public_ip: '{{ eip.public_ip }}'
+ tags:
+ third tag: Third tag - {{ resource_prefix }}
+ purge_tags: true
+ register: tag_eip
+ check_mode: yes
+
+ - assert:
+ that:
+ - tag_eip is not changed
+
+ - name: Purge tags (idempotence)
+ ec2_eip:
+ state: present
+ public_ip: '{{ eip.public_ip }}'
+ tags:
+ third tag: Third tag - {{ resource_prefix }}
+ purge_tags: true
+ register: tag_eip
+
+ - ec2_eip_info:
+ filters:
+ public-ip: '{{ eip.public_ip }}'
+ register: eip_info
+
+ - assert:
+ that:
+ - tag_eip is not changed
+ - '"AnsibleEIPTestPrefix" not in eip_info.addresses[0].tags'
+ - '"another_tag" not in eip_info.addresses[0].tags'
+ - '"third tag" in eip_info.addresses[0].tags'
+ - eip_info.addresses[0].tags['third tag'] == 'Third tag - ' + resource_prefix
+
+ # ----- Cleanup ------------------------------------------------------------------------------
+
+ always:
+
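+ # Every cleanup task sets ignore_errors so a failure earlier in the play does not
+ # prevent the remaining teardown steps from running.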
+ - name: Cleanup instance (by id)
+ ec2_instance:
+ instance_ids: '{{ create_ec2_instance_result.instance_ids }}'
+ state: absent
+ wait: true
+ ignore_errors: true
+
+ - name: Cleanup instance (by name)
+ ec2_instance:
+ name: '{{ resource_prefix }}-instance'
+ state: absent
+ wait: true
+ ignore_errors: true
+
+ - name: Cleanup ENI A
+ ec2_eni:
+ state: absent
+ eni_id: '{{ eni_create_a.interface.id }}'
+ ignore_errors: true
+
+ - name: Cleanup ENI B
+ ec2_eni:
+ state: absent
+ eni_id: '{{ eni_create_b.interface.id }}'
+ ignore_errors: true
+
+ - name: Cleanup instance eip
+ ec2_eip:
+ state: absent
+ public_ip: '{{ instance_eip.public_ip }}'
+ register: eip_cleanup
+ retries: 5
+ delay: 5
+ until: eip_cleanup is successful
+ ignore_errors: true
+
+ - name: Cleanup IGW
+ ec2_vpc_igw:
+ state: absent
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ register: vpc_igw
+ ignore_errors: true
+
+ - name: Cleanup security group
+ ec2_group:
+ state: absent
+ name: '{{ resource_prefix }}-sg'
+ ignore_errors: true
+
+ - name: Cleanup Subnet
+ ec2_vpc_subnet:
+ state: absent
+ cidr: '{{ subnet_cidr }}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ ignore_errors: true
+
+ - name: Cleanup eip
+ ec2_eip:
+ state: absent
+ public_ip: '{{ eip.public_ip }}'
+ ignore_errors: true
+
+ - name: Cleanup reallocate_eip
+ ec2_eip:
+ state: absent
+ public_ip: '{{ reallocate_eip.public_ip }}'
+ ignore_errors: true
+
+ - name: Cleanup backend_eip
+ ec2_eip:
+ state: absent
+ public_ip: '{{ backend_eip.public_ip }}'
+ ignore_errors: true
+
+ - name: Cleanup no_tagged_eip
+ ec2_eip:
+ state: absent
+ public_ip: '{{ no_tagged_eip.public_ip }}'
+ ignore_errors: true
+
+ - name: Cleanup VPC
+ ec2_vpc_net:
+ state: absent
+ name: '{{ resource_prefix }}-vpc'
+ cidr_block: '{{ vpc_cidr }}'
+ ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/aliases
new file mode 100644
index 000000000..9adce4567
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+
+ec2_eni_info
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/defaults/main.yml
new file mode 100644
index 000000000..364c435cf
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/defaults/main.yml
@@ -0,0 +1,16 @@
+---
+availability_zone: '{{ ec2_availability_zone_names[0] }}'
+
+vpc_seed_a: '{{ resource_prefix }}'
+vpc_seed_b: '{{ resource_prefix }}-ec2_eni'
+vpc_prefix: '10.{{ 256 | random(seed=vpc_seed_a) }}.{{ 256 | random(seed=vpc_seed_b ) }}'
+vpc_cidr: '{{ vpc_prefix }}.128/26'
+ip_1: "{{ vpc_prefix }}.132"
+ip_2: "{{ vpc_prefix }}.133"
+ip_3: "{{ vpc_prefix }}.134"
+ip_4: "{{ vpc_prefix }}.135"
+ip_5: "{{ vpc_prefix }}.136"
+
+ec2_ips:
+- "{{ vpc_prefix }}.137"
+- "{{ vpc_prefix }}.138"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/meta/main.yml
new file mode 100644
index 000000000..2bff8543a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+- role: setup_ec2_facts
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/main.yaml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/main.yaml
new file mode 100644
index 000000000..b55f6563b
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/main.yaml
@@ -0,0 +1,159 @@
+---
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+
+ collections:
+ - amazon.aws
+ - ansible.utils
+ - community.aws
+
+ block:
+
+ # ============================================================
+ - name: create a VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ tags:
+ Name: "{{ resource_prefix }}-vpc"
+ Description: "Created by ansible-test"
+ register: vpc_result
+
+ - name: create a subnet
+ ec2_vpc_subnet:
+ cidr: "{{ vpc_cidr }}"
+ az: "{{ availability_zone }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags:
+ Name: "{{ resource_prefix }}-vpc"
+ Description: "Created by ansible-test"
+ state: present
+ register: vpc_subnet_result
+
+ - name: create a security group
+ ec2_group:
+ name: "{{ resource_prefix }}-sg"
+ description: "Created by {{ resource_prefix }}"
+ rules: []
+ state: present
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ register: vpc_sg_result
+
+ - name: Set facts to simplify use of extra resources
+ set_fact:
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ vpc_subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ vpc_sg_id: "{{ vpc_sg_result.group_id }}"
+
+ # ============================================================
+
+ - name: Create 2 instances to test attaching and detaching network interfaces
+ ec2_instance:
+ name: "{{ resource_prefix }}-eni-instance-{{ item }}"
+ image_id: "{{ ec2_ami_id }}"
+ vpc_subnet_id: "{{ vpc_subnet_id }}"
+ instance_type: t2.micro
+ wait: false
+ security_group: "{{ vpc_sg_id }}"
+ network:
+ private_ip_address: '{{ ec2_ips[item] }}'
+ register: ec2_instances
+ loop:
+ - 0
+ - 1
+
+ # We only need these instances to be running
+ - name: set variables for the instance IDs
+ set_fact:
+ instance_id_1: "{{ ec2_instances.results[0].instance_ids[0] }}"
+ instance_id_2: "{{ ec2_instances.results[1].instance_ids[0] }}"
+
+ # ============================================================
+ - name: test basic ENI creation and ec2_eni_info
+ include_tasks: ./test_eni_basic_creation.yaml
+
+ - name: test assigning secondary private IP addresses
+ include_tasks: ./test_ipaddress_assign.yaml
+
+ - name: test attaching and detaching network interfaces
+ include_tasks: ./test_attachment.yaml
+
+ - name: test attaching and detaching multiple network interfaces
+ include_tasks: ./test_create_attached_multiple.yml
+
+ - name: test modifying source_dest_check
+ include_tasks: ./test_modifying_source_dest_check.yaml
+
+ - name: test modifying tags
+ include_tasks: ./test_modifying_tags.yaml
+
+ # Note: will delete *both* EC2 instances
+ - name: test modifying delete_on_termination
+ include_tasks: ./test_modifying_delete_on_termination.yaml
+
+ - name: test deleting ENIs
+ include_tasks: ./test_deletion.yaml
+
+ always:
+ # ============================================================
+ # Some test problems are caused by "eventual consistency"
+ # describe the ENIs in the account so we can see what's happening
+ - name: Describe ENIs in account
+ ec2_eni_info: {}
+
+ # ============================================================
+ - name: remove the network interfaces
+ ec2_eni:
+ eni_id: "{{ item }}"
+ force_detach: True
+ state: absent
+ ignore_errors: true
+ retries: 5
+ loop:
+ - "{{ eni_id_1 | default(omit) }}"
+ - "{{ eni_id_2 | default(omit) }}"
+ - "{{ eni_id_3 | default(omit) }}"
+
+ - name: terminate the instances
+ ec2_instance:
+ state: absent
+ instance_ids:
+ - "{{ instance_id_1 }}"
+ - "{{ instance_id_2 }}"
+ wait: True
+ ignore_errors: true
+ retries: 5
+ when: instance_id_1 is defined and instance_id_2 is defined
+
+ - name: remove the security group
+ ec2_group:
+ name: "{{ resource_prefix }}-sg"
+ description: "{{ resource_prefix }}"
+ rules: []
+ state: absent
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ ignore_errors: true
+ retries: 5
+
+ - name: remove the subnet
+ ec2_vpc_subnet:
+ cidr: "{{ vpc_cidr }}"
+ az: "{{ availability_zone }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ state: absent
+ ignore_errors: true
+ retries: 5
+ when: vpc_subnet_result is defined
+
+ - name: remove the VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ cidr_block: "{{ vpc_cidr }}"
+ state: absent
+ ignore_errors: true
+ retries: 5
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_attachment.yaml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_attachment.yaml
new file mode 100644
index 000000000..3ce0e9353
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_attachment.yaml
@@ -0,0 +1,278 @@
+ # ============================================================
+# If we don't stop the instances they can get stuck "detaching"
+- name: Ensure test instances are stopped
+ ec2_instance:
+ state: stopped
+ instance_ids:
+ - "{{ instance_id_1 }}"
+ - "{{ instance_id_2 }}"
+ wait: True
+
+- name: attach the network interface to instance 1 (check mode)
+ ec2_eni:
+ instance_id: "{{ instance_id_1 }}"
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ state: present
+ attached: True
+ check_mode: true
+ register: result_check_mode
+
+- assert:
+ that:
+ - result_check_mode.changed
+
+- name: attach the network interface to instance 1
+ ec2_eni:
+ instance_id: "{{ instance_id_1 }}"
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ state: present
+ attached: True
+ register: result
+- ec2_eni_info:
+ eni_id: '{{ eni_id_1 }}'
+ register: eni_info
+
+- assert:
+ that:
+ - result.changed
+ - result.interface.attachment is defined
+ - result.interface.attachment is mapping
+ - result.interface.attachment.instance_id == instance_id_1
+ - _interface_0.attachment is defined
+ - _interface_0.attachment is mapping
+ - '"attach_time" in _interface_0.attachment'
+ - _interface_0.attachment.attach_time is string
+ - '"attachment_id" in _interface_0.attachment'
+ - _interface_0.attachment.attachment_id.startswith("eni-attach-")
+ - '"delete_on_termination" in _interface_0.attachment'
+ - _interface_0.attachment.delete_on_termination == False
+ - '"device_index" in _interface_0.attachment'
+ - _interface_0.attachment.device_index == 1
+ - '"instance_id" in _interface_0.attachment'
+ - _interface_0.attachment.instance_id == instance_id_1
+ - '"instance_owner_id" in _interface_0.attachment'
+ - _interface_0.attachment.instance_owner_id is string
+ - '"status" in _interface_0.attachment'
+ - _interface_0.attachment.status == "attached"
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+- name: verify the eni is attached
+ ec2_eni:
+ instance_id: "{{ instance_id_1 }}"
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ state: present
+ attached: True
+ register: result
+- ec2_eni_info:
+ eni_id: '{{ eni_id_1 }}'
+ register: eni_info
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface.attachment is defined
+ - result.interface.attachment.instance_id == instance_id_1
+ - _interface_0.attachment is defined
+ - _interface_0.attachment is mapping
+ - '"attach_time" in _interface_0.attachment'
+ - _interface_0.attachment.attach_time is string
+ - '"attachment_id" in _interface_0.attachment'
+ - _interface_0.attachment.attachment_id.startswith("eni-attach-")
+ - '"delete_on_termination" in _interface_0.attachment'
+ - _interface_0.attachment.delete_on_termination == False
+ - '"device_index" in _interface_0.attachment'
+ - _interface_0.attachment.device_index == 1
+ - '"instance_id" in _interface_0.attachment'
+ - _interface_0.attachment.instance_id == instance_id_1
+ - '"instance_owner_id" in _interface_0.attachment'
+ - _interface_0.attachment.instance_owner_id is string
+ - '"status" in _interface_0.attachment'
+ - _interface_0.attachment.status == "attached"
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+ # ============================================================
+- name: test attaching the network interface to a different instance (check mode)
+ ec2_eni:
+ instance_id: "{{ instance_id_2 }}"
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ state: present
+ attached: True
+ check_mode: true
+ register: result_check_mode
+
+- assert:
+ that:
+ - result_check_mode.changed
+
+- name: test attaching the network interface to a different instance
+ ec2_eni:
+ instance_id: "{{ instance_id_2 }}"
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ state: present
+ attached: True
+ register: result
+- ec2_eni_info:
+ eni_id: '{{ eni_id_1 }}'
+ register: eni_info
+
+- assert:
+ that:
+ - result.changed
+ - result.interface.attachment is defined
+ - result.interface.attachment.instance_id == instance_id_2
+ - _interface_0.attachment is defined
+ - '"instance_id" in _interface_0.attachment'
+ - _interface_0.attachment.instance_id == instance_id_2
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+ # ============================================================
+- name: detach the network interface (check mode)
+ ec2_eni:
+ instance_id: "{{ instance_id_2 }}"
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ state: present
+ attached: False
+ check_mode: true
+ register: result_check_mode
+
+- assert:
+ that:
+ - result_check_mode.changed
+
+- name: detach the network interface
+ ec2_eni:
+ instance_id: "{{ instance_id_2 }}"
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ state: present
+ attached: False
+ register: result
+- ec2_eni_info:
+ eni_id: '{{ eni_id_1 }}'
+ register: eni_info
+
+- assert:
+ that:
+ - result.changed
+ - result.interface.attachment is undefined
+ - _interface_0.attachment is undefined
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+- name: verify the network interface was detached
+ ec2_eni:
+ instance_id: "{{ instance_id_2 }}"
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ state: present
+ attached: False
+ register: result
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface.attachment is undefined
+
+ # ============================================================
+- name: reattach the network interface to test deleting it
+ ec2_eni:
+ instance_id: "{{ instance_id_2 }}"
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ state: present
+ attached: True
+ register: result
+
+- assert:
+ that:
+ - result.changed
+ - result.interface.attachment is defined
+ - result.interface.attachment.instance_id == instance_id_2
+
+- name: test that deleting the network interface while attached must be intentional
+ ec2_eni:
+ eni_id: "{{ eni_id_1 }}"
+ state: absent
+ register: result
+ ignore_errors: True
+
+- assert:
+ that:
+ - result.failed
+ - '"currently in use" in result.msg'
+
+# ============================================================
+- name: Ensure test instance is running (will block non-forced detachment)
+ ec2_instance:
+ state: running
+ instance_ids:
+ - "{{ instance_id_2 }}"
+ wait: True
+
+- name: delete an attached network interface with force_detach (check mode)
+ ec2_eni:
+ force_detach: True
+ eni_id: "{{ eni_id_1 }}"
+ state: absent
+ check_mode: true
+ register: result_check_mode
+ ignore_errors: True
+
+- assert:
+ that:
+ - result_check_mode.changed
+
+- name: delete an attached network interface with force_detach
+ ec2_eni:
+ force_detach: True
+ eni_id: "{{ eni_id_1 }}"
+ state: absent
+ register: result
+ ignore_errors: True
+
+- assert:
+ that:
+ - result.changed
+ - result.interface.attachment is undefined
+
+- name: test removing a network interface that does not exist
+ ec2_eni:
+ force_detach: True
+ eni_id: "{{ eni_id_1 }}"
+ state: absent
+ register: result
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface.attachment is undefined
+
+# ============================================================
+- name: recreate the network interface
+ ec2_eni:
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ register: result
+
+- set_fact:
+ eni_id_1: "{{ result.interface.id }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_create_attached_multiple.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_create_attached_multiple.yml
new file mode 100644
index 000000000..c82139140
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_create_attached_multiple.yml
@@ -0,0 +1,121 @@
+---
+ - name: Create an instance to test attaching and detaching network interfaces
+ ec2_instance:
+ name: "{{ resource_prefix }}-instance"
+ image_id: "{{ ec2_ami_id }}"
+ vpc_subnet_id: "{{ vpc_subnet_id }}"
+ instance_type: t2.micro
+ register: ec2_instances
+
+ - name: set variable for the instance ID
+ set_fact:
+ instance_id_3: "{{ ec2_instances.instances[0].instance_id }}"
+
+#=================================================================
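+ # In check mode the module should report a change without actually calling
+ # ec2:CreateNetworkInterface, which the assertions below verify via resource_actions.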
+
+ - name: Create and attach another interface to the above instance - check_mode
+ amazon.aws.ec2_eni:
+ name: "{{ resource_prefix }}-eni"
+ instance_id: "{{ instance_id_3 }}"
+ device_index: 1
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ attached: true
+ delete_on_termination: true
+ check_mode: true
+ register: result
+
+ # Get the instance info and ENI info to verify attachment of second eni
+ - ec2_instance_info:
+ instance_ids:
+ - "{{ instance_id_3 }}"
+ register: instance_info_result
+
+ - assert:
+ that:
+ - result is changed
+ - result is not failed
+ - instance_info_result.instances[0].network_interfaces | length == 1
+ - '"Would have created ENI if not in check mode." in result.msg'
+ - "'ec2:CreateNetworkInterface' not in {{ result.resource_actions }}"
+
+ - name: Create and attach another interface to the above instance
+ amazon.aws.ec2_eni:
+ name: "{{ resource_prefix }}-eni"
+ instance_id: "{{ instance_id_3 }}"
+ device_index: 1
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ attached: true
+ delete_on_termination: true
+ register: result
+
+ - name: Set variable for the ENI ID
+ set_fact:
+ eni_id_attached_multiple: "{{ result.interface.id }}"
+
+ # Get the instance info and ENI info to verify attachment of second eni
+ - ec2_instance_info:
+ instance_ids:
+ - "{{ instance_id_3 }}"
+ register: instance_info_result
+ - ec2_eni_info:
+ eni_id: "{{ eni_id_attached_multiple }}"
+ register: eni_info
+
+ - name: Assert that the interface attachment was successful
+ assert:
+ that:
+ - result is changed
+ - result is not failed
+ - instance_info_result.instances[0].network_interfaces | length == 2
+ - eni_info.network_interfaces[0].attachment.instance_id == instance_id_3
+ - eni_info.network_interfaces[0].attachment.device_index == 1
+
+ - name: Create and attach another interface to the above instance - check_mode - idempotent
+ amazon.aws.ec2_eni:
+ name: "{{ resource_prefix }}-eni"
+ instance_id: "{{ instance_id_3 }}"
+ device_index: 1
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ attached: true
+ delete_on_termination: true
+ check_mode: true
+ register: result
+
+ # Get the instance info and ENI info to verify attachment of second eni
+ - ec2_instance_info:
+ instance_ids:
+ - "{{ instance_id_3 }}"
+ register: instance_info_result
+
+ - name: Assert that the interface would have been modified if not in check_mode
+ assert:
+ that:
+ - result is changed
+ - result is not failed
+ - instance_info_result.instances[0].network_interfaces | length == 2
+ - '"Would have modified ENI: {{ eni_id_attached_multiple }} if not in check mode" in result.msg'
+ - "'ec2:CreateNetworkInterface' not in {{ result.resource_actions }}"
+ - "'ec2:ModifyNetworkInterfaceAttribute' not in {{ result.resource_actions }}"
+
+#=================================================================
+
+ - name: remove the network interface created in this test
+ ec2_eni:
+ eni_id: "{{ eni_id_attached_multiple }}"
+ force_detach: True
+ state: absent
+ ignore_errors: true
+ retries: 5
+
+ - name: terminate the instance created in this test
+ ec2_instance:
+ state: absent
+ instance_ids:
+ - "{{ instance_id_3 }}"
+ wait: True
+ ignore_errors: true
+ retries: 5
+ when: instance_id_3 is defined
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_deletion.yaml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_deletion.yaml
new file mode 100644
index 000000000..a0144aaba
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_deletion.yaml
@@ -0,0 +1,118 @@
+---
+# ============================================================
+- name: test deleting the unattached network interface by using the ID (check mode)
+ ec2_eni:
+ eni_id: "{{ eni_id_1 }}"
+ name: "{{ resource_prefix }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: absent
+ check_mode: True
+ register: result_check_mode
+
+- assert:
+ that:
+ - result_check_mode.changed
+
+- name: test deleting the unattached network interface by using the ID
+ ec2_eni:
+ eni_id: "{{ eni_id_1 }}"
+ name: "{{ resource_prefix }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: absent
+ register: result
+- ec2_eni_info:
+ eni_id: "{{ eni_id_1 }}"
+ register: eni_info
+
+- assert:
+ that:
+ - result.changed
+ - result.interface is undefined
+ - '"network_interfaces" in eni_info'
+ - eni_id_1 not in ( eni_info.network_interfaces | selectattr('id') | map(attribute='id') | list )
+
+- name: test removing the network interface by ID is idempotent (check mode)
+ ec2_eni:
+ eni_id: "{{ eni_id_1 }}"
+ name: "{{ resource_prefix }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: absent
+ check_mode: True
+ register: result_check_mode
+
+- assert:
+ that:
+ - not result_check_mode.changed
+
+- name: test removing the network interface by ID is idempotent
+ ec2_eni:
+ eni_id: "{{ eni_id_1 }}"
+ name: "{{ resource_prefix }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: absent
+ register: result
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface is undefined
+
+# ============================================================
+- name: add a name tag to the other network interface before deleting it
+ ec2_eni:
+ eni_id: "{{ eni_id_2 }}"
+ name: "{{ resource_prefix }}"
+ state: present
+
+- name: test deleting the unattached network interface by using the name
+ ec2_eni:
+ name: "{{ resource_prefix }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: absent
+ register: result
+- ec2_eni_info:
+ eni_id: "{{ eni_id_2 }}"
+ register: eni_info
+
+- assert:
+ that:
+ - result.changed
+ - result.interface is undefined
+ - '"network_interfaces" in eni_info'
+ - eni_id_2 not in ( eni_info.network_interfaces | selectattr('id') | map(attribute='id') | list )
+
+- name: test removing the network interface by name is idempotent
+ ec2_eni:
+ name: "{{ resource_prefix }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: absent
+ register: result
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface is undefined
+
+- name: verify that the network interface ID does not exist (retry-delete by ID)
+ ec2_eni:
+ eni_id: "{{ eni_id_2 }}"
+ state: absent
+ register: result
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface is undefined
+
+# ============================================================
+
+- name: Fetch ENI info without filter
+ ec2_eni_info:
+ register: eni_info
+
+- name: Assert that ec2_eni_info doesn't contain the two interfaces we just deleted
+ assert:
+ that:
+ - '"network_interfaces" in eni_info'
+ - eni_id_1 not in ( eni_info.network_interfaces | selectattr('id') | map(attribute='id') | list )
+ - eni_id_2 not in ( eni_info.network_interfaces | selectattr('id') | map(attribute='id') | list )
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_eni_basic_creation.yaml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_eni_basic_creation.yaml
new file mode 100644
index 000000000..3f0530348
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_eni_basic_creation.yaml
@@ -0,0 +1,263 @@
+---
+# ============================================================
+- name: create a network interface (check mode)
+ ec2_eni:
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ check_mode: true
+ register: result_check_mode
+
+- assert:
+ that:
+ - result_check_mode.changed
+
+- name: create a network interface
+ ec2_eni:
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ register: result
+
+- assert:
+ that:
+ - result.changed
+ - result.interface.private_ip_addresses | length == 1
+
+- set_fact:
+ eni_id_1: "{{ result.interface.id }}"
+
+- name: Fetch ENI info (by ID)
+ ec2_eni_info:
+ eni_id: '{{ eni_id_1 }}'
+ register: eni_info
+
+- name: Assert that ec2_eni_info returns all the values we expect
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+ assert:
+ that:
+ - '"network_interfaces" in eni_info'
+ - eni_info.network_interfaces | length == 1
+ - '"association" not in _interface_0'
+ - '"attachment" not in _interface_0'
+ - '"availability_zone" in _interface_0'
+ - _interface_0.availability_zone.startswith(aws_region)
+ - '"description" in _interface_0'
+ - _interface_0.description == ""
+ - '"groups" in _interface_0'
+ - _interface_0.groups is iterable
+ - _interface_0.groups | length == 1
+ - '"id" in _interface_0'
+ - _interface_0.id.startswith("eni-")
+ - _interface_0.id == eni_id_1
+ - '"interface_type" in _interface_0'
+ - _interface_0.owner_id is string
+ - '"ipv6_addresses" in _interface_0'
+ - _interface_0.ipv6_addresses is iterable
+ - _interface_0.ipv6_addresses | length == 0
+ - '"mac_address" in _interface_0'
+ - _interface_0.owner_id is string
+ - _interface_0.mac_address | length == 17
+ - '"network_interface_id" in _interface_0'
+ - _interface_0.network_interface_id.startswith("eni-")
+ - _interface_0.network_interface_id == eni_id_1
+ - '"owner_id" in _interface_0'
+ - _interface_0.owner_id is string
+ - '"private_dns_name" in _interface_0'
+ - _interface_0.private_dns_name is string
+ - _interface_0.private_dns_name.endswith("ec2.internal")
+ - '"private_ip_address" in _interface_0'
+ - _interface_0.private_ip_address | ansible.utils.ipaddr
+ - _interface_0.private_ip_address == ip_1
+ - '"private_ip_addresses" in _interface_0'
+ - _interface_0.private_ip_addresses | length == 1
+ - ip_1 in ( eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list )
+ - '"requester_id" in _interface_0'
+ - _interface_0.requester_id is string
+ - '"requester_managed" in _interface_0'
+ - _interface_0.requester_managed == False
+ - '"source_dest_check" in _interface_0'
+ - _interface_0.source_dest_check == True
+ - '"status" in _interface_0'
+ - _interface_0.status == "available"
+ - '"subnet_id" in _interface_0'
+ - _interface_0.subnet_id == vpc_subnet_id
+ - '"tag_set" in _interface_0'
+ - _interface_0.tag_set is mapping
+ - '"vpc_id" in _interface_0'
+ - _interface_0.vpc_id == vpc_id
+
+- name: test idempotence by using the same private_ip_address (check mode)
+ ec2_eni:
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ check_mode: true
+ register: result_check_mode
+
+- assert:
+ that:
+ - not result_check_mode.changed
+
+- name: test idempotence by using the same private_ip_address
+ ec2_eni:
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ register: result
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface.id == eni_id_1
+ - result.interface.private_ip_addresses | length == 1
+
+# ============================================================
+
+- name: create a second network interface to test IP reassignment
+ ec2_eni:
+ device_index: 1
+ private_ip_address: "{{ ip_5 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ register: result
+
+- assert:
+ that:
+ - result.changed
+ - result.interface.id != eni_id_1
+
+- name: save the second network interface ID for cleanup
+ set_fact:
+ eni_id_2: "{{ result.interface.id }}"
+
+- name: Fetch ENI info (using filter)
+ ec2_eni_info:
+ filters:
+ network-interface-id: '{{ eni_id_2 }}'
+ register: eni_info
+
+- name: Assert that ec2_eni_info returns all the values we expect
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+ assert:
+ that:
+ - '"network_interfaces" in eni_info'
+ - eni_info.network_interfaces | length == 1
+ - '"association" not in _interface_0'
+ - '"attachment" not in _interface_0'
+ - '"availability_zone" in _interface_0'
+ - _interface_0.availability_zone.startswith(aws_region)
+ - '"description" in _interface_0'
+ - _interface_0.description == ""
+ - '"groups" in _interface_0'
+ - _interface_0.groups is iterable
+ - _interface_0.groups | length == 1
+ - '"id" in _interface_0'
+ - _interface_0.id.startswith("eni-")
+ - _interface_0.id == eni_id_2
+ - '"interface_type" in _interface_0'
+ - _interface_0.owner_id is string
+ - '"ipv6_addresses" in _interface_0'
+ - _interface_0.ipv6_addresses is iterable
+ - _interface_0.ipv6_addresses | length == 0
+ - '"mac_address" in _interface_0'
+ - _interface_0.owner_id is string
+ - _interface_0.mac_address | length == 17
+ - '"network_interface_id" in _interface_0'
+ - _interface_0.network_interface_id.startswith("eni-")
+ - _interface_0.network_interface_id == eni_id_2
+ - '"owner_id" in _interface_0'
+ - _interface_0.owner_id is string
+ - '"private_dns_name" in _interface_0'
+ - _interface_0.private_dns_name is string
+ - _interface_0.private_dns_name.endswith("ec2.internal")
+ - '"private_ip_address" in _interface_0'
+ - _interface_0.private_ip_address | ansible.utils.ipaddr
+ - _interface_0.private_ip_address == ip_5
+ - '"private_ip_addresses" in _interface_0'
+ - _interface_0.private_ip_addresses | length == 1
+ - ip_5 in ( eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list )
+ - '"requester_id" in _interface_0'
+ - _interface_0.requester_id is string
+ - '"requester_managed" in _interface_0'
+ - _interface_0.requester_managed == False
+ - '"source_dest_check" in _interface_0'
+ - _interface_0.source_dest_check == True
+ - '"status" in _interface_0'
+ - _interface_0.status == "available"
+ - '"subnet_id" in _interface_0'
+ - _interface_0.subnet_id == vpc_subnet_id
+ - '"tag_set" in _interface_0'
+ - _interface_0.tag_set is mapping
+ - '"vpc_id" in _interface_0'
+ - _interface_0.vpc_id == vpc_id
+
+- name: Fetch ENI info without filter
+ ec2_eni_info:
+ register: eni_info
+
+- name: Assert that ec2_eni_info contains at least the two interfaces we expect
+ assert:
+ that:
+ - '"network_interfaces" in eni_info'
+ - eni_info.network_interfaces | length >= 2
+ - eni_id_1 in ( eni_info.network_interfaces | selectattr('id') | map(attribute='id') | list )
+ - eni_id_2 in ( eni_info.network_interfaces | selectattr('id') | map(attribute='id') | list )
+
+# ============================================================
+# Run some VPC filter based tests of ec2_eni_info
+
+- name: Fetch ENI info with VPC filters - Available
+ ec2_eni_info:
+ filters:
+ vpc-id: '{{ vpc_id }}'
+ status: 'available'
+ register: eni_info
+
+- name: Assert that ec2_eni_info returns exactly the two available interfaces we expect
+ assert:
+ that:
+ - '"network_interfaces" in eni_info'
+ - eni_info.network_interfaces | length == 2
+ - eni_id_1 in ( eni_info.network_interfaces | selectattr('id') | map(attribute='id') | list )
+ - eni_id_2 in ( eni_info.network_interfaces | selectattr('id') | map(attribute='id') | list )
+
+- name: Fetch ENI info with VPC filters - VPC
+ ec2_eni_info:
+ filters:
+ vpc-id: '{{ vpc_id }}'
+ register: eni_info
+
+- name: Assert that ec2_eni_info returns all four interfaces in the VPC (the two we created plus the instance ENIs)
+ assert:
+ that:
+ - '"network_interfaces" in eni_info'
+ - eni_info.network_interfaces | length == 4
+ - eni_id_1 in ( eni_info.network_interfaces | selectattr('id') | map(attribute='id') | list )
+ - eni_id_2 in ( eni_info.network_interfaces | selectattr('id') | map(attribute='id') | list )
+ - ec2_ips[0] in ( eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list )
+ - ec2_ips[1] in ( eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list )
+
+
+# =========================================================
+
+- name: create another network interface without private_ip_address
+ ec2_eni:
+ device_index: 1
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ register: result_no_private_ip
+
+- assert:
+ that:
+ - result_no_private_ip.changed
+
+- name: save the third network interface ID for cleanup
+ set_fact:
+ eni_id_3: "{{ result_no_private_ip.interface.id }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_ipaddress_assign.yaml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_ipaddress_assign.yaml
new file mode 100644
index 000000000..3f6d85b81
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_ipaddress_assign.yaml
@@ -0,0 +1,325 @@
+---
+# ============================================================
+- name: add two implicit secondary IPs (check mode)
+ ec2_eni:
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ secondary_private_ip_address_count: 2
+ check_mode: true
+ register: result_check_mode
+
+- assert:
+ that:
+ - result_check_mode.changed
+
+- name: add two implicit secondary IPs
+ ec2_eni:
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ secondary_private_ip_address_count: 2
+ register: result
+- ec2_eni_info:
+ eni_id: '{{ eni_id_1 }}'
+ register: eni_info
+
+- assert:
+ that:
+ - result.changed
+ - result.interface.id == eni_id_1
+ - result.interface.private_ip_addresses | length == 3
+ - _interface_0.private_ip_addresses | length == 3
+ - ip_1 in ( eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list )
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+- name: test idempotence with two implicit secondary IPs (check mode)
+ ec2_eni:
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ secondary_private_ip_address_count: 2
+ check_mode: true
+ register: result_check_mode
+
+- assert:
+ that:
+ - not result_check_mode.changed
+
+- name: test idempotence with two implicit secondary IPs
+ ec2_eni:
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ secondary_private_ip_address_count: 2
+ register: result
+- ec2_eni_info:
+ eni_id: '{{ eni_id_1 }}'
+ register: eni_info
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface.id == eni_id_1
+ - result.interface.private_ip_addresses | length == 3
+ - _interface_0.private_ip_addresses | length == 3
+ - ip_1 in ( eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list )
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+# ============================================================
+- name: ensure secondary addresses are only removed if purge is set to true
+ ec2_eni:
+ purge_secondary_private_ip_addresses: false
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ secondary_private_ip_addresses: []
+ register: result
+- ec2_eni_info:
+ eni_id: '{{ eni_id_1 }}'
+ register: eni_info
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface.id == eni_id_1
+ - result.interface.private_ip_addresses | length == 3
+ - _interface_0.private_ip_addresses | length == 3
+ - ip_1 in ( eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list )
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+# ============================================================
+
+# Using secondary_private_ip_address_count leads to unpredictable IP assignment
+# For the following test, first find an IP that has not been used yet
+
+- name: save the list of private IPs in use
+ set_fact:
+ current_private_ips: "{{ result.interface | json_query('private_ip_addresses[*].private_ip_address') | list }}"
+
+- name: set new_secondary_ip to an IP that has not been used
+ set_fact:
+ new_secondary_ip: "{{ [ip_2, ip_3, ip_4] | difference(current_private_ips) | first }}"
+
+- name: add an explicit secondary address without purging the ones added implicitly
+ ec2_eni:
+ purge_secondary_private_ip_addresses: false
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ secondary_private_ip_addresses:
+ - "{{ new_secondary_ip }}"
+ register: result
+- ec2_eni_info:
+ eni_id: '{{ eni_id_1 }}'
+ register: eni_info
+
+- assert:
+ that:
+ - result.changed
+ - result.interface.id == eni_id_1
+ - result.interface.private_ip_addresses | length == 4
+ - _interface_0.private_ip_addresses | length == 4
+ # Only ip_1 and the explicitly requested IP are guaranteed to be present
+ - ip_1 in _private_ips
+ - new_secondary_ip in _private_ips
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+ _private_ips: "{{ eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list }}"
+
+# ============================================================
+- name: remove secondary address (check mode)
+ ec2_eni:
+ purge_secondary_private_ip_addresses: true
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ secondary_private_ip_addresses: []
+ check_mode: true
+ register: result_check_mode
+
+- assert:
+ that:
+ - result_check_mode.changed
+
+- name: remove secondary address
+ ec2_eni:
+ purge_secondary_private_ip_addresses: true
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ secondary_private_ip_addresses: []
+ register: result
+- ec2_eni_info:
+ eni_id: '{{ eni_id_1 }}'
+ register: eni_info
+
+- assert:
+ that:
+ - result.changed
+ - result.interface.id == eni_id_1
+ - result.interface.private_ip_addresses | length == 1
+ - _interface_0.private_ip_addresses | length == 1
+ - ip_1 in ( eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list )
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+- name: test idempotent behavior purging secondary addresses (check mode)
+ ec2_eni:
+ purge_secondary_private_ip_addresses: true
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ secondary_private_ip_addresses: []
+ check_mode: true
+ register: result_check_mode
+
+- assert:
+ that:
+ - not result_check_mode.changed
+
+- name: test idempotent behavior purging secondary addresses
+ ec2_eni:
+ purge_secondary_private_ip_addresses: true
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ secondary_private_ip_addresses: []
+ register: result
+- ec2_eni_info:
+ eni_id: '{{ eni_id_1 }}'
+ register: eni_info
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface.id == eni_id_1
+ - result.interface.private_ip_addresses | length == 1
+ - _interface_0.private_ip_addresses | length == 1
+ - ip_1 in ( eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list )
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+# ============================================================
+
+- name: Assign secondary IP address to the second ENI
+ ec2_eni:
+ device_index: 1
+ private_ip_address: "{{ ip_5 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ secondary_private_ip_addresses:
+ - "{{ ip_4 }}"
+ register: result
+- ec2_eni_info:
+ eni_id: '{{ eni_id_2 }}'
+ register: eni_info
+
+- assert:
+ that:
+ - result.changed
+ - result.interface.id == eni_id_2
+ - result.interface.private_ip_addresses | length == 2
+ - _interface_0.private_ip_addresses | length == 2
+ - ip_5 in ( eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list )
+ - ip_4 in ( eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list )
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+- name: test that reassignment of an IP already in use fails when not explicitly allowed (default for allow_reassignment == False)
+ ec2_eni:
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ secondary_private_ip_addresses:
+ - "{{ ip_2 }}"
+ - "{{ ip_3 }}"
+ - "{{ ip_4 }}"
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result.failed
+ - '"move is not allowed" in result.msg'
+
+# ============================================================
+- name: allow reassignment to add the list of secondary addresses
+ ec2_eni:
+ allow_reassignment: true
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ secondary_private_ip_addresses:
+ - "{{ ip_2 }}"
+ - "{{ ip_3 }}"
+ - "{{ ip_4 }}"
+ register: result
+
+- assert:
+ that:
+ - result.changed
+ - result.interface.id == eni_id_1
+ - result.interface.private_ip_addresses | length == 4
+
+- name: test reassignment is idempotent
+ ec2_eni:
+ allow_reassignment: true
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ secondary_private_ip_addresses:
+ - "{{ ip_2 }}"
+ - "{{ ip_3 }}"
+ - "{{ ip_4 }}"
+ register: result
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface.id == eni_id_1
+
+# ============================================================
+
+- name: purge all the secondary addresses
+ ec2_eni:
+ purge_secondary_private_ip_addresses: true
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ secondary_private_ip_addresses: []
+ register: result
+- ec2_eni_info:
+ eni_id: '{{ eni_id_1 }}'
+ register: eni_info
+ until: _interface_0.private_ip_addresses | length == 1
+ retries: 5
+ delay: 2
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
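+# The until/retries/delay combination re-runs ec2_eni_info until AWS reports
+# only the primary address, waiting roughly retries * delay (about 10 seconds)
+# before giving up.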
+
+- assert:
+ that:
+ - result.changed
+ - _interface_0.private_ip_addresses | length == 1
+ - ip_1 in ( eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list )
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_delete_on_termination.yaml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_delete_on_termination.yaml
new file mode 100644
index 000000000..f8c6e23b1
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_delete_on_termination.yaml
@@ -0,0 +1,214 @@
+# ============================================================
+
+- name: ensure delete_on_termination defaults to False
+ ec2_eni:
+ instance_id: "{{ instance_id_2 }}"
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ state: present
+ attached: True
+ register: result
+- ec2_eni_info:
+ eni_id: "{{ eni_id_1 }}"
+ register: eni_info
+
+- assert:
+ that:
+ - result is successful
+ - result.interface.attachment.delete_on_termination == false
+ - _interface_0.attachment.delete_on_termination == False
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+# ============================================================
+
+- name: enable delete_on_termination (check mode)
+ ec2_eni:
+ instance_id: "{{ instance_id_2 }}"
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ state: present
+ attached: True
+ delete_on_termination: True
+ check_mode: true
+ register: result_check_mode
+
+- assert:
+ that:
+ - result_check_mode.changed
+
+- name: enable delete_on_termination
+ ec2_eni:
+ instance_id: "{{ instance_id_2 }}"
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ state: present
+ attached: True
+ delete_on_termination: True
+ register: result
+- ec2_eni_info:
+ eni_id: "{{ eni_id_1 }}"
+ register: eni_info
+
+- assert:
+ that:
+ - result.changed
+ - result.interface.attachment.delete_on_termination == true
+ - _interface_0.attachment.delete_on_termination == True
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+- name: test idempotent behavior enabling delete_on_termination (check mode)
+ ec2_eni:
+ instance_id: "{{ instance_id_2 }}"
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ state: present
+ attached: True
+ delete_on_termination: True
+ check_mode: true
+ register: result_check_mode
+
+- assert:
+ that:
+ - not result_check_mode.changed
+
+- name: test idempotent behavior enabling delete_on_termination
+ ec2_eni:
+ instance_id: "{{ instance_id_2 }}"
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ state: present
+ attached: True
+ delete_on_termination: True
+ register: result
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface.attachment.delete_on_termination == true
+
+# ============================================================
+
+- name: disable delete_on_termination (check mode)
+ ec2_eni:
+ instance_id: "{{ instance_id_2 }}"
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ state: present
+ attached: True
+ delete_on_termination: False
+ check_mode: true
+ register: result_check_mode
+
+- assert:
+ that:
+ - result_check_mode.changed
+
+- name: disable delete_on_termination
+ ec2_eni:
+ instance_id: "{{ instance_id_2 }}"
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ state: present
+ attached: True
+ delete_on_termination: False
+ register: result
+- ec2_eni_info:
+ eni_id: "{{ eni_id_1 }}"
+ register: eni_info
+
+- assert:
+ that:
+ - result.changed
+ - result.interface.attachment.delete_on_termination == false
+ - _interface_0.attachment.delete_on_termination == False
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+# ============================================================
+
+- name: terminate the instance to make sure the attached ENI remains
+ ec2_instance:
+ state: absent
+ instance_ids:
+ - "{{ instance_id_2 }}"
+ wait: True
+
+- name: verify the eni still exists
+ ec2_eni:
+ eni_id: "{{ eni_id_1 }}"
+ state: present
+ register: result
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface.id == eni_id_1
+ - result.interface.attachment is undefined
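+# Because delete_on_termination was left disabled, terminating the instance
+# only detaches the ENI; the interface survives with no attachment, which is
+# what the assertion above checks.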
+
+# ============================================================
+
+- name: ensure the network interface is attached
+ ec2_eni:
+ instance_id: "{{ instance_id_1 }}"
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ attached: True
+ register: result
+
+- name: ensure delete_on_termination is true
+ ec2_eni:
+ instance_id: "{{ instance_id_1 }}"
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ attached: True
+ delete_on_termination: True
+ register: result
+
+- name: test terminating the instance after setting delete_on_termination to true
+ ec2_instance:
+ state: absent
+ instance_ids:
+ - "{{ instance_id_1 }}"
+ wait: True
+
+- name: verify the eni was also removed
+ ec2_eni:
+ eni_id: "{{ eni_id_1 }}"
+ state: absent
+ register: result
+- ec2_eni_info:
+ register: eni_info
+
+- assert:
+ that:
+ - not result.changed
+ - '"network_interfaces" in eni_info'
+ - eni_info.network_interfaces | length >= 1
+ - eni_id_1 not in ( eni_info.network_interfaces | selectattr('id') | map(attribute='id') | list )
+ - eni_id_2 in ( eni_info.network_interfaces | selectattr('id') | map(attribute='id') | list )
+
+# ============================================================
+
+- name: recreate the network interface
+ ec2_eni:
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ register: result
+
+- set_fact:
+ eni_id_1: "{{ result.interface.id }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_source_dest_check.yaml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_source_dest_check.yaml
new file mode 100644
index 000000000..4259d3a81
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_source_dest_check.yaml
@@ -0,0 +1,98 @@
+ # ============================================================
+- name: test source_dest_check defaults to true (check mode)
+ ec2_eni:
+ eni_id: "{{ eni_id_1 }}"
+ source_dest_check: true
+ state: present
+ check_mode: true
+ register: result_check_mode
+
+- assert:
+ that:
+ - not result_check_mode.changed
+
+- name: test source_dest_check defaults to true
+ ec2_eni:
+ eni_id: "{{ eni_id_1 }}"
+ source_dest_check: true
+ state: present
+ register: result
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface.source_dest_check == true
+
+ # ============================================================
+- name: disable source_dest_check
+ ec2_eni:
+ eni_id: "{{ eni_id_1 }}"
+ source_dest_check: false
+ state: present
+ register: result
+
+- name: Check source_dest_check state
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+ ec2_eni_info:
+ eni_id: "{{ eni_id_1 }}"
+ register: eni_info
+ until: _interface_0.source_dest_check == False
+ retries: 5
+ delay: 2
+
+- assert:
+ that:
+ - result.changed
+ - _interface_0.source_dest_check == False
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+- name: test idempotence disabling source_dest_check (check mode)
+ ec2_eni:
+ eni_id: "{{ eni_id_1 }}"
+ source_dest_check: false
+ state: present
+ check_mode: true
+ register: result_check_mode
+
+- assert:
+ that:
+ - not result_check_mode.changed
+
+- name: test idempotence disabling source_dest_check
+ ec2_eni:
+ eni_id: "{{ eni_id_1 }}"
+ source_dest_check: false
+ state: present
+ register: result
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface.source_dest_check == false
+
+ # ============================================================
+- name: enable source_dest_check
+ ec2_eni:
+ eni_id: "{{ eni_id_1 }}"
+ source_dest_check: true
+ state: present
+ register: result
+
+- name: Check source_dest_check state
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+ ec2_eni_info:
+ eni_id: "{{ eni_id_1 }}"
+ register: eni_info
+ until: _interface_0.source_dest_check == True
+ retries: 5
+ delay: 2
+
+- assert:
+ that:
+ - result.changed
+ - _interface_0.source_dest_check == True
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_tags.yaml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_tags.yaml
new file mode 100644
index 000000000..d26d96b5b
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_tags.yaml
@@ -0,0 +1,251 @@
+ # ============================================================
+- name: verify there are no tags associated with the network interface
+ ec2_eni:
+ eni_id: "{{ eni_id_1 }}"
+ state: present
+ tags: {}
+ register: result
+
+- assert:
+ that:
+ - not result.changed
+ - not result.interface.tags
+ - result.interface.name is undefined
+
+ # ============================================================
+- name: add tags to the network interface (check mode)
+ ec2_eni:
+ eni_id: "{{ eni_id_1 }}"
+ state: present
+ name: "{{ resource_prefix }}"
+ tags:
+ CreatedBy: "{{ resource_prefix }}"
+ check_mode: true
+ register: result_check_mode
+
+- assert:
+ that:
+ - result_check_mode.changed
+
+- name: add tags to the network interface
+ ec2_eni:
+ eni_id: "{{ eni_id_1 }}"
+ state: present
+ name: "{{ resource_prefix }}"
+ tags:
+ CreatedBy: "{{ resource_prefix }}"
+ register: result
+- ec2_eni_info:
+ eni_id: "{{ eni_id_1 }}"
+ register: eni_info
+
+- assert:
+ that:
+ - result.changed
+ - result.interface.id == eni_id_1
+ - result.interface.tags | length == 2
+ - result.interface.tags.CreatedBy == resource_prefix
+ - result.interface.tags.Name == resource_prefix
+ - result.interface.name == resource_prefix
+ - _interface_0.tags | length == 2
+ - _interface_0.tags.CreatedBy == resource_prefix
+ - _interface_0.tags.Name == resource_prefix
+ - _interface_0.tag_set | length == 2
+ - _interface_0.tag_set.CreatedBy == resource_prefix
+ - _interface_0.tag_set.Name == resource_prefix
+ - _interface_0.name == resource_prefix
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
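+ # Note: the module result exposes the tags as a 'tags' dict plus a convenience
+ # 'name' field, while ec2_eni_info reports the same data under both 'tags' and
+ # 'tag_set'; the assertions above cover each of these views.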
+
+ # ============================================================
+- name: test idempotence by using the Name tag and the subnet (check mode)
+ ec2_eni:
+ name: "{{ resource_prefix }}"
+ state: present
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ check_mode: true
+ register: result_check_mode
+
+- assert:
+ that:
+ - not result_check_mode.changed
+
+- name: test idempotence by using the Name tag and the subnet
+ ec2_eni:
+ name: "{{ resource_prefix }}"
+ state: present
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ register: result
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface.id == eni_id_1
+
+ # ============================================================
+- name: test tags are not purged if tags are null even if name is provided (check mode)
+ ec2_eni:
+ name: "{{ resource_prefix }}"
+ state: present
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ check_mode: true
+ register: result_check_mode
+
+- assert:
+ that:
+ - not result_check_mode.changed
+
+- name: test tags are not purged if tags are null even if name is provided
+ ec2_eni:
+ name: "{{ resource_prefix }}"
+ state: present
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ register: result
+- ec2_eni_info:
+ eni_id: "{{ eni_id_1 }}"
+ register: eni_info
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface.id == eni_id_1
+ - result.interface.tags | length == 2
+ - result.interface.tags.CreatedBy == resource_prefix
+ - result.interface.tags.Name == resource_prefix
+ - result.interface.name == resource_prefix
+ - _interface_0.tag_set | length == 2
+ - _interface_0.tag_set.CreatedBy == resource_prefix
+ - _interface_0.tag_set.Name == resource_prefix
+ - _interface_0.name == resource_prefix
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+ # ============================================================
+- name: test setting purge tags to false
+ ec2_eni:
+ eni_id: "{{ eni_id_1 }}"
+ state: present
+ purge_tags: false
+ tags: {}
+ register: result
+- ec2_eni_info:
+ eni_id: "{{ eni_id_1 }}"
+ register: eni_info
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface.tags | length == 2
+ - result.interface.tags.CreatedBy == resource_prefix
+ - result.interface.tags.Name == resource_prefix
+ - result.interface.name == resource_prefix
+ - _interface_0.tag_set | length == 2
+ - _interface_0.tag_set.CreatedBy == resource_prefix
+ - _interface_0.tag_set.Name == resource_prefix
+ - _interface_0.name == resource_prefix
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+ # ============================================================
+- name: test adding a new tag without removing any others
+ ec2_eni:
+ eni_id: "{{ eni_id_1 }}"
+ state: present
+ purge_tags: false
+ tags:
+ environment: test
+ register: result
+- ec2_eni_info:
+ eni_id: "{{ eni_id_1 }}"
+ register: eni_info
+
+- assert:
+ that:
+ - result.changed
+ - result.interface.tags | length == 3
+ - result.interface.tags.environment == 'test'
+ - result.interface.tags.CreatedBy == resource_prefix
+ - result.interface.tags.Name == resource_prefix
+ - result.interface.name == resource_prefix
+ - _interface_0.tag_set | length == 3
+ - _interface_0.tag_set.environment == 'test'
+ - _interface_0.tag_set.CreatedBy == resource_prefix
+ - _interface_0.tag_set.Name == resource_prefix
+ - _interface_0.name == resource_prefix
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+ # ============================================================
+- name: test purging tags and adding a new one
+ ec2_eni:
+ name: "{{ resource_prefix }}"
+ state: present
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ tags:
+ Description: "{{ resource_prefix }}"
+ register: result
+- ec2_eni_info:
+ eni_id: "{{ eni_id_1 }}"
+ register: eni_info
+
+- assert:
+ that:
+ - result.changed
+ - result.interface.id == eni_id_1
+ - result.interface.tags | length == 2
+ - result.interface.tags.Description == resource_prefix
+ - result.interface.tags.Name == resource_prefix
+ - result.interface.name == resource_prefix
+ - _interface_0.tag_set | length == 2
+ - _interface_0.tag_set.Description == resource_prefix
+ - _interface_0.tag_set.Name == resource_prefix
+ - _interface_0.name == resource_prefix
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+- name: test purging tags and adding a new one is idempotent
+ ec2_eni:
+ name: "{{ resource_prefix }}"
+ state: present
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ tags:
+ Description: "{{ resource_prefix }}"
+ register: result
+- ec2_eni_info:
+ eni_id: "{{ eni_id_1 }}"
+ register: eni_info
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface.id == eni_id_1
+ - result.interface.tags | length == 2
+ - result.interface.tags.Description == resource_prefix
+ - result.interface.tags.Name == resource_prefix
+ - result.interface.name == resource_prefix
+ - _interface_0.tag_set | length == 2
+ - _interface_0.tag_set.Description == resource_prefix
+ - _interface_0.tag_set.Name == resource_prefix
+ - _interface_0.name == resource_prefix
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+ # ============================================================
+- name: test purging all tags
+ ec2_eni:
+ eni_id: "{{ eni_id_1 }}"
+ state: present
+ tags: {}
+ register: result
+- ec2_eni_info:
+ eni_id: "{{ eni_id_1 }}"
+ register: eni_info
+
+- assert:
+ that:
+ - result.changed
+ - not result.interface.tags
+ - result.interface.name is undefined
+ - _interface_0.tag_set | length == 0
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/aliases
new file mode 100644
index 000000000..7497e8011
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/aliases
@@ -0,0 +1,6 @@
+time=10m
+
+cloud/aws
+
+ec2_instance_info
+ec2_instance
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/defaults/main.yml
new file mode 100644
index 000000000..364c37f82
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+# defaults file for ec2_instance_block_devices
+ec2_instance_type: 't3.micro'
+ec2_instance_tag_TestId: '{{ resource_prefix }}-block-devices'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/meta/main.yml
new file mode 100644
index 000000000..320728605
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/meta/main.yml
@@ -0,0 +1,6 @@
+# this just makes sure they're in the right place
+dependencies:
+- role: setup_ec2_facts
+- role: setup_ec2_instance_env
+ vars:
+ ec2_instance_test_name: block_devices
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/tasks/main.yml
new file mode 100644
index 000000000..5e27d5ab0
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/tasks/main.yml
@@ -0,0 +1,110 @@
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ - name: "New instance with an extra block device"
+ ec2_instance:
+ state: running
+ name: "{{ resource_prefix }}-test-ebs-vols"
+ image_id: "{{ ec2_ami_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ volumes:
+ - device_name: /dev/sdb
+ ebs:
+ volume_size: 20
+ delete_on_termination: true
+ volume_type: standard
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ instance_type: "{{ ec2_instance_type }}"
+ wait: true
+ register: block_device_instances
+
+ - name: "Gather instance info"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-ebs-vols"
+ register: block_device_instances_info
+
+ - assert:
+ that:
+ - block_device_instances is not failed
+ - block_device_instances is changed
+ - block_device_instances_info.instances[0].block_device_mappings[0]
+ - block_device_instances_info.instances[0].block_device_mappings[1]
+ - block_device_instances_info.instances[0].block_device_mappings[1].device_name == '/dev/sdb'
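+ # Ordering assumption: mapping 0 is the root volume that comes with the AMI,
+ # and mapping 1 is the extra /dev/sdb volume requested above.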
+
+ - name: "New instance with an extra block device (check mode)"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-ebs-vols-checkmode"
+ image_id: "{{ ec2_ami_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ volumes:
+ - device_name: /dev/sdb
+ ebs:
+ volume_size: 20
+ delete_on_termination: true
+ volume_type: standard
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ instance_type: "{{ ec2_instance_type }}"
+ check_mode: yes
+
+ - name: "fact presented ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-ebs-vols"
+ register: presented_instance_fact
+
+ - name: "fact checkmode ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-ebs-vols-checkmode"
+ register: checkmode_instance_fact
+
+ - name: "Confirm instance was created without check mode"
+ assert:
+ that:
+ - "{{ presented_instance_fact.instances | length }} > 0"
+
+ - name: "Confirm instance was not created with check mode"
+ assert:
+ that:
+ - "{{ checkmode_instance_fact.instances | length }} == 0"
+
+ - name: "Terminate instances"
+ ec2_instance:
+ state: absent
+ instance_ids: "{{ block_device_instances.instance_ids }}"
+
+ - name: "New instance with an extra block device - gp3 volume_type and throughput"
+ ec2_instance:
+ state: running
+ name: "{{ resource_prefix }}-test-ebs-vols-gp3"
+ image_id: "{{ ec2_ami_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ volumes:
+ - device_name: /dev/sdb
+ ebs:
+ volume_size: 20
+ delete_on_termination: true
+ volume_type: gp3
+ throughput: 500
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ instance_type: "{{ ec2_instance_type }}"
+ wait: true
+ register: block_device_instances_gp3
+
+ - assert:
+ that:
+ - block_device_instances_gp3 is not failed
+ - block_device_instances_gp3 is changed
+ - block_device_instances_gp3.spec.BlockDeviceMappings[0].DeviceName == '/dev/sdb'
+ - block_device_instances_gp3.spec.BlockDeviceMappings[0].Ebs.VolumeType == 'gp3'
+ - block_device_instances_gp3.spec.BlockDeviceMappings[0].Ebs.VolumeSize == 20
+ - block_device_instances_gp3.spec.BlockDeviceMappings[0].Ebs.Throughput == 500
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/aliases
new file mode 100644
index 000000000..7497e8011
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/aliases
@@ -0,0 +1,6 @@
+time=10m
+
+cloud/aws
+
+ec2_instance_info
+ec2_instance
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/defaults/main.yml
new file mode 100644
index 000000000..829070a1e
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+# defaults file for ec2_instance_checkmode_tests
+ec2_instance_type: 't3.micro'
+ec2_instance_tag_TestId: '{{ resource_prefix }}-instance-checkmode'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/meta/main.yml
new file mode 100644
index 000000000..634b3aa6e
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/meta/main.yml
@@ -0,0 +1,6 @@
+# this just makes sure they're in the right place
+dependencies:
+- role: setup_ec2_facts
+- role: setup_ec2_instance_env
+ vars:
+ ec2_instance_test_name: check_mode
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/tasks/main.yml
new file mode 100644
index 000000000..2ffa2f9df
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/tasks/main.yml
@@ -0,0 +1,208 @@
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ - name: "Make basic instance"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-checkmode-comparison"
+ image_id: "{{ ec2_ami_id }}"
+ security_groups: "{{ sg.group_id }}"
+ instance_type: "{{ ec2_instance_type }}"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ wait: false
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ TestTag: "Some Value"
+ register: basic_instance
+
+ - name: "Make basic instance (check mode)"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-checkmode-comparison-checkmode"
+ image_id: "{{ ec2_ami_id }}"
+ security_groups: "{{ sg.group_id }}"
+ instance_type: "{{ ec2_instance_type }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ TestTag: "Some Value"
+ check_mode: yes
+
+ - name: "fact presented ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-checkmode-comparison"
+ register: presented_instance_fact
+
+ - name: "fact checkmode ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-checkmode-comparison-checkmode"
+ register: checkmode_instance_fact
+
+ - name: "Confirm whether the check mode is working normally."
+ assert:
+ that:
+ - "{{ presented_instance_fact.instances | length }} > 0"
+ - "{{ checkmode_instance_fact.instances | length }} == 0"
+
+ - name: "Stop instance (check mode)"
+ ec2_instance:
+ state: stopped
+ name: "{{ resource_prefix }}-checkmode-comparison"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ TestTag: "Some Value"
+ check_mode: yes
+
+ - name: "fact ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-checkmode-comparison"
+ register: confirm_checkmode_stopinstance_fact
+
+ - name: "Verify that it was not stopped."
+ assert:
+ that:
+ - confirm_checkmode_stopinstance_fact.instances[0].state.name not in ["stopped", "stopping"]
+
+ - name: "Stop instance."
+ ec2_instance:
+ state: stopped
+ name: "{{ resource_prefix }}-checkmode-comparison"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ TestTag: "Some Value"
+ wait: true
+ register: instance_stop
+
+ - name: "fact stopped ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-checkmode-comparison"
+ register: confirm_stopinstance_fact
+
+ - name: "Verify that it was stopped."
+ assert:
+ that:
+ - confirm_stopinstance_fact.instances[0].state.name in ["stopped", "stopping"]
+
+ - name: "Running instance in check mode."
+ ec2_instance:
+ state: running
+ name: "{{ resource_prefix }}-checkmode-comparison"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ TestTag: "Some Value"
+ check_mode: yes
+
+ - name: "fact ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-checkmode-comparison"
+ register: confirm_checkmode_runninginstance_fact
+
+ - name: "Verify that it was not running."
+ assert:
+ that:
+ - '"{{ confirm_checkmode_runninginstance_fact.instances[0].state.name }}" != "running"'
+
+ - name: "Running instance."
+ ec2_instance:
+ state: running
+ name: "{{ resource_prefix }}-checkmode-comparison"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ TestTag: "Some Value"
+
+ - name: "fact ec2 instance."
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-checkmode-comparison"
+ register: confirm_runninginstance_fact
+
+ - name: "Verify that it was running."
+ assert:
+ that:
+ - '"{{ confirm_runninginstance_fact.instances[0].state.name }}" == "running"'
+
+ - name: "Tag instance."
+ ec2_instance:
+ state: running
+ name: "{{ resource_prefix }}-checkmode-comparison"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ TestTag: "Some Other Value"
+ check_mode: yes
+
+ - name: "fact ec2 instance."
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-checkmode-comparison"
+ register: confirm_not_tagged
+
+ - name: "Verify that it hasn't been re-tagged."
+ assert:
+ that:
+ - '"{{ confirm_not_tagged.instances[0].tags.TestTag }}" == "Some Value"'
+
+ - name: "Terminate instance in check mode."
+ ec2_instance:
+ state: absent
+ name: "{{ resource_prefix }}-checkmode-comparison"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ TestTag: "Some Value"
+ wait: True
+ check_mode: yes
+
+ - name: "fact ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-checkmode-comparison"
+ register: confirm_checkmode_terminatedinstance_fact
+
+ - name: "Verify that it was not terminated,"
+ assert:
+ that:
+ - '"{{ confirm_checkmode_terminatedinstance_fact.instances[0].state.name }}" != "terminated"'
+
+ - name: "Terminate instance."
+ ec2_instance:
+ state: absent
+ name: "{{ resource_prefix }}-checkmode-comparison"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ TestTag: "Some Value"
+ wait: True
+
+ - name: "fact ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-checkmode-comparison"
+ register: confirm_terminatedinstance_fact
+
+ - name: "Verify that it was terminated,"
+ assert:
+ that:
+ - '"{{ confirm_terminatedinstance_fact.instances[0].state.name }}" == "terminated"'
+
+ always:
+ - name: "Terminate checkmode instances"
+ ec2_instance:
+ state: absent
+ filters:
+ "tag:TestId": "{{ ec2_instance_tag_TestId }}"
+ wait: yes
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/aliases
new file mode 100644
index 000000000..ca83d373d
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/aliases
@@ -0,0 +1,6 @@
+time=6m
+
+cloud/aws
+
+ec2_instance_info
+ec2_instance
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/defaults/main.yml
new file mode 100644
index 000000000..eb1859b3f
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+# defaults file for ec2_instance_cpu_options
+ec2_instance_type: 't3.micro'
+ec2_instance_tag_TestId: '{{ resource_prefix }}-cpu-options'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/meta/main.yml
new file mode 100644
index 000000000..2d7d140d4
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/meta/main.yml
@@ -0,0 +1,6 @@
+# this just makes sure they're in the right place
+dependencies:
+- role: setup_ec2_facts
+- role: setup_ec2_instance_env
+ vars:
+ ec2_instance_test_name: cpu_options
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/tasks/main.yml
new file mode 100644
index 000000000..a0bdd4106
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/tasks/main.yml
@@ -0,0 +1,85 @@
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ - name: "create t3.nano instance with cpu_options"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-t3nano-1-threads-per-core"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ instance_type: t3.nano
+ cpu_options:
+ core_count: 1
+ threads_per_core: 1
+ wait: true
+ register: instance_creation
+
+ - name: "instance with cpu_options created with the right options"
+ assert:
+ that:
+ - instance_creation is success
+ - instance_creation is changed
+
+ - name: "modify cpu_options on existing instance (warning displayed)"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-t3nano-1-threads-per-core"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ instance_type: t3.nano
+ cpu_options:
+ core_count: 1
+ threads_per_core: 2
+ wait: true
+ register: cpu_options_update
+ ignore_errors: yes
+
+ - name: "fact presented ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-t3nano-1-threads-per-core"
+ register: presented_instance_fact
+
+ - name: "modify cpu_options has no effect on existing instance"
+ assert:
+ that:
+ - cpu_options_update is success
+ - cpu_options_update is not changed
+ - "{{ presented_instance_fact.instances | length }} > 0"
+ - "'{{ presented_instance_fact.instances.0.state.name }}' in ['running','pending']"
+ - "{{ presented_instance_fact.instances.0.cpu_options.core_count }} == 1"
+ - "{{ presented_instance_fact.instances.0.cpu_options.threads_per_core }} == 1"
+
+ - name: "create t3.nano instance with cpu_options(check mode)"
+ ec2_instance:
+ state: running
+ name: "{{ resource_prefix }}-test-t3nano-1-threads-per-core-checkmode"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ instance_type: t3.nano
+ cpu_options:
+ core_count: 1
+ threads_per_core: 1
+ wait: true
+ check_mode: yes
+
+ - name: "fact checkmode ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-t3nano-1-threads-per-core-checkmode"
+ register: checkmode_instance_fact
+
+ - name: "Confirm existence of instance id."
+ assert:
+ that:
+ - "{{ checkmode_instance_fact.instances | length }} == 0"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/aliases
new file mode 100644
index 000000000..7497e8011
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/aliases
@@ -0,0 +1,6 @@
+time=10m
+
+cloud/aws
+
+ec2_instance_info
+ec2_instance
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/defaults/main.yml
new file mode 100644
index 000000000..b233d4547
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+# defaults file for ec2_instance_default_vpc
+ec2_instance_type: 't3.micro'
+ec2_instance_tag_TestId: '{{ resource_prefix }}-default-vpc'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/meta/main.yml
new file mode 100644
index 000000000..7622736b4
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/meta/main.yml
@@ -0,0 +1,6 @@
+# this just makes sure they're in the right place
+dependencies:
+- role: setup_ec2_facts
+- role: setup_ec2_instance_env
+ vars:
+ ec2_instance_test_name: default_vpc
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/tasks/main.yml
new file mode 100644
index 000000000..3abcf0f8a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/tasks/main.yml
@@ -0,0 +1,63 @@
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ - name: "Make instance in a default subnet of the VPC"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-default-vpc"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ security_group: "default"
+ instance_type: "{{ ec2_instance_type }}"
+ wait: false
+ register: in_default_vpc
+
+ - name: "Make instance in a default subnet of the VPC(check mode)"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-default-vpc-checkmode"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ security_group: "default"
+ instance_type: "{{ ec2_instance_type }}"
+ check_mode: yes
+
+ - name: "fact presented ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-default-vpc"
+ register: presented_instance_fact
+
+ - name: "fact checkmode ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-default-vpc-checkmode"
+ register: checkmode_instance_fact
+
+ - name: "Confirm whether the check mode is working normally."
+ assert:
+ that:
+ - "{{ presented_instance_fact.instances | length }} > 0"
+ - "{{ checkmode_instance_fact.instances | length }} == 0"
+
+ - name: "Terminate instances"
+ ec2_instance:
+ state: absent
+ instance_ids: "{{ in_default_vpc.instance_ids }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+
+ always:
+ - name: "Terminate vpc_tests instances"
+ ec2_instance:
+ state: absent
+ filters:
+ "tag:TestId": "{{ ec2_instance_tag_TestId }}"
+ wait: yes
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/aliases
new file mode 100644
index 000000000..ca83d373d
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/aliases
@@ -0,0 +1,6 @@
+time=6m
+
+cloud/aws
+
+ec2_instance_info
+ec2_instance
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/defaults/main.yml
new file mode 100644
index 000000000..feec2e7c1
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+# defaults file for ec2_instance_ebs_optimized
+ec2_instance_type: 't3.micro'
+ec2_instance_tag_TestId: '{{ resource_prefix }}-ebs-optimized'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/meta/main.yml
new file mode 100644
index 000000000..9ee97b6f4
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/meta/main.yml
@@ -0,0 +1,6 @@
+# this just makes sure they're in the right place
+dependencies:
+- role: setup_ec2_facts
+- role: setup_ec2_instance_env
+ vars:
+ ec2_instance_test_name: ebs_optimized
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/tasks/main.yml
new file mode 100644
index 000000000..d01ee77ee
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/tasks/main.yml
@@ -0,0 +1,31 @@
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ - name: "Make EBS optimized instance in the testing subnet of the test VPC"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-ebs-optimized-instance-in-vpc"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ ebs_optimized: true
+ instance_type: t3.nano
+ wait: false
+ register: ebs_opt_in_vpc
+
+ - name: "Get ec2 instance info"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-ebs-optimized-instance-in-vpc"
+ register: ebs_opt_instance_info
+
+ - name: "Assert instance is ebs_optimized"
+ assert:
+ that:
+ - "{{ ebs_opt_instance_info.instances.0.ebs_optimized }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/aliases
new file mode 100644
index 000000000..ca83d373d
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/aliases
@@ -0,0 +1,6 @@
+time=6m
+
+cloud/aws
+
+ec2_instance_info
+ec2_instance
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/defaults/main.yml
new file mode 100644
index 000000000..7dca186d8
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+# defaults file for ec2_instance_external_resource_attach
+ec2_instance_type: 't3.micro'
+ec2_instance_tag_TestId: '{{ resource_prefix }}-external-attach'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/meta/main.yml
new file mode 100644
index 000000000..f30ad80c4
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/meta/main.yml
@@ -0,0 +1,6 @@
+# this just makes sure they're in the right place
+dependencies:
+- role: setup_ec2_facts
+- role: setup_ec2_instance_env
+ vars:
+ ec2_instance_test_name: external_resources
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/tasks/main.yml
new file mode 100644
index 000000000..7aa2c1960
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/tasks/main.yml
@@ -0,0 +1,161 @@
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ # Make custom ENIs and attach via the `network` parameter
+ - ec2_eni:
+ state: present
+ delete_on_termination: true
+ subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ security_groups:
+ - "{{ sg.group_id }}"
+ register: eni_a
+
+ - ec2_eni:
+ state: present
+ delete_on_termination: true
+ subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ security_groups:
+ - "{{ sg.group_id }}"
+ register: eni_b
+
+ - ec2_eni:
+ state: present
+ delete_on_termination: true
+ subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ security_groups:
+ - "{{ sg.group_id }}"
+ register: eni_c
+
+ - ec2_key:
+ name: "{{ resource_prefix }}_test_key"
+
+ - name: "Make instance in the testing subnet created in the test VPC"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-eni-vpc"
+ key_name: "{{ resource_prefix }}_test_key"
+ network:
+ interfaces:
+ - id: "{{ eni_a.interface.id }}"
+ image_id: "{{ ec2_ami_id }}"
+ availability_zone: '{{ subnet_b_az }}'
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ instance_type: "{{ ec2_instance_type }}"
+ wait: false
+ register: in_test_vpc
+
+ - name: "Gather {{ resource_prefix }}-test-eni-vpc info"
+ ec2_instance_info:
+ filters:
+ "tag:Name": '{{ resource_prefix }}-test-eni-vpc'
+ register: in_test_vpc_instance
+
+ - assert:
+ that:
+ - in_test_vpc_instance.instances.0.key_name == resource_prefix ~ '_test_key'
+ - '(in_test_vpc_instance.instances.0.network_interfaces | length) == 1'
+
+ - name: "Add a second interface (check_mode=true)"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-eni-vpc"
+ network:
+ interfaces:
+ - id: "{{ eni_a.interface.id }}"
+ - id: "{{ eni_b.interface.id }}"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ instance_type: "{{ ec2_instance_type }}"
+ wait: false
+ register: add_interface_check_mode
+ check_mode: true
+
+ - name: Validate task reported changed
+ assert:
+ that:
+ - add_interface_check_mode is changed
+
+ - name: "Gather {{ resource_prefix }}-test-eni-vpc info"
+ ec2_instance_info:
+ filters:
+ "tag:Name": '{{ resource_prefix }}-test-eni-vpc'
+ register: in_test_vpc_instance
+
+ - name: Validate that only 1 ENI is attached to the instance, since the previous task ran in check mode
+ assert:
+ that:
+ - in_test_vpc_instance.instances.0.key_name == resource_prefix ~ '_test_key'
+ - '(in_test_vpc_instance.instances.0.network_interfaces | length) == 1'
+
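+ # Hot-attaching a second ENI can fail transiently while the instance is still
+ # starting, so the next task retries on failure (a best-effort wait rather
+ # than a guarantee that the attach succeeds).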
+ - name: "Add a second interface"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-eni-vpc"
+ network:
+ interfaces:
+ - id: "{{ eni_a.interface.id }}"
+ - id: "{{ eni_b.interface.id }}"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ instance_type: "{{ ec2_instance_type }}"
+ wait: false
+ register: add_interface
+ until: add_interface is not failed
+ ignore_errors: true
+ retries: 10
+
+ - name: Validate that the instance has now 2 interfaces attached
+ block:
+ - name: "Gather {{ resource_prefix }}-test-eni-vpc info"
+ ec2_instance_info:
+ filters:
+ "tag:Name": '{{ resource_prefix }}-test-eni-vpc'
+ register: in_test_vpc_instance
+
+ - name: Assert that 2 ENIs are now attached to the instance
+ assert:
+ that:
+ - in_test_vpc_instance.instances.0.key_name == resource_prefix ~ '_test_key'
+ - '(in_test_vpc_instance.instances.0.network_interfaces | length) == 2'
+
+ when: add_interface is successful
+
+ - name: "Make instance in the testing subnet created in the test VPC(check mode)"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-eni-vpc-checkmode"
+ key_name: "{{ resource_prefix }}_test_key"
+ network:
+ interfaces:
+ - id: "{{ eni_c.interface.id }}"
+ image_id: "{{ ec2_ami_id }}"
+ availability_zone: '{{ subnet_b_az }}'
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ instance_type: "{{ ec2_instance_type }}"
+ check_mode: yes
+
+ - name: "fact presented ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-eni-vpc"
+ register: presented_instance_fact
+
+ - name: "fact checkmode ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-eni-vpc-checkmode"
+ register: checkmode_instance_fact
+
+ - name: "Confirm existence of instance id."
+ assert:
+ that:
+ - "{{ presented_instance_fact.instances | length }} > 0"
+ - "{{ checkmode_instance_fact.instances | length }} == 0"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/aliases
new file mode 100644
index 000000000..ca83d373d
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/aliases
@@ -0,0 +1,6 @@
+time=6m
+
+cloud/aws
+
+ec2_instance_info
+ec2_instance
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/defaults/main.yml
new file mode 100644
index 000000000..28e57b948
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+# defaults file for ec2_instance_hibernation_options
+ec2_instance_type: 't3.micro'
+ec2_instance_tag_TestId: '{{ resource_prefix }}-instance-hibernation-options'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/meta/main.yml
new file mode 100644
index 000000000..80a82ca0b
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/meta/main.yml
@@ -0,0 +1,9 @@
+# this just makes sure they're in the right place
+dependencies:
+- role: setup_ec2_facts
+- role: setup_ec2_instance_env
+ vars:
+ ec2_instance_test_name: hibernation_options
+- role: setup_botocore_pip
+ vars:
+ boto3_version: "1.20.30"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/tasks/main.yml
new file mode 100644
index 000000000..e6aace728
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/tasks/main.yml
@@ -0,0 +1,145 @@
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ - name: Create instance with hibernation option (check mode)
+ ec2_instance:
+ name: "{{ resource_prefix }}-hibernation-options"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ resource_prefix }}"
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ hibernation_options: true
+ instance_type: "{{ ec2_instance_type }}"
+ volumes:
+ - device_name: /dev/sda1
+ ebs:
+ delete_on_termination: true
+ encrypted: true
+ state: running
+ wait: yes
+ check_mode: yes
+ register: create_instance_check_mode_results
+
+ - name: Check the returned value for the earlier task
+ assert:
+ that:
+ - create_instance_check_mode_results is changed
+ - create_instance_check_mode_results.spec.HibernationOptions.Configured == True
+
+ - name: Create instance with hibernation options configured
+ ec2_instance:
+ name: "{{ resource_prefix }}-hibernation-options"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ resource_prefix }}"
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ hibernation_options: true
+ instance_type: "{{ ec2_instance_type }}"
+ volumes:
+ - device_name: /dev/sda1
+ ebs:
+ delete_on_termination: true
+ encrypted: true
+ state: running
+ wait: yes
+ register: create_instance_results
+
+ - set_fact:
+ instance_id: '{{ create_instance_results.instances[0].instance_id }}'
+
+ - name: Check return values of the create instance task
+ assert:
+ that:
+ - "{{ create_instance_results.instances | length }} > 0"
+ - "'{{ create_instance_results.instances.0.state.name }}' == 'running'"
+ - "'{{ create_instance_results.spec.HibernationOptions.Configured }}'"
+
+ - name: Gather information about the instance to get the hibernation status
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-hibernation-options"
+ register: instance_hibernation_status
+
+ - name: Assert hibernation options is true
+ assert:
+ that:
+ - instance_hibernation_status.instances[0].hibernation_options.configured == true
+
+ - name: Create instance with hibernation option (check mode) (idempotent)
+ ec2_instance:
+ name: "{{ resource_prefix }}-hibernation-options"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ resource_prefix }}"
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ hibernation_options: true
+ instance_type: "{{ ec2_instance_type }}"
+ volumes:
+ - device_name: /dev/sda1
+ ebs:
+ delete_on_termination: true
+ encrypted: true
+ state: running
+ wait: yes
+ check_mode: yes
+ register: create_instance_check_mode_results
+
+ - name: Check the returned value for the earlier task
+ assert:
+ that:
+ - create_instance_check_mode_results is not changed
+
+ - name: Create instance with hibernation options configured (idempotent)
+ ec2_instance:
+ name: "{{ resource_prefix }}-hibernation-options"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ resource_prefix }}"
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ hibernation_options: true
+ instance_type: "{{ ec2_instance_type }}"
+ volumes:
+ - device_name: /dev/sda1
+ ebs:
+ delete_on_termination: true
+ encrypted: true
+ state: running
+ wait: yes
+ register: create_instance_results
+
+ - name: Check return values of the create instance task
+ assert:
+ that:
+ - "{{ not create_instance_results.changed }}"
+ - "{{ create_instance_results.instances | length }} > 0"
+
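+ # EC2 hibernation requires an encrypted root EBS volume; the next task omits
+ # encryption, so the module is expected to fail with a prerequisites error.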
+ - name: Create instance with hibernation options configured with unencrypted volume
+ ec2_instance:
+ name: "{{ resource_prefix }}-hibernation-options-error"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ resource_prefix }}"
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ hibernation_options: true
+ instance_type: "{{ ec2_instance_type }}"
+ volumes:
+ - device_name: /dev/sda1
+ ebs:
+ delete_on_termination: true
+ register: create_instance_results
+ failed_when: "'Hibernation prerequisites not satisfied' not in create_instance_results.msg"
+
+ - name: Terminate the instance
+ ec2_instance:
+ filters:
+ "tag:TestId": "{{ resource_prefix }}"
+ state: absent
\ No newline at end of file
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/aliases
new file mode 100644
index 000000000..7497e8011
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/aliases
@@ -0,0 +1,6 @@
+time=10m
+
+cloud/aws
+
+ec2_instance_info
+ec2_instance
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/defaults/main.yml
new file mode 100644
index 000000000..2dc4d467b
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/defaults/main.yml
@@ -0,0 +1,7 @@
+---
+# defaults file for ec2_instance_iam_instance_role
+ec2_instance_type: 't3.micro'
+ec2_instance_tag_TestId: '{{ resource_prefix }}-instance-profile'
+
+first_iam_role: "ansible-test-{{ tiny_prefix }}-instance_role"
+second_iam_role: "ansible-test-{{ tiny_prefix }}-instance_role-2"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/files/assume-role-policy.json b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/files/assume-role-policy.json
new file mode 100644
index 000000000..72413abdd
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/files/assume-role-policy.json
@@ -0,0 +1,13 @@
+{
+ "Version": "2008-10-17",
+ "Statement": [
+ {
+ "Sid": "",
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "ec2.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/meta/main.yml
new file mode 100644
index 000000000..1e3a6043a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/meta/main.yml
@@ -0,0 +1,6 @@
+# this just makes sure they're in the right place
+dependencies:
+- role: setup_ec2_facts
+- role: setup_ec2_instance_env
+ vars:
+ ec2_instance_test_name: instance_role
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/tasks/main.yml
new file mode 100644
index 000000000..2f28ae3b8
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/tasks/main.yml
@@ -0,0 +1,131 @@
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ - name: "Create IAM role for test"
+ iam_role:
+ state: present
+ name: '{{ first_iam_role }}'
+ assume_role_policy_document: "{{ lookup('file','assume-role-policy.json') }}"
+ create_instance_profile: yes
+ managed_policy:
+ - AmazonEC2ContainerServiceRole
+ register: iam_role
+
+ - name: "Create second IAM role for test"
+ iam_role:
+ state: present
+ name: '{{ second_iam_role }}'
+ assume_role_policy_document: "{{ lookup('file','assume-role-policy.json') }}"
+ create_instance_profile: yes
+ managed_policy:
+ - AmazonEC2ContainerServiceRole
+ register: iam_role_2
+
+ - name: "wait 10 seconds for roles to become available"
+ wait_for:
+ timeout: 10
+ delegate_to: localhost
+
+ - name: "Make instance with an instance_role"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-instance-role"
+ image_id: "{{ ec2_ami_id }}"
+ security_groups: "{{ sg.group_id }}"
+ instance_type: "{{ ec2_instance_type }}"
+ instance_role: "{{ first_iam_role }}"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ register: instance_with_role
+
+ - assert:
+ that:
+ - 'instance_with_role.instances[0].iam_instance_profile.arn == iam_role.arn.replace(":role/", ":instance-profile/")'
+
+ - name: "Make instance with an instance_role(check mode)"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-instance-role-checkmode"
+ image_id: "{{ ec2_ami_id }}"
+ security_groups: "{{ sg.group_id }}"
+ instance_type: "{{ ec2_instance_type }}"
+ instance_role: "{{ iam_role.arn.replace(':role/', ':instance-profile/') }}"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ check_mode: yes
+
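+ # Check mode must not launch a real instance, so the info lookup for the
+ # "-checkmode" name below should come back empty.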
+ - name: "fact presented ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-instance-role"
+ register: presented_instance_fact
+
+ - name: "fact checkmode ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-instance-role-checkmode"
+ register: checkmode_instance_fact
+
+ - name: "Confirm whether the check mode is working normally."
+ assert:
+ that:
+ - "{{ presented_instance_fact.instances | length }} > 0"
+ - "{{ checkmode_instance_fact.instances | length }} == 0"
+
+ - name: "Update instance with new instance_role"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-instance-role"
+ image_id: "{{ ec2_ami_id }}"
+ security_groups: "{{ sg.group_id }}"
+ instance_type: "{{ ec2_instance_type }}"
+ instance_role: "{{ iam_role_2.arn.replace(':role/', ':instance-profile/') }}"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ register: instance_with_updated_role
+
+ - name: "wait 10 seconds for role update to complete"
+ wait_for:
+ timeout: 10
+ delegate_to: localhost
+
+ - name: "fact checkmode ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-instance-role"
+ register: updates_instance_info
+
+ - assert:
+ that:
+ - 'updates_instance_info.instances[0].iam_instance_profile.arn == iam_role_2.arn.replace(":role/", ":instance-profile/")'
+ - 'updates_instance_info.instances[0].instance_id == instance_with_role.instances[0].instance_id'
+
+ always:
+ # We need to delete the instances before we can delete the roles
+ - name: "Terminate iam_instance_role instances"
+ ec2_instance:
+ state: absent
+ filters:
+ "tag:TestId": "{{ ec2_instance_tag_TestId }}"
+ wait: yes
+ ignore_errors: yes
+
+ - name: "Delete IAM role for test"
+ iam_role:
+ state: absent
+ name: "{{ item }}"
+ delete_instance_profile: true
+ loop:
+ - '{{ first_iam_role }}'
+ - '{{ second_iam_role }}'
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/aliases
new file mode 100644
index 000000000..ca83d373d
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/aliases
@@ -0,0 +1,6 @@
+time=6m
+
+cloud/aws
+
+ec2_instance_info
+ec2_instance
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/defaults/main.yml
new file mode 100644
index 000000000..d5a60251e
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+# defaults file for ec2_instance_minimal
+ec2_instance_type: 't3.micro'
+ec2_instance_tag_TestId: '{{ resource_prefix }}-instance-minimal'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/meta/main.yml
new file mode 100644
index 000000000..7fa5de555
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/meta/main.yml
@@ -0,0 +1,6 @@
+# this just makes sure they're in the right place
+dependencies:
+- role: setup_ec2_facts
+- role: setup_ec2_instance_env
+ vars:
+ ec2_instance_test_name: minimal
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/tasks/main.yml
new file mode 100644
index 000000000..8dcfca437
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/tasks/main.yml
@@ -0,0 +1,699 @@
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ - name: "Create a new instance (check_mode)"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-basic"
+ instance_type: "{{ ec2_instance_type }}"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ wait: true
+ register: create_instance
+ check_mode: true
+
+ - assert:
+ that:
+ - create_instance is not failed
+ - create_instance is changed
+ - '"instance_ids" not in create_instance'
+ - '"ec2:RunInstances" not in create_instance.resource_actions'
+
+ - name: "Create a new instance"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-basic"
+ instance_type: "{{ ec2_instance_type }}"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ wait: true
+ register: create_instance
+
+ - assert:
+ that:
+ - create_instance is not failed
+ - create_instance is changed
+ - '"ec2:RunInstances" in create_instance.resource_actions'
+ - '"instance_ids" in create_instance'
+ - create_instance.instance_ids | length == 1
+ - create_instance.instance_ids[0].startswith("i-")
+
+ - name: "Save instance ID"
+ set_fact:
+ create_instance_id_1: "{{ create_instance.instance_ids[0] }}"
+
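+ # Re-running the same task (matched by Name) should find the existing
+ # instance and report no change, in both check mode and normal mode.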
+ - name: "Create a new instance - idempotency (check_mode)"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-basic"
+ instance_type: "{{ ec2_instance_type }}"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ wait: true
+ register: create_instance
+ check_mode: true
+
+ - assert:
+ that:
+ - create_instance is not failed
+ - create_instance is not changed
+ - '"ec2:RunInstances" not in create_instance.resource_actions'
+ - '"instance_ids" in create_instance'
+ - create_instance.instance_ids | length == 1
+ - create_instance.instance_ids[0] == create_instance_id_1
+
+ - name: "Create a new instance - idempotency"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-basic"
+ instance_type: "{{ ec2_instance_type }}"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ wait: true
+ register: create_instance
+
+ - assert:
+ that:
+ - create_instance is not failed
+ - create_instance is not changed
+ - '"ec2:RunInstances" not in create_instance.resource_actions'
+ - '"instance_ids" in create_instance'
+ - create_instance.instance_ids | length == 1
+ - create_instance.instance_ids[0] == create_instance_id_1
+
+################################################################
+
+ - name: "Create a new instance with a different name (check_mode)"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-basic-2"
+ instance_type: "{{ ec2_instance_type }}"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ wait: true
+ register: create_instance_2
+ check_mode: true
+
+ - assert:
+ that:
+ - create_instance_2 is not failed
+ - create_instance_2 is changed
+ - '"instance_ids" not in create_instance_2'
+ - '"ec2:RunInstances" not in create_instance_2.resource_actions'
+
+ - name: "Create a new instance with a different name"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-basic-2"
+ instance_type: "{{ ec2_instance_type }}"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ wait: true
+ register: create_instance_2
+
+ - assert:
+ that:
+ - create_instance_2 is not failed
+ - create_instance_2 is changed
+ - '"ec2:RunInstances" in create_instance_2.resource_actions'
+ - '"instance_ids" in create_instance_2'
+ - create_instance_2.instance_ids | length == 1
+ - create_instance_2.instance_ids[0].startswith("i-")
+ - create_instance_2.instance_ids[0] != create_instance_id_1
+
+ - name: "Save instance ID"
+ set_fact:
+ create_instance_id_2: "{{ create_instance_2.instance_ids[0] }}"
+
+ - name: "Create a new instance with a different name - idempotency (check_mode)"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-basic-2"
+ instance_type: "{{ ec2_instance_type }}"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ wait: true
+ register: create_instance_2
+ check_mode: true
+
+ - assert:
+ that:
+ - create_instance_2 is not failed
+ - create_instance_2 is not changed
+ - '"ec2:RunInstances" not in create_instance_2.resource_actions'
+ - '"instance_ids" in create_instance_2'
+ - create_instance_2.instance_ids | length == 1
+ - create_instance_2.instance_ids[0] == create_instance_id_2
+
+ - name: "Create a new instance with a different name - idempotency"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-basic-2"
+ instance_type: "{{ ec2_instance_type }}"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ wait: true
+ register: create_instance_2
+
+ - assert:
+ that:
+ - create_instance_2 is not failed
+ - create_instance_2 is not changed
+ - '"ec2:RunInstances" not in create_instance_2.resource_actions'
+ - '"instance_ids" in create_instance_2'
+ - create_instance_2.instance_ids | length == 1
+ - create_instance_2.instance_ids[0] == create_instance_id_2
+
+################################################################
+
+ - name: "Create a new instance with a different name in tags (check_mode)"
+ ec2_instance:
+ state: present
+ instance_type: "{{ ec2_instance_type }}"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ Name: "{{ resource_prefix }}-test-basic-tag"
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ wait: true
+ register: create_instance_tag
+ check_mode: true
+
+ - assert:
+ that:
+ - create_instance_tag is not failed
+ - create_instance_tag is changed
+ - '"instance_ids" not in create_instance_tag'
+ - '"ec2:RunInstances" not in create_instance_tag.resource_actions'
+
+ - name: "Create a new instance with a different name in tags"
+ ec2_instance:
+ state: present
+ instance_type: "{{ ec2_instance_type }}"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ Name: "{{ resource_prefix }}-test-basic-tag"
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ wait: true
+ register: create_instance_tag
+
+ - assert:
+ that:
+ - create_instance_tag is not failed
+ - create_instance_tag is changed
+ - '"ec2:RunInstances" in create_instance_tag.resource_actions'
+ - '"instance_ids" in create_instance_tag'
+ - create_instance_tag.instance_ids | length == 1
+ - create_instance_tag.instance_ids[0].startswith("i-")
+ - create_instance_tag.instance_ids[0] != create_instance_id_1
+ - create_instance_tag.instance_ids[0] != create_instance_id_2
+
+ - name: "Save instance ID"
+ set_fact:
+ create_instance_id_tag: "{{ create_instance_tag.instance_ids[0] }}"
+
+ - name: "Create a new instance with a different name in tags - idempotency (check_mode)"
+ ec2_instance:
+ state: present
+ instance_type: "{{ ec2_instance_type }}"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ Name: "{{ resource_prefix }}-test-basic-tag"
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ wait: true
+ register: create_instance_tag
+ check_mode: true
+
+ - assert:
+ that:
+ - create_instance_tag is not failed
+ - create_instance_tag is not changed
+ - '"ec2:RunInstances" not in create_instance_tag.resource_actions'
+ - '"instance_ids" in create_instance_tag'
+ - create_instance_tag.instance_ids | length == 1
+ - create_instance_tag.instance_ids[0] == create_instance_id_tag
+
+ - name: "Create a new instance with a different name in tags - idempotency"
+ ec2_instance:
+ state: present
+ instance_type: "{{ ec2_instance_type }}"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ Name: "{{ resource_prefix }}-test-basic-tag"
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ wait: true
+ register: create_instance_tag
+
+ - assert:
+ that:
+ - create_instance_tag is not failed
+ - create_instance_tag is not changed
+ - '"ec2:RunInstances" not in create_instance_tag.resource_actions'
+ - '"instance_ids" in create_instance_tag'
+ - create_instance_tag.instance_ids | length == 1
+ - create_instance_tag.instance_ids[0] == create_instance_id_tag
+
+###############################################################
+
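+ # availability_zone pins instance placement; ec2_instance_info is used below
+ # to confirm the instance actually landed in the requested AZ.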
+ - name: "Create a new instance in AZ {{ aws_region }}a"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-basic-{{ aws_region }}a"
+ instance_type: "{{ ec2_instance_type }}"
+ image_id: "{{ ec2_ami_id }}"
+ region: "{{ aws_region }}"
+ availability_zone: "{{ aws_region }}a"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ wait: true
+ register: create_instance
+
+ - name: "Save instance ID"
+ set_fact:
+ create_instance_id_3: "{{ create_instance.instance_ids[0] }}"
+
+ - name: Get instance info
+ ec2_instance_info:
+ instance_ids:
+ - "{{ create_instance_id_3 }}"
+ register: info_result
+
+ - assert:
+ that:
+ - create_instance is not failed
+ - create_instance is changed
+ - '"ec2:RunInstances" in create_instance.resource_actions'
+ - '"instance_ids" in create_instance'
+ - create_instance.instance_ids | length == 1
+ - create_instance.instance_ids[0].startswith("i-")
+ - info_result.instances[0].placement.availability_zone == aws_region ~ 'a'
+
+ - name: "Create a new instance in AZ {{ aws_region }}b"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-basic-{{ aws_region }}b"
+ instance_type: "{{ ec2_instance_type }}"
+ image_id: "{{ ec2_ami_id }}"
+ region: "{{ aws_region }}"
+ availability_zone: "{{ aws_region }}b"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ wait: true
+ register: create_instance
+
+ - name: "Save instance ID"
+ set_fact:
+ create_instance_id_4: "{{ create_instance.instance_ids[0] }}"
+
+ - name: Get instance info
+ ec2_instance_info:
+ instance_ids:
+ - "{{ create_instance_id_4 }}"
+ register: info_result
+
+ - assert:
+ that:
+ - create_instance is not failed
+ - create_instance is changed
+ - '"ec2:RunInstances" in create_instance.resource_actions'
+ - '"instance_ids" in create_instance'
+ - create_instance.instance_ids | length == 1
+ - create_instance.instance_ids[0].startswith("i-")
+ - info_result.instances[0].placement.availability_zone == aws_region ~ 'b'
+
+################################################################
+
+ - name: "Terminate instance based on name parameter (check_mode)"
+ ec2_instance:
+ state: absent
+ name: "{{ resource_prefix }}-test-basic"
+ wait: true
+ register: terminate_name
+ check_mode: true
+
+ - assert:
+ that:
+ - terminate_name is not failed
+ - terminate_name is changed
+ - '"ec2:TerminateInstances" not in terminate_name.resource_actions'
+ - '"terminate_failed" in terminate_name'
+ - '"terminate_success" in terminate_name'
+ - terminate_name.terminate_failed | length == 0
+ - terminate_name.terminate_success | length == 1
+ - terminate_name.terminate_success[0] == create_instance_id_1
+
+ - name: "Terminate instance based on name parameter"
+ ec2_instance:
+ state: absent
+ name: "{{ resource_prefix }}-test-basic"
+ wait: true
+ register: terminate_name
+
+ - assert:
+ that:
+ - terminate_name is not failed
+ - terminate_name is changed
+ - '"ec2:TerminateInstances" in terminate_name.resource_actions'
+ - '"terminate_failed" in terminate_name'
+ - '"terminate_success" in terminate_name'
+ - terminate_name.terminate_failed | length == 0
+ - terminate_name.terminate_success | length == 1
+ - terminate_name.terminate_success[0] == create_instance_id_1
+
+ - name: "Terminate instance based on name parameter - idempotency (check_mode)"
+ ec2_instance:
+ state: absent
+ name: "{{ resource_prefix }}-test-basic"
+ wait: true
+ register: terminate_name
+ check_mode: true
+
+ - assert:
+ that:
+ - terminate_name is not failed
+ - terminate_name is not changed
+ - '"ec2:TerminateInstances" not in terminate_name.resource_actions'
+ - '"terminate_failed" not in terminate_name'
+ - '"terminate_success" not in terminate_name'
+
+ - name: "Terminate instance based on name parameter - idempotency"
+ ec2_instance:
+ state: absent
+ name: "{{ resource_prefix }}-test-basic"
+ wait: true
+ register: terminate_name
+
+ - assert:
+ that:
+ - terminate_name is not failed
+ - terminate_name is not changed
+ - '"ec2:TerminateInstances" not in terminate_name.resource_actions'
+ - '"terminate_failed" not in terminate_name'
+ - '"terminate_success" not in terminate_name'
+
+################################################################
+
+ - name: "Terminate instance based on name tag (check_mode)"
+ ec2_instance:
+ state: absent
+ tags:
+ Name: "{{ resource_prefix }}-test-basic-tag"
+ wait: true
+ register: terminate_tag
+ check_mode: true
+
+ - assert:
+ that:
+ - terminate_tag is not failed
+ - terminate_tag is changed
+ - '"ec2:TerminateInstances" not in terminate_tag.resource_actions'
+ - '"terminate_failed" in terminate_tag'
+ - '"terminate_success" in terminate_tag'
+ - terminate_tag.terminate_failed | length == 0
+ - terminate_tag.terminate_success | length == 1
+ - terminate_tag.terminate_success[0] == create_instance_id_tag
+
+ - name: "Terminate instance based on name tag"
+ ec2_instance:
+ state: absent
+ tags:
+ Name: "{{ resource_prefix }}-test-basic-tag"
+ wait: true
+ register: terminate_tag
+
+ - assert:
+ that:
+ - terminate_tag is not failed
+ - terminate_tag is changed
+ - '"ec2:TerminateInstances" in terminate_tag.resource_actions'
+ - '"terminate_failed" in terminate_tag'
+ - '"terminate_success" in terminate_tag'
+ - terminate_tag.terminate_failed | length == 0
+ - terminate_tag.terminate_success | length == 1
+ - terminate_tag.terminate_success[0] == create_instance_id_tag
+
+ - name: "Terminate instance based on name tag - idempotency (check_mode)"
+ ec2_instance:
+ state: absent
+ tags:
+ Name: "{{ resource_prefix }}-test-basic-tag"
+ wait: true
+ register: terminate_tag
+ check_mode: true
+
+ - assert:
+ that:
+ - terminate_tag is not failed
+ - terminate_tag is not changed
+ - '"ec2:TerminateInstances" not in terminate_tag.resource_actions'
+ - '"terminate_failed" not in terminate_tag'
+ - '"terminate_success" not in terminate_tag'
+
+ - name: "Terminate instance based on name tag - idempotency"
+ ec2_instance:
+ state: absent
+ tags:
+ Name: "{{ resource_prefix }}-test-basic-tag"
+ wait: true
+ register: terminate_tag
+
+ - assert:
+ that:
+ - terminate_tag is not failed
+ - terminate_tag is not changed
+ - '"ec2:TerminateInstances" not in terminate_tag.resource_actions'
+ - '"terminate_failed" not in terminate_tag'
+ - '"terminate_success" not in terminate_tag'
+
+################################################################
+
+ - name: "Terminate instance based on id (check_mode)"
+ ec2_instance:
+ state: absent
+ instance_ids:
+ - "{{ create_instance_id_2 }}"
+ wait: true
+ register: terminate_id
+ check_mode: true
+
+ - assert:
+ that:
+ - terminate_id is not failed
+ - terminate_id is changed
+ - '"ec2:TerminateInstances" not in terminate_id.resource_actions'
+ - '"terminate_failed" in terminate_id'
+ - '"terminate_success" in terminate_id'
+ - terminate_id.terminate_failed | length == 0
+ - terminate_id.terminate_success | length == 1
+ - terminate_id.terminate_success[0] == create_instance_id_2
+
+ - name: "Terminate instance based on id"
+ ec2_instance:
+ state: absent
+ instance_ids:
+ - "{{ create_instance_id_2 }}"
+ wait: true
+ register: terminate_id
+
+ - assert:
+ that:
+ - terminate_id is not failed
+ - terminate_id is changed
+ - '"ec2:TerminateInstances" in terminate_id.resource_actions'
+ - '"terminate_failed" in terminate_id'
+ - '"terminate_success" in terminate_id'
+ - terminate_id.terminate_failed | length == 0
+ - terminate_id.terminate_success | length == 1
+ - terminate_id.terminate_success[0] == create_instance_id_2
+
+ - name: "Terminate instance based on id - idempotency (check_mode)"
+ ec2_instance:
+ state: absent
+ instance_ids:
+ - "{{ create_instance_id_2 }}"
+ wait: true
+ register: terminate_id
+ check_mode: true
+
+ - assert:
+ that:
+ - terminate_id is not failed
+ - terminate_id is not changed
+ - '"ec2:TerminateInstances" not in terminate_id.resource_actions'
+ - '"terminate_failed" not in terminate_id'
+ - '"terminate_success" not in terminate_id'
+
+ - name: "Terminate instance based on id - idempotency"
+ ec2_instance:
+ state: absent
+ instance_ids:
+ - "{{ create_instance_id_2 }}"
+ wait: true
+ register: terminate_id
+
+ - assert:
+ that:
+ - terminate_id is not failed
+ - terminate_id is not changed
+ - '"ec2:TerminateInstances" not in terminate_id.resource_actions'
+ - '"terminate_failed" not in terminate_id'
+ - '"terminate_success" not in terminate_id'
+
+################################################################
+
+ - name: "Terminate instance based on id (check_mode)"
+ ec2_instance:
+ state: absent
+ instance_ids:
+ - "{{ create_instance_id_3 }}"
+ wait: true
+ register: terminate_id
+ check_mode: true
+
+ - assert:
+ that:
+ - terminate_id is not failed
+ - terminate_id is changed
+ - '"ec2:TerminateInstances" not in terminate_id.resource_actions'
+ - '"terminate_failed" in terminate_id'
+ - '"terminate_success" in terminate_id'
+ - terminate_id.terminate_failed | length == 0
+ - terminate_id.terminate_success | length == 1
+ - terminate_id.terminate_success[0] == create_instance_id_3
+
+ - name: "Terminate instance based on id"
+ ec2_instance:
+ state: absent
+ instance_ids:
+ - "{{ create_instance_id_3 }}"
+ wait: true
+ register: terminate_id
+
+ - assert:
+ that:
+ - terminate_id is not failed
+ - terminate_id is changed
+ - '"ec2:TerminateInstances" in terminate_id.resource_actions'
+ - '"terminate_failed" in terminate_id'
+ - '"terminate_success" in terminate_id'
+ - terminate_id.terminate_failed | length == 0
+ - terminate_id.terminate_success | length == 1
+ - terminate_id.terminate_success[0] == create_instance_id_3
+
+ - name: "Terminate instance based on id - idempotency (check_mode)"
+ ec2_instance:
+ state: absent
+ instance_ids:
+ - "{{ create_instance_id_3 }}"
+ wait: true
+ register: terminate_id
+ check_mode: true
+
+ - assert:
+ that:
+ - terminate_id is not failed
+ - terminate_id is not changed
+ - '"ec2:TerminateInstances" not in terminate_id.resource_actions'
+ - '"terminate_failed" not in terminate_id'
+ - '"terminate_success" not in terminate_id'
+
+ - name: "Terminate instance based on id - idempotency"
+ ec2_instance:
+ state: absent
+ instance_ids:
+ - "{{ create_instance_id_3 }}"
+ wait: true
+ register: terminate_id
+
+ - assert:
+ that:
+ - terminate_id is not failed
+ - terminate_id is not changed
+ - '"ec2:TerminateInstances" not in terminate_id.resource_actions'
+ - '"terminate_failed" not in terminate_id'
+ - '"terminate_success" not in terminate_id'
+
+################################################################
+
+ - name: "Terminate instance based on id (check_mode)"
+ ec2_instance:
+ state: absent
+ instance_ids:
+ - "{{ create_instance_id_4 }}"
+ wait: true
+ register: terminate_id
+ check_mode: true
+
+ - assert:
+ that:
+ - terminate_id is not failed
+ - terminate_id is changed
+ - '"ec2:TerminateInstances" not in terminate_id.resource_actions'
+ - '"terminate_failed" in terminate_id'
+ - '"terminate_success" in terminate_id'
+ - terminate_id.terminate_failed | length == 0
+ - terminate_id.terminate_success | length == 1
+ - terminate_id.terminate_success[0] == create_instance_id_4
+
+ - name: "Terminate instance based on id"
+ ec2_instance:
+ state: absent
+ instance_ids:
+ - "{{ create_instance_id_4 }}"
+ wait: true
+ register: terminate_id
+
+ - assert:
+ that:
+ - terminate_id is not failed
+ - terminate_id is changed
+ - '"ec2:TerminateInstances" in terminate_id.resource_actions'
+ - '"terminate_failed" in terminate_id'
+ - '"terminate_success" in terminate_id'
+ - terminate_id.terminate_failed | length == 0
+ - terminate_id.terminate_success | length == 1
+ - terminate_id.terminate_success[0] == create_instance_id_4
+
+ - name: "Terminate instance based on id - idempotency (check_mode)"
+ ec2_instance:
+ state: absent
+ instance_ids:
+ - "{{ create_instance_id_4 }}"
+ wait: true
+ register: terminate_id
+ check_mode: true
+
+ - assert:
+ that:
+ - terminate_id is not failed
+ - terminate_id is not changed
+ - '"ec2:TerminateInstances" not in terminate_id.resource_actions'
+ - '"terminate_failed" not in terminate_id'
+ - '"terminate_success" not in terminate_id'
+
+ - name: "Terminate instance based on id - idempotency"
+ ec2_instance:
+ state: absent
+ instance_ids:
+ - "{{ create_instance_id_4 }}"
+ wait: true
+ register: terminate_id
+
+ - assert:
+ that:
+ - terminate_id is not failed
+ - terminate_id is not changed
+ - '"ec2:TerminateInstances" not in terminate_id.resource_actions'
+ - '"terminate_failed" not in terminate_id'
+ - '"terminate_success" not in terminate_id'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/aliases
new file mode 100644
index 000000000..b81074d57
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/aliases
@@ -0,0 +1,6 @@
+time=30m
+
+cloud/aws
+
+ec2_instance_info
+ec2_instance
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/defaults/main.yml
new file mode 100644
index 000000000..065610b00
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+# defaults file for ec2_instance_multiple
+ec2_instance_type: 't3.micro'
+ec2_instance_tag_TestId: '{{ resource_prefix }}-instance-multiple'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/meta/main.yml
new file mode 100644
index 000000000..c3ba887f7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/meta/main.yml
@@ -0,0 +1,6 @@
+# this just makes sure they're in the right place
+dependencies:
+- role: setup_ec2_facts
+- role: setup_ec2_instance_env
+ vars:
+ ec2_instance_test_name: multiple_instances
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/tasks/main.yml
new file mode 100644
index 000000000..911e4c170
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/tasks/main.yml
@@ -0,0 +1,443 @@
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+################################################################
+
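+ # count launches a fixed number of new instances; the TestId tag filter
+ # scopes the module (and the later cleanup) to instances from this test run.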
+ - name: "Create multiple instance (check_mode)"
+ ec2_instance:
+ instance_type: "{{ ec2_instance_type }}"
+ count: 5
+ region: "{{ aws_region }}"
+ image_id: "{{ ec2_ami_id }}"
+ state: present
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ filters:
+ "tag:TestId": "{{ ec2_instance_tag_TestId }}"
+ register: create_multiple_instances
+ check_mode: true
+
+ - assert:
+ that:
+ - create_multiple_instances is not failed
+ - create_multiple_instances is changed
+ - '"instance_ids" not in create_multiple_instances'
+ - '"ec2:RunInstances" not in create_multiple_instances.resource_actions'
+
+ - name: "Create multiple instances"
+ ec2_instance:
+ instance_type: "{{ ec2_instance_type }}"
+ count: 5
+ region: "{{ aws_region }}"
+ image_id: "{{ ec2_ami_id }}"
+ state: present
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ filters:
+ "tag:TestId": "{{ ec2_instance_tag_TestId }}"
+ wait: true
+ register: create_multiple_instances
+
+ - assert:
+ that:
+ - create_multiple_instances is not failed
+ - create_multiple_instances is changed
+ - '"ec2:RunInstances" in create_multiple_instances.resource_actions'
+ - '"instance_ids" in create_multiple_instances'
+ - create_multiple_instances.instance_ids | length == 5
+
+ - name: "Save instance IDs"
+ set_fact:
+ created_instance_ids: "{{ create_multiple_instances.instance_ids }}"
+
+# Terminate instances created in count test
+
+ - name: "Terminate instance based on id (check_mode)"
+ ec2_instance:
+ state: absent
+ instance_ids:
+ - "{{ item }}"
+ register: terminate_id
+ check_mode: true
+ with_items: "{{ created_instance_ids }}"
+
+ - assert:
+ that:
+ - terminate_id is not failed
+ - terminate_id is changed
+
+ - name: "Terminate instance based on id"
+ ec2_instance:
+ state: absent
+ instance_ids:
+ - "{{ item }}"
+ wait: true
+ register: terminate_id
+ with_items: "{{ created_instance_ids }}"
+
+ - assert:
+ that:
+ - terminate_id is not failed
+ - terminate_id is changed
+
+ - name: "Terminate instance based on id - Idempotency (check_mode)"
+ ec2_instance:
+ state: absent
+ instance_ids:
+ - "{{ item }}"
+ register: terminate_id
+ check_mode: true
+ with_items: "{{ created_instance_ids }}"
+
+ - assert:
+ that:
+ - terminate_id is not failed
+ - terminate_id is not changed
+
+ - name: "Terminate instance based on id - Idempotency"
+ ec2_instance:
+ state: absent
+ instance_ids:
+ - "{{ item }}"
+ register: terminate_id
+ with_items: "{{ created_instance_ids }}"
+
+ - assert:
+ that:
+ - terminate_id is not failed
+ - terminate_id is not changed
+
+################################################################
+
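+ # Unlike count, exact_count enforces a total: the module launches or
+ # terminates instances as needed so that exactly that many match the name and tags.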
+ - name: "Enforce instance count - launch 5 instances (check_mode)"
+ ec2_instance:
+ instance_type: "{{ ec2_instance_type }}"
+ exact_count: 5
+ region: "{{ aws_region }}"
+ name: "{{ resource_prefix }}-test-enf_cnt"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ register: create_multiple_instances
+ check_mode: true
+
+ - assert:
+ that:
+ - create_multiple_instances is not failed
+ - create_multiple_instances is changed
+ - '"instance_ids" not in create_multiple_instances'
+ - '"ec2:RunInstances" not in create_multiple_instances.resource_actions'
+
+ - name: "Enforce instance count - launch 5 instances"
+ ec2_instance:
+ instance_type: "{{ ec2_instance_type }}"
+ exact_count: 5
+ region: "{{ aws_region }}"
+ image_id: "{{ ec2_ami_id }}"
+ name: "{{ resource_prefix }}-test-enf_cnt"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ wait: true
+ register: create_multiple_instances
+
+ - assert:
+ that:
+ - create_multiple_instances is not failed
+ - create_multiple_instances is changed
+ - '"ec2:RunInstances" in create_multiple_instances.resource_actions'
+ - '"instance_ids" in create_multiple_instances'
+ - create_multiple_instances.instance_ids | length == 5
+
+ - name: "Enforce instance count - launch 5 instances (check_mode - Idempotency)"
+ ec2_instance:
+ instance_type: "{{ ec2_instance_type }}"
+ exact_count: 5
+ region: "{{ aws_region }}"
+ image_id: "{{ ec2_ami_id }}"
+ name: "{{ resource_prefix }}-test-enf_cnt"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ register: create_multiple_instances
+ check_mode: true
+
+ - assert:
+ that:
+ - create_multiple_instances is not failed
+ - create_multiple_instances is not changed
+ - '"instance_ids" in create_multiple_instances'
+ - create_multiple_instances.instance_ids | length == 5
+ - '"ec2:RunInstances" not in create_multiple_instances.resource_actions'
+
+ - name: "Enforce instance count - launch 5 instances (Idempotency)"
+ ec2_instance:
+ instance_type: "{{ ec2_instance_type }}"
+ exact_count: 5
+ region: "{{ aws_region }}"
+ image_id: "{{ ec2_ami_id }}"
+ name: "{{ resource_prefix }}-test-enf_cnt"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ wait: true
+ register: create_multiple_instances
+
+ - assert:
+ that:
+ - create_multiple_instances is not failed
+ - create_multiple_instances is not changed
+ - '"instance_ids" in create_multiple_instances'
+ - create_multiple_instances.instance_ids | length == 5
+ - '"ec2:RunInstances" not in create_multiple_instances.resource_actions'
+
+ - name: "Enforce instance count to 3 - Terminate 2 instances (check_mode)"
+ ec2_instance:
+ instance_type: "{{ ec2_instance_type }}"
+ exact_count: 3
+ region: "{{ aws_region }}"
+ image_id: "{{ ec2_ami_id }}"
+ name: "{{ resource_prefix }}-test-enf_cnt"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ register: terminate_multiple_instances
+ check_mode: true
+
+ - assert:
+ that:
+ - terminate_multiple_instances is not failed
+ - terminate_multiple_instances is changed
+ - '"instance_ids" in terminate_multiple_instances'
+ - terminate_multiple_instances.instance_ids | length == 5
+ - '"terminated_ids" in terminate_multiple_instances'
+ - terminate_multiple_instances.terminated_ids | length == 2
+ - '"ec2:RunInstances" not in terminate_multiple_instances.resource_actions'
+
+ - name: "Enforce instance count to 3 - Terminate 2 instances"
+ ec2_instance:
+ instance_type: "{{ ec2_instance_type }}"
+ exact_count: 3
+ region: "{{ aws_region }}"
+ image_id: "{{ ec2_ami_id }}"
+ name: "{{ resource_prefix }}-test-enf_cnt"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ wait: true
+ register: terminate_multiple_instances
+
+ - assert:
+ that:
+ - terminate_multiple_instances is not failed
+ - terminate_multiple_instances is changed
+ - '"instance_ids" in terminate_multiple_instances'
+ - terminate_multiple_instances.instance_ids | length == 5
+ - '"terminated_ids" in terminate_multiple_instances'
+ - terminate_multiple_instances.terminated_ids | length == 2
+
+ - name: "Enforce instance count to 3 - Terminate 2 instances (check_mode - Idempotency)"
+ ec2_instance:
+ instance_type: "{{ ec2_instance_type }}"
+ exact_count: 3
+ region: "{{ aws_region }}"
+ image_id: "{{ ec2_ami_id }}"
+ name: "{{ resource_prefix }}-test-enf_cnt"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ register: terminate_multiple_instances
+ check_mode: true
+
+ - assert:
+ that:
+ - terminate_multiple_instances is not failed
+ - terminate_multiple_instances is not changed
+ - '"instance_ids" in terminate_multiple_instances'
+ - terminate_multiple_instances.instance_ids | length == 3
+ - '"terminated_ids" not in terminate_multiple_instances'
+ - '"ec2:TerminateInstances" not in terminate_multiple_instances.resource_actions'
+
+ - name: "Enforce instance count to 3 - Terminate 2 instances (Idempotency)"
+ ec2_instance:
+ instance_type: "{{ ec2_instance_type }}"
+ exact_count: 3
+ region: "{{ aws_region }}"
+ image_id: "{{ ec2_ami_id }}"
+ name: "{{ resource_prefix }}-test-enf_cnt"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ register: terminate_multiple_instances
+
+ - assert:
+ that:
+ - terminate_multiple_instances is not failed
+ - terminate_multiple_instances is not changed
+ - '"instance_ids" in terminate_multiple_instances'
+ - terminate_multiple_instances.instance_ids | length == 3
+ - '"terminated_ids" not in terminate_multiple_instances'
+ - '"ec2:TerminateInstances" not in terminate_multiple_instances.resource_actions'
+
+ - name: "Enforce instance count to 6 - Launch 3 more instances (check_mode)"
+ ec2_instance:
+ instance_type: "{{ ec2_instance_type }}"
+ exact_count: 6
+ region: "{{ aws_region }}"
+ image_id: "{{ ec2_ami_id }}"
+ name: "{{ resource_prefix }}-test-enf_cnt"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ wait: true
+ check_mode: true
+ register: create_multiple_instances
+
+ - assert:
+ that:
+ - create_multiple_instances is not failed
+ - create_multiple_instances is changed
+ - '"instance_ids" in create_multiple_instances'
+ - create_multiple_instances.instance_ids | length == 3
+ - '"changed_ids" not in create_multiple_instances'
+ - '"ec2:RunInstances" not in create_multiple_instances.resource_actions'
+
+ - name: "Enforce instance count to 6 - Launch 3 more instances"
+ ec2_instance:
+ instance_type: "{{ ec2_instance_type }}"
+ exact_count: 6
+ region: "{{ aws_region }}"
+ image_id: "{{ ec2_ami_id }}"
+ name: "{{ resource_prefix }}-test-enf_cnt"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ wait: true
+ register: create_multiple_instances
+
+ - name: Show the instance IDs after scaling up to 6
+ debug: msg="{{ create_multiple_instances.instance_ids }}"
+
+ - assert:
+ that:
+ - create_multiple_instances is not failed
+ - create_multiple_instances is changed
+ - '"instance_ids" in create_multiple_instances'
+ - create_multiple_instances.instance_ids | length == 6
+ - '"changed_ids" in create_multiple_instances'
+ - create_multiple_instances.changed_ids | length == 3
+
+ - name: "Enforce instance count to 6 - Launch 3 more instances (check_mode - Idempotency)"
+ ec2_instance:
+ instance_type: "{{ ec2_instance_type }}"
+ exact_count: 6
+ region: "{{ aws_region }}"
+ image_id: "{{ ec2_ami_id }}"
+ name: "{{ resource_prefix }}-test-enf_cnt"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ wait: true
+ check_mode: true
+ register: create_multiple_instances
+
+ - assert:
+ that:
+ - create_multiple_instances is not failed
+ - create_multiple_instances is not changed
+ - '"instance_ids" in create_multiple_instances'
+ - create_multiple_instances.instance_ids | length == 6
+ - '"changed_ids" not in create_multiple_instances'
+ - '"ec2:RunInstances" not in create_multiple_instances.resource_actions'
+
+ - name: "Enforce instance count to 6 - Launch 3 more instances (Idempotency)"
+ ec2_instance:
+ instance_type: "{{ ec2_instance_type }}"
+ exact_count: 6
+ region: "{{ aws_region }}"
+ image_id: "{{ ec2_ami_id }}"
+ name: "{{ resource_prefix }}-test-enf_cnt"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ wait: true
+ register: create_multiple_instances
+
+ - assert:
+ that:
+ - create_multiple_instances is not failed
+ - create_multiple_instances is not changed
+ - '"instance_ids" in create_multiple_instances'
+ - create_multiple_instances.instance_ids | length == 6
+ - '"changed_ids" not in create_multiple_instances'
+ - '"ec2:RunInstances" not in create_multiple_instances.resource_actions'
+
+
+ - name: Gather information about any running instance with Name ending with "-test-enf_cnt"
+ ec2_instance_info:
+ region: "{{ ec2_region }}"
+ filters:
+ "tag:Name": "*-test-enf_cnt"
+ instance-state-name: [ "running"]
+ register: test_instances
+
+ - name: Make a list of instance ids to terminate
+ set_fact:
+ instances_to_terminate: "{{ test_instances.instances | map(attribute='instance_id') | list }}"
+
+# Terminate instances created in enforce count test
+
+ - name: "Terminate instance based on id (check_mode)"
+ ec2_instance:
+ state: absent
+ instance_ids:
+ - "{{ item }}"
+ wait: true
+ register: terminate_id
+ check_mode: true
+ with_items: "{{ instances_to_terminate }}"
+
+ - assert:
+ that:
+ - terminate_id is not failed
+ - terminate_id is changed
+
+ - name: "Terminate instance based on id"
+ ec2_instance:
+ state: absent
+ instance_ids:
+ - "{{ item }}"
+ wait: true
+ register: terminate_id
+ with_items: "{{ instances_to_terminate }}"
+
+ - assert:
+ that:
+ - terminate_id is not failed
+ - terminate_id is changed
+
+ - name: "Terminate instance based on id - Idempotency (check_mode)"
+ ec2_instance:
+ state: absent
+ instance_ids:
+ - "{{ item }}"
+ wait: true
+ register: terminate_id
+ check_mode: true
+ with_items: "{{ instances_to_terminate }}"
+
+ - assert:
+ that:
+ - terminate_id is not failed
+ - terminate_id is not changed
+
+ - name: "Terminate instance based on id - Idempotency"
+ ec2_instance:
+ state: absent
+ instance_ids:
+ - "{{ item }}"
+ wait: true
+ register: terminate_id
+ with_items: "{{ instances_to_terminate }}"
+
+ - assert:
+ that:
+ - terminate_id is not failed
+ - terminate_id is not changed
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/aliases
new file mode 100644
index 000000000..ca83d373d
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/aliases
@@ -0,0 +1,6 @@
+time=6m
+
+cloud/aws
+
+ec2_instance_info
+ec2_instance
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/defaults/main.yml
new file mode 100644
index 000000000..154ca799c
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+# defaults file for ec2_instance_no_wait
+ec2_instance_type: 't3.micro'
+ec2_instance_tag_TestId: '{{ resource_prefix }}-instance-no-wait'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/meta/main.yml
new file mode 100644
index 000000000..3014864e5
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/meta/main.yml
@@ -0,0 +1,6 @@
+# this just makes sure they're in the right place
+dependencies:
+- role: setup_ec2_facts
+- role: setup_ec2_instance_env
+ vars:
+ ec2_instance_test_name: no_wait
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/tasks/main.yml
new file mode 100644
index 000000000..f279e46c3
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/tasks/main.yml
@@ -0,0 +1,58 @@
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ - name: "New instance and don't wait for it to complete"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-no-wait"
+ image_id: "{{ ec2_ami_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ wait: false
+ instance_type: "{{ ec2_instance_type }}"
+ register: in_test_vpc
+
+ - assert:
+ that:
+ - in_test_vpc is not failed
+ - in_test_vpc is changed
+ - in_test_vpc.instances is not defined
+ - in_test_vpc.instance_ids is defined
+ - in_test_vpc.instance_ids | length > 0
+
+ - name: "New instance and don't wait for it to complete ( check mode )"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-no-wait-checkmode"
+ image_id: "{{ ec2_ami_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ wait: false
+ instance_type: "{{ ec2_instance_type }}"
+ check_mode: yes
+
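+ # With wait: false the instance may still be launching, so retry the info
+ # lookup until it appears; the check-mode instance should never appear.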
+ - name: "Facts for ec2 test instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-no-wait"
+ register: real_instance_fact
+ until: real_instance_fact.instances | length > 0
+ retries: 10
+
+ - name: "Facts for checkmode ec2 test instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-no-wait-checkmode"
+ register: checkmode_instance_fact
+
+ - name: "Confirm whether the check mode is working normally."
+ assert:
+ that:
+ - "{{ real_instance_fact.instances | length }} > 0"
+ - "{{ checkmode_instance_fact.instances | length }} == 0"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/aliases
new file mode 100644
index 000000000..ca83d373d
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/aliases
@@ -0,0 +1,6 @@
+time=6m
+
+cloud/aws
+
+ec2_instance_info
+ec2_instance
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/defaults/main.yml
new file mode 100644
index 000000000..07d18b5a8
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+# defaults file for ec2_instance_metadata_options
+ec2_instance_type: 't3.micro'
+ec2_instance_tag_TestId: '{{ resource_prefix }}-instance-metadata'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/meta/main.yml
new file mode 100644
index 000000000..78ebf425e
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/meta/main.yml
@@ -0,0 +1,9 @@
+# this just makes sure they're in the right place
+dependencies:
+- role: setup_ec2_facts
+- role: setup_botocore_pip
+ vars:
+ botocore_version: 1.23.30
+- role: setup_ec2_instance_env
+ vars:
+ ec2_instance_test_name: metadata
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/tasks/main.yml
new file mode 100644
index 000000000..57d588151
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/tasks/main.yml
@@ -0,0 +1,98 @@
+- name: test with boto3 version that does not support instance_metadata_tags
+ module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ - name: "fail create t3.nano instance with metadata_options"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-t3nano-enabled-required"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ instance_type: t3.nano
+ metadata_options:
+ http_endpoint: enabled
+ http_tokens: required
+ instance_metadata_tags: enabled
+ wait: false
+ ignore_errors: yes
+ register: instance_creation
+
+ - name: verify that the instance was not created because the installed botocore does not support instance_metadata_tags
+ assert:
+ that:
+ - instance_creation is failed
+ - instance_creation is not changed
+ - "'This is required to set instance_metadata_tags' in instance_creation.msg"
+
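+# The same instance is created below under the virtualenv interpreter provided
+# by setup_botocore_pip (botocore >= 1.23.30), which supports instance_metadata_tags.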
+- name: test with boto3 version that supports instance_metadata_tags
+ vars:
+ ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
+ module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ - name: "create t3.nano instance with metadata_options"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-t3nano-enabled-required"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ instance_type: t3.nano
+ metadata_options:
+ http_endpoint: enabled
+ http_tokens: required
+ instance_metadata_tags: enabled
+ wait: false
+ register: instance_creation
+
+ - name: "instance with metadata_options created with the right options"
+ assert:
+ that:
+ - instance_creation is success
+ - instance_creation is changed
+ - "'{{ instance_creation.spec.MetadataOptions.HttpEndpoint }}' == 'enabled'"
+ - "'{{ instance_creation.spec.MetadataOptions.HttpTokens }}' == 'required'"
+ - "'{{ instance_creation.spec.MetadataOptions.InstanceMetadataTags }}' == 'enabled'"
+
+ - name: "modify metadata_options on existing instance"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-t3nano-enabled-required"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ instance_type: t3.nano
+ metadata_options:
+ http_endpoint: enabled
+ http_tokens: optional
+ wait: false
+ register: metadata_options_update
+ ignore_errors: yes
+
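+ # ec2_instance does not update metadata options on an existing instance, so
+ # the task above should report no change and http_tokens should stay "required".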
+ - name: "fact presented ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-t3nano-enabled-required"
+ register: presented_instance_fact
+
+ - name: "modify metadata_options has no effect on existing instance"
+ assert:
+ that:
+ - metadata_options_update is success
+ - metadata_options_update is not changed
+ - "{{ presented_instance_fact.instances | length }} > 0"
+ - "'{{ presented_instance_fact.instances.0.state.name }}' in ['running','pending']"
+ - "'{{ presented_instance_fact.instances.0.metadata_options.http_endpoint }}' == 'enabled'"
+ - "'{{ presented_instance_fact.instances.0.metadata_options.http_tokens }}' == 'required'"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/aliases
new file mode 100644
index 000000000..ca83d373d
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/aliases
@@ -0,0 +1,6 @@
+time=6m
+
+cloud/aws
+
+ec2_instance_info
+ec2_instance
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/defaults/main.yml
new file mode 100644
index 000000000..3645fcabd
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+# defaults file for ec2_instance_security_group
+ec2_instance_type: 't3.micro'
+ec2_instance_tag_TestId: '{{ resource_prefix }}-instance-sg'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/meta/main.yml
new file mode 100644
index 000000000..2c8aa2e43
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/meta/main.yml
@@ -0,0 +1,6 @@
+# this just makes sure they're in the right place
+dependencies:
+- role: setup_ec2_facts
+- role: setup_ec2_instance_env
+ vars:
+ ec2_instance_test_name: security_groups
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/tasks/main.yml
new file mode 100644
index 000000000..47b1c963e
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/tasks/main.yml
@@ -0,0 +1,87 @@
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ - name: "New instance with 2 security groups"
+ ec2_instance:
+ name: "{{ resource_prefix }}-test-security-groups"
+ image_id: "{{ ec2_ami_id }}"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ resource_prefix }}"
+ instance_type: t2.micro
+ wait: false
+ security_groups:
+ - "{{ sg.group_id }}"
+ - "{{ sg2.group_id }}"
+ register: security_groups_test
+
+ - name: "Recreate same instance with 2 security groups ( Idempotency )"
+ ec2_instance:
+ name: "{{ resource_prefix }}-test-security-groups"
+ image_id: "{{ ec2_ami_id }}"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ resource_prefix }}"
+ instance_type: t2.micro
+ wait: false
+ security_groups:
+ - "{{ sg.group_id }}"
+ - "{{ sg2.group_id }}"
+ register: security_groups_test_idempotency
+
+ - name: "Gather ec2 facts to check SGs have been added"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-security-groups"
+ "instance-state-name": "running"
+ register: dual_sg_instance_facts
+ until: dual_sg_instance_facts.instances | length > 0
+ retries: 10
+
+ - name: "Remove secondary security group from instance"
+ ec2_instance:
+ name: "{{ resource_prefix }}-test-security-groups"
+ image_id: "{{ ec2_ami_id }}"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ resource_prefix }}"
+ instance_type: t2.micro
+ security_groups:
+ - "{{ sg.group_id }}"
+ register: remove_secondary_security_group
+
+ - name: "Gather ec2 facts to check seconday SG has been removed"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-security-groups"
+ "instance-state-name": "running"
+ register: single_sg_instance_facts
+ until: single_sg_instance_facts.instances | length > 0
+ retries: 10
+
+ - name: "Add secondary security group to instance"
+ ec2_instance:
+ name: "{{ resource_prefix }}-test-security-groups"
+ image_id: "{{ ec2_ami_id }}"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ resource_prefix }}"
+ instance_type: t2.micro
+ security_groups:
+ - "{{ sg.group_id }}"
+ - "{{ sg2.group_id }}"
+ register: add_secondary_security_group
+
+ - assert:
+ that:
+ - security_groups_test is not failed
+ - security_groups_test is changed
+ - security_groups_test_idempotency is not changed
+ - remove_secondary_security_group is changed
+ - single_sg_instance_facts.instances.0.security_groups | length == 1
+ - dual_sg_instance_facts.instances.0.security_groups | length == 2
+ - add_secondary_security_group is changed
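+
+ # A hedged, illustrative sketch (not part of the upstream change): the specific
+ # group ids could also be compared rather than just the counts, e.g.:
+ #
+ # - assert:
+ #     that:
+ #       - sg2.group_id in (dual_sg_instance_facts.instances.0.security_groups | map(attribute='group_id') | list)
+ #       - sg2.group_id not in (single_sg_instance_facts.instances.0.security_groups | map(attribute='group_id') | list)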
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/aliases
new file mode 100644
index 000000000..ca83d373d
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/aliases
@@ -0,0 +1,6 @@
+time=6m
+
+cloud/aws
+
+ec2_instance_info
+ec2_instance
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/defaults/main.yml
new file mode 100644
index 000000000..269677f92
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+# defaults file for ec2_instance_state_config_updates
+ec2_instance_type: 't3.micro'
+ec2_instance_tag_TestId: '{{ resource_prefix }}-state-config-updates'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/meta/main.yml
new file mode 100644
index 000000000..c9fdd98d9
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/meta/main.yml
@@ -0,0 +1,6 @@
+# this just makes sure they're in the right place
+dependencies:
+- role: setup_ec2_facts
+- role: setup_ec2_instance_env
+ vars:
+ ec2_instance_test_name: state_config_updates
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/tasks/main.yml
new file mode 100644
index 000000000..0d5d5a5c2
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/tasks/main.yml
@@ -0,0 +1,133 @@
+# Test that configuration changes, like security groups and instance attributes,
+# are updated correctly when the instance has different states, and also when
+# changing the state of an instance.
+# https://github.com/ansible-collections/community.aws/issues/16
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ - name: "Make instance with sg and termination protection enabled"
+ ec2_instance:
+ state: running
+ name: "{{ resource_prefix }}-test-state-param-changes"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ termination_protection: False
+ instance_type: "{{ ec2_instance_type }}"
+ wait: True
+ register: create_result
+
+ - assert:
+ that:
+ - create_result is not failed
+ - create_result.changed
+ - '"instances" in create_result'
+ - '"instance_ids" in create_result'
+ - '"spec" in create_result'
+ - create_result.instances[0].security_groups[0].group_id == "{{ sg.group_id }}"
+ - create_result.spec.DisableApiTermination == False
+
+ - name: "Change sg and termination protection while instance is in state running"
+ ec2_instance:
+ state: running
+ name: "{{ resource_prefix }}-test-state-param-changes"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ security_groups: "{{ sg2.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ termination_protection: True
+ instance_type: "{{ ec2_instance_type }}"
+ register: change_params_result
+
+ - assert:
+ that:
+ - change_params_result is not failed
+ - change_params_result.changed
+ - '"instances" in change_params_result'
+ - '"instance_ids" in change_params_result'
+ - '"changes" in change_params_result'
+ - change_params_result.instances[0].security_groups[0].group_id == "{{ sg2.group_id }}"
+ - change_params_result.changes[0].DisableApiTermination.Value == True
+ - change_params_result.changes[1].Groups[0] == "{{ sg2.group_id }}" # TODO fix this to be less fragile
+
+
+ - name: "Change instance state from running to stopped, and change sg and termination protection"
+ ec2_instance:
+ state: stopped
+ name: "{{ resource_prefix }}-test-state-param-changes"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ termination_protection: False
+ instance_type: "{{ ec2_instance_type }}"
+ register: change_state_params_result
+
+ - assert:
+ that:
+ - change_state_params_result is not failed
+ - change_state_params_result.changed
+ - '"instances" in change_state_params_result'
+ - '"instance_ids" in change_state_params_result'
+ - '"changes" in change_state_params_result'
+ - '"stop_success" in change_state_params_result'
+ - '"stop_failed" in change_state_params_result'
+ - change_state_params_result.instances[0].security_groups[0].group_id == "{{ sg.group_id }}"
+ - change_state_params_result.changes[0].DisableApiTermination.Value == False
+
+ - name: "Change sg and termination protection while instance is in state stopped"
+ ec2_instance:
+ state: stopped
+ name: "{{ resource_prefix }}-test-state-param-changes"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ security_groups: "{{ sg2.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ termination_protection: True
+ instance_type: "{{ ec2_instance_type }}"
+ register: change_params_stopped_result
+
+ - assert:
+ that:
+ - change_params_stopped_result is not failed
+ - change_params_stopped_result.changed
+ - '"instances" in change_params_stopped_result'
+ - '"instance_ids" in change_params_stopped_result'
+ - '"changes" in change_params_stopped_result'
+ - change_params_stopped_result.instances[0].security_groups[0].group_id == "{{ sg2.group_id }}"
+ - change_params_stopped_result.changes[0].DisableApiTermination.Value == True
+
+ - name: "Change instance state from stopped to running, and change sg and termination protection"
+ ec2_instance:
+ state: running
+ name: "{{ resource_prefix }}-test-state-param-changes"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ termination_protection: False
+ instance_type: "{{ ec2_instance_type }}"
+ wait: True
+ register: change_params_start_result
+
+ - assert:
+ that:
+ - change_params_start_result is not failed
+ - change_params_start_result.changed
+ - '"instances" in change_params_start_result'
+ - '"instance_ids" in change_params_start_result'
+ - '"changes" in change_params_start_result'
+ - '"start_success" in change_params_start_result'
+ - '"start_failed" in change_params_start_result'
+ - change_params_start_result.instances[0].security_groups[0].group_id == "{{ sg.group_id }}"
+ - change_params_start_result.changes[0].DisableApiTermination.Value == False
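+
+ # A hedged, illustrative sketch (not part of the upstream change): instead of relying
+ # on the positional ordering of the `changes` list (see the TODO above), the relevant
+ # entry could be selected by key, e.g.:
+ #
+ # - assert:
+ #     that:
+ #       - (change_params_start_result.changes | selectattr('DisableApiTermination', 'defined') | first).DisableApiTermination.Value == false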
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/aliases
new file mode 100644
index 000000000..ca83d373d
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/aliases
@@ -0,0 +1,6 @@
+time=6m
+
+cloud/aws
+
+ec2_instance_info
+ec2_instance
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/defaults/main.yml
new file mode 100644
index 000000000..0c09a7aab
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+# defaults file for ec2_instance_tags_and_vpc_settings
+ec2_instance_type: 't3.micro'
+ec2_instance_tag_TestId: '{{ resource_prefix }}-instance-tags-vpc'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/meta/main.yml
new file mode 100644
index 000000000..3a3510065
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/meta/main.yml
@@ -0,0 +1,6 @@
+# this just makes sure they're in the right place
+dependencies:
+- role: setup_ec2_facts
+- role: setup_ec2_instance_env
+ vars:
+ ec2_instance_test_name: tags_and_vpc
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/tasks/main.yml
new file mode 100644
index 000000000..71551ef29
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/tasks/main.yml
@@ -0,0 +1,179 @@
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ - name: "Make instance in the testing subnet created in the test VPC"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-basic-vpc-create"
+ image_id: "{{ ec2_ami_id }}"
+ user_data: |
+ #cloud-config
+ package_upgrade: true
+ package_update: true
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ Something: else
+ security_groups: "{{ sg.group_id }}"
+ network:
+ source_dest_check: false
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ instance_type: "{{ ec2_instance_type }}"
+ wait: false
+ register: in_test_vpc
+
+ - name: "Make instance in the testing subnet created in the test VPC(check mode)"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-basic-vpc-create-checkmode"
+ image_id: "{{ ec2_ami_id }}"
+ user_data: |
+ #cloud-config
+ package_upgrade: true
+ package_update: true
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ Something: else
+ security_groups: "{{ sg.group_id }}"
+ network:
+ source_dest_check: false
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ instance_type: "{{ ec2_instance_type }}"
+ check_mode: yes
+
+ - name: "Try to re-make the instance, hopefully this shows changed=False"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-basic-vpc-create"
+ image_id: "{{ ec2_ami_id }}"
+ user_data: |
+ #cloud-config
+ package_upgrade: true
+ package_update: true
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ Something: else
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ instance_type: "{{ ec2_instance_type }}"
+ register: remake_in_test_vpc
+ - name: "Remaking the same instance resulted in no changes"
+ assert:
+ that: not remake_in_test_vpc.changed
+ - name: "check that instance IDs match anyway"
+ assert:
+ that: 'remake_in_test_vpc.instance_ids[0] == in_test_vpc.instance_ids[0]'
+ - name: "check that source_dest_check was set to false"
+ assert:
+ that: 'not remake_in_test_vpc.instances[0].source_dest_check'
+
+ - name: "fact presented ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-basic-vpc-create"
+ register: presented_instance_fact
+
+ - name: "fact checkmode ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-basic-vpc-create-checkmode"
+ register: checkmode_instance_fact
+
+ - name: "Confirm whether the check mode is working normally."
+ assert:
+ that:
+ - "{{ presented_instance_fact.instances | length }} > 0"
+ - "{{ checkmode_instance_fact.instances | length }} == 0"
+
+ - name: "Alter it by adding tags"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-basic-vpc-create"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ Another: thing
+ purge_tags: false
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ instance_type: "{{ ec2_instance_type }}"
+ register: add_another_tag
+
+ - ec2_instance_info:
+ instance_ids: "{{ add_another_tag.instance_ids }}"
+ register: check_tags
+ - name: "Remaking the same instance resulted in no changes"
+ assert:
+ that:
+ - check_tags.instances[0].tags.Another == 'thing'
+ - check_tags.instances[0].tags.Something == 'else'
+
+ - name: "Purge a tag"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-basic-vpc-create"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ Another: thing
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ instance_type: "{{ ec2_instance_type }}"
+
+ - ec2_instance_info:
+ instance_ids: "{{ add_another_tag.instance_ids }}"
+ register: check_tags
+
+ - name: "Remaking the same instance resulted in no changes"
+ assert:
+ that:
+ - "'Something' not in check_tags.instances[0].tags"
+
+ - name: "check that subnet-default public IP rule was followed"
+ assert:
+ that:
+ - check_tags.instances[0].public_dns_name == ""
+ - check_tags.instances[0].private_ip_address.startswith(subnet_b_startswith)
+ - check_tags.instances[0].subnet_id == testing_subnet_b.subnet.id
+ - name: "check that tags were applied"
+ assert:
+ that:
+ - check_tags.instances[0].tags.Name.startswith(resource_prefix)
+ - "'{{ check_tags.instances[0].state.name }}' in ['pending', 'running']"
+
+ - name: "Try setting purge_tags to True without specifiying tags (should NOT purge tags)"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-basic-vpc-create"
+ image_id: "{{ ec2_ami_id }}"
+ purge_tags: true
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ instance_type: "{{ ec2_instance_type }}"
+ register: _purge_tags_without_tags
+
+ - name: Assert tags were not purged
+ assert:
+ that:
+ - _purge_tags_without_tags.instances[0].tags | length > 1
+
+ - name: "Purge all tags (aside from Name)"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-basic-vpc-create"
+ image_id: "{{ ec2_ami_id }}"
+ purge_tags: true
+ tags: {}
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ instance_type: "{{ ec2_instance_type }}"
+ register: _purge_tags
+
+ - name: Assert tags were purged
+ assert:
+ that:
+ - _purge_tags.instances[0].tags | length == 1
+ - _purge_tags.instances[0].tags.Name.startswith(resource_prefix)
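+
+ # A hedged, illustrative sketch (not part of the upstream change): assuming
+ # ec2_instance_info exposes the instance-level source_dest_check field, the
+ # network setting requested at creation could also be re-checked from facts:
+ #
+ # - assert:
+ #     that:
+ #       - not presented_instance_fact.instances.0.source_dest_check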
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/aliases
new file mode 100644
index 000000000..ca83d373d
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/aliases
@@ -0,0 +1,6 @@
+time=6m
+
+cloud/aws
+
+ec2_instance_info
+ec2_instance
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/defaults/main.yml
new file mode 100644
index 000000000..a5cac7423
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+# defaults file for ec2_instance_termination_protection
+ec2_instance_type: 't3.micro'
+ec2_instance_tag_TestId: '{{ resource_prefix }}-instance-termination'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/meta/main.yml
new file mode 100644
index 000000000..b75f3dd58
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/meta/main.yml
@@ -0,0 +1,6 @@
+# this just makes sure they're in the right place
+dependencies:
+- role: setup_ec2_facts
+- role: setup_ec2_instance_env
+ vars:
+ ec2_instance_test_name: termination_protection
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/tasks/main.yml
new file mode 100644
index 000000000..4c888592b
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/tasks/main.yml
@@ -0,0 +1,250 @@
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ - name: Create instance with termination protection (check mode)
+ ec2_instance:
+ name: "{{ resource_prefix }}-termination-protection"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ resource_prefix }}"
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ termination_protection: true
+ instance_type: "{{ ec2_instance_type }}"
+ state: running
+ wait: yes
+ check_mode: yes
+ register: create_instance_check_mode_results
+
+ - name: Check the returned value for the earlier task
+ assert:
+ that:
+ - create_instance_check_mode_results is changed
+ - create_instance_check_mode_results.spec.DisableApiTermination == True
+
+ - name: Create instance with termination protection
+ ec2_instance:
+ name: "{{ resource_prefix }}-termination-protection"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ resource_prefix }}"
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ termination_protection: true
+ instance_type: "{{ ec2_instance_type }}"
+ state: running
+ wait: yes
+ register: create_instance_results
+
+ - set_fact:
+ instance_id: '{{ create_instance_results.instances[0].instance_id }}'
+
+ - name: Check return values of the create instance task
+ assert:
+ that:
+ - "{{ create_instance_results.instances | length }} > 0"
+ - "'{{ create_instance_results.instances.0.state.name }}' == 'running'"
+ - "'{{ create_instance_results.spec.DisableApiTermination }}'"
+
+ - name: Get info on termination protection
+ command: 'aws ec2 describe-instance-attribute --attribute disableApiTermination --instance-id {{ instance_id }}'
+ environment:
+ AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
+ AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
+ AWS_SESSION_TOKEN: "{{ security_token | default('') }}"
+ AWS_DEFAULT_REGION: "{{ aws_region }}"
+ register: instance_termination_check
+
+ - name: convert it to an object
+ set_fact:
+ instance_termination_status: "{{ instance_termination_check.stdout | from_json }}"
+
+ - name: Assert termination protection is enabled on the created instance
+ assert:
+ that:
+ - instance_termination_status.DisableApiTermination.Value == true
+
+ - name: Create instance with termination protection (check mode) (idempotent)
+ ec2_instance:
+ name: "{{ resource_prefix }}-termination-protection"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ resource_prefix }}"
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ termination_protection: true
+ instance_type: "{{ ec2_instance_type }}"
+ state: running
+ wait: yes
+ check_mode: yes
+ register: create_instance_check_mode_results
+
+ - name: Check the returned value for the earlier task
+ assert:
+ that:
+ - create_instance_check_mode_results is not changed
+
+ - name: Create instance with termination protection (idempotent)
+ ec2_instance:
+ name: "{{ resource_prefix }}-termination-protection"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ resource_prefix }}"
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ termination_protection: true
+ instance_type: "{{ ec2_instance_type }}"
+ state: running
+ wait: yes
+ register: create_instance_results
+
+ - name: Check return values of the create instance task
+ assert:
+ that:
+ - "{{ not create_instance_results.changed }}"
+ - "{{ create_instance_results.instances | length }} > 0"
+
+ - name: Try to terminate the instance (expected to fail)
+ ec2_instance:
+ filters:
+ tag:Name: "{{ resource_prefix }}-termination-protection"
+ state: absent
+ failed_when: "'Unable to terminate instances' not in terminate_instance_results.msg"
+ register: terminate_instance_results
+
+ - name: Set termination protection to false (check_mode)
+ ec2_instance:
+ name: "{{ resource_prefix }}-termination-protection"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ resource_prefix }}"
+ termination_protection: false
+ instance_type: "{{ ec2_instance_type }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ check_mode: True
+ register: set_termination_protectioncheck_mode_results
+
+ - name: Check return value
+ assert:
+ that:
+ - "{{ set_termination_protectioncheck_mode_results.changed }}"
+
+ - name: Get info on termination protection
+ command: 'aws ec2 describe-instance-attribute --attribute disableApiTermination --instance-id {{ instance_id }}'
+ environment:
+ AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
+ AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
+ AWS_SESSION_TOKEN: "{{ security_token | default('') }}"
+ AWS_DEFAULT_REGION: "{{ aws_region }}"
+ register: instance_termination_check
+
+ - name: convert it to an object
+ set_fact:
+ instance_termination_status: "{{ instance_termination_check.stdout | from_json }}"
+
+ - assert:
+ that:
+ - instance_termination_status.DisableApiTermination.Value == true
+
+ - name: Set termination protection to false
+ ec2_instance:
+ name: "{{ resource_prefix }}-termination-protection"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ resource_prefix }}"
+ termination_protection: false
+ instance_type: "{{ ec2_instance_type }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ register: set_termination_protection_results
+
+ - name: Check return value
+ assert:
+ that:
+ - set_termination_protection_results.changed
+
+ - name: Get info on termination protection
+ command: 'aws ec2 describe-instance-attribute --attribute disableApiTermination --instance-id {{ instance_id }}'
+ environment:
+ AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
+ AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
+ AWS_SESSION_TOKEN: "{{ security_token | default('') }}"
+ AWS_DEFAULT_REGION: "{{ aws_region }}"
+ register: instance_termination_check
+
+ - name: convert it to an object
+ set_fact:
+ instance_termination_status: "{{ instance_termination_check.stdout | from_json }}"
+
+ - assert:
+ that:
+ - instance_termination_status.DisableApiTermination.Value == false
+
+ - name: Set termination protection to false (idempotent)
+ ec2_instance:
+ name: "{{ resource_prefix }}-termination-protection"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ resource_prefix }}"
+ termination_protection: false
+ instance_type: "{{ ec2_instance_type }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ register: set_termination_protection_results
+
+ - name: Check return value
+ assert:
+ that:
+ - "{{ not set_termination_protection_results.changed }}"
+
+ - name: Set termination protection to true
+ ec2_instance:
+ name: "{{ resource_prefix }}-termination-protection"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ resource_prefix }}"
+ termination_protection: true
+ instance_type: "{{ ec2_instance_type }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ register: set_termination_protection_results
+
+ - name: Check return value
+ assert:
+ that:
+ - "{{ set_termination_protection_results.changed }}"
+ - "{{ set_termination_protection_results.changes[0].DisableApiTermination.Value }}"
+
+ - name: Set termination protection to true (idempotent)
+ ec2_instance:
+ name: "{{ resource_prefix }}-termination-protection"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ resource_prefix }}"
+ termination_protection: true
+ instance_type: "{{ ec2_instance_type }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ register: set_termination_protection_results
+
+ - name: Check return value
+ assert:
+ that:
+ - "{{ not set_termination_protection_results.changed }}"
+
+ - name: Set termination protection to false (so we can terminate instance)
+ ec2_instance:
+ name: "{{ resource_prefix }}-termination-protection"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ resource_prefix }}"
+ termination_protection: false
+ instance_type: "{{ ec2_instance_type }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ register: set_termination_protection_results
+
+ - name: Terminate the instance
+ ec2_instance:
+ filters:
+ tag:TestId: "{{ resource_prefix }}"
+ state: absent
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/aliases
new file mode 100644
index 000000000..ca83d373d
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/aliases
@@ -0,0 +1,6 @@
+time=6m
+
+cloud/aws
+
+ec2_instance_info
+ec2_instance
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/defaults/main.yml
new file mode 100644
index 000000000..a51f9bf2c
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+# defaults file for ec2_instance_uptime
+ec2_instance_type: 't3.micro'
+ec2_instance_tag_TestId: '{{ resource_prefix }}-instance-uptime'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/meta/main.yml
new file mode 100644
index 000000000..6651aa834
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/meta/main.yml
@@ -0,0 +1,6 @@
+# this just makes sure they're in the right place
+dependencies:
+- role: setup_ec2_facts
+- role: setup_ec2_instance_env
+ vars:
+ ec2_instance_test_name: uptime
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/tasks/main.yml
new file mode 100644
index 000000000..6f7cf38dd
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/tasks/main.yml
@@ -0,0 +1,63 @@
+---
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ - name: "create t3.nano instance"
+ ec2_instance:
+ name: "{{ resource_prefix }}-test-uptime"
+ region: "{{ ec2_region }}"
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ instance_type: t3.nano
+ wait: yes
+
+ - name: "check ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-uptime"
+ instance-state-name: [ "running"]
+ register: instance_facts
+
+ - name: "Confirm existence of instance id."
+ assert:
+ that:
+ - "{{ instance_facts.instances | length }} == 1"
+
+ - name: "check using uptime 100 hours - should find nothing"
+ ec2_instance_info:
+ region: "{{ ec2_region }}"
+ uptime: 6000
+ filters:
+ instance-state-name: [ "running"]
+ "tag:Name": "{{ resource_prefix }}-test-uptime"
+ register: instance_facts
+
+ - name: "Confirm there is no running instance"
+ assert:
+ that:
+ - "{{ instance_facts.instances | length }} == 0"
+
+ - name: Sleep for 61 seconds and continue with play
+ wait_for:
+ timeout: 61
+ delegate_to: localhost
+
+ - name: "check using uptime 1 minute"
+ ec2_instance_info:
+ region: "{{ ec2_region }}"
+ uptime: 1
+ filters:
+ instance-state-name: [ "running"]
+ "tag:Name": "{{ resource_prefix }}-test-uptime"
+ register: instance_facts
+
+ - name: "Confirm there is one running instance"
+ assert:
+ that:
+ - "{{ instance_facts.instances | length }} == 1"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/aliases
new file mode 100644
index 000000000..e1a28da55
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/aliases
@@ -0,0 +1,5 @@
+# reason: missing-dependency
+# We need either the openssl binary, pycrypto, or a compiler on the Py36 and Py38
+# Zuul nodes
+# https://github.com/ansible-collections/amazon.aws/issues/428
+cloud/aws
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/defaults/main.yml
new file mode 100644
index 000000000..df0082d99
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+# defaults file for test_ec2_key
+ec2_key_name: '{{resource_prefix}}'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/meta/main.yml
new file mode 100644
index 000000000..d9abc1110
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/meta/main.yml
@@ -0,0 +1,5 @@
+dependencies:
+ - setup_sshkey
+ - role: setup_botocore_pip
+ vars:
+ botocore_version: '1.21.23'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/tasks/main.yml
new file mode 100644
index 000000000..8aa461039
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/tasks/main.yml
@@ -0,0 +1,461 @@
+---
+# TODO - name: test 'validate_certs' parameter
+# =============================================================
+
+- module_defaults:
+ group/aws:
+ region: '{{ aws_region }}'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ block:
+
+ # ============================================================
+ - name: test with no parameters
+ ec2_key:
+ register: result
+ ignore_errors: true
+
+ - name: assert failure when called with no parameters
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg == "missing required arguments: name"'
+
+ # ============================================================
+ - name: test removing a non-existent key pair (check mode)
+ ec2_key:
+ name: '{{ ec2_key_name }}'
+ state: absent
+ register: result
+ check_mode: true
+
+ - name: assert removing a non-existent key pair
+ assert:
+ that:
+ - 'not result.changed'
+
+ - name: test removing a non-existent key pair
+ ec2_key:
+ name: '{{ ec2_key_name }}'
+ state: absent
+ register: result
+
+ - name: assert removing a non-existent key pair
+ assert:
+ that:
+ - 'not result.changed'
+
+ # ============================================================
+ - name: test creating a new key pair (check_mode)
+ ec2_key:
+ name: '{{ ec2_key_name }}'
+ state: present
+ tags:
+ snake_case: 'a_snake_case_value'
+ CamelCase: 'CamelCaseValue'
+ "spaced key": 'Spaced value'
+ register: result
+ check_mode: true
+
+ - name: assert creating a new key pair
+ assert:
+ that:
+ - result is changed
+
+ - name: test creating a new key pair
+ ec2_key:
+ name: '{{ ec2_key_name }}'
+ state: present
+ tags:
+ snake_case: 'a_snake_case_value'
+ CamelCase: 'CamelCaseValue'
+ "spaced key": 'Spaced value'
+ register: result
+
+ - name: assert creating a new key pair
+ assert:
+ that:
+ - result is changed
+ - '"key" in result'
+ - '"name" in result.key'
+ - '"fingerprint" in result.key'
+ - '"private_key" in result.key'
+ - '"id" in result.key'
+ - '"tags" in result.key'
+ - result.key.name == ec2_key_name
+ - result.key.id.startswith('key-')
+ - '"snake_case" in result.key.tags'
+ - result.key.tags['snake_case'] == 'a_snake_case_value'
+ - '"CamelCase" in result.key.tags'
+ - result.key.tags['CamelCase'] == 'CamelCaseValue'
+ - '"spaced key" in result.key.tags'
+ - result.key.tags['spaced key'] == 'Spaced value'
+
+ - set_fact:
+ key_id_1: '{{ result.key.id }}'
+
+ - name: 'test re-"creating" the same key (check_mode)'
+ ec2_key:
+ name: '{{ ec2_key_name }}'
+ state: present
+ tags:
+ snake_case: 'a_snake_case_value'
+ CamelCase: 'CamelCaseValue'
+ "spaced key": 'Spaced value'
+ register: result
+ check_mode: true
+
+ - name: assert re-creating the same key
+ assert:
+ that:
+ - result is not changed
+
+ - name: 'test re-"creating" the same key'
+ ec2_key:
+ name: '{{ ec2_key_name }}'
+ state: present
+ tags:
+ snake_case: 'a_snake_case_value'
+ CamelCase: 'CamelCaseValue'
+ "spaced key": 'Spaced value'
+ register: result
+
+ # ============================================================
+ - name: test updating tags without purge (check mode)
+ ec2_key:
+ name: '{{ ec2_key_name }}'
+ state: present
+ tags:
+ newKey: 'Another value'
+ purge_tags: false
+ register: result
+ check_mode: true
+
+ - name: assert updated tags
+ assert:
+ that:
+ - result is changed
+
+ - name: test updating tags without purge
+ ec2_key:
+ name: '{{ ec2_key_name }}'
+ state: present
+ tags:
+ newKey: 'Another value'
+ purge_tags: false
+ register: result
+
+ - name: assert updated tags
+ assert:
+ that:
+ - result is changed
+ - '"key" in result'
+ - '"name" in result.key'
+ - '"fingerprint" in result.key'
+ - '"private_key" not in result.key'
+ - '"id" in result.key'
+ - result.key.id == key_id_1
+ - '"tags" in result.key'
+ - result.key.name == ec2_key_name
+ - '"snake_case" in result.key.tags'
+ - result.key.tags['snake_case'] == 'a_snake_case_value'
+ - '"CamelCase" in result.key.tags'
+ - result.key.tags['CamelCase'] == 'CamelCaseValue'
+ - '"spaced key" in result.key.tags'
+ - result.key.tags['spaced key'] == 'Spaced value'
+ - '"newKey" in result.key.tags'
+ - result.key.tags['newKey'] == 'Another value'
+
+ - name: test updating tags without purge - idempotency (check mode)
+ ec2_key:
+ name: '{{ ec2_key_name }}'
+ state: present
+ tags:
+ newKey: 'Another value'
+ purge_tags: false
+ register: result
+ check_mode: true
+
+ - name: assert updated tags
+ assert:
+ that:
+ - result is not changed
+
+ - name: test updating tags without purge - idempotency
+ ec2_key:
+ name: '{{ ec2_key_name }}'
+ state: present
+ tags:
+ newKey: 'Another value'
+ purge_tags: false
+ register: result
+
+ - name: assert updated tags
+ assert:
+ that:
+ - result is not changed
+ - '"key" in result'
+ - '"name" in result.key'
+ - '"fingerprint" in result.key'
+ - '"private_key" not in result.key'
+ - '"id" in result.key'
+ - '"tags" in result.key'
+ - result.key.name == ec2_key_name
+ - result.key.id == key_id_1
+ - '"snake_case" in result.key.tags'
+ - result.key.tags['snake_case'] == 'a_snake_case_value'
+ - '"CamelCase" in result.key.tags'
+ - result.key.tags['CamelCase'] == 'CamelCaseValue'
+ - '"spaced key" in result.key.tags'
+ - result.key.tags['spaced key'] == 'Spaced value'
+ - '"newKey" in result.key.tags'
+ - result.key.tags['newKey'] == 'Another value'
+
+ # ============================================================
+ - name: test updating tags with purge (check mode)
+ ec2_key:
+ name: '{{ ec2_key_name }}'
+ state: present
+ tags:
+ newKey: 'Another value'
+ purge_tags: true
+ register: result
+ check_mode: true
+
+ - name: assert updated tags
+ assert:
+ that:
+ - result is changed
+
+ - name: test updating tags with purge
+ ec2_key:
+ name: '{{ ec2_key_name }}'
+ state: present
+ tags:
+ newKey: 'Another value'
+ purge_tags: true
+ register: result
+
+ - name: assert updated tags
+ assert:
+ that:
+ - result is changed
+ - '"key" in result'
+ - '"name" in result.key'
+ - '"fingerprint" in result.key'
+ - '"private_key" not in result.key'
+ - '"id" in result.key'
+ - result.key.id == key_id_1
+ - '"tags" in result.key'
+ - result.key.name == ec2_key_name
+ - '"snake_case" not in result.key.tags'
+ - '"CamelCase" not in result.key.tags'
+ - '"spaced key" not in result.key.tags'
+ - '"newKey" in result.key.tags'
+ - result.key.tags['newKey'] == 'Another value'
+
+ - name: test updating tags with purge - idempotency (check mode)
+ ec2_key:
+ name: '{{ ec2_key_name }}'
+ state: present
+ tags:
+ newKey: 'Another value'
+ purge_tags: true
+ register: result
+ check_mode: true
+
+ - name: assert updated tags
+ assert:
+ that:
+ - result is not changed
+
+ - name: test updating tags with purge - idempotency
+ ec2_key:
+ name: '{{ ec2_key_name }}'
+ state: present
+ tags:
+ newKey: 'Another value'
+ purge_tags: true
+ register: result
+
+ - name: assert updated tags
+ assert:
+ that:
+ - result is not changed
+ - '"key" in result'
+ - '"name" in result.key'
+ - '"fingerprint" in result.key'
+ - '"private_key" not in result.key'
+ - '"id" in result.key'
+ - '"tags" in result.key'
+ - result.key.name == ec2_key_name
+ - result.key.id == key_id_1
+ - '"snake_case" not in result.key.tags'
+ - '"CamelCase" not in result.key.tags'
+ - '"spaced key" not in result.key.tags'
+ - '"newKey" in result.key.tags'
+ - result.key.tags['newKey'] == 'Another value'
+
+ # ============================================================
+ - name: test removing an existent key (check mode)
+ ec2_key:
+ name: '{{ ec2_key_name }}'
+ state: absent
+ register: result
+ check_mode: true
+
+ - name: assert removing an existent key
+ assert:
+ that:
+ - result is changed
+
+ - name: test removing an existent key
+ ec2_key:
+ name: '{{ ec2_key_name }}'
+ state: absent
+ register: result
+
+ - name: assert removing an existent key
+ assert:
+ that:
+ - result is changed
+ - '"key" in result'
+ - result.key == None
+
+ # ============================================================
+ - name: test state=present with key_material
+ ec2_key:
+ name: '{{ ec2_key_name }}'
+ key_material: '{{ key_material }}'
+ state: present
+ register: result
+
+ - name: assert state=present with key_material
+ assert:
+ that:
+ - 'result.changed == True'
+ - '"key" in result'
+ - '"name" in result.key'
+ - '"fingerprint" in result.key'
+ - '"private_key" not in result.key'
+ - '"id" in result.key'
+ - '"tags" in result.key'
+ - 'result.key.name == "{{ec2_key_name}}"'
+ - 'result.key.fingerprint == "{{fingerprint}}"'
+
+ # ============================================================
+ - name: test state=present with key_material (idempotency)
+ ec2_key:
+ name: '{{ ec2_key_name }}'
+ key_material: '{{ key_material }}'
+ state: present
+ register: result
+
+ - name: assert state=present with key_material
+ assert:
+ that:
+ - result is not changed
+ - '"key" in result'
+ - '"name" in result.key'
+ - '"fingerprint" in result.key'
+ - '"private_key" not in result.key'
+ - '"id" in result.key'
+ - '"tags" in result.key'
+ - 'result.key.name == "{{ec2_key_name}}"'
+ - 'result.key.fingerprint == "{{fingerprint}}"'
+ - 'result.msg == "key pair already exists"'
+
+ # ============================================================
+
+ - name: test force=no with another_key_material (expect changed=false)
+ ec2_key:
+ name: '{{ ec2_key_name }}'
+ key_material: '{{ another_key_material }}'
+ force: no
+ register: result
+
+ - name: assert force=no with another_key_material (expect changed=false)
+ assert:
+ that:
+ - 'not result.changed'
+ - 'result.key.fingerprint == "{{ fingerprint }}"'
+
+ # ============================================================
+
+ - name: test updating a key pair using another_key_material (expect changed=True)
+ ec2_key:
+ name: '{{ ec2_key_name }}'
+ key_material: '{{ another_key_material }}'
+ register: result
+
+ - name: assert updating a key pair using another_key_material (expect changed=True)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.key.fingerprint != "{{ fingerprint }}"'
+
+ # ============================================================
+ - name: test state=absent (expect changed=true)
+ ec2_key:
+ name: '{{ ec2_key_name }}'
+ state: absent
+ register: result
+
+ - name: assert state=absent with key_material (expect changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - '"key" in result'
+ - 'result.key == None'
+
+ # ============================================================
+ - name: test create ED25519 key pair type with botocore <= 1.21.23
+ ec2_key:
+ name: '{{ ec2_key_name }}'
+ key_type: ed25519
+ ignore_errors: true
+ register: result
+
+ - name: assert that task failed
+ assert:
+ that:
+ - 'result.failed'
+ - '"Failed to import the required Python library (botocore>=1.21.23)" in result.msg'
+ - '"This is required to set the key_type for a keypair" in result.msg'
+
+ - name: test create ED25519 key pair type
+ ec2_key:
+ name: '{{ ec2_key_name }}'
+ key_type: ed25519
+ register: result
+ vars:
+ ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
+
+ - name: assert that task succeed
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.key.type == "ed25519"'
+
+ - name: Update key pair type from ED25519 to RSA
+ ec2_key:
+ name: '{{ ec2_key_name }}'
+ key_type: rsa
+ register: result
+ vars:
+ ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
+
+ - name: assert that task succeed
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.key.type == "rsa"'
+
+ always:
+
+ # ============================================================
+ - name: Always delete the key we might create
+ ec2_key:
+ name: '{{ ec2_key_name }}'
+ state: absent
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/aliases
new file mode 100644
index 000000000..65a419c87
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/aliases
@@ -0,0 +1,5 @@
+# very dependent on how quickly the instance comes up, varies between 5m and 10m
+time=10m
+
+non_local
+cloud/aws
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/meta/main.yml
new file mode 100644
index 000000000..445013b49
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/meta/main.yml
@@ -0,0 +1,7 @@
+dependencies:
+ - setup_ec2_facts
+ - setup_sshkey
+ # required for run_instances with MetadataOptions.InstanceMetadataTags
+ - role: setup_botocore_pip
+ vars:
+ botocore_version: '1.23.30'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/setup.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/setup.yml
new file mode 100644
index 000000000..11c623a33
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/setup.yml
@@ -0,0 +1,182 @@
+---
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+
+ hosts: localhost
+
+ collections:
+ - amazon.aws
+ - community.aws
+
+ vars:
+ vpc_name: '{{ resource_prefix }}-vpc'
+ vpc_seed: '{{ resource_prefix }}'
+ vpc_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.0.0/16'
+ subnet_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.32.0/24'
+
+ tasks:
+ - set_fact:
+ # As lookup plugins don't have access to module_defaults
+ connection_args:
+ region: "{{ aws_region }}"
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ aws_security_token: "{{ security_token | default(omit) }}"
+
+ - include_role:
+ name: '../setup_sshkey'
+ - include_role:
+ name: '../setup_ec2_facts'
+
+ - include_role:
+ name: '../setup_botocore_pip'
+ vars:
+ botocore_version: '1.23.30'
+
+ - set_fact:
+ availability_zone: '{{ ec2_availability_zone_names[0] }}'
+
+ # ============================================================
+ - name: create a VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ tags:
+ Name: "{{ resource_prefix }}-vpc"
+ Description: "Created by ansible-test"
+ register: vpc_result
+
+ - set_fact:
+ vpc_id: "{{ vpc_result.vpc.id }}"
+
+ - name: create an internet gateway
+ ec2_vpc_igw:
+ vpc_id: "{{ vpc_id }}"
+ state: present
+ tags:
+ "Name": "{{ resource_prefix }}"
+ register: igw_result
+
+ - name: create a subnet
+ ec2_vpc_subnet:
+ cidr: "{{ vpc_cidr }}"
+ az: "{{ availability_zone }}"
+ vpc_id: "{{ vpc_id }}"
+ tags:
+ Name: "{{ resource_prefix }}-vpc"
+ Description: "Created by ansible-test"
+ state: present
+ register: vpc_subnet_result
+
+ - name: create a public route table
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc_id }}"
+ tags:
+ "Name": "{{ resource_prefix }}"
+ subnets:
+ - "{{ vpc_subnet_result.subnet.id }}"
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: "{{ igw_result.gateway_id }}"
+ register: public_route_table
+
+ - name: create a security group
+ ec2_group:
+ name: "{{ resource_prefix }}-sg"
+ description: "Created by {{ resource_prefix }}"
+ rules:
+ - proto: tcp
+ ports: 22
+ cidr_ip: 0.0.0.0/0
+ - proto: icmp
+ from_port: -1
+ to_port: -1
+ state: present
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ register: vpc_sg_result
+
+ - name: Create a key
+ ec2_key:
+ name: '{{ resource_prefix }}'
+ key_material: '{{ key_material }}'
+ state: present
+ register: ec2_key_result
+
+ - name: Set facts to simplify use of extra resources
+ set_fact:
+ vpc_subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ vpc_sg_id: "{{ vpc_sg_result.group_id }}"
+ vpc_igw_id: "{{ igw_result.gateway_id }}"
+ vpc_route_table_id: "{{ public_route_table.route_table.id }}"
+ ec2_key_name: "{{ ec2_key_result.key.name }}"
+
+ - name: Create an instance to test with
+ ec2_instance:
+ state: running
+ name: "{{ resource_prefix }}-ec2-metadata-facts"
+ image_id: "{{ ec2_ami_id }}"
+ vpc_subnet_id: "{{ vpc_subnet_id }}"
+ security_group: "{{ vpc_sg_id }}"
+ instance_type: t2.micro
+ key_name: "{{ ec2_key_name }}"
+ network:
+ assign_public_ip: true
+ delete_on_termination: true
+ metadata_options:
+ instance_metadata_tags: enabled
+ tags:
+ snake_case_key: a_snake_case_value
+ camelCaseKey: aCamelCaseValue
+ register: ec2_instance
+ vars:
+ ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
+
+ - set_fact:
+ ec2_ami_id_py2: "{{ lookup('aws_ssm', '/aws/service/ami-amazon-linux-latest/amzn-ami-hvm-x86_64-gp2', **connection_args) }}"
+ ec2_ami_ssh_user_py2: "ec2-user"
+
+ - name: Create an instance to test with using Python 2
+ ec2_instance:
+ state: running
+ name: "{{ resource_prefix }}-ec2-metadata-facts-py2"
+ image_id: "{{ ec2_ami_id_py2 }}"
+ vpc_subnet_id: "{{ vpc_subnet_id }}"
+ security_group: "{{ vpc_sg_id }}"
+ instance_type: t2.micro
+ key_name: "{{ ec2_key_name }}"
+ network:
+ assign_public_ip: true
+ delete_on_termination: true
+ metadata_options:
+ instance_metadata_tags: enabled
+ tags:
+ snake_case_key: a_snake_case_value
+ camelCaseKey: aCamelCaseValue
+ wait: True
+ register: ec2_instance_py2
+ vars:
+ ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
+
+ - set_fact:
+ ec2_instance_id: "{{ ec2_instance.instances[0].instance_id }}"
+ ec2_instance_id_py2: "{{ ec2_instance_py2.instances[0].instance_id }}"
+
+ - name: Create inventory file
+ template:
+ src: ../templates/inventory.j2
+ dest: ../inventory
+
+ - wait_for:
+ port: 22
+ host: '{{ ec2_instance.instances[0].public_ip_address }}'
+ timeout: 1200
+
+ - wait_for:
+ port: 22
+ host: '{{ ec2_instance_py2.instances[0].public_ip_address }}'
+ timeout: 1200
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/teardown.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/teardown.yml
new file mode 100644
index 000000000..11ddf88ef
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/teardown.yml
@@ -0,0 +1,84 @@
+---
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+
+ hosts: localhost
+
+ collections:
+ - amazon.aws
+ - community.aws
+
+ tasks:
+ # ============================================================
+
+ - name: terminate the instance
+ ec2_instance:
+ state: absent
+ instance_ids:
+ - "{{ ec2_instance_id }}"
+ - "{{ ec2_instance_id_py2 }}"
+ wait: True
+ ignore_errors: true
+ retries: 5
+ register: remove
+ until: remove is successful
+
+ - name: remove ssh key
+ ec2_key:
+ name: "{{ ec2_key_name }}"
+ state: absent
+ ignore_errors: true
+
+ - name: remove the public route table
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc_id }}"
+ route_table_id: "{{ vpc_route_table_id }}"
+ lookup: id
+ state: absent
+ ignore_errors: true
+ retries: 5
+ register: remove
+ until: remove is successful
+
+ - name: remove the internet gateway
+ ec2_vpc_igw:
+ vpc_id: "{{ vpc_id }}"
+ state: absent
+ ignore_errors: true
+ retries: 5
+ register: remove
+ until: remove is successful
+
+ - name: remove the security group
+ ec2_group:
+ group_id: "{{ vpc_sg_id }}"
+ state: absent
+ ignore_errors: true
+ retries: 5
+ register: remove
+ until: remove is successful
+
+ - name: remove the subnet
+ ec2_vpc_subnet:
+ cidr: "{{ vpc_cidr }}"
+ az: "{{ availability_zone }}"
+ vpc_id: "{{ vpc_id }}"
+ state: absent
+ ignore_errors: true
+ retries: 5
+ register: remove
+ until: remove is successful
+
+ - name: remove the VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ cidr_block: "{{ vpc_cidr }}"
+ state: absent
+ ignore_errors: true
+ retries: 5
+ register: remove
+ until: remove is successful
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/test_metadata.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/test_metadata.yml
new file mode 100644
index 000000000..eba96f916
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/test_metadata.yml
@@ -0,0 +1,18 @@
+---
+- hosts: testhost
+ tasks:
+
+ - name: Wait for EC2 to be available
+ wait_for_connection:
+
+ - amazon.aws.ec2_metadata_facts:
+
+ - name: Assert initial metadata for the instance
+ assert:
+ that:
+ - ansible_ec2_ami_id == image_id
+ - ansible_ec2_placement_availability_zone == availability_zone
+ - ansible_ec2_security_groups == "{{ resource_prefix }}-sg"
+ - ansible_ec2_user_data == "None"
+ - ansible_ec2_instance_tags_keys is defined
+ - ansible_ec2_instance_tags_keys | length == 3
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/runme.sh b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/runme.sh
new file mode 100755
index 000000000..6f2bc4660
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/runme.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+
+set -eux
+export ANSIBLE_HOST_KEY_CHECKING=False
+export ANSIBLE_SSH_ARGS='-o UserKnownHostsFile=/dev/null'
+
+CMD_ARGS=("$@")
+
+# Destroy Environment
+cleanup() {
+ ansible-playbook playbooks/teardown.yml -i inventory -c local "${CMD_ARGS[@]}"
+}
+trap "cleanup" EXIT
+
+# create test resources and inventory
+ansible-playbook playbooks/setup.yml -c local "$@"
+
+# test ec2_metadata_facts
+ansible-playbook playbooks/test_metadata.yml -i inventory \
+ -e local_tmp=/tmp/ansible-local \
+ -e remote_tmp=/tmp/ansible-remote \
+ "$@"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/templates/inventory.j2 b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/templates/inventory.j2
new file mode 100644
index 000000000..86ec99287
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/templates/inventory.j2
@@ -0,0 +1,34 @@
+[testhost_py3]
+"{{ ec2_instance.instances[0].public_ip_address }}"
+
+[testhost_py2]
+"{{ ec2_instance_py2.instances[0].public_ip_address }}"
+
+[testhost:children]
+testhost_py3
+testhost_py2
+
+[testhost:vars]
+ansible_ssh_private_key_file="{{ sshkey }}"
+ansible_python_interpreter=/usr/bin/env python
+
+[testhost_py3:vars]
+ansible_user="{{ ec2_ami_ssh_user }}"
+image_id="{{ ec2_ami_id }}"
+
+[testhost_py2:vars]
+ansible_user="{{ ec2_ami_ssh_user_py2 }}"
+image_id="{{ ec2_ami_id_py2 }}"
+
+[all:vars]
+# Template vars that will need to be used in tests and teardown
+vpc_id="{{ vpc_id }}"
+vpc_subnet_id="{{ vpc_subnet_id }}"
+vpc_sg_id="{{ vpc_sg_id }}"
+vpc_cidr="{{ vpc_cidr }}"
+vpc_igw="{{ vpc_igw_id }}"
+vpc_route_table_id="{{ vpc_route_table_id }}"
+ec2_key_name="{{ ec2_key_name }}"
+availability_zone="{{ availability_zone }}"
+ec2_instance_id="{{ ec2_instance_id }}"
+ec2_instance_id_py2="{{ ec2_instance_id_py2 }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/aliases
new file mode 100644
index 000000000..115fded97
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/aliases
@@ -0,0 +1,6 @@
+# duration: 15
+slow
+
+cloud/aws
+
+ec2_security_group_info
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/defaults/main.yml
new file mode 100644
index 000000000..f17a67a51
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/defaults/main.yml
@@ -0,0 +1,7 @@
+---
+# defaults file for test_ec2_group
+ec2_group_name: '{{resource_prefix}}'
+ec2_group_description: 'Created by ansible integration tests'
+
+vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/16'
+subnet_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.1.0/24'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/data_validation.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/data_validation.yml
new file mode 100644
index 000000000..c461287d9
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/data_validation.yml
@@ -0,0 +1,33 @@
+---
+- block:
+ - name: Create a group with only the default rule
+ ec2_group:
+ name: '{{ec2_group_name}}-input-tests'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ec2_group_description}}'
+
+ - name: Run through some common weird port specs
+ ec2_group:
+ name: '{{ec2_group_name}}-input-tests'
+ description: '{{ec2_group_description}}'
+ rules:
+ - "{{ item }}"
+ with_items:
+ - proto: tcp
+ from_port: "8182"
+ to_port: 8182
+ cidr_ipv6: "fc00:ff9b::/96"
+ rule_desc: Mixed string and non-string ports
+ - proto: tcp
+ ports:
+ - "9000"
+ - 9001
+ - 9002-9005
+ cidr_ip: "10.2.3.0/24"
+ always:
+ - name: tidy up input testing group
+ ec2_group:
+ name: '{{ec2_group_name}}-input-tests'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ state: absent
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/diff_mode.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/diff_mode.yml
new file mode 100644
index 000000000..e687bad23
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/diff_mode.yml
@@ -0,0 +1,167 @@
+---
+ # ============================================================
+
+ - name: create a group with a rule (CHECK MODE + DIFF)
+ ec2_group:
+ name: '{{ ec2_group_name }}'
+ description: '{{ ec2_group_description }}'
+ state: present
+ rules:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ rules_egress:
+ - proto: all
+ cidr_ip: 0.0.0.0/0
+ register: check_mode_result
+ check_mode: true
+ diff: true
+
+ - assert:
+ that:
+ - check_mode_result.changed
+
+ - name: create a group with a rule (DIFF)
+ ec2_group:
+ name: '{{ ec2_group_name }}'
+ description: '{{ ec2_group_description }}'
+ state: present
+ rules:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ rules_egress:
+ - proto: all
+ cidr_ip: 0.0.0.0/0
+ register: result
+ diff: true
+
+ - assert:
+ that:
+ - result.changed
+ - result.diff.0.after.ip_permissions == check_mode_result.diff.0.after.ip_permissions
+ - result.diff.0.after.ip_permissions_egress == check_mode_result.diff.0.after.ip_permissions_egress
+
+ - name: add rules to make sorting occur (CHECK MODE + DIFF)
+ ec2_group:
+ name: '{{ ec2_group_name }}'
+ description: '{{ ec2_group_description }}'
+ state: present
+ rules:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 172.16.0.0/12
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 10.0.0.0/8
+ rules_egress:
+ - proto: all
+ cidr_ip: 0.0.0.0/0
+ register: check_mode_result
+ check_mode: true
+ diff: true
+
+ - assert:
+ that:
+ - check_mode_result.changed
+
+ - name: add rules in a different order to test sorting consistency (DIFF)
+ ec2_group:
+ name: '{{ ec2_group_name }}'
+ description: '{{ ec2_group_description }}'
+ state: present
+ rules:
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 172.16.0.0/12
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 10.0.0.0/8
+ rules_egress:
+ - proto: all
+ cidr_ip: 0.0.0.0/0
+ register: result
+ diff: true
+
+ - assert:
+ that:
+ - result.changed
+ - result.diff.0.after.ip_permissions == check_mode_result.diff.0.after.ip_permissions
+ - result.diff.0.after.ip_permissions_egress == check_mode_result.diff.0.after.ip_permissions_egress
+
+ - name: purge rules (CHECK MODE + DIFF)
+ ec2_group:
+ name: '{{ ec2_group_name }}'
+ description: '{{ ec2_group_description }}'
+ state: present
+ rules:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ rules_egress: []
+ register: check_mode_result
+ check_mode: true
+ diff: true
+
+ - assert:
+ that:
+ - check_mode_result.changed
+
+ - name: purge rules (DIFF)
+ ec2_group:
+ name: '{{ ec2_group_name }}'
+ description: '{{ ec2_group_description }}'
+ state: present
+ rules:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ rules_egress: []
+ register: result
+ diff: true
+
+ - assert:
+ that:
+ - result.changed
+ - result.diff.0.after.ip_permissions == check_mode_result.diff.0.after.ip_permissions
+ - result.diff.0.after.ip_permissions_egress == check_mode_result.diff.0.after.ip_permissions_egress
+
+ - name: delete the security group (CHECK MODE + DIFF)
+ ec2_group:
+ name: '{{ ec2_group_name }}'
+ state: absent
+ register: check_mode_result
+ diff: true
+ check_mode: true
+
+ - assert:
+ that:
+ - check_mode_result.changed
+
+ - name: delete the security group (DIFF)
+ ec2_group:
+ name: '{{ ec2_group_name }}'
+ state: absent
+ register: result
+ diff: true
+
+ - assert:
+ that:
+ - result.changed
+ - not result.diff.0.after and not check_mode_result.diff.0.after
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/egress_tests.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/egress_tests.yml
new file mode 100644
index 000000000..5635f4434
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/egress_tests.yml
@@ -0,0 +1,177 @@
+---
+- block:
+ - name: Create a group with only the default rule
+ ec2_group:
+ name: '{{ec2_group_name}}-egress-tests'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ec2_group_description}}'
+ state: present
+ register: result
+
+ - name: assert default rule is in place (expected changed=true)
+ assert:
+ that:
+ - result is changed
+ - result.ip_permissions|length == 0
+ - result.ip_permissions_egress|length == 1
+ - result.ip_permissions_egress[0].ip_ranges[0].cidr_ip == '0.0.0.0/0'
+
+ - name: Re-run creation of the group with only the default rule (purge_rules_egress disabled)
+ ec2_group:
+ name: '{{ec2_group_name}}-egress-tests'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ec2_group_description}}'
+ purge_rules_egress: false
+ state: present
+ register: result
+
+ - name: assert default rule is not purged (expected changed=false)
+ assert:
+ that:
+ - result is not changed
+ - result.ip_permissions|length == 0
+ - result.ip_permissions_egress|length == 1
+ - result.ip_permissions_egress[0].ip_ranges[0].cidr_ip == '0.0.0.0/0'
+
+ - name: Pass empty egress rules without purging, should leave default rule in place
+ ec2_group:
+ name: '{{ec2_group_name}}-egress-tests'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ purge_rules_egress: false
+ rules_egress: []
+ state: present
+ register: result
+
+ - name: assert default rule is not purged (expected changed=false)
+ assert:
+ that:
+ - result is not changed
+ - result.ip_permissions|length == 0
+ - result.ip_permissions_egress|length == 1
+ - result.ip_permissions_egress[0].ip_ranges[0].cidr_ip == '0.0.0.0/0'
+
+ - name: Purge rules, including the default
+ ec2_group:
+ name: '{{ec2_group_name}}-egress-tests'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ purge_rules_egress: true
+ rules_egress: []
+ state: present
+ register: result
+
+ - name: assert all egress rules, including the default, were purged (expected changed=true)
+ assert:
+ that:
+ - result is changed
+ - result.ip_permissions|length == 0
+ - result.ip_permissions_egress|length == 0
+
+ - name: Add a custom egress rule
+ ec2_group:
+ name: '{{ec2_group_name}}-egress-tests'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ rules_egress:
+ - proto: tcp
+ ports:
+ - 1212
+ cidr_ip: 10.2.1.2/32
+ state: present
+ register: result
+
+ - name: assert first rule is here
+ assert:
+ that:
+ - result.ip_permissions_egress|length == 1
+
+ - name: Add a second custom egress rule
+ ec2_group:
+ name: '{{ec2_group_name}}-egress-tests'
+ description: '{{ec2_group_description}}'
+ purge_rules_egress: false
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ rules_egress:
+ - proto: tcp
+ ports:
+ - 2323
+ cidr_ip: 10.3.2.3/32
+ state: present
+ register: result
+
+ - name: assert the first rule is not purged
+ assert:
+ that:
+ - result.ip_permissions_egress|length == 2
+
+ - name: Purge the second rule (CHECK MODE) (DIFF MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}-egress-tests'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ rules_egress:
+ - proto: tcp
+ ports:
+ - 1212
+ cidr_ip: 10.2.1.2/32
+ state: present
+ register: result
+ check_mode: True
+ diff: True
+
+ - name: assert first rule will be left
+ assert:
+ that:
+ - result.changed
+ - result.diff.0.after.ip_permissions_egress|length == 1
+ - result.diff.0.after.ip_permissions_egress[0].ip_ranges[0].cidr_ip == '10.2.1.2/32'
+
+ - name: Purge the second rule
+ ec2_group:
+ name: '{{ec2_group_name}}-egress-tests'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ rules_egress:
+ - proto: tcp
+ ports:
+ - 1212
+ cidr_ip: 10.2.1.2/32
+ state: present
+ register: result
+
+ - name: assert first rule is here
+ assert:
+ that:
+ - result.ip_permissions_egress|length == 1
+ - result.ip_permissions_egress[0].ip_ranges[0].cidr_ip == '10.2.1.2/32'
+
+ - name: add a rule for all TCP ports
+ ec2_group:
+ name: '{{ec2_group_name}}-egress-tests'
+ description: '{{ec2_group_description}}'
+ rules_egress:
+ - proto: tcp
+ ports: 0-65535
+ cidr_ip: 0.0.0.0/0
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ register: result
+
+ - name: Re-add the default rule
+ ec2_group:
+ name: '{{ec2_group_name}}-egress-tests'
+ description: '{{ec2_group_description}}'
+ rules_egress:
+ - proto: -1
+ cidr_ip: 0.0.0.0/0
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ register: result
+ always:
+ - name: tidy up egress rule test security group
+ ec2_group:
+ name: '{{ec2_group_name}}-egress-tests'
+ state: absent
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/group_info.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/group_info.yml
new file mode 100644
index 000000000..86c8a5460
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/group_info.yml
@@ -0,0 +1,96 @@
+---
+
+# file for testing the ec2_group_info module
+
+- block:
+ # ======================== Setup =====================================
+ - name: Create a group for testing group info retrieval below
+ ec2_group:
+ name: '{{ ec2_group_name }}-info-1'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ ec2_group_description }}'
+ rules:
+ - proto: tcp
+ ports:
+ - 90
+ cidr_ip: 10.2.2.2/32
+ tags:
+ test: '{{ resource_prefix }}_ec2_group_info_module'
+ register: group_info_test_setup
+
+ - name: Create another group for testing group info retrieval below
+ ec2_group:
+ name: '{{ ec2_group_name }}-info-2'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ ec2_group_description }}'
+ rules:
+ - proto: tcp
+ ports:
+ - 91
+ cidr_ip: 10.2.2.2/32
+
+ # ========================= ec2_group_info tests ====================
+
+ - name: Retrieve security group info based on SG name
+ ec2_group_info:
+ filters:
+ group-name: '{{ ec2_group_name }}-info-2'
+ register: result_1
+
+ - name: Assert results found
+ assert:
+ that:
+ - result_1.security_groups is defined
+ - (result_1.security_groups|first).group_name == '{{ ec2_group_name }}-info-2'
+
+ - name: Retrieve security group info based on SG VPC
+ ec2_group_info:
+ filters:
+ vpc-id: '{{ vpc_result.vpc.id }}'
+ register: result_2
+
+ - name: Assert results found
+ assert:
+ that:
+ - result_2.security_groups is defined
+ - (result_2.security_groups|first).vpc_id == vpc_result.vpc.id
+ - (result_2.security_groups|length) > 2
+
+ - name: Retrieve security group info based on SG tags
+ ec2_group_info:
+ filters:
+ "tag:test": "{{ resource_prefix }}_ec2_group_info_module"
+ register: result_3
+
+ - name: Assert results found
+ assert:
+ that:
+ - result_3.security_groups is defined
+ - (result_3.security_groups|first).group_id == group_info_test_setup.group_id
+
+ - name: Retrieve security group info based on SG ID
+ ec2_group_info:
+ filters:
+ group-id: '{{ group_info_test_setup.group_id }}'
+ register: result_4
+
+ - name: Assert correct result found
+ assert:
+ that:
+ - result_4.security_groups is defined
+ - (result_4.security_groups|first).group_id == group_info_test_setup.group_id
+ - (result_4.security_groups|length) == 1
+
+ always:
+ # ========================= Cleanup =================================
+ - name: tidy up test security group 1
+ ec2_group:
+ name: '{{ ec2_group_name }}-info-1'
+ state: absent
+ ignore_errors: yes
+
+ - name: tidy up test security group 2
+ ec2_group:
+ name: '{{ ec2_group_name }}-info-2'
+ state: absent
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/icmp_verbs.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/icmp_verbs.yml
new file mode 100644
index 000000000..a4f1d3947
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/icmp_verbs.yml
@@ -0,0 +1,221 @@
+---
+- block:
+ # ============================================================
+ - name: Create simple rule using icmp verbs
+ ec2_group:
+ name: '{{ec2_group_name}}-icmp-1'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ec2_group_description}}'
+ rules:
+ - proto: "icmp"
+ icmp_type: 3
+ icmp_code: 8
+ cidr_ip:
+ - 10.0.0.0/8
+ - 172.16.40.10/32
+ state: present
+ register: result
+
+ - name: Retrieve security group info
+ ec2_group_info:
+ filters:
+ group-name: '{{ ec2_group_name }}-icmp-1'
+ register: result_1
+
+ - assert:
+ that:
+ - result is changed
+ - result_1.security_groups is defined
+ - (result_1.security_groups|first).group_name == '{{ ec2_group_name }}-icmp-1'
+ - (result_1.security_groups|first).ip_permissions[0].ip_protocol == "icmp"
+
+ - name: Create ipv6 rule using icmp verbs
+ ec2_group:
+ name: '{{ec2_group_name}}-icmp-2'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ec2_group_description}}'
+ rules:
+ - proto: "icmpv6"
+ icmp_type: 1
+ icmp_code: 4
+ cidr_ipv6: "64:ff9b::/96"
+ state: present
+ register: result
+
+ - name: Retrieve security group info
+ ec2_group_info:
+ filters:
+ group-name: '{{ ec2_group_name }}-icmp-2'
+ register: result_1
+
+ - assert:
+ that:
+ - result is changed
+ - result_1.security_groups is defined
+ - (result_1.security_groups|first).group_name == '{{ ec2_group_name }}-icmp-2'
+ - (result_1.security_groups|first).ip_permissions[0].ip_protocol == "icmpv6"
+
+
+ - name: Create rule using security group referencing
+ ec2_group:
+ name: '{{ec2_group_name}}-icmp-3'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ec2_group_description}}'
+ rules:
+ - proto: "icmp"
+ icmp_type: 5
+ icmp_code: 1
+ group_name: '{{ec2_group_name}}-auto-create-2'
+ group_desc: "sg-group-referencing"
+ state: present
+ register: result
+
+ - name: Retrieve security group info
+ ec2_group_info:
+ filters:
+ group-name: '{{ ec2_group_name }}-icmp-3'
+ register: result_1
+
+ - assert:
+ that:
+ - result is changed
+ - (result_1.security_groups | first).ip_permissions[0].user_id_group_pairs is defined
+
+ - name: Create a rule list using 0 as icmp_type
+ ec2_group:
+ name: '{{ec2_group_name}}-icmp-4'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ec2_group_description}}'
+ rules:
+ - proto: icmp
+ icmp_type: 0
+ icmp_code: 1
+ cidr_ip:
+ - 10.0.0.0/8
+ - 172.16.40.10/32
+ - proto: "tcp"
+ from_port: 80
+ to_port: 80
+ cidr_ip: 172.16.40.10/32
+ state: present
+ register: result
+
+ - name: Retrieve security group info
+ ec2_group_info:
+ filters:
+ group-name: '{{ ec2_group_name }}-icmp-4'
+ register: result_1
+
+ - assert:
+ that:
+ - result is changed
+ - (result_1.security_groups | first).ip_permissions | length == 2
+ # ============================================================
+
+ # ============================================================
+ - name: Create a group with a non-ICMP protocol
+ ec2_group:
+ name: '{{ec2_group_name}}-icmp-4'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ec2_group_description}}'
+ rules:
+ - proto: "tcp"
+ icmp_type: 0
+ icmp_code: 1
+ cidr_ip:
+ - 10.0.0.0/8
+ - 172.16.40.10/32
+ state: present
+ register: result
+ ignore_errors: true
+
+ - name: assert that group creation fails when proto != icmp with icmp parameters
+ assert:
+ that:
+ - result is failed
+
+ - name: Create a group with conflicting parameters
+ ec2_group:
+ name: '{{ec2_group_name}}-icmp-4'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ec2_group_description}}'
+ rules:
+ - proto: icmp
+ from_port: 5
+ to_port: 1
+ icmp_type: 0
+ icmp_code: 1
+ cidr_ip:
+ - 10.0.0.0/8
+ - 172.16.40.10/32
+ state: present
+ register: result
+ ignore_errors: true
+
+ - name: assert that group creation fails when using conflicting parameters
+ assert:
+ that:
+ - result is failed
+
+ - name: Create a group with missing icmp parameters
+ ec2_group:
+ name: '{{ec2_group_name}}-icmp-4'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ec2_group_description}}'
+ rules:
+ - proto: "tcp"
+ icmp_type: 0
+ cidr_ip:
+ - 10.0.0.0/8
+ - 172.16.40.10/32
+ state: present
+ register: result
+ ignore_errors: true
+
+ - name: assert that group creation fails when missing icmp parameters
+ assert:
+ that:
+ - result is failed
+
+ always:
+ - name: tidy up rules on the auto-created referenced security group
+ ec2_group:
+ name: '{{ec2_group_name}}-auto-create-2'
+ description: 'sg-group-referencing'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ rules: []
+ rules_egress: []
+ ignore_errors: yes
+
+ - name: tidy up rules on the ICMP test security groups
+ ec2_group:
+ name: '{{ec2_group_name}}-icmp-{{ item }}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ rules: []
+ rules_egress: []
+ ignore_errors: yes
+ with_items:
+ - 1
+ - 2
+ - 3
+ - 4
+
+ - name: tidy up the auto-created referenced security group
+ ec2_group:
+ name: '{{ec2_group_name}}-auto-create-2'
+ state: absent
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ ignore_errors: yes
+
+ - name: tidy up the ICMP test security groups
+ ec2_group:
+ name: '{{ec2_group_name}}-icmp-{{ item }}'
+ state: absent
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ ignore_errors: yes
+ with_items:
+ - 1
+ - 2
+ - 3
+ - 4
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/ipv6_default_tests.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/ipv6_default_tests.yml
new file mode 100644
index 000000000..2dea42a64
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/ipv6_default_tests.yml
@@ -0,0 +1,90 @@
+---
+# ============================================================
+- name: test state=present for ipv6 (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6: "64:ff9b::/96"
+ check_mode: true
+ register: result
+
+- name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+
+# ============================================================
+- name: test state=present for ipv6 (expected changed=true)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6: "64:ff9b::/96"
+ register: result
+
+- name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.group_id.startswith("sg-")'
+
+# ============================================================
+- name: test rules_egress state=present for ipv6 (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6: "64:ff9b::/96"
+ rules_egress:
+ - proto: "tcp"
+ from_port: 8181
+ to_port: 8181
+ cidr_ipv6: "64:ff9b::/96"
+ check_mode: true
+ register: result
+
+- name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+
+# ============================================================
+- name: test rules_egress state=present for ipv6 (expected changed=true)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6: "64:ff9b::/96"
+ rules_egress:
+ - proto: "tcp"
+ from_port: 8181
+ to_port: 8181
+ cidr_ipv6: "64:ff9b::/96"
+ register: result
+
+- name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.group_id.startswith("sg-")'
+- name: delete it
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ state: absent
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/main.yml
new file mode 100644
index 000000000..fa0ab9496
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/main.yml
@@ -0,0 +1,1368 @@
+---
+- set_fact:
+ # lookup plugins don't have access to module_defaults
+ connection_args:
+ region: "{{ aws_region }}"
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ aws_security_token: "{{ security_token | default(omit) }}"
+ no_log: True
+
+# ============================================================
+- name: Run all tests
+ module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit)}}"
+ region: "{{ aws_region }}"
+ block:
+ - name: determine if there is a default VPC
+ set_fact:
+ defaultvpc: "{{ lookup('amazon.aws.aws_account_attribute', attribute='default-vpc', **connection_args) }}"
+ register: default_vpc
+
+ - name: create a VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ tags:
+ Name: "{{ resource_prefix }}-vpc"
+ Description: "Created by ansible-test"
+ register: vpc_result
+ #TODO(ryansb): Update CI for VPC peering permissions
+ #- include: ./multi_account.yml
+ - include: ./diff_mode.yml
+ - include: ./numeric_protos.yml
+ - include: ./rule_group_create.yml
+ - include: ./egress_tests.yml
+ - include: ./icmp_verbs.yml
+ - include: ./data_validation.yml
+ - include: ./multi_nested_target.yml
+ - include: ./group_info.yml
+
+ # ============================================================
+ - name: test state=absent (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: absent
+ check_mode: true
+ register: result
+
+ - name: assert no changes would be made
+ assert:
+ that:
+ - not result.changed
+
+ # ===========================================================
+ - name: test state=absent
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: absent
+ register: result
+
+ # ============================================================
+ - name: test state=present (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ check_mode: true
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+
+ # ============================================================
+ - name: test state=present (expected changed=true)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.group_id.startswith("sg-")'
+
+ # ============================================================
+ - name: test state=present with a different description (expected changed=false) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}CHANGED'
+ state: present
+ check_mode: true
+ register: result
+
+ - name: assert state=present (expected changed=false)
+ assert:
+ that:
+ - 'not result.changed'
+
+ # ============================================================
+ - name: test state=present with a different description (expected changed=false)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}CHANGED'
+ state: present
+ ignore_errors: true
+ register: result
+
+ - name: assert state=present (expected changed=false)
+ assert:
+ that:
+ - 'not result.changed'
+ - 'result.group_id.startswith("sg-")'
+
+ # ============================================================
+ - name: test state=present (expected changed=false)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ register: result
+
+ - name: assert state=present (expected changed=false)
+ assert:
+ that:
+ - 'not result.changed'
+ - 'result.group_id.startswith("sg-")'
+
+ # ============================================================
+ - name: test IPv6 with the default VPC
+ include: ./ipv6_default_tests.yml
+ when: default_vpc
+
+ - name: test IPv6 with a specified VPC
+ block:
+
+ # ============================================================
+ - name: test state=present (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ ec2_group_name }}-2'
+ description: '{{ ec2_group_description }}-2'
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ check_mode: true
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+
+ # ============================================================
+ - name: test state=present (expected changed=true)
+ ec2_group:
+ name: '{{ ec2_group_name }}-2'
+ description: '{{ ec2_group_description }}-2'
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.group_id.startswith("sg-")'
+
+ # ============================================================
+ - name: test state=present for ipv6 (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ ec2_group_name }}-2'
+ description: '{{ ec2_group_description }}-2'
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6: "64:ff9b::/96"
+ check_mode: true
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+
+ # ============================================================
+ - name: test state=present for ipv6 (expected changed=true)
+ ec2_group:
+ name: '{{ ec2_group_name }}-2'
+ description: '{{ ec2_group_description }}-2'
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6: "64:ff9b::/96"
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.group_id.startswith("sg-")'
+
+ # ============================================================
+ - name: test state=present for ipv6 (expected changed=false) (CHECK MODE)
+ ec2_group:
+ name: '{{ ec2_group_name }}-2'
+ description: '{{ ec2_group_description }}-2'
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6: "64:ff9b::/96"
+ check_mode: true
+ register: result
+
+ - name: assert nothing changed
+ assert:
+ that:
+ - 'not result.changed'
+
+ # ============================================================
+ - name: test state=present for ipv6 (expected changed=false)
+ ec2_group:
+ name: '{{ ec2_group_name }}-2'
+ description: '{{ ec2_group_description }}-2'
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6: "64:ff9b::/96"
+ register: result
+
+ - name: assert nothing changed
+ assert:
+ that:
+ - 'not result.changed'
+
+ # ============================================================
+ - name: test rules_egress state=present for ipv6 (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ ec2_group_name }}-2'
+ description: '{{ ec2_group_description }}-2'
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6: "64:ff9b::/96"
+ rules_egress:
+ - proto: "tcp"
+ from_port: 8181
+ to_port: 8181
+ cidr_ipv6: "64:ff9b::/96"
+ check_mode: true
+ diff: true
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.diff.0.before.ip_permissions == result.diff.0.after.ip_permissions'
+ - 'result.diff.0.before.ip_permissions_egress != result.diff.0.after.ip_permissions_egress'
+
+ # ============================================================
+ - name: test rules_egress state=present for ipv6 (expected changed=true)
+ ec2_group:
+ name: '{{ ec2_group_name }}-2'
+ description: '{{ ec2_group_description }}-2'
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6: "64:ff9b::/96"
+ rules_egress:
+ - proto: "tcp"
+ from_port: 8181
+ to_port: 8181
+ cidr_ipv6: "64:ff9b::/96"
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.group_id.startswith("sg-")'
+
+ # ============================================================
+ - name: test state=absent (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ ec2_group_name }}-2'
+ description: '{{ ec2_group_description }}-2'
+ state: absent
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ check_mode: true
+ diff: true
+ register: result
+
+ - name: assert group was removed
+ assert:
+ that:
+ - 'result.changed'
+ - 'not result.diff.0.after'
+
+ # ============================================================
+ - name: test state=absent (expected changed=true)
+ ec2_group:
+ name: '{{ ec2_group_name }}-2'
+ description: '{{ ec2_group_description }}-2'
+ state: absent
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ register: result
+
+ - name: assert group was removed
+ assert:
+ that:
+ - 'result.changed'
+
+ # ============================================================
+ - name: test state=present for ipv4 (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ip: "10.1.1.1/32"
+ check_mode: true
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+
+ # ============================================================
+ - name: test state=present for ipv4 (expected changed=true)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ip: "10.1.1.1/32"
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.group_id.startswith("sg-")'
+ - 'result.ip_permissions|length == 1'
+ - 'result.ip_permissions_egress|length == 1'
+
+ # ============================================================
+ - name: add same rule to the existing group (expected changed=false) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ip: "10.1.1.1/32"
+ check_mode: true
+ diff: true
+ register: check_result
+
+ - assert:
+ that:
+ - not check_result.changed
+ - check_result.diff.0.before.ip_permissions.0 == check_result.diff.0.after.ip_permissions.0
+
+ # ============================================================
+ - name: add same rule to the existing group (expected changed=false)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ip: "10.1.1.1/32"
+ register: result
+
+ - name: assert state=present (expected changed=false)
+ assert:
+ that:
+ - 'not result.changed'
+ - 'result.group_id.startswith("sg-")'
+
+ - name: assert state=present (expected changed=false)
+ assert:
+ that:
+ - 'not check_result.changed'
+
+ # ============================================================
+ - name: add a rule that auto creates another security group (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ purge_rules: no
+ rules:
+ - proto: "tcp"
+ group_name: "{{ resource_prefix }} - Another security group"
+ group_desc: Another security group
+ ports: 7171
+ check_mode: true
+ register: result
+
+ - name: check that a second rule would be added
+ assert:
+ that:
+ - result.changed
+
+ # ============================================================
+ - name: add a rule that auto creates another security group
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ purge_rules: no
+ rules:
+ - proto: "tcp"
+ group_name: "{{ resource_prefix }} - Another security group"
+ group_desc: Another security group
+ ports: 7171
+ register: result
+
+ - name: check that there are now two rules
+ assert:
+ that:
+ - result.changed
+ - result.warning is not defined
+ - result.ip_permissions|length == 2
+ - result.ip_permissions[0].user_id_group_pairs or
+ result.ip_permissions[1].user_id_group_pairs
+ - 'result.ip_permissions_egress[0].ip_protocol == "-1"'
+
+ # ============================================================
+ - name: test ip rules convert port numbers from string to int (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: "8183"
+ to_port: "8183"
+ cidr_ip: "10.1.1.1/32"
+ rules_egress:
+ - proto: "tcp"
+ from_port: "8184"
+ to_port: "8184"
+ cidr_ip: "10.1.1.1/32"
+ check_mode: true
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+
+ # ============================================================
+ - name: test ip rules convert port numbers from string to int (expected changed=true)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: "8183"
+ to_port: "8183"
+ cidr_ip: "10.1.1.1/32"
+ rules_egress:
+ - proto: "tcp"
+ from_port: "8184"
+ to_port: "8184"
+ cidr_ip: "10.1.1.1/32"
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.group_id.startswith("sg-")'
+ - 'result.ip_permissions|length == 1'
+ - 'result.ip_permissions_egress[0].ip_protocol == "tcp"'
+
+
+ # ============================================================
+ - name: test group rules convert port numbers from string to int (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: "8185"
+ to_port: "8185"
+ group_id: "{{result.group_id}}"
+ rules_egress:
+ - proto: "tcp"
+ from_port: "8186"
+ to_port: "8186"
+ group_id: "{{result.group_id}}"
+ check_mode: true
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+
+ # ============================================================
+ - name: test group rules convert port numbers from string to int (expected changed=true)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: "8185"
+ to_port: "8185"
+ group_id: "{{result.group_id}}"
+ rules_egress:
+ - proto: "tcp"
+ from_port: "8186"
+ to_port: "8186"
+ group_id: "{{result.group_id}}"
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.group_id.startswith("sg-")'
+ - result.warning is not defined
+
+ # ============================================================
+ - name: test adding a range of ports and ports given as strings (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ # set purge_rules to false so we don't get a false positive from previously added rules
+ purge_rules: false
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8183-8190
+ - '8192'
+ cidr_ip: 10.1.1.1/32
+ check_mode: true
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+
+ # ============================================================
+ - name: test adding a range of ports and ports given as strings (expected changed=true)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ # set purge_rules to false so we don't get a false positive from previously added rules
+ purge_rules: false
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8183-8190
+ - '8192'
+ cidr_ip: 10.1.1.1/32
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.group_id.startswith("sg-")'
+
+ # ============================================================
+ - name: test adding a rule with an IPv4 CIDR with host bits set (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ # set purge_rules to false so we don't get a false positive from previously added rules
+ purge_rules: false
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8195
+ cidr_ip: 10.0.0.1/8
+ check_mode: true
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+
+ # ============================================================
+ - name: test adding a rule with an IPv4 CIDR with host bits set (expected changed=true)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ # set purge_rules to false so we don't get a false positive from previously added rules
+ purge_rules: false
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8195
+ cidr_ip: 10.0.0.1/8
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.group_id.startswith("sg-")'
+
+ # ============================================================
+ - name: test adding the same rule with an IPv4 CIDR with host bits set (expected changed=false) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ # set purge_rules to false so we don't get a false positive from previously added rules
+ purge_rules: false
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8195
+ cidr_ip: 10.0.0.1/8
+ check_mode: true
+ register: check_result
+
+ # ============================================================
+ - name: test adding the same rule with an IPv4 CIDR with host bits set (expected changed=false and a warning)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ # set purge_rules to false so we don't get a false positive from previously added rules
+ purge_rules: false
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8195
+ cidr_ip: 10.0.0.1/8
+ register: result
+
+ - name: assert state=present in check mode (expected changed=false)
+ assert:
+ that:
+ - 'not check_result.changed'
+
+ - name: assert state=present (expected changed=false and a warning)
+ assert:
+ that:
+ # No way to assert for warnings?
+ - 'not result.changed'
+ - 'result.group_id.startswith("sg-")'
+
+ # ============================================================
+ - name: test using the default VPC
+ block:
+
+ - name: test adding a rule with an IPv6 CIDR with host bits set (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ # set purge_rules to false so we don't get a false positive from previously added rules
+ purge_rules: false
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8196
+ cidr_ipv6: '2001:db00::1/24'
+ check_mode: true
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+
+ # ============================================================
+ - name: test adding a rule with an IPv6 CIDR with host bits set (expected changed=true)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ # set purge_rules to false so we don't get a false positive from previously added rules
+ purge_rules: false
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8196
+ cidr_ipv6: '2001:db00::1/24'
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.group_id.startswith("sg-")'
+
+ # ============================================================
+
+ - name: test adding a rule again with an IPv6 CIDR with host bits set (expected changed=false and a warning)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ # set purge_rules to false so we don't get a false positive from previously added rules
+ purge_rules: false
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8196
+ cidr_ipv6: '2001:db00::1/24'
+ register: result
+
+ - name: assert state=present (expected changed=false and a warning)
+ assert:
+ that:
+ # No way to assert for warnings?
+ - 'not result.changed'
+ - 'result.group_id.startswith("sg-")'
+
+ when: default_vpc
+
+ # ============================================================
+ - name: test state=absent (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ state: absent
+ check_mode: true
+ register: result
+
+ - name: assert state=absent (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+
+ # ============================================================
+ - name: test state=absent (expected changed=true)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ state: absent
+ register: result
+
+ - name: assert state=absent (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'not result.group_id'
+
+ # ============================================================
+ - name: create security group in the VPC (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ip: "10.1.1.1/32"
+ check_mode: true
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+
+ # ============================================================
+ - name: create security group in the VPC
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ip: "10.1.1.1/32"
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.vpc_id == vpc_result.vpc.id'
+ - 'result.group_id.startswith("sg-")'
+
+ # ============================================================
+ - name: test adding tags (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ip: "10.1.1.1/32"
+ tags:
+ tag1: test1
+ tag2: test2
+ check_mode: true
+ diff: true
+ register: result
+
+ - name: assert that tags were added (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'not result.diff.0.before.tags'
+ - 'result.diff.0.after.tags.tag1 == "test1"'
+ - 'result.diff.0.after.tags.tag2 == "test2"'
+
+ # ============================================================
+ - name: test adding tags (expected changed=true)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ip: "10.1.1.1/32"
+ tags:
+ tag1: test1
+ tag2: test2
+ register: result
+
+ - name: assert that tags were added (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.tags == {"tag1": "test1", "tag2": "test2"}'
+
+ # ============================================================
+ - name: test that tags are present (expected changed=False) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ state: present
+ purge_rules_egress: false
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ip: "10.1.1.1/32"
+ tags:
+ tag1: test1
+ tag2: test2
+ check_mode: true
+ register: result
+
+ - name: assert that tags were not changed (expected changed=False)
+ assert:
+ that:
+ - 'not result.changed'
+
+ # ============================================================
+ - name: test that tags are present (expected changed=False)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ state: present
+ purge_rules_egress: false
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ip: "10.1.1.1/32"
+ tags:
+ tag1: test1
+ tag2: test2
+ register: result
+
+ - name: assert that tags were not changed (expected changed=False)
+ assert:
+ that:
+ - 'not result.changed'
+ - 'result.tags == {"tag1": "test1", "tag2": "test2"}'
+
+ # ============================================================
+ - name: test purging tags (expected changed=True) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ip: "10.1.1.1/32"
+ tags:
+ tag1: test1
+ check_mode: true
+ register: result
+
+ - name: assert that tag2 was removed (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+
+ # ============================================================
+ - name: test purging tags (expected changed=True)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ip: "10.1.1.1/32"
+ tags:
+ tag1: test1
+ register: result
+
+ - name: assert that tag2 was removed (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.tags == {"tag1": "test1"}'
+
+ # ============================================================
+
+ - name: test that tags are left as-is if not specified (expected changed=False)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ip: "10.1.1.1/32"
+ register: result
+
+ - name: assert that the tags stayed the same (expected changed=false)
+ assert:
+ that:
+ - 'not result.changed'
+ - 'result.tags == {"tag1": "test1"}'
+
+ # ============================================================
+
+ - name: test purging all tags (expected changed=True)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ip: "10.1.1.1/32"
+ tags: {}
+ register: result
+
+ - name: assert that tag1 was removed (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'not result.tags'
+
+ # ============================================================
+ - name: test adding a rule and egress rule descriptions (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ # purge the other rules so assertions work for the subsequent tests for rule descriptions
+ purge_rules_egress: true
+ purge_rules: true
+ state: present
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8281
+ cidr_ipv6: 1001:d00::/24
+ rule_desc: ipv6 rule desc 1
+ rules_egress:
+ - proto: "tcp"
+ ports:
+ - 8282
+ cidr_ip: 10.2.2.2/32
+ rule_desc: egress rule desc 1
+ check_mode: true
+ register: result
+
+ - name: assert that rule descriptions are created (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+
+ # ============================================================
+ - name: add rules without descriptions ready for adding descriptions to existing rules
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ # purge the other rules so assertions work for the subsequent tests for rule descriptions
+ purge_rules_egress: true
+ purge_rules: true
+ state: present
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8281
+ cidr_ipv6: 1001:d00::/24
+ rules_egress:
+ - proto: "tcp"
+ ports:
+ - 8282
+ cidr_ip: 10.2.2.2/32
+ register: result
+
+ # ============================================================
+ - name: test adding a rule and egress rule descriptions (expected changed=true)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ # purge the other rules so assertions work for the subsequent tests for rule descriptions
+ purge_rules_egress: true
+ purge_rules: true
+ state: present
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8281
+ cidr_ipv6: 1001:d00::/24
+ rule_desc: ipv6 rule desc 1
+ rules_egress:
+ - proto: "tcp"
+ ports:
+ - 8282
+ cidr_ip: 10.2.2.2/32
+ rule_desc: egress rule desc 1
+ register: result
+
+ - name: assert that rule descriptions are created (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.ip_permissions[0].ipv6_ranges[0].description == "ipv6 rule desc 1"'
+ - 'result.ip_permissions_egress[0].ip_ranges[0].description == "egress rule desc 1"'
+
+ # ============================================================
+ - name: test modifying rule and egress rule descriptions (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ purge_rules_egress: false
+ purge_rules: false
+ state: present
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8281
+ cidr_ipv6: 1001:d00::/24
+ rule_desc: ipv6 rule desc 2
+ rules_egress:
+ - proto: "tcp"
+ ports:
+ - 8282
+ cidr_ip: 10.2.2.2/32
+ rule_desc: egress rule desc 2
+ check_mode: true
+ register: result
+
+ - name: assert that rule descriptions were modified (expected changed=true)
+ assert:
+ that:
+ - 'result.ip_permissions | length > 0'
+ - 'result.changed'
+
+ # ============================================================
+ - name: test modifying rule and egress rule descriptions (expected changed=true)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ purge_rules_egress: false
+ purge_rules: false
+ state: present
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8281
+ cidr_ipv6: 1001:d00::/24
+ rule_desc: ipv6 rule desc 2
+ rules_egress:
+ - proto: "tcp"
+ ports:
+ - 8282
+ cidr_ip: 10.2.2.2/32
+ rule_desc: egress rule desc 2
+ register: result
+
+ - name: assert that rule descriptions were modified (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.ip_permissions[0].ipv6_ranges[0].description == "ipv6 rule desc 2"'
+ - 'result.ip_permissions_egress[0].ip_ranges[0].description == "egress rule desc 2"'
+
+ # ============================================================
+
+ - name: test creating a rule in the default VPC with an egress rule (expected changed=true)
+ ec2_group:
+ name: '{{ec2_group_name}}-default-vpc'
+ description: '{{ec2_group_description}} default VPC'
+ purge_rules_egress: true
+ state: present
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8281
+ cidr_ip: 10.1.1.1/24
+ rule_desc: ipv4 rule desc
+ rules_egress:
+ - proto: "tcp"
+ ports:
+ - 8282
+ cidr_ip: 10.2.2.2/32
+ rule_desc: egress rule desc 2
+ register: result
+
+ - name: assert that rule descriptions were modified (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.ip_permissions_egress|length == 1'
+
+ # ============================================================
+ - name: test keeping the same rule descriptions (expected changed=false) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ purge_rules_egress: false
+ purge_rules: false
+ state: present
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8281
+ cidr_ipv6: 1001:d00::/24
+ rule_desc: ipv6 rule desc 2
+ rules_egress:
+ - proto: "tcp"
+ ports:
+ - 8282
+ cidr_ip: 10.2.2.2/32
+ rule_desc: egress rule desc 2
+ check_mode: true
+ register: result
+
+ - name: assert that rule descriptions stayed the same (expected changed=false)
+ assert:
+ that:
+ - 'not result.changed'
+
+ # ============================================================
+ - name: test keeping the same rule descriptions (expected changed=false)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ purge_rules_egress: false
+ purge_rules: false
+ state: present
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8281
+ cidr_ipv6: 1001:d00::/24
+ rule_desc: ipv6 rule desc 2
+ rules_egress:
+ - proto: "tcp"
+ ports:
+ - 8282
+ cidr_ip: 10.2.2.2/32
+ rule_desc: egress rule desc 2
+ register: result
+
+ - name: assert that rule descriptions stayed the same (expected changed=false)
+ assert:
+ that:
+ - 'not result.changed'
+ - 'result.ip_permissions[0].ipv6_ranges[0].description == "ipv6 rule desc 2"'
+ - 'result.ip_permissions_egress[0].ip_ranges[0].description == "egress rule desc 2"'
+
+ # ============================================================
+ - name: test removing rule descriptions (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ purge_rules_egress: false
+ purge_rules: false
+ state: present
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8281
+ cidr_ipv6: 1001:d00::/24
+ rule_desc:
+ rules_egress:
+ - proto: "tcp"
+ ports:
+ - 8282
+ cidr_ip: 10.2.2.2/32
+ rule_desc:
+ check_mode: true
+ register: result
+
+ - name: assert that rule descriptions were removed (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+
+ # ============================================================
+ - name: test removing rule descriptions (expected changed=true)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ purge_rules_egress: false
+ purge_rules: false
+ state: present
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8281
+ cidr_ipv6: 1001:d00::/24
+ rule_desc:
+ rules_egress:
+ - proto: "tcp"
+ ports:
+ - 8282
+ cidr_ip: 10.2.2.2/32
+ rule_desc:
+ register: result
+ ignore_errors: true
+
+ - name: assert that rule descriptions were removed
+ assert:
+ that:
+ - 'result.ip_permissions[0].ipv6_ranges[0].description is undefined'
+ - 'result.ip_permissions_egress[0].ip_ranges[0].description is undefined'
+
+ # ============================================================
+
+ - name: test state=absent (expected changed=true)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ state: absent
+ register: result
+
+ - name: assert state=absent (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'not result.group_id'
+ always:
+
+ # ============================================================
+ # Describe state of remaining resources
+
+ - name: Retrieve security group info based on SG VPC
+ ec2_group_info:
+ filters:
+ vpc-id: '{{ vpc_result.vpc.id }}'
+ register: remaining_groups
+
+ - name: Retrieve subnet info based on SG VPC
+ ec2_vpc_subnet_info:
+ filters:
+ vpc-id: '{{ vpc_result.vpc.id }}'
+ register: remaining_subnets
+
+ - name: Retrieve VPC info based on SG VPC
+ ec2_vpc_net_info:
+ vpc_ids:
+ - '{{ vpc_result.vpc.id }}'
+ register: remaining_vpc
+
+ # ============================================================
+ # Delete all remaining SGs
+
+ - name: Delete rules from remaining SGs
+ ec2_group:
+ name: '{{ item.group_name }}'
+ group_id: '{{ item.group_id }}'
+ description: '{{ item.description }}'
+ rules: []
+ rules_egress: []
+ loop: '{{ remaining_groups.security_groups }}'
+ ignore_errors: yes
+
+ - name: Delete remaining SGs
+ ec2_group:
+ state: absent
+ group_id: '{{ item.group_id }}'
+ loop: '{{ remaining_groups.security_groups }}'
+ ignore_errors: yes
+
+ # ============================================================
+
+ - name: tidy up VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ state: absent
+ cidr_block: "{{ vpc_cidr }}"
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/multi_account.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/multi_account.yml
new file mode 100644
index 000000000..675dfd933
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/multi_account.yml
@@ -0,0 +1,124 @@
+- block:
+ - aws_caller_info:
+ register: caller_facts
+ - name: create a VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc-2"
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ tags:
+ Description: "Created by ansible-test"
+ register: vpc_result_2
+ - name: Peer the secondary-VPC to the main VPC
+ ec2_vpc_peer:
+ vpc_id: '{{ vpc_result_2.vpc.id }}'
+ peer_vpc_id: '{{ vpc_result.vpc.id }}'
+ peer_owner_id: '{{ caller_facts.account }}'
+ peer_region: '{{ aws_region }}'
+ register: peer_origin
+ - name: Accept the secondary-VPC peering connection in the main VPC
+ ec2_vpc_peer:
+ peer_vpc_id: '{{ vpc_result_2.vpc.id }}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ state: accept
+ peering_id: '{{ peer_origin.peering_id }}'
+ peer_owner_id: '{{ caller_facts.account }}'
+ peer_region: '{{ aws_region }}'
+ - name: Create group in second VPC
+ ec2_group:
+ name: '{{ ec2_group_name }}-external'
+ description: '{{ ec2_group_description }}'
+ vpc_id: '{{ vpc_result_2.vpc.id }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ cidr_ip: 0.0.0.0/0
+ ports:
+ - 80
+ rule_desc: 'http whoo'
+ register: external
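+  # The cross-account rule target below is written as "<owner account id>/<group id>/<group name>";
+  # the later "bad group_id" task deliberately swaps the account id and group id and is expected to fail.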
+ - name: Create group in internal VPC
+ ec2_group:
+ name: '{{ ec2_group_name }}-internal'
+ description: '{{ ec2_group_description }}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ group_id: '{{ caller_facts.account }}/{{ external.group_id }}/{{ ec2_group_name }}-external'
+ ports:
+ - 80
+ - name: Re-make same rule, expecting changed=false in internal VPC
+ ec2_group:
+ name: '{{ ec2_group_name }}-internal'
+ description: '{{ ec2_group_description }}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ group_id: '{{ caller_facts.account }}/{{ external.group_id }}/{{ ec2_group_name }}-external'
+ ports:
+ - 80
+ register: out
+ - assert:
+ that:
+ - out is not changed
+ - name: Try again with a bad group_id group in internal VPC
+ ec2_group:
+ name: '{{ ec2_group_name }}-internal'
+ description: '{{ ec2_group_description }}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ group_id: '{{ external.group_id }}/{{ caller_facts.account }}/{{ ec2_group_name }}-external'
+ ports:
+ - 80
+ register: out
+ ignore_errors: true
+ - assert:
+ that:
+ - out is failed
+ always:
+ - pause: seconds=5
+ - name: Delete secondary-VPC side of peer
+ ec2_vpc_peer:
+ vpc_id: '{{ vpc_result_2.vpc.id }}'
+ peer_vpc_id: '{{ vpc_result.vpc.id }}'
+ peering_id: '{{ peer_origin.peering_id }}'
+ state: absent
+ peer_owner_id: '{{ caller_facts.account }}'
+ peer_region: '{{ aws_region }}'
+ ignore_errors: yes
+ - name: Delete main-VPC side of peer
+ ec2_vpc_peer:
+ peer_vpc_id: '{{ vpc_result_2.vpc.id }}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ state: absent
+ peering_id: '{{ peer_origin.peering_id }}'
+ peer_owner_id: '{{ caller_facts.account }}'
+ peer_region: '{{ aws_region }}'
+ ignore_errors: yes
+ - name: Clean up group in second VPC
+ ec2_group:
+ name: '{{ ec2_group_name }}-external'
+ description: '{{ ec2_group_description }}'
+ state: absent
+ vpc_id: '{{ vpc_result_2.vpc.id }}'
+ ignore_errors: yes
+ - name: Clean up group in second VPC
+ ec2_group:
+ name: '{{ ec2_group_name }}-internal'
+ description: '{{ ec2_group_description }}'
+ state: absent
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ ignore_errors: yes
+ - name: tidy up VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc-2"
+ state: absent
+ cidr_block: "{{ vpc_cidr }}"
+ ignore_errors: yes
+ register: removed
+ retries: 10
+ until: removed is not failed
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/multi_nested_target.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/multi_nested_target.yml
new file mode 100644
index 000000000..87f48468f
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/multi_nested_target.yml
@@ -0,0 +1,213 @@
+---
+ # ============================================================
+
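+  # The cidr_ip / cidr_ipv6 values below mix plain strings with nested lists; the module is
+  # expected to flatten them without warnings, e.g. four IPv4 ranges and two IPv6 ranges on one rule.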
+ - name: test state=present for multiple ipv6 and ipv4 targets (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ ec2_group_name }}'
+ description: '{{ ec2_group_description }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6:
+ - "64:ff9b::/96"
+ - ["2620::/32"]
+ - proto: "tcp"
+ ports: 5665
+ cidr_ip:
+ - 172.16.1.0/24
+ - 172.16.17.0/24
+ - ["10.0.0.0/24", "10.20.0.0/24"]
+ check_mode: true
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+
+ - name: test state=present for multiple ipv6 and ipv4 targets (expected changed=true)
+ ec2_group:
+ name: '{{ ec2_group_name }}'
+ description: '{{ ec2_group_description }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6:
+ - "64:ff9b::/96"
+ - ["2620::/32"]
+ - proto: "tcp"
+ ports: 5665
+ cidr_ip:
+ - 172.16.1.0/24
+ - 172.16.17.0/24
+ - ["10.0.0.0/24", "10.20.0.0/24"]
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.ip_permissions | length == 2'
+ - 'result.ip_permissions[0].ip_ranges | length == 4 or result.ip_permissions[1].ip_ranges | length == 4'
+ - 'result.ip_permissions[0].ipv6_ranges | length == 2 or result.ip_permissions[1].ipv6_ranges | length == 2'
+
+ - name: test state=present for multiple ipv6 and ipv4 targets (expected changed=false) (CHECK MODE)
+ ec2_group:
+ name: '{{ ec2_group_name }}'
+ description: '{{ ec2_group_description }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6:
+ - "64:ff9b::/96"
+ - ["2620::/32"]
+ - proto: "tcp"
+ ports: 5665
+ cidr_ip:
+ - 172.16.1.0/24
+ - 172.16.17.0/24
+ - ["10.0.0.0/24", "10.20.0.0/24"]
+ check_mode: true
+ register: result
+
+  - name: assert state=present (expected changed=false)
+ assert:
+ that:
+ - 'not result.changed'
+
+ - name: test state=present for multiple ipv6 and ipv4 targets (expected changed=false)
+ ec2_group:
+ name: '{{ ec2_group_name }}'
+ description: '{{ ec2_group_description }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6:
+ - "64:ff9b::/96"
+ - ["2620::/32"]
+ - proto: "tcp"
+ ports: 5665
+ cidr_ip:
+ - 172.16.1.0/24
+ - 172.16.17.0/24
+ - ["10.0.0.0/24", "10.20.0.0/24"]
+ register: result
+
+  - name: assert state=present (expected changed=false)
+ assert:
+ that:
+ - 'not result.changed'
+
+ - name: test state=present purging a nested ipv4 target (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ ec2_group_name }}'
+ description: '{{ ec2_group_description }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6:
+ - "64:ff9b::/96"
+ - ["2620::/32"]
+ - proto: "tcp"
+ ports: 5665
+ cidr_ip:
+ - 172.16.1.0/24
+ - 172.16.17.0/24
+ - ["10.0.0.0/24"]
+ check_mode: true
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: test state=present purging a nested ipv4 target (expected changed=true)
+ ec2_group:
+ name: '{{ ec2_group_name }}'
+ description: '{{ ec2_group_description }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6:
+ - "64:ff9b::/96"
+ - ["2620::/32"]
+ - proto: "tcp"
+ ports: 5665
+ cidr_ip:
+ - 172.16.1.0/24
+ - 172.16.17.0/24
+ - ["10.0.0.0/24"]
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - 'result.ip_permissions[0].ip_ranges | length == 3 or result.ip_permissions[1].ip_ranges | length == 3'
+ - 'result.ip_permissions[0].ipv6_ranges | length == 2 or result.ip_permissions[1].ipv6_ranges | length == 2'
+
+ - name: test state=present with both associated ipv6 targets nested (expected changed=false)
+ ec2_group:
+ name: '{{ ec2_group_name }}'
+ description: '{{ ec2_group_description }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6:
+ - ["2620::/32", "64:ff9b::/96"]
+ - proto: "tcp"
+ ports: 5665
+ cidr_ip:
+ - 172.16.1.0/24
+ - 172.16.17.0/24
+ - ["10.0.0.0/24"]
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+
+ - name: test state=present add another nested ipv6 target (expected changed=true)
+ ec2_group:
+ name: '{{ ec2_group_name }}'
+ description: '{{ ec2_group_description }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6:
+ - ["2620::/32", "64:ff9b::/96"]
+ - ["2001:DB8:A0B:12F0::1/64"]
+ - proto: "tcp"
+ ports: 5665
+ cidr_ip:
+ - 172.16.1.0/24
+ - 172.16.17.0/24
+ - ["10.0.0.0/24"]
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - result.warning is not defined
+ - 'result.ip_permissions[0].ip_ranges | length == 3 or result.ip_permissions[1].ip_ranges | length == 3'
+ - 'result.ip_permissions[0].ipv6_ranges | length == 3 or result.ip_permissions[1].ipv6_ranges | length == 3'
+
+ - name: delete it
+ ec2_group:
+ name: '{{ ec2_group_name }}'
+ state: absent
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/numeric_protos.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/numeric_protos.yml
new file mode 100644
index 000000000..6cca9fc43
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/numeric_protos.yml
@@ -0,0 +1,60 @@
+---
+- block:
+ - name: set up temporary group name for tests
+ set_fact:
+ group_tmp_name: '{{ec2_group_name}}-numbered-protos'
+
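+  # Protocol number 47 is GRE; the next two tasks check that an integer proto (47) and a
+  # quoted proto ('47') describe the same rule, so the second run reports no change.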
+ - name: Create a group with numbered protocol (GRE)
+ ec2_group:
+ name: '{{ group_tmp_name }}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ ec2_group_description }}'
+ rules:
+ - proto: 47
+ to_port: -1
+ from_port: -1
+ cidr_ip: 0.0.0.0/0
+ state: present
+ register: result
+
+ - name: Create a group with a quoted proto
+ ec2_group:
+ name: '{{ group_tmp_name }}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ ec2_group_description }}'
+ rules:
+ - proto: '47'
+ to_port: -1
+ from_port: -1
+ cidr_ip: 0.0.0.0/0
+ state: present
+ register: result
+ - assert:
+ that:
+ - result is not changed
+ - name: Add a tag with a numeric value
+ ec2_group:
+ name: '{{ group_tmp_name }}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ ec2_group_description }}'
+ tags:
+ foo: 1
+ - name: Read a tag with a numeric value
+ ec2_group:
+ name: '{{ group_tmp_name }}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ ec2_group_description }}'
+ tags:
+ foo: 1
+ register: result
+ - assert:
+ that:
+ - result is not changed
+
+ always:
+    - name: tidy up numbered protocol test security group
+ ec2_group:
+ name: '{{group_tmp_name}}'
+ state: absent
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/rule_group_create.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/rule_group_create.yml
new file mode 100644
index 000000000..4d763c988
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/rule_group_create.yml
@@ -0,0 +1,127 @@
+---
+- block:
+ - name: Create a group with self-referring rule
+ ec2_group:
+ name: '{{ec2_group_name}}-auto-create-1'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ec2_group_description}}'
+ rules:
+ - proto: "tcp"
+ from_port: 8000
+ to_port: 8100
+ group_name: '{{ec2_group_name}}-auto-create-1'
+ state: present
+ register: result
+
+  - name: Create a second group to use as a rule target
+ ec2_group:
+ name: '{{ec2_group_name}}-auto-create-2'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ec2_group_description}}'
+ state: present
+
+ - name: Create a series of rules with a recently created group as target
+ ec2_group:
+ name: '{{ec2_group_name}}-auto-create-1'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ec2_group_description}}'
+ purge_rules: false
+ rules:
+ - proto: "tcp"
+ from_port: "{{ item }}"
+ to_port: "{{ item }}"
+ group_name: '{{ec2_group_name}}-auto-create-2'
+ state: present
+ register: result
+ with_items:
+ - 20
+ - 40
+ - 60
+ - 80
+
+ - assert:
+ that:
+ - result.warning is not defined
+
+  - name: Try to add a rule targeting a group that does not exist yet (no group_desc, expected failure)
+ ec2_group:
+ name: '{{ec2_group_name}}-auto-create-1'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ec2_group_description}}'
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ group_name: '{{ec2_group_name}}-auto-create-3'
+ state: present
+ register: result
+ ignore_errors: true
+
+ - name: assert you can't create a new group from a rule target with no description
+ assert:
+ that:
+ - result is failed
+
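+  # Auto-creating a rule target only works when group_desc is supplied; the task below adds it,
+  # so the '-auto-create-3' group is created on the fly as the rule target.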
+ - name: Create a group with a target of a separate group
+ ec2_group:
+ name: '{{ec2_group_name}}-auto-create-1'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ec2_group_description}}'
+ rules:
+ - proto: tcp
+ ports:
+ - 22
+ - 80
+ group_name: '{{ec2_group_name}}-auto-create-3'
+ group_desc: '{{ec2_group_description}}'
+ state: present
+ register: result
+
+ - assert:
+ that:
+ - result.warning is not defined
+
+ - name: Create a 4th group
+ ec2_group:
+ name: '{{ec2_group_name}}-auto-create-4'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ec2_group_description}}'
+ state: present
+ rules:
+ - proto: tcp
+ ports:
+ - 22
+ cidr_ip: 0.0.0.0/0
+
+ - name: use recently created group in a rule
+ ec2_group:
+ name: '{{ec2_group_name}}-auto-create-5'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ec2_group_description}}'
+ rules:
+ - proto: tcp
+ ports:
+ - 443
+ group_name: '{{ec2_group_name}}-auto-create-4'
+ state: present
+
+ - assert:
+ that:
+ - result.warning is not defined
+
+ always:
+    - name: tidy up auto-create test security group rules
+ ec2_group:
+ name: '{{ec2_group_name}}-auto-create-{{ item }}'
+ description: '{{ec2_group_description}}'
+ rules: []
+ rules_egress: []
+ ignore_errors: yes
+ with_items: [5, 4, 3, 2, 1]
+    - name: tidy up auto-create test security groups
+ ec2_group:
+ name: '{{ec2_group_name}}-auto-create-{{ item }}'
+ state: absent
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ ignore_errors: yes
+ with_items: [1, 2, 3, 4, 5]
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/aliases
new file mode 100644
index 000000000..951ec3caf
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/aliases
@@ -0,0 +1,10 @@
+# reason: unstable
+# Testing of paginated results fails when fewer results are returned than
+# expected - probably a race condition
+# https://github.com/ansible-collections/amazon.aws/issues/441
+disabled
+
+slow
+
+cloud/aws
+ec2_snapshot_info
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/defaults/main.yml
new file mode 100644
index 000000000..dc1f0f703
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+# defaults file for ec2_snapshot
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/meta/main.yml
new file mode 100644
index 000000000..2bff8543a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+- role: setup_ec2_facts
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/tasks/main.yml
new file mode 100644
index 000000000..1a4bb0fb5
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/tasks/main.yml
@@ -0,0 +1,400 @@
+---
+# Tests for EC2 Snapshot
+#
+# Tests ec2_snapshot:
+# - Snapshot creation
+# - Create with last_snapshot_min_age
+# - Snapshot deletion
+#
+# Tests ec2_snapshot_info:
+# - Listing snapshots for filter: tag
+#
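+# last_snapshot_min_age is given in minutes: a new snapshot is only taken when the most recent
+# snapshot of the volume is older than that age. Illustrative sketch only (the volume ID below is
+# hypothetical and not used by this test suite):
+#
+#   - name: Snapshot only if the newest snapshot is older than an hour
+#     ec2_snapshot:
+#       volume_id: vol-0123456789abcdef0   # hypothetical ID, for illustration only
+#       last_snapshot_min_age: 60          # minutes
+#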
+- name: Integration testing for ec2_snapshot
+ module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+
+ collections:
+ - amazon.aws
+ - community.aws
+
+ block:
+ - name: Gather availability zones
+ aws_az_facts:
+ register: azs
+
+ # Create a new volume in detached mode without tags
+ - name: Create a detached volume without tags
+ ec2_vol:
+ volume_size: 1
+ zone: '{{ azs.availability_zones[0].zone_name }}'
+ register: volume_detached
+
+ # Capture snapshot of this detached volume and assert the results
+ - name: Create a snapshot of detached volume without tags and store results
+ ec2_snapshot:
+ volume_id: '{{ volume_detached.volume_id }}'
+ register: untagged_snapshot
+
+ - assert:
+ that:
+ - untagged_snapshot is changed
+ - untagged_snapshot.snapshots| length == 1
+ - untagged_snapshot.snapshots[0].volume_id == volume_detached.volume_id
+
+ - name: Setup an instance for testing, make sure volumes are attached before next task
+ ec2_instance:
+ name: '{{ resource_prefix }}'
+ instance_type: t2.nano
+ image_id: '{{ ec2_ami_id }}'
+ volumes:
+ - device_name: /dev/xvda
+ ebs:
+ volume_size: 8
+ delete_on_termination: true
+ state: running
+ wait: true
+ register: instance
+
+ - set_fact:
+ volume_id: '{{ instance.instances[0].block_device_mappings[0].ebs.volume_id }}'
+ instance_id: '{{ instance.instances[0].instance_id }}'
+ device_name: '{{ instance.instances[0].block_device_mappings[0].device_name }}'
+
+ - name: Take snapshot (check mode)
+ ec2_snapshot:
+ instance_id: '{{ instance_id }}'
+ device_name: '{{ device_name }}'
+ snapshot_tags:
+ Test: '{{ resource_prefix }}'
+ check_mode: true
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: Take snapshot of volume
+ ec2_snapshot:
+ volume_id: '{{ volume_id }}'
+ register: result
+
+    # The Name tag is created automatically from the instance name, i.e. the resource_prefix
+ - name: Get info about snapshots
+ ec2_snapshot_info:
+ filters:
+ "tag:Name": '{{ resource_prefix }}'
+ register: info_result
+
+ - assert:
+ that:
+ - result is changed
+ - info_result is not changed
+ - info_result.snapshots| length == 1
+ - info_result.snapshots[0].snapshot_id == result.snapshot_id
+ - info_result.snapshots[0].volume_id == result.volume_id
+ - info_result.snapshots[0].volume_size == result.volume_size
+ - info_result.snapshots[0].tags == result.tags
+
+ - name: Get info about snapshots (check_mode)
+ ec2_snapshot_info:
+ filters:
+ "tag:Name": '{{ resource_prefix }}'
+ register: info_check
+ check_mode: true
+
+ - assert:
+ that:
+ - info_check is not changed
+ - info_check.snapshots| length == 1
+ - info_check.snapshots[0].snapshot_id == result.snapshot_id
+ - info_check.snapshots[0].volume_id == result.volume_id
+ - info_check.snapshots[0].volume_size == result.volume_size
+ - info_check.snapshots[0].tags == result.tags
+
+ - name: Take snapshot if most recent >1hr (False) (check mode)
+ ec2_snapshot:
+ volume_id: '{{ volume_id }}'
+ snapshot_tags:
+ Name: '{{ resource_prefix }}'
+ last_snapshot_min_age: 60
+ check_mode: true
+ register: result
+ - assert:
+ that:
+ - result is not changed
+
+ - name: Take snapshot if most recent >1hr (False)
+ ec2_snapshot:
+ volume_id: '{{ volume_id }}'
+ last_snapshot_min_age: 60
+ register: result
+
+ - name: Get info about snapshots
+ ec2_snapshot_info:
+ filters:
+ "tag:Name": '{{ resource_prefix }}'
+ register: info_result
+
+ - assert:
+ that:
+ - result is not changed
+ - info_result.snapshots| length == 1
+
+ - name: Pause so we can do a last_snapshot_min_age test
+ pause:
+ minutes: 1
+
+ - name: Take snapshot if most recent >1min (True) (check mode)
+ ec2_snapshot:
+ volume_id: '{{ volume_id }}'
+ snapshot_tags:
+ Name: '{{ resource_prefix }}'
+ last_snapshot_min_age: 1
+ check_mode: true
+ register: result
+ - assert:
+ that:
+ - result is changed
+
+ - name: Take snapshot if most recent >1min (True)
+ ec2_snapshot:
+ volume_id: '{{ volume_id }}'
+ last_snapshot_min_age: 1
+ register: result
+
+ - name: Get info about snapshots
+ ec2_snapshot_info:
+ filters:
+ "tag:Name": '{{ resource_prefix }}'
+ register: info_result
+
+ - assert:
+ that:
+ - result is changed
+ - info_result.snapshots| length == 2
+ - result.snapshot_id in ( info_result.snapshots | map(attribute='snapshot_id') | list )
+
+ - name: Take snapshot with a tag (check mode)
+ ec2_snapshot:
+ volume_id: '{{ volume_id }}'
+ snapshot_tags:
+ MyTag: '{{ resource_prefix }}'
+ check_mode: true
+ register: result
+ - assert:
+ that:
+ - result is changed
+
+ - name: Take snapshot and tag it
+ ec2_snapshot:
+ volume_id: '{{ volume_id }}'
+ snapshot_tags:
+ MyTag: '{{ resource_prefix }}'
+ register: tagged_result
+
+ - name: Get info about snapshots by tag
+ ec2_snapshot_info:
+ filters:
+ "tag:MyTag": '{{ resource_prefix }}'
+ register: tag_info_result
+
+ - set_fact:
+ tagged_snapshot_id: '{{ tag_info_result.snapshots[0].snapshot_id }}'
+
+ - assert:
+ that:
+ - tagged_result is changed
+ - tagged_result.tags| length == 2
+ - tag_info_result.snapshots| length == 1
+ - tagged_result.tags.MyTag == "{{ resource_prefix }}"
+ - '"{{ tagged_result.snapshot_id }}" == "{{ tagged_snapshot_id }}"'
+
+ - name: Get info about all snapshots for this test
+ ec2_snapshot_info:
+ filters:
+ "tag:Name": '{{ resource_prefix }}'
+ register: info_result
+
+ - assert:
+ that:
+ - info_result.snapshots | length == 3
+
+ - name: Generate extra snapshots
+ ec2_snapshot:
+ volume_id: '{{ volume_id }}'
+ snapshot_tags:
+ ResourcePrefix: '{{ resource_prefix }}'
+ loop: '{{ range(1, 6, 1) | list }}'
+ loop_control:
+ # Anything under 15 will trigger SnapshotCreationPerVolumeRateExceeded,
+ # this should now be automatically handled, but pause a little anyway to
+ # avoid being aggressive
+ pause: 15
+ label: "Generate extra snapshots - {{ item }}"
+
+ - name: Pause to allow creation to finish
+ pause:
+ minutes: 3
+
+ # check that snapshot_ids and max_results are mutually exclusive
+ - name: Check that max_results and snapshot_ids are mutually exclusive
+ ec2_snapshot_info:
+ snapshot_ids:
+ - '{{ tagged_snapshot_id }}'
+ max_results: 5
+ ignore_errors: true
+ register: info_result
+
+ - name: assert that operation failed
+ assert:
+ that:
+ - info_result is failed
+
+ # check that snapshot_ids and next_token_id are mutually exclusive
+ - name: Check that snapshot_ids and next_token_id are mutually exclusive
+ ec2_snapshot_info:
+ snapshot_ids:
+ - '{{ tagged_snapshot_id }}'
+ next_token_id: 'random_value_token'
+ ignore_errors: true
+ register: info_result
+
+ - name: assert that operation failed
+ assert:
+ that:
+ - info_result is failed
+
+ # Retrieve snapshots in paginated mode
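+    # max_results caps the page size and the response carries next_token_id while more results
+    # remain; the follow-up request passes that token back (5 snapshots on the first page,
+    # the remaining 3 on the second).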
+ - name: Get snapshots in paginated mode using max_results option
+ ec2_snapshot_info:
+ filters:
+ "tag:Name": '{{ resource_prefix }}'
+ max_results: 5
+ register: info_result
+
+ - assert:
+ that:
+ - info_result.snapshots | length == 5
+ - info_result.next_token_id is defined
+
+ # Pagination : 2nd request
+ - name: Get snapshots for a second paginated request
+ ec2_snapshot_info:
+ filters:
+ "tag:Name": '{{ resource_prefix }}'
+ next_token_id: "{{ info_result.next_token_id }}"
+ register: info_result
+
+ - assert:
+ that:
+ - info_result.snapshots | length == 3
+
+ # delete the tagged snapshot - check mode
+ - name: Delete the tagged snapshot (check mode)
+ ec2_snapshot:
+ state: absent
+ snapshot_id: '{{ tagged_snapshot_id }}'
+ register: delete_result_check_mode
+ check_mode: true
+
+ - assert:
+ that:
+ - delete_result_check_mode is changed
+
+ # delete the tagged snapshot
+ - name: Delete the tagged snapshot
+ ec2_snapshot:
+ state: absent
+ snapshot_id: '{{ tagged_snapshot_id }}'
+
+ # delete the tagged snapshot again (results in InvalidSnapshot.NotFound)
+ - name: Delete already removed snapshot (check mode)
+ ec2_snapshot:
+ state: absent
+ snapshot_id: '{{ tagged_snapshot_id }}'
+ register: delete_result_second_check_mode
+ check_mode: true
+
+ - assert:
+ that:
+ - delete_result_second_check_mode is not changed
+
+ - name: Delete already removed snapshot (idempotent)
+ ec2_snapshot:
+ state: absent
+ snapshot_id: '{{ tagged_snapshot_id }}'
+ register: delete_result_second_idempotent
+
+ - assert:
+ that:
+ - delete_result_second_idempotent is not changed
+
+ - name: Get info about all snapshots for this test
+ ec2_snapshot_info:
+ filters:
+ "tag:Name": '{{ resource_prefix }}'
+ register: info_result
+
+ - assert:
+ that:
+ - info_result.snapshots| length == 7
+ - tagged_snapshot_id not in ( info_result.snapshots | map(attribute='snapshot_id') | list )
+
+ - name: Delete snapshots
+ ec2_snapshot:
+ state: absent
+ snapshot_id: '{{ item.snapshot_id }}'
+ with_items: '{{ info_result.snapshots }}'
+
+ - name: Get info about all snapshots for this test
+ ec2_snapshot_info:
+ filters:
+ "tag:Name": '{{ resource_prefix }}'
+ register: info_result
+
+ - assert:
+ that:
+ - info_result.snapshots| length == 0
+
+ always:
+
+ - name: Snapshots to delete
+ ec2_snapshot_info:
+ filters:
+ "tag:Name": '{{ resource_prefix }}'
+ register: tagged_snapshots
+
+ - name: Delete tagged snapshots
+ ec2_snapshot:
+ state: absent
+ snapshot_id: '{{ item.snapshot_id }}'
+ with_items: '{{ tagged_snapshots.snapshots }}'
+ ignore_errors: true
+
+ - name: Delete instance
+ ec2_instance:
+ instance_ids: '{{ instance_id }}'
+ state: absent
+ ignore_errors: true
+
+ - name: Delete volume
+ ec2_vol:
+ id: '{{ volume_id }}'
+ state: absent
+ ignore_errors: true
+
+ - name: Delete detached and untagged volume
+ ec2_vol:
+ id: '{{ volume_detached.volume_id}}'
+ state: absent
+ ignore_errors: true
+
+ - name: Delete untagged snapshot
+ ec2_snapshot:
+ state: absent
+ snapshot_id: '{{ untagged_snapshot.snapshot_id }}'
+ ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/aliases
new file mode 100644
index 000000000..f556641fb
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+ec2_spot_instance_info \ No newline at end of file
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/defaults/main.yml
new file mode 100644
index 000000000..cb3895af0
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/defaults/main.yml
@@ -0,0 +1,14 @@
+---
+vpc_seed_a: '{{ resource_prefix }}'
+vpc_seed_b: '{{ resource_prefix }}-ec2_eni'
+vpc_prefix: '10.{{ 256 | random(seed=vpc_seed_a) }}.{{ 256 | random(seed=vpc_seed_b ) }}'
+vpc_cidr: '{{ vpc_prefix}}.128/26'
+ip_1: "{{ vpc_prefix }}.132"
+ip_2: "{{ vpc_prefix }}.133"
+ip_3: "{{ vpc_prefix }}.134"
+ip_4: "{{ vpc_prefix }}.135"
+ip_5: "{{ vpc_prefix }}.136"
+
+ec2_ips:
+- "{{ vpc_prefix }}.137"
+- "{{ vpc_prefix }}.138"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/meta/main.yml
new file mode 100644
index 000000000..1471b11f6
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_ec2_facts
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/tasks/main.yaml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/tasks/main.yaml
new file mode 100644
index 000000000..1e98ad890
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/tasks/main.yaml
@@ -0,0 +1,315 @@
+---
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+
+ collections:
+ - amazon.aws
+ - community.aws
+
+ block:
+ - name: Get available AZs
+ aws_az_info:
+ filters:
+ region-name: "{{ aws_region }}"
+ register: az_info
+
+ - name: Pick an AZ
+ set_fact:
+ availability_zone: "{{ az_info['availability_zones'][0]['zone_name'] }}"
+
+ # ============================================================
+ - name: create a VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ tags:
+ Name: "{{ resource_prefix }}-vpc"
+ Description: "Created by ansible-test"
+ register: vpc_result
+
+ - name: create a subnet
+ ec2_vpc_subnet:
+ cidr: "{{ vpc_cidr }}"
+ az: "{{ availability_zone }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags:
+ Name: "{{ resource_prefix }}-vpc"
+ Description: "Created by ansible-test"
+ state: present
+ register: vpc_subnet_result
+
+ - name: create a security group
+ ec2_group:
+ name: "{{ resource_prefix }}-sg"
+ description: "Created by {{ resource_prefix }}"
+ rules: []
+ state: present
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ register: vpc_sg_result
+
+ - name: create a new ec2 key pair
+ ec2_key:
+ name: "{{ resource_prefix }}-keypair"
+
+ - name: Set facts to simplify use of extra resources
+ set_fact:
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ vpc_subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ vpc_sg_id: "{{ vpc_sg_result.group_id }}"
+
+ # ============================================================
+
+    - name: Run tests for terminating associated instances
+ import_tasks: terminate_associated_instances.yml
+
+ # Assert that spot instance request is created
+ - name: Create simple spot instance request
+ ec2_spot_instance:
+ launch_specification:
+ image_id: "{{ ec2_ami_id }}"
+ key_name: "{{ resource_prefix }}-keypair"
+ instance_type: "t2.medium"
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ tags:
+ ansible-test: "{{ resource_prefix }}"
+ register: create_result
+
+ - name: Assert that result has changed and request has been created
+ assert:
+ that:
+ - create_result is changed
+ - create_result.spot_request is defined
+ - create_result.spot_request.spot_instance_request_id is defined
+ - create_result.spot_request.launch_specification.subnet_id == vpc_subnet_result.subnet.id
+
+ - name: Get info about the spot instance request created
+ ec2_spot_instance_info:
+ spot_instance_request_ids:
+ - "{{ create_result.spot_request.spot_instance_request_id }}"
+ register: spot_instance_info_result
+
+ - name: Assert that the spot request created is open or active
+ assert:
+ that:
+ - spot_instance_info_result.spot_request[0].state in ['open', 'active']
+
+ - name: Create spot request with more complex options
+ ec2_spot_instance:
+ launch_specification:
+ image_id: "{{ ec2_ami_id }}"
+ key_name: "{{ resource_prefix }}-keypair"
+ instance_type: "t2.medium"
+ block_device_mappings:
+ - device_name: /dev/sdb
+ ebs:
+ delete_on_termination: True
+ volume_type: gp3
+ volume_size: 5
+ network_interfaces:
+ - associate_public_ip_address: False
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ delete_on_termination: True
+ device_index: 0
+ placement:
+ availability_zone: '{{ availability_zone }}'
+ monitoring:
+ enabled: False
+ spot_price: 0.002
+ tags:
+ camelCase: "helloWorld"
+ PascalCase: "HelloWorld"
+ snake_case: "hello_world"
+ "Title Case": "Hello World"
+ "lowercase spaced": "hello world"
+ ansible-test: "{{ resource_prefix }}"
+ register: complex_create_result
+
+ - assert:
+ that:
+ - complex_create_result is changed
+ - complex_create_result.spot_request is defined
+ - complex_create_result.spot_request.spot_instance_request_id is defined
+ - complex_create_result.spot_request.type == 'one-time'
+ - '"0.002" in complex_create_result.spot_request.spot_price' ## AWS pads trailing zeros on the spot price
+ - launch_spec.placement.availability_zone == availability_zone
+ - launch_spec.block_device_mappings|length == 1
+ - launch_spec.block_device_mappings.0.ebs.delete_on_termination == true
+ - launch_spec.block_device_mappings.0.ebs.volume_type == 'gp3'
+ - launch_spec.block_device_mappings.0.ebs.volume_size == 5
+ - launch_spec.network_interfaces|length == 1
+ - launch_spec.network_interfaces.0.device_index == 0
+ - launch_spec.network_interfaces.0.associate_public_ip_address == false
+ - launch_spec.network_interfaces.0.delete_on_termination == true
+ - spot_request_tags|length == 6
+ - spot_request_tags['camelCase'] == 'helloWorld'
+ - spot_request_tags['PascalCase'] == 'HelloWorld'
+ - spot_request_tags['snake_case'] == 'hello_world'
+ - spot_request_tags['Title Case'] == 'Hello World'
+ - spot_request_tags['lowercase spaced'] == 'hello world'
+ vars:
+ launch_spec: '{{ complex_create_result.spot_request.launch_specification }}'
+ spot_request_tags: '{{ complex_create_result.spot_request.tags }}'
+
+ - name: Get info about the complex spot instance request created
+ ec2_spot_instance_info:
+ spot_instance_request_ids:
+ - "{{ complex_create_result.spot_request.spot_instance_request_id }}"
+ register: complex_info_result
+
+ - name: Assert that the complex spot request created is open/active and correct keys are set
+ assert:
+ that:
+ - complex_info_result.spot_request[0].state in ['open', 'active']
+ - complex_create_result.spot_request.spot_price == complex_info_result.spot_request[0].spot_price
+ - create_launch_spec.block_device_mappings[0].ebs.volume_size == info_launch_spec.block_device_mappings[0].ebs.volume_size
+ - create_launch_spec.block_device_mappings[0].ebs.volume_type == info_launch_spec.block_device_mappings[0].ebs.volume_type
+ - create_launch_spec.network_interfaces[0].delete_on_termination == info_launch_spec.network_interfaces[0].delete_on_termination
+ vars:
+ create_launch_spec: "{{ complex_create_result.spot_request.launch_specification }}"
+ info_launch_spec: "{{ complex_info_result.spot_request[0].launch_specification }}"
+
+ - name: Get info about the created spot instance requests and filter result based on provided filters
+ ec2_spot_instance_info:
+ spot_instance_request_ids:
+ - '{{ create_result.spot_request.spot_instance_request_id }}'
+ - '{{ complex_create_result.spot_request.spot_instance_request_id }}'
+ filters:
+ tag:ansible-test: "{{ resource_prefix }}"
+ launch.block-device-mapping.device-name: /dev/sdb
+ register: spot_instance_info_filter_result
+
+ - name: Assert that the correct spot request was returned in the filtered result
+ assert:
+ that:
+ - spot_instance_info_filter_result.spot_request[0].spot_instance_request_id == complex_create_result.spot_request.spot_instance_request_id
+
+ # Assert check mode
+ - name: Create spot instance request (check_mode)
+ ec2_spot_instance:
+ launch_specification:
+ image_id: "{{ ec2_ami_id }}"
+ key_name: "{{ resource_prefix }}-keypair"
+ instance_type: "t2.medium"
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ tags:
+ ansible-test: "{{ resource_prefix }}"
+ check_mode: True
+ register: check_create_result
+
+ - assert:
+ that:
+ - check_create_result is changed
+
+ - name: Remove spot instance request (check_mode)
+ ec2_spot_instance:
+ spot_instance_request_ids: '{{ create_result.spot_request.spot_instance_request_id }}'
+ state: absent
+ check_mode: True
+ register: check_cancel_result
+
+ - assert:
+ that:
+ - check_cancel_result is changed
+
+ - name: Remove spot instance requests
+ ec2_spot_instance:
+ spot_instance_request_ids:
+ - '{{ create_result.spot_request.spot_instance_request_id }}'
+ - '{{ complex_create_result.spot_request.spot_instance_request_id }}'
+ state: absent
+ register: cancel_result
+
+ - assert:
+ that:
+ - cancel_result is changed
+ - '"Cancelled Spot request" in cancel_result.msg'
+
+    - name: Pause so the EC2 API catches up with the cancellation before the idempotency check
+ pause:
+ seconds: 3
+
+ - name: Check no change if request is already cancelled (idempotency)
+ ec2_spot_instance:
+ spot_instance_request_ids: '{{ create_result.spot_request.spot_instance_request_id }}'
+ state: absent
+ register: cancel_request_again
+
+ - assert:
+ that:
+ - cancel_request_again is not changed
+ - '"Spot request not found or already cancelled" in cancel_request_again.msg'
+
+ - name: Gracefully try to remove non-existent request (NotFound)
+ ec2_spot_instance:
+ spot_instance_request_ids:
+ - sir-12345678
+ state: absent
+ register: fake_cancel_result
+
+ - assert:
+ that:
+ - fake_cancel_result is not changed
+ - '"Spot request not found or already cancelled" in fake_cancel_result.msg'
+
+ always:
+
+ # ============================================================
+ - name: Delete spot instances
+ ec2_instance:
+ state: absent
+ filters:
+ vpc-id: "{{ vpc_result.vpc.id }}"
+
+ - name: get all spot requests created during test
+ ec2_spot_instance_info:
+ filters:
+ tag:ansible-test: "{{ resource_prefix }}"
+ register: spot_request_list
+
+ - name: remove spot instance requests
+ ec2_spot_instance:
+ spot_instance_request_ids:
+ - '{{ item.spot_instance_request_id }}'
+ state: 'absent'
+ ignore_errors: true
+ retries: 5
+ with_items: "{{ spot_request_list.spot_request }}"
+
+ - name: remove the security group
+ ec2_group:
+ name: "{{ resource_prefix }}-sg"
+ description: "{{ resource_prefix }}"
+ rules: []
+ state: absent
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ ignore_errors: true
+ retries: 5
+
+ - name: remove the subnet
+ ec2_vpc_subnet:
+ cidr: "{{ vpc_cidr }}"
+ az: "{{ availability_zone }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ state: absent
+ ignore_errors: true
+ retries: 5
+ when: vpc_subnet_result is defined
+
+ - name: remove the VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ cidr_block: "{{ vpc_cidr }}"
+ state: absent
+ ignore_errors: true
+ retries: 5
+
+ - name: remove key pair by name
+ ec2_key:
+ name: "{{ resource_prefix }}-keypair"
+ state: absent
+ ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/tasks/terminate_associated_instances.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/tasks/terminate_associated_instances.yml
new file mode 100644
index 000000000..92864baaf
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/tasks/terminate_associated_instances.yml
@@ -0,0 +1,109 @@
+---
+- block:
+
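+  # Two passes: first cancel a spot request without terminate_instances and check the launched
+  # instance keeps running, then cancel a second request with terminate_instances: true and
+  # check its instance is terminated (or shutting down).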
+ # Spot instance request creation
+ - name: Simple Spot Request Creation
+ amazon.aws.ec2_spot_instance:
+ launch_specification:
+ image_id: "{{ ec2_ami_id }}"
+ key_name: "{{ resource_prefix }}-keypair"
+ instance_type: "t2.micro"
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ tags:
+ ansible-test: "{{ resource_prefix }}"
+ register: create_result
+
+ # Get instance ID of associated spot instance request
+ - name: Get info about the spot instance request created
+ amazon.aws.ec2_spot_instance_info:
+ spot_instance_request_ids:
+ - "{{ create_result.spot_request.spot_instance_request_id }}"
+ register: spot_instance_info_result
+ retries: 5
+ until: spot_instance_info_result.spot_request[0].instance_id is defined
+
+ - name: Pause to allow instance launch
+ pause:
+ seconds: 60
+
+ - name: Get instance ID of the instance associated with above spot instance request
+ set_fact:
+ instance_id_1: "{{ spot_instance_info_result.spot_request[0].instance_id }}"
+
+ - name: Check state of instance - BEFORE request cancellation
+ amazon.aws.ec2_instance_info:
+ instance_ids: ["{{ instance_id_1 }}"]
+ register: instance_info_result
+
+ # Cancel spot instance request
+ - name: Spot Request Termination
+ amazon.aws.ec2_spot_instance:
+ spot_instance_request_ids:
+ - '{{ create_result.spot_request.spot_instance_request_id }}'
+ state: absent
+
+ # Verify that instance is not terminated and still running
+ - name: Check state of instance - AFTER request cancellation
+ amazon.aws.ec2_instance_info:
+ instance_ids: ["{{ instance_id_1 }}"]
+ register: instance_info_result
+
+ - assert:
+ that: instance_info_result.instances[0].state.name == 'running'
+
+#==========================================================================
+
+ # Spot instance request creation
+ - name: Simple Spot Request Creation
+ amazon.aws.ec2_spot_instance:
+ launch_specification:
+ image_id: "{{ ec2_ami_id }}"
+ key_name: "{{ resource_prefix }}-keypair"
+ instance_type: "t2.micro"
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ tags:
+ ansible-test: "{{ resource_prefix }}"
+ register: create_result
+
+ # Get instance ID of associated spot instance request
+ - name: Get info about the spot instance request created
+ amazon.aws.ec2_spot_instance_info:
+ spot_instance_request_ids:
+ - "{{ create_result.spot_request.spot_instance_request_id }}"
+ register: spot_instance_info_result
+ retries: 5
+ until: spot_instance_info_result.spot_request[0].instance_id is defined
+
+ - name: Pause to allow instance launch
+ pause:
+ seconds: 60
+
+ - name: Get instance ID of the instance associated with above spot instance request
+ set_fact:
+ instance_id_2: "{{ spot_instance_info_result.spot_request[0].instance_id }}"
+
+ - name: Check state of instance - BEFORE request cancellation
+ amazon.aws.ec2_instance_info:
+ instance_ids: ["{{ instance_id_2 }}"]
+ register: instance_info_result
+
+ # Cancel spot instance request
+ - name: Spot Request Termination
+ amazon.aws.ec2_spot_instance:
+ spot_instance_request_ids:
+ - '{{ create_result.spot_request.spot_instance_request_id }}'
+ state: absent
+ terminate_instances: true
+
+ - name: wait for instance to terminate
+ pause:
+ seconds: 60
+
+ # Verify that instance is terminated or shutting-down
+ - name: Check state of instance - AFTER request cancellation
+ amazon.aws.ec2_instance_info:
+ instance_ids: ["{{ instance_id_2 }}"]
+ register: instance_info_result
+
+ - assert:
+ that: instance_info_result.instances[0].state.name in ['terminated', 'shutting-down']
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/aliases
new file mode 100644
index 000000000..326c8845b
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+ec2_tag_info
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/defaults/main.yml
new file mode 100644
index 000000000..6aa39c736
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+# defaults file for test_ec2_tag
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/tasks/main.yml
new file mode 100644
index 000000000..1f2ea62cd
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/tasks/main.yml
@@ -0,0 +1,136 @@
+---
+# tasks file for test_ec2_tag
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ - name: Create an EC2 volume so we have something to tag
+ ec2_vol:
+ name: "{{ resource_prefix }} ec2_tag volume"
+ volume_size: 1
+ state: present
+ zone: "{{ aws_region }}a"
+ register: volume
+
+ - name: List the tags on the volume (ec2_tag_info)
+ ec2_tag_info:
+ resource: "{{ volume.volume_id }}"
+ register: result_info
+
+ - assert:
+ that:
+ - result_info.tags | length == 1
+ - result_info.tags.Name == '{{ resource_prefix }} ec2_tag volume'
+
+ - name: Set some new tags on the volume
+ ec2_tag:
+ resource: "{{ volume.volume_id }}"
+ state: present
+ tags:
+ foo: foo
+ bar: baz
+ baz: also baz
+ register: result
+ - name: List the new tags on the volume
+ ec2_tag_info:
+ resource: "{{ volume.volume_id }}"
+ register: result_info
+
+ - assert:
+ that:
+ - result is changed
+ - result.tags | length == 4
+ - result.added_tags | length == 3
+ - result.tags.Name == '{{ resource_prefix }} ec2_tag volume'
+ - result.tags.foo == 'foo'
+ - result.tags.bar == 'baz'
+ - result.tags.baz == 'also baz'
+ - result_info.tags | length == 4
+ - result_info.tags.Name == '{{ resource_prefix }} ec2_tag volume'
+ - result_info.tags.foo == 'foo'
+ - result_info.tags.bar == 'baz'
+ - result_info.tags.baz == 'also baz'
+
+ - name: Remove a tag by name
+ ec2_tag:
+ resource: "{{ volume.volume_id }}"
+ state: absent
+ tags:
+ baz:
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - result.removed_tags | length == 1
+ - "'baz' in result.removed_tags"
+
+ - name: Don't remove a tag
+ ec2_tag:
+ resource: "{{ volume.volume_id }}"
+ state: absent
+ tags:
+ foo: baz
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: Remove a tag
+ ec2_tag:
+ resource: "{{ volume.volume_id }}"
+ state: absent
+ tags:
+ foo: foo
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - result.tags | length == 2
+ - "'added_tags' not in result"
+ - result.removed_tags | length == 1
+ - result.tags.Name == '{{ resource_prefix }} ec2_tag volume'
+ - result.tags.bar == 'baz'
+
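+    # With purge_tags: true the listed tags become authoritative: every tag not listed
+    # (including Name) is removed, leaving only baz=quux below.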
+ - name: Set an exclusive tag
+ ec2_tag:
+ resource: "{{ volume.volume_id }}"
+ purge_tags: true
+ tags:
+ baz: quux
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - result.tags | length == 1
+ - result.added_tags | length == 1
+ - result.removed_tags | length == 2
+ - result.tags.baz == 'quux'
+
+ - name: Remove all tags
+ ec2_tag:
+ resource: "{{ volume.volume_id }}"
+ purge_tags: true
+ tags: {}
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - result.tags | length == 0
+
+ always:
+ - name: Remove the volume
+ ec2_vol:
+ id: "{{ volume.volume_id }}"
+ state: absent
+ register: result
+ until: result is not failed
+ ignore_errors: yes
+ retries: 10
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/vars/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/vars/main.yml
new file mode 100644
index 000000000..c2d0654ae
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/vars/main.yml
@@ -0,0 +1,2 @@
+---
+# vars file for test_ec2_tag
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/aliases
new file mode 100644
index 000000000..f348f79e6
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/aliases
@@ -0,0 +1,4 @@
+time=10m
+
+cloud/aws
+ec2_vol_info
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/defaults/main.yml
new file mode 100644
index 000000000..ae86815c5
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/defaults/main.yml
@@ -0,0 +1,8 @@
+availability_zone: '{{ ec2_availability_zone_names[0] }}'
+
+vpc_name: '{{ resource_prefix }}-vpc'
+vpc_seed: '{{ resource_prefix }}'
+vpc_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.0.0/16'
+subnet_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.32.0/24'
+
+instance_name: '{{ resource_prefix }}-instance'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/meta/main.yml
new file mode 100644
index 000000000..2bff8543a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+- role: setup_ec2_facts
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/tasks/main.yml
new file mode 100644
index 000000000..0b77b1571
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/tasks/main.yml
@@ -0,0 +1,1002 @@
+---
+- module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key | default(omit) }}'
+ aws_secret_key: '{{ aws_secret_key | default(omit) }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region | default(omit) }}'
+
+ collections:
+ - amazon.aws
+ - community.aws
+
+ block:
+
+ - name: Create a test VPC
+ ec2_vpc_net:
+ name: "{{ vpc_name }}"
+ cidr_block: "{{ vpc_cidr }}"
+ tags:
+ Name: ec2_vol testing
+ ResourcePrefix: "{{ resource_prefix }}"
+ register: testing_vpc
+
+ - name: Create a test subnet
+ ec2_vpc_subnet:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: "{{ subnet_cidr }}"
+ tags:
+ Name: ec2_vol testing
+ ResourcePrefix: "{{ resource_prefix }}"
+ az: '{{ availability_zone }}'
+ register: testing_subnet
+
+ - name: create an ec2 instance
+ ec2_instance:
+ name: "{{ instance_name }}"
+ vpc_subnet_id: "{{ testing_subnet.subnet.id }}"
+ instance_type: t3.nano
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ ResourcePrefix: "{{ resource_prefix }}"
+ register: test_instance
+
+ - name: check task return attributes
+ assert:
+ that:
+ - test_instance.changed
+
+ - name: create another ec2 instance
+ ec2_instance:
+ name: "{{ instance_name }}-2"
+ vpc_subnet_id: "{{ testing_subnet.subnet.id }}"
+ instance_type: t3.nano
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ ResourcePrefix: "{{ resource_prefix }}"
+ register: test_instance_2
+
+ - name: check task return attributes
+ assert:
+ that:
+ - test_instance_2.changed
+
+ - name: create another ec2 instance
+ ec2_instance:
+ name: "{{ instance_name }}-3"
+ vpc_subnet_id: "{{ testing_subnet.subnet.id }}"
+ instance_type: t3.nano
+ image_id: "{{ ec2_ami_id }}"
+ tags:
+ ResourcePrefix: "{{ resource_prefix }}"
+ register: test_instance_3
+
+ - name: check task return attributes
+ assert:
+ that:
+ - test_instance_3.changed
+
+  # ==== ec2_vol tests ===============================================
+
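+  # With no volume_type, encryption or name specified, ec2_vol falls back to its defaults;
+  # the first asserts below pin these down: a 'standard', unencrypted volume with no Name tag.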
+ - name: create a volume (validate module defaults - check_mode)
+ ec2_vol:
+ volume_size: 1
+ zone: "{{ availability_zone }}"
+ tags:
+ ResourcePrefix: "{{ resource_prefix }}"
+ check_mode: true
+ register: volume1_check_mode
+
+ - assert:
+ that:
+ - volume1_check_mode is changed
+
+
+ - name: create a volume (validate module defaults)
+ ec2_vol:
+ volume_size: 1
+ zone: "{{ availability_zone }}"
+ tags:
+ ResourcePrefix: "{{ resource_prefix }}"
+ register: volume1
+
+ - name: check task return attributes
+ assert:
+ that:
+ - volume1.changed
+ - "'volume' in volume1"
+ - "'volume_id' in volume1"
+ - "'volume_type' in volume1"
+ - "'device' in volume1"
+ - volume1.volume.status == 'available'
+ - volume1.volume_type == 'standard'
+ - "'attachment_set' in volume1.volume"
+ - volume1.volume.attachment_set | length == 0
+ - not ("Name" in volume1.volume.tags)
+ - not volume1.volume.encrypted
+ - volume1.volume.tags.ResourcePrefix == "{{ resource_prefix }}"
+
+ # no idempotency check needed here
+
+ - name: create another volume (override module defaults)
+ ec2_vol:
+ encrypted: yes
+ volume_size: 4
+ volume_type: io1
+ iops: 101
+ name: "{{ resource_prefix }}"
+ tags:
+ ResourcePrefix: "{{ resource_prefix }}"
+ zone: "{{ availability_zone }}"
+ register: volume2
+
+ - name: check task return attributes
+ assert:
+ that:
+ - volume2.changed
+ - "'volume' in volume2"
+ - "'volume_id' in volume2"
+ - "'volume_type' in volume2"
+ - "'device' in volume2"
+ - volume2.volume.status == 'available'
+ - volume2.volume_type == 'io1'
+ - volume2.volume.iops == 101
+ - volume2.volume.size == 4
+ - volume2.volume.tags.Name == "{{ resource_prefix }}"
+ - volume2.volume.encrypted
+ - volume2.volume.tags.ResourcePrefix == "{{ resource_prefix }}"
+
+ - name: create another volume (override module defaults) (idempotent)
+ ec2_vol:
+ encrypted: yes
+ volume_size: 4
+ volume_type: io1
+ iops: 101
+ name: "{{ resource_prefix }}"
+ tags:
+ ResourcePrefix: "{{ resource_prefix }}"
+ zone: "{{ availability_zone }}"
+ register: volume2_idem
+
+ - name: check task return attributes
+ assert:
+ that:
+ - not volume2_idem.changed
+
+ - name: create snapshot from volume
+ ec2_snapshot:
+ volume_id: "{{ volume1.volume_id }}"
+ description: "Resource Prefix - {{ resource_prefix }}"
+ snapshot_tags:
+ ResourcePrefix: "{{ resource_prefix }}"
+ register: vol1_snapshot
+
+ - name: check task return attributes
+ assert:
+ that:
+ - vol1_snapshot.changed
+
+ - name: create a volume from a snapshot (check_mode)
+ ec2_vol:
+ snapshot: "{{ vol1_snapshot.snapshot_id }}"
+ encrypted: yes
+ volume_type: gp2
+ volume_size: 1
+ zone: "{{ availability_zone }}"
+ tags:
+ ResourcePrefix: "{{ resource_prefix }}"
+ check_mode: true
+ register: volume3_check_mode
+
+ - name: check task return attributes
+ assert:
+ that:
+ - volume3_check_mode.changed
+
+ - name: create a volume from a snapshot
+ ec2_vol:
+ snapshot: "{{ vol1_snapshot.snapshot_id }}"
+ encrypted: yes
+ volume_type: gp2
+ volume_size: 1
+ zone: "{{ availability_zone }}"
+ tags:
+ ResourcePrefix: "{{ resource_prefix }}"
+ register: volume3
+
+ - name: check task return attributes
+ assert:
+ that:
+ - volume3.changed
+ - "volume3.volume.snapshot_id == vol1_snapshot.snapshot_id"
+
+ - name: Wait for instance to start
+ ec2_instance:
+ state: running
+ instance_ids: "{{ test_instance.instance_ids }}"
+ wait: True
+
+ - name: attach existing volume to an instance (check_mode)
+ ec2_vol:
+ id: "{{ volume1.volume_id }}"
+ instance: "{{ test_instance.instance_ids[0] }}"
+ device_name: /dev/sdg
+ delete_on_termination: no
+ check_mode: true
+ register: vol_attach_result_check_mode
+
+ - assert:
+ that:
+ - vol_attach_result_check_mode is changed
+
+ - name: attach existing volume to an instance
+ ec2_vol:
+ id: "{{ volume1.volume_id }}"
+ instance: "{{ test_instance.instance_ids[0] }}"
+ device_name: /dev/sdg
+ delete_on_termination: no
+ register: vol_attach_result
+
+ - name: check task return attributes
+ assert:
+ that:
+ - vol_attach_result.changed
+ - "'device' in vol_attach_result and vol_attach_result.device == '/dev/sdg'"
+ - "'volume' in vol_attach_result"
+
+ # There's a delay between the volume being "In Use", and the attachment being reported. This
+ # can result in a race condition on the results. (There's no clean waiter to use either)
+ - name: wait for volume to report attached/attaching
+ ec2_vol_info:
+ filters:
+ volume-id: '{{ volume1.volume_id }}'
+ register: vol_attach_info
+ until:
+ - vol_attach_info.volumes[0].attachment_set | length >=1
+ retries: 5
+ delay: 2
+
+ - assert:
+ that:
+ - vol_attach_info.volumes[0].attachment_set[0].status in ['attached', 'attaching']
+ - vol_attach_info.volumes[0].attachment_set[0].instance_id == test_instance.instance_ids[0]
+ - vol_attach_info.volumes[0].attachment_set[0].device == '/dev/sdg'
+ - not vol_attach_info.volumes[0].attachment_set[0].delete_on_termination
+
+ - name: attach existing volume to an instance (idempotent - check_mode)
+ ec2_vol:
+ id: "{{ volume1.volume_id }}"
+ instance: "{{ test_instance.instance_ids[0] }}"
+ device_name: /dev/sdg
+ delete_on_termination: no
+ check_mode: true
+ register: vol_attach_result_check_mode
+
+ - assert:
+ that:
+ - vol_attach_result_check_mode is not changed
+
+ - name: attach existing volume to an instance (idempotent)
+ ec2_vol:
+ id: "{{ volume1.volume_id }}"
+ instance: "{{ test_instance.instance_ids[0] }}"
+ device_name: /dev/sdg
+ delete_on_termination: no
+ register: vol_attach_result
+
+ - name: check task return attributes
+ assert:
+ that:
+ - "not vol_attach_result.changed"
+ - vol_attach_result.volume.attachment_set[0].status in ['attached', 'attaching']
+
+ - name: attach a new volume to an instance (check_mode)
+ ec2_vol:
+ instance: "{{ test_instance.instance_ids[0] }}"
+ device_name: /dev/sdh
+ volume_size: 1
+ volume_type: gp2
+ name: '{{ resource_prefix }} - sdh'
+ tags:
+ "lowercase spaced": 'hello cruel world'
+ "Title Case": 'Hello Cruel World'
+ CamelCase: 'SimpleCamelCase'
+ snake_case: 'simple_snake_case'
+ ResourcePrefix: "{{ resource_prefix }}"
+ check_mode: true
+ register: new_vol_attach_result_check_mode
+
+ - assert:
+ that:
+ - new_vol_attach_result_check_mode is changed
+
+ - name: attach a new volume to an instance
+ ec2_vol:
+ instance: "{{ test_instance.instance_ids[0] }}"
+ device_name: /dev/sdh
+ volume_size: 1
+ volume_type: standard
+ name: '{{ resource_prefix }} - sdh'
+ tags:
+ "lowercase spaced": 'hello cruel world'
+ "Title Case": 'Hello Cruel World'
+ CamelCase: 'SimpleCamelCase'
+ snake_case: 'simple_snake_case'
+ ResourcePrefix: "{{ resource_prefix }}"
+ register: new_vol_attach_result
+
+ - name: check task return attributes
+ assert:
+ that:
+ - new_vol_attach_result.changed
+ - "'device' in new_vol_attach_result and new_vol_attach_result.device == '/dev/sdh'"
+ - "'volume' in new_vol_attach_result"
+ - new_vol_attach_result.volume.attachment_set[0].status in ['attached', 'attaching']
+ - new_vol_attach_result.volume.attachment_set[0].instance_id == test_instance.instance_ids[0]
+ - new_vol_attach_result.volume.attachment_set[0].device == '/dev/sdh'
+ - new_vol_attach_result.volume.tags["lowercase spaced"] == 'hello cruel world'
+ - new_vol_attach_result.volume.tags["Title Case"] == 'Hello Cruel World'
+ - new_vol_attach_result.volume.tags["CamelCase"] == 'SimpleCamelCase'
+ - new_vol_attach_result.volume.tags["snake_case"] == 'simple_snake_case'
+ - new_vol_attach_result.volume.tags["Name"] == '{{ resource_prefix }} - sdh'
+
+ - name: attach a new volume to an instance (idempotent - check_mode)
+ ec2_vol:
+ instance: "{{ test_instance.instance_ids[0] }}"
+ device_name: /dev/sdh
+ volume_size: 1
+ volume_type: standard
+ tags:
+ ResourcePrefix: "{{ resource_prefix }}"
+ check_mode: true
+ register: new_vol_attach_result_idem_check_mode
+ ignore_errors: true
+
+ - assert:
+ that:
+ - new_vol_attach_result_idem_check_mode is not changed
+
+ - name: attach a new volume to an instance (idempotent)
+ ec2_vol:
+ instance: "{{ test_instance.instance_ids[0] }}"
+ device_name: /dev/sdh
+ volume_size: 1
+ volume_type: standard
+ tags:
+ ResourcePrefix: "{{ resource_prefix }}"
+ register: new_vol_attach_result_idem
+ ignore_errors: true
+
+ - name: check task return attributes
+ assert:
+ that:
+ - "not new_vol_attach_result_idem.changed"
+ - "'Volume mapping for /dev/sdh already exists' in new_vol_attach_result_idem.msg"
+
+ - name: change some tag values
+ ec2_vol:
+ instance: "{{ test_instance.instance_ids[0] }}"
+ id: "{{ new_vol_attach_result.volume.id }}"
+ device_name: /dev/sdh
+ volume_size: 1
+ volume_type: standard
+ tags:
+ "lowercase spaced": 'hello cruel world ❤️'
+ "Title Case": 'Hello Cruel World ❤️'
+ CamelCase: 'SimpleCamelCase ❤️'
+ snake_case: 'simple_snake_case ❤️'
+ purge_tags: false
+ register: new_vol_attach_result
+
+ - name: check task return attributes
+ assert:
+ that:
+ - new_vol_attach_result.changed
+ - "'volume_id' in new_vol_attach_result"
+ - new_vol_attach_result.volume_id == "{{ new_vol_attach_result.volume_id }}"
+ - "'attachment_set' in new_vol_attach_result.volume"
+ - "'create_time' in new_vol_attach_result.volume"
+ - "'id' in new_vol_attach_result.volume"
+ - "'size' in new_vol_attach_result.volume"
+ - new_vol_attach_result.volume.size == 1
+ - "'volume_type' in new_vol_attach_result"
+ - new_vol_attach_result.volume_type == 'standard'
+ - "'tags' in new_vol_attach_result.volume"
+ - (new_vol_attach_result.volume.tags | length) == 6
+ - new_vol_attach_result.volume.tags["lowercase spaced"] == 'hello cruel world ❤️'
+ - new_vol_attach_result.volume.tags["Title Case"] == 'Hello Cruel World ❤️'
+ - new_vol_attach_result.volume.tags["CamelCase"] == 'SimpleCamelCase ❤️'
+ - new_vol_attach_result.volume.tags["snake_case"] == 'simple_snake_case ❤️'
+ - new_vol_attach_result.volume.tags["ResourcePrefix"] == resource_prefix
+ - new_vol_attach_result.volume.tags["Name"] == '{{ resource_prefix }} - sdh'
+
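+    # The update above used purge_tags: false, so the new tag values were merged into the
+    # existing set (6 tags in total). The next task repeats the update with purge_tags: true,
+    # so any tag not listed there (CamelCase, Name) is removed.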
+    - name: change some tag values (purge unlisted tags)
+ ec2_vol:
+ instance: "{{ test_instance.instance_ids[0] }}"
+ id: "{{ new_vol_attach_result.volume.id }}"
+ device_name: /dev/sdh
+ volume_size: 1
+ volume_type: standard
+ tags:
+ "lowercase spaced": 'hello cruel world ❤️'
+ "Title Case": 'Hello Cruel World ❤️'
+ snake_case: 'simple_snake_case ❤️'
+ ResourcePrefix: "{{ resource_prefix }}"
+ purge_tags: true
+ register: new_vol_attach_result
+
+ - name: check task return attributes
+ assert:
+ that:
+ - new_vol_attach_result.changed
+ - "'volume_id' in new_vol_attach_result"
+ - new_vol_attach_result.volume_id == "{{ new_vol_attach_result.volume_id }}"
+ - "'attachment_set' in new_vol_attach_result.volume"
+ - "'create_time' in new_vol_attach_result.volume"
+ - "'id' in new_vol_attach_result.volume"
+ - "'size' in new_vol_attach_result.volume"
+ - new_vol_attach_result.volume.size == 1
+ - "'volume_type' in new_vol_attach_result"
+ - new_vol_attach_result.volume_type == 'standard'
+ - "'tags' in new_vol_attach_result.volume"
+ - (new_vol_attach_result.volume.tags | length) == 4
+ - new_vol_attach_result.volume.tags["lowercase spaced"] == 'hello cruel world ❤️'
+ - new_vol_attach_result.volume.tags["Title Case"] == 'Hello Cruel World ❤️'
+ - new_vol_attach_result.volume.tags["snake_case"] == 'simple_snake_case ❤️'
+ - new_vol_attach_result.volume.tags["ResourcePrefix"] == resource_prefix
+
+ - name: create a volume from a snapshot and attach to the instance (check_mode)
+ ec2_vol:
+ instance: "{{ test_instance.instance_ids[0] }}"
+ device_name: /dev/sdi
+ snapshot: "{{ vol1_snapshot.snapshot_id }}"
+ tags:
+ ResourcePrefix: "{{ resource_prefix }}"
+ check_mode: true
+ register: attach_new_vol_from_snapshot_result_check_mode
+
+ - assert:
+ that:
+ - attach_new_vol_from_snapshot_result_check_mode is changed
+
+
+ - name: create a volume from a snapshot and attach to the instance
+ ec2_vol:
+ instance: "{{ test_instance.instance_ids[0] }}"
+ device_name: /dev/sdi
+ snapshot: "{{ vol1_snapshot.snapshot_id }}"
+ tags:
+ ResourcePrefix: "{{ resource_prefix }}"
+ register: attach_new_vol_from_snapshot_result
+
+ - name: check task return attributes
+ assert:
+ that:
+ - attach_new_vol_from_snapshot_result.changed
+ - "'device' in attach_new_vol_from_snapshot_result and attach_new_vol_from_snapshot_result.device == '/dev/sdi'"
+ - "'volume' in attach_new_vol_from_snapshot_result"
+ - attach_new_vol_from_snapshot_result.volume.attachment_set[0].status in ['attached', 'attaching']
+ - attach_new_vol_from_snapshot_result.volume.attachment_set[0].instance_id == test_instance.instance_ids[0]
+
+ - name: get info on ebs volumes
+ ec2_vol_info:
+ register: ec2_vol_info
+
+ - name: check task return attributes
+ assert:
+ that:
+ - not ec2_vol_info.failed
+
+ - name: get info on ebs volumes
+ ec2_vol_info:
+ filters:
+ attachment.instance-id: "{{ test_instance.instance_ids[0] }}"
+ register: ec2_vol_info
+
+ - name: check task return attributes
+ assert:
+ that:
+ - ec2_vol_info.volumes | length == 4
+
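+    # ec2_vol only alters an existing volume's type/size/iops when modify_volume is set,
+    # so the following task must report no change.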
+    - name: volume type must not change without the modify_volume parameter
+ ec2_vol:
+ id: "{{ new_vol_attach_result.volume_id }}"
+ zone: "{{ availability_zone }}"
+ volume_type: gp3
+ register: changed_gp3_volume
+
+    - name: volume must not have changed
+ assert:
+ that:
+ - not changed_gp3_volume.changed
+
+ - name: change existing volume to gp3 (check_mode)
+ ec2_vol:
+ id: "{{ new_vol_attach_result.volume_id }}"
+ zone: "{{ availability_zone }}"
+ volume_type: gp3
+ modify_volume: yes
+ check_mode: true
+ register: changed_gp3_volume_check_mode
+
+ - assert:
+ that:
+ - changed_gp3_volume_check_mode is changed
+
+ - name: change existing volume to gp3
+ ec2_vol:
+ id: "{{ new_vol_attach_result.volume_id }}"
+ zone: "{{ availability_zone }}"
+ volume_type: gp3
+ modify_volume: yes
+ register: changed_gp3_volume
+
+ - name: check that volume_type has changed
+ assert:
+ that:
+ - changed_gp3_volume.changed
+ - "'volume_id' in changed_gp3_volume"
+ - changed_gp3_volume.volume_id == "{{ new_vol_attach_result.volume_id }}"
+ - "'attachment_set' in changed_gp3_volume.volume"
+ - "'create_time' in changed_gp3_volume.volume"
+ - "'id' in changed_gp3_volume.volume"
+ - "'size' in changed_gp3_volume.volume"
+ - "'volume_type' in changed_gp3_volume"
+ - changed_gp3_volume.volume_type == 'gp3'
+ - "'iops' in changed_gp3_volume.volume"
+ - changed_gp3_volume.volume.iops == 3000
+ # Ensure our tags are still here
+ - "'tags' in changed_gp3_volume.volume"
+ - (changed_gp3_volume.volume.tags | length) == 4
+ - new_vol_attach_result.volume.tags["lowercase spaced"] == 'hello cruel world ❤️'
+ - new_vol_attach_result.volume.tags["Title Case"] == 'Hello Cruel World ❤️'
+ - new_vol_attach_result.volume.tags["snake_case"] == 'simple_snake_case ❤️'
+ - new_vol_attach_result.volume.tags["ResourcePrefix"] == resource_prefix
+
+    - name: volume must be of type gp3 (idempotent)
+ ec2_vol:
+ id: "{{ new_vol_attach_result.volume_id }}"
+ zone: "{{ availability_zone }}"
+ volume_type: gp3
+ modify_volume: yes
+ register: changed_gp3_volume
+ retries: 10
+ delay: 3
+ until: not changed_gp3_volume.failed
+      # retry because the EBS modification can be slow to complete
+
+    - name: must not have changed (idempotent)
+ assert:
+ that:
+ - not changed_gp3_volume.changed
+ - "'volume_id' in changed_gp3_volume"
+ - changed_gp3_volume.volume_id == "{{ new_vol_attach_result.volume_id }}"
+ - "'attachment_set' in changed_gp3_volume.volume"
+ - "'create_time' in changed_gp3_volume.volume"
+ - "'id' in changed_gp3_volume.volume"
+ - "'size' in changed_gp3_volume.volume"
+ - "'volume_type' in changed_gp3_volume"
+ - changed_gp3_volume.volume_type == 'gp3'
+ - "'iops' in changed_gp3_volume.volume"
+ - changed_gp3_volume.volume.iops == 3000
+ - "'throughput' in changed_gp3_volume.volume"
+ - "'tags' in changed_gp3_volume.volume"
+ - (changed_gp3_volume.volume.tags | length) == 4
+ - new_vol_attach_result.volume.tags["lowercase spaced"] == 'hello cruel world ❤️'
+ - new_vol_attach_result.volume.tags["Title Case"] == 'Hello Cruel World ❤️'
+ - new_vol_attach_result.volume.tags["snake_case"] == 'simple_snake_case ❤️'
+ - new_vol_attach_result.volume.tags["ResourcePrefix"] == resource_prefix
+
+ - name: re-read volume information to validate new volume_type
+ ec2_vol_info:
+ filters:
+ volume-id: "{{ changed_gp3_volume.volume_id }}"
+ register: verify_gp3_change
+
+ - name: volume type must be gp3
+ assert:
+ that:
+ - v.type == 'gp3'
+ vars:
+ v: "{{ verify_gp3_change.volumes[0] }}"
+
+ - name: detach volume from the instance (check_mode)
+ ec2_vol:
+ id: "{{ new_vol_attach_result.volume_id }}"
+ instance: ""
+ check_mode: true
+ register: new_vol_attach_result_check_mode
+
+ - assert:
+ that:
+ - new_vol_attach_result_check_mode is changed
+
+ - name: detach volume from the instance
+ ec2_vol:
+ id: "{{ new_vol_attach_result.volume_id }}"
+ instance: ""
+ register: new_vol_attach_result
+
+ - name: check task return attributes
+ assert:
+ that:
+ - new_vol_attach_result.changed
+ - new_vol_attach_result.volume.status == 'available'
+
+    - name: detach volume from the instance (idempotent - check_mode)
+      ec2_vol:
+        id: "{{ new_vol_attach_result.volume_id }}"
+        instance: ""
+      check_mode: true
+      register: new_vol_attach_result_idem_check_mode
+
+ - name: check task return attributes
+ assert:
+ that:
+ - not new_vol_attach_result_idem_check_mode.changed
+
+ - name: detach volume from the instance (idempotent)
+ ec2_vol:
+ id: "{{ new_vol_attach_result.volume_id }}"
+ instance: ""
+ register: new_vol_attach_result_idem
+
+ - name: check task return attributes
+ assert:
+ that:
+ - not new_vol_attach_result_idem.changed
+
+ - name: delete volume (check_mode)
+ ec2_vol:
+ id: "{{ volume2.volume_id }}"
+ state: absent
+ check_mode: true
+ register: delete_volume_result_check_mode
+
+ - assert:
+ that:
+ - delete_volume_result_check_mode is changed
+
+ - name: delete volume
+ ec2_vol:
+ id: "{{ volume2.volume_id }}"
+ state: absent
+ register: delete_volume_result
+
+ - name: check task return attributes
+ assert:
+ that:
+ - "delete_volume_result.changed"
+
+ - name: delete volume (idempotent - check_mode)
+ ec2_vol:
+ id: "{{ volume2.volume_id }}"
+ state: absent
+ check_mode: true
+ register: delete_volume_result_check_mode
+
+ - assert:
+ that:
+ - delete_volume_result_check_mode is not changed
+
+ - name: delete volume (idempotent)
+ ec2_vol:
+ id: "{{ volume2.volume_id }}"
+ state: absent
+ register: delete_volume_result_idem
+
+ - name: check task return attributes
+ assert:
+ that:
+ - not delete_volume_result_idem.changed
+ - '"Volume {{ volume2.volume_id }} does not exist" in delete_volume_result_idem.msg'
+
+ # Originally from ec2_vol_info
+
+    - name: Create test volume with Delete on Termination
+ ec2_vol:
+ instance: "{{ test_instance.instance_ids[0] }}"
+ volume_size: 4
+ name: "{{ resource_prefix }}_delete_on_terminate"
+ device_name: /dev/sdj
+ volume_type: io1
+ iops: 100
+ tags:
+ Tag Name with Space-and-dash: Tag Value with Space-and-dash
+ delete_on_termination: yes
+ register: dot_volume
+
+ - name: check task return attributes
+ assert:
+ that:
+ - dot_volume.changed
+ - "'attachment_set' in dot_volume.volume"
+ - "'delete_on_termination' in dot_volume.volume.attachment_set[0]"
+ - "'create_time' in dot_volume.volume"
+ - "'id' in dot_volume.volume"
+ - "'size' in dot_volume.volume"
+ - dot_volume.volume.size == 4
+ - "'volume_type' in dot_volume"
+ - dot_volume.volume_type == 'io1'
+ - "'iops' in dot_volume.volume"
+ - dot_volume.volume.iops == 100
+ - "'tags' in dot_volume.volume"
+ - (dot_volume.volume.tags | length ) == 2
+ - dot_volume.volume.tags["Name"] == "{{ resource_prefix }}_delete_on_terminate"
+ - dot_volume.volume.tags["Tag Name with Space-and-dash"] == 'Tag Value with Space-and-dash'
+
+ - name: Gather volume info without any filters
+ ec2_vol_info:
+ register: volume_info_wo_filters
+ check_mode: no
+
+    - name: Check if info is returned without filters
+ assert:
+ that:
+ - "volume_info_wo_filters.volumes is defined"
+
+ - name: Gather volume info
+ ec2_vol_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}_delete_on_terminate"
+ register: volume_info
+ check_mode: no
+
+ - name: Format check
+ assert:
+ that:
+ - "volume_info.volumes|length == 1"
+ - "v.attachment_set[0].attach_time is defined"
+ - "v.attachment_set[0].device is defined and v.attachment_set[0].device == dot_volume.device"
+ - "v.attachment_set[0].instance_id is defined and v.attachment_set[0].instance_id == test_instance.instance_ids[0]"
+ - "v.attachment_set[0].status is defined and v.attachment_set[0].status == 'attached'"
+ - "v.create_time is defined"
+ - "v.encrypted is defined and v.encrypted == false"
+ - "v.id is defined and v.id == dot_volume.volume_id"
+ - "v.iops is defined and v.iops == 100"
+ - "v.region is defined and v.region == aws_region"
+ - "v.size is defined and v.size == 4"
+ - "v.snapshot_id is defined and v.snapshot_id == ''"
+ - "v.status is defined and v.status == 'in-use'"
+ - "v.tags.Name is defined and v.tags.Name == resource_prefix + '_delete_on_terminate'"
+ - "v.tags['Tag Name with Space-and-dash'] == 'Tag Value with Space-and-dash'"
+ - "v.type is defined and v.type == 'io1'"
+ - "v.zone is defined and v.zone == test_instance.instances[0].placement.availability_zone"
+ vars:
+ v: "{{ volume_info.volumes[0] }}"
+
+ - name: New format check
+ assert:
+ that:
+ - "v.attachment_set[0].delete_on_termination is defined"
+ vars:
+ v: "{{ volume_info.volumes[0] }}"
+ when: ansible_version.full is version('2.7', '>=')
+
+ - name: test create a new gp3 volume
+ ec2_vol:
+ volume_size: 70
+ zone: "{{ availability_zone }}"
+ volume_type: gp3
+ throughput: 130
+ iops: 3001
+ name: "GP3-TEST-{{ resource_prefix }}"
+ tags:
+ ResourcePrefix: "{{ resource_prefix }}"
+ register: gp3_volume
+
+ - name: check that volume_type is gp3
+ assert:
+ that:
+ - gp3_volume.changed
+ - "'attachment_set' in gp3_volume.volume"
+ - "'create_time' in gp3_volume.volume"
+ - "'id' in gp3_volume.volume"
+ - "'size' in gp3_volume.volume"
+ - gp3_volume.volume.size == 70
+ - "'volume_type' in gp3_volume"
+ - gp3_volume.volume_type == 'gp3'
+ - "'iops' in gp3_volume.volume"
+ - gp3_volume.volume.iops == 3001
+ - "'throughput' in gp3_volume.volume"
+ - gp3_volume.volume.throughput == 130
+ - "'tags' in gp3_volume.volume"
+ - (gp3_volume.volume.tags | length ) == 2
+ - gp3_volume.volume.tags["ResourcePrefix"] == "{{ resource_prefix }}"
+
+ - name: Read volume information to validate throughput
+ ec2_vol_info:
+ filters:
+ volume-id: "{{ gp3_volume.volume_id }}"
+ register: verify_throughput
+
+ - name: throughput must be equal to 130
+ assert:
+ that:
+ - v.throughput == 130
+ vars:
+ v: "{{ verify_throughput.volumes[0] }}"
+
+    - name: print out the volume info used for the throughput check
+      debug:
+        var: verify_throughput
+
+ - name: increase throughput
+ ec2_vol:
+ volume_size: 70
+ zone: "{{ availability_zone }}"
+ volume_type: gp3
+ throughput: 131
+ modify_volume: yes
+ name: "GP3-TEST-{{ resource_prefix }}"
+ tags:
+ ResourcePrefix: "{{ resource_prefix }}"
+ register: gp3_volume
+
+ - name: check that throughput has changed
+ assert:
+ that:
+ - gp3_volume.changed
+ - "'create_time' in gp3_volume.volume"
+ - "'id' in gp3_volume.volume"
+ - "'size' in gp3_volume.volume"
+ - gp3_volume.volume.size == 70
+ - "'volume_type' in gp3_volume"
+ - gp3_volume.volume_type == 'gp3'
+ - "'iops' in gp3_volume.volume"
+ - gp3_volume.volume.iops == 3001
+ - "'throughput' in gp3_volume.volume"
+ - gp3_volume.volume.throughput == 131
+
+ # Multi-Attach disk
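+    # (EBS Multi-Attach is only supported on Provisioned IOPS volumes, hence io1 below)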
+ - name: create disk with multi-attach enabled
+ ec2_vol:
+ volume_size: 4
+ volume_type: io1
+ iops: 102
+ zone: "{{ availability_zone }}"
+ multi_attach: yes
+ tags:
+ ResourcePrefix: "{{ resource_prefix }}"
+ register: multi_attach_disk
+
+ - name: check volume creation
+ assert:
+ that:
+ - multi_attach_disk.changed
+ - "'volume' in multi_attach_disk"
+ - multi_attach_disk.volume.multi_attach_enabled
+
+ - name: attach existing volume to an instance
+ ec2_vol:
+ id: "{{ multi_attach_disk.volume_id }}"
+ instance: "{{ test_instance.instance_ids[0] }}"
+ device_name: /dev/sdk
+ delete_on_termination: no
+ register: vol_attach_result
+
+ - name: Wait for instance to start
+ ec2_instance:
+ state: running
+ instance_ids: "{{ test_instance_2.instance_ids }}"
+ wait: True
+
+ - name: attach existing volume to second instance
+ ec2_vol:
+ id: "{{ multi_attach_disk.volume_id }}"
+ instance: "{{ test_instance_2.instance_ids[0] }}"
+ device_name: /dev/sdg
+ delete_on_termination: no
+ register: vol_attach_result
+
+ - name: check task return attributes
+ assert:
+ that:
+ - vol_attach_result.changed
+ - "'volume' in vol_attach_result"
+ - vol_attach_result.volume.attachment_set | length == 2
+ - 'test_instance.instance_ids[0] in vol_attach_result.volume.attachment_set | map(attribute="instance_id") | list'
+ - 'test_instance_2.instance_ids[0] in vol_attach_result.volume.attachment_set | map(attribute="instance_id") | list'
+
+ - name: create a volume without tags
+ ec2_vol:
+ volume_size: 5
+ zone: "{{ availability_zone }}"
+ instance: "{{ test_instance_3.instance_ids[0] }}"
+ register: volume_without_tag
+
+ - assert:
+ that:
+ - volume_without_tag.changed
+
+ # idempotency check without tags
+ - name: create a volume without tags (idempotency check)
+ ec2_vol:
+ volume_size: 5
+ zone: "{{ availability_zone }}"
+ instance: "{{ test_instance_3.instance_ids[0] }}"
+ register: volume_without_tag
+
+ - assert:
+ that:
+ - not volume_without_tag.changed
+ # ==== Cleanup ============================================================
+
+ always:
+ - name: Describe the instance before we delete it
+ ec2_instance_info:
+ instance_ids:
+ - "{{ item }}"
+ ignore_errors: yes
+ with_items:
+ - "{{ test_instance.instance_ids[0] }}"
+ - "{{ test_instance_2.instance_ids[0] }}"
+ - "{{ test_instance_3.instance_ids[0] }}"
+ register: pre_delete
+
+ - debug:
+ var: pre_delete
+
+ - name: delete test instance
+ ec2_instance:
+ instance_ids:
+ - "{{ item }}"
+ state: terminated
+ wait: True
+ with_items:
+ - "{{ test_instance.instance_ids[0] }}"
+ - "{{ test_instance_2.instance_ids[0] }}"
+ - "{{ test_instance_3.instance_ids[0] }}"
+ ignore_errors: yes
+
+ - name: delete volumes
+ ec2_vol:
+ id: "{{ item.volume_id }}"
+ state: absent
+ ignore_errors: yes
+ with_items:
+ - "{{ volume1 }}"
+ - "{{ volume2 }}"
+ - "{{ volume3 }}"
+ - "{{ new_vol_attach_result }}"
+ - "{{ attach_new_vol_from_snapshot_result }}"
+ - "{{ dot_volume }}"
+ - "{{ gp3_volume }}"
+ - "{{ multi_attach_disk }}"
+ - "{{ volume_without_tag }}"
+
+ - name: delete snapshot
+ ec2_snapshot:
+ snapshot_id: "{{ vol1_snapshot.snapshot_id }}"
+ state: absent
+ ignore_errors: yes
+
+ - name: delete test subnet
+ ec2_vpc_subnet:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: "{{ subnet_cidr }}"
+ state: absent
+ ignore_errors: yes
+
+ - name: delete test VPC
+ ec2_vpc_net:
+ name: "{{ vpc_name }}"
+ cidr_block: "{{ vpc_cidr }}"
+ state: absent
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/aliases
new file mode 100644
index 000000000..4ef4b2067
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/aliases
@@ -0,0 +1 @@
+cloud/aws
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/defaults/main.yml
new file mode 100644
index 000000000..26403c17d
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+# defaults file for ec2_dhcp_option_info tests
+vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/24'
+# default option sets get an AWS domain_name, which is different in us-east-1
+aws_domain_name: "{{ (aws_region == 'us-east-1') | ternary('ec2.internal', aws_region + '.compute.internal') }}" \ No newline at end of file
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/tasks/main.yml
new file mode 100644
index 000000000..5441e4f7f
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/tasks/main.yml
@@ -0,0 +1,948 @@
+---
+# ============================================================
+# Known issues:
+#
+# there is no way to associate the `default` option set in the module
+# The module doesn't store/return tags in the new_options dictionary
+# always reassociated (changed=True) when vpc_id is provided without options
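+# (the tests below work around the first issue by recording the default set's ID up front
+#  and re-associating it explicitly by dhcp_options_id during cleanup)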
+#
+# ============================================================
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default('') }}"
+ region: "{{ aws_region }}"
+
+ block:
+
+  # A DHCP option set can be attached to multiple VPCs; we don't want to use any
+  # that don't belong to this test run
+ - name: find all DHCP option sets that already exist before running tests
+ ec2_vpc_dhcp_option_info:
+ register: result
+
+ - set_fact:
+ preexisting_option_sets: "{{ result.dhcp_options | map(attribute='dhcp_options_id') | list }}"
+
+ - name: create a VPC with a default DHCP option set to test inheritance and delete_old
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}"
+ cidr_block: "{{ vpc_cidr }}"
+ state: present
+ register: vpc
+
+ - name: ensure a DHCP option set is attached to the VPC
+ assert:
+ that:
+ - vpc.vpc.dhcp_options_id is defined
+
+ - set_fact:
+ vpc_id: "{{ vpc.vpc.id }}"
+ default_options_id: "{{ vpc.vpc.dhcp_options_id }}"
+
+## ============================================
+  - name: Option Sets can be attached to multiple VPCs; create a new one if the test VPC is reusing a pre-existing one
+ when: vpc.vpc.dhcp_options_id in preexisting_option_sets
+ block:
+ - name: Create the new option set
+ ec2_vpc_dhcp_option:
+ state: present
+ domain_name: "{{ aws_domain_name }}"
+ dns_servers:
+ - AmazonProvidedDNS
+ delete_old: True
+ tags:
+ Name: "{{ resource_prefix }}"
+ register: new_dhcp_options
+
+ - assert:
+ that:
+ - new_dhcp_options.dhcp_options_id not in preexisting_option_sets
+
+ - name: Attach the new option set to the VPC
+ ec2_vpc_dhcp_option:
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ purge_tags: False
+ dhcp_options_id: "{{ new_dhcp_options.dhcp_options_id }}"
+## ============================================
+
+ - name: find the VPC's associated option set
+ ec2_vpc_net_info:
+ vpc_ids: "{{ vpc_id }}"
+ register: vpc_info
+
+ - set_fact:
+ original_dhcp_options_id: "{{ vpc_info.vpcs[0].dhcp_options_id }}"
+
+ - name: get information about the DHCP option
+ ec2_vpc_dhcp_option_info:
+ dhcp_options_ids: ["{{ original_dhcp_options_id }}"]
+ register: original_dhcp_options_info
+
+ - set_fact:
+ original_config: "{{ original_dhcp_options_info.dhcp_options[0].dhcp_configurations | items2dict(key_name='key', value_name='values') }}"
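+
+  # items2dict reshapes the boto-style list of {'key': ..., 'values': [{'value': ...}]}
+  # entries into a dict keyed by option name; dumping it makes the assertions below easier
+  # to follow when a run fails.
+  - name: show the reshaped DHCP configuration
+    debug:
+      var: original_config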
+
+ - assert:
+ that:
+ - original_dhcp_options_info.dhcp_options | length == 1
+ - original_config.keys() | list | sort == ['domain-name', 'domain-name-servers']
+ - original_config['domain-name'][0]['value'] == '{{ aws_domain_name }}'
+ - original_config['domain-name-servers'][0]['value'] == 'AmazonProvidedDNS'
+ - original_dhcp_options_id not in preexisting_option_sets
+
+## ============================================
+
+  # FIXME: the VPC is always reassociated to the lowest alphanumeric dhcp_options_id when vpc_id is provided without options.
+  # This task will return an unpredictable dhcp_options_id, so we can't assert anything about the option set's values
+ - name: test a DHCP option exists (check mode)
+ ec2_vpc_dhcp_option:
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ domain_name: "{{ aws_domain_name }}"
+ dns_servers:
+ - AmazonProvidedDNS
+ tags:
+ Name: "{{ resource_prefix }}"
+ register: found_dhcp_options
+ check_mode: true
+
+ - assert:
+ that:
+ - not found_dhcp_options.changed
+
+ # FIXME: always reassociated when vpc_id is provided without options, so here we provide the default options
+ - name: test a DHCP option exists
+ ec2_vpc_dhcp_option:
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ domain_name: "{{ aws_domain_name }}"
+ dns_servers:
+ - AmazonProvidedDNS
+ tags:
+ Name: "{{ resource_prefix }}"
+ register: found_dhcp_options
+
+ - assert:
+ that:
+ - found_dhcp_options is not changed
+ - found_dhcp_options.dhcp_options_id is defined
+ - original_dhcp_options_id == found_dhcp_options.dhcp_options_id
+
+ # Create a DHCP option set that inherits from the default set and does not delete the old set
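+  # (inherit_existing merges the options of the currently associated set into the new one,
+  #  which is why domain-name and domain-name-servers carry over in the assertions below)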
+ - name: create a DHCP option set that inherits from the default set (check mode)
+ ec2_vpc_dhcp_option:
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ inherit_existing: True
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ netbios_node_type: 2
+ delete_old: False
+ register: dhcp_options
+ check_mode: true
+
+ - assert:
+ that:
+ - dhcp_options.changed
+
+ - name: create a DHCP option set that inherits from the default set
+ ec2_vpc_dhcp_option:
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ inherit_existing: True
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ netbios_node_type: 2
+ delete_old: False
+ register: dhcp_options
+
+ - set_fact:
+ dhcp_options_config: "{{ dhcp_options.dhcp_options.dhcp_configurations | items2dict(key_name='key', value_name='values') }}"
+
+ - assert:
+ that:
+ - dhcp_options.changed
+ - dhcp_options.new_options
+ - dhcp_options.new_options.keys() | list | sort == ['domain-name', 'domain-name-servers', 'netbios-name-servers', 'netbios-node-type', 'ntp-servers']
+ - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2']
+ - dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1']
+ - dhcp_options.new_options['netbios-node-type'] == '2'
+ - dhcp_options.new_options['domain-name'] == ['{{ aws_domain_name }}']
+ - dhcp_options.new_options['domain-name-servers'] == ['AmazonProvidedDNS']
+ # We return the list of dicts that boto gives us, in addition to the user-friendly config dict
+ - dhcp_options_config['ntp-servers'] | map(attribute='value') | list | sort == ['10.0.0.2', '10.0.1.2']
+ - dhcp_options_config['netbios-name-servers'] | map(attribute='value') | list | sort == ['10.0.0.1', '10.0.1.1']
+ - dhcp_options_config['netbios-node-type'][0]['value'] == '2'
+ - dhcp_options_config['domain-name'][0]['value'] == '{{ aws_domain_name }}'
+ - dhcp_options_config['domain-name-servers'][0]['value'] == 'AmazonProvidedDNS'
+ - original_dhcp_options_id != dhcp_options.dhcp_options_id
+
+ - set_fact:
+ new_dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}"
+
+ - name: get information about the new DHCP option
+ ec2_vpc_dhcp_option_info:
+ dhcp_options_ids: ["{{ new_dhcp_options_id }}"]
+ register: new_dhcp_options
+
+ - set_fact:
+ new_config: "{{ new_dhcp_options.dhcp_options[0].dhcp_configurations | items2dict(key_name='key', value_name='values') }}"
+
+ - assert:
+ that:
+ - new_config.keys() | list | sort == ['domain-name', 'domain-name-servers', 'netbios-name-servers', 'netbios-node-type', 'ntp-servers']
+ - new_config['domain-name'][0]['value'] == '{{ aws_domain_name }}'
+ - new_config['domain-name-servers'][0]['value'] == 'AmazonProvidedDNS'
+ - new_config['ntp-servers'] | map(attribute='value') | list | sort == ['10.0.0.2', '10.0.1.2']
+ - new_config['netbios-name-servers'] | map(attribute='value') | list | sort == ['10.0.0.1', '10.0.1.1']
+ - new_config['netbios-node-type'][0]['value'] == '2'
+ # We return the list of dicts that boto gives us, in addition to the user-friendly config dict
+ - new_dhcp_options.dhcp_config[0]['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2']
+ - new_dhcp_options.dhcp_config[0]['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1']
+ - new_dhcp_options.dhcp_config[0]['netbios-node-type'] == '2'
+ - new_dhcp_options.dhcp_config[0]['domain-name'] == ['{{ aws_domain_name }}']
+ - new_dhcp_options.dhcp_config[0]['domain-name-servers'] == ['AmazonProvidedDNS']
+
+
+ # FIXME: no way to associate `default` in the module
+ - name: Re-associate the default DHCP options set so that the new one can be deleted
+ ec2_vpc_dhcp_option:
+ vpc_id: '{{ vpc_id }}'
+ dhcp_options_id: '{{ default_options_id }}'
+ state: present
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - result is success
+ - result.dhcp_options_id == '{{ default_options_id }}'
+
+ - name: delete it for the next test
+ ec2_vpc_dhcp_option:
+ dhcp_options_id: "{{ new_dhcp_options_id }}"
+ state: absent
+
+ # Create a DHCP option set that does not inherit from the old set and doesn't delete the old set
+
+ - name: create a DHCP option set that does not inherit from the default set (check mode)
+ ec2_vpc_dhcp_option:
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ inherit_existing: False
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ netbios_node_type: 2
+ delete_old: False
+ register: dhcp_options
+ check_mode: true
+
+ - assert:
+ that:
+ - dhcp_options.changed
+
+ - name: create a DHCP option set that does not inherit from the default set
+ ec2_vpc_dhcp_option:
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ inherit_existing: False
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ netbios_node_type: 2
+ delete_old: False
+ register: dhcp_options
+
+ - set_fact:
+ dhcp_options_config: "{{ dhcp_options.dhcp_options.dhcp_configurations | items2dict(key_name='key', value_name='values') }}"
+
+ - assert:
+ that:
+ - dhcp_options.changed
+ - dhcp_options.new_options
+ # FIXME extra keys are returned unpredictably
+ - dhcp_options.new_options.keys() | list | sort == ['netbios-name-servers', 'netbios-node-type', 'ntp-servers']
+ - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2']
+ - dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1']
+ - dhcp_options.new_options['netbios-node-type'] == '2'
+ - original_dhcp_options_id != dhcp_options.dhcp_options_id
+ # We return the list of dicts that boto gives us, in addition to the user-friendly config dict
+      - dhcp_options_config['ntp-servers'] | map(attribute='value') | list | sort == ['10.0.0.2', '10.0.1.2']
+      - dhcp_options_config['netbios-name-servers'] | map(attribute='value') | list | sort == ['10.0.0.1', '10.0.1.1']
+      - dhcp_options_config['netbios-node-type'][0]['value'] == '2'
+
+ - set_fact:
+ new_dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}"
+
+ - name: get information about the new DHCP option
+ ec2_vpc_dhcp_option_info:
+ dhcp_options_ids: ["{{ new_dhcp_options_id }}"]
+ register: new_dhcp_options
+
+ - set_fact:
+ new_config: "{{ new_dhcp_options.dhcp_options[0].dhcp_configurations | items2dict(key_name='key', value_name='values') }}"
+
+ - assert:
+ that:
+ - new_config.keys() | list | sort == ['netbios-name-servers', 'netbios-node-type', 'ntp-servers']
+ - new_config['ntp-servers'] | map(attribute='value') | list | sort == ['10.0.0.2', '10.0.1.2']
+ - new_config['netbios-name-servers'] | map(attribute='value') | list | sort == ['10.0.0.1', '10.0.1.1']
+ - new_config['netbios-node-type'][0]['value'] == '2'
+
+ - name: disassociate the new DHCP option set so it can be deleted
+ ec2_vpc_dhcp_option:
+ dhcp_options_id: "{{ original_dhcp_options_id }}"
+ vpc_id: "{{ vpc_id }}"
+ state: present
+
+ - name: delete it for the next test
+ ec2_vpc_dhcp_option:
+ dhcp_options_id: "{{ new_dhcp_options_id }}"
+ state: absent
+
+  # Create a DHCP option set that inherits from the default set, overwrites a default value, and deletes the old set
+ - name: create a DHCP option set that inherits from the default set and deletes the original set (check mode)
+ ec2_vpc_dhcp_option:
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ inherit_existing: True
+      domain_name: '{{ aws_domain_name }}'
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ netbios_node_type: 2
+ delete_old: True
+ register: dhcp_options
+ check_mode: true
+
+ - assert:
+ that:
+ - dhcp_options.changed
+
+ - name: create a DHCP option set that inherits from the default set and deletes the original set
+ ec2_vpc_dhcp_option:
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ inherit_existing: True
+ domain_name: '{{ aws_domain_name }}'
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ netbios_node_type: 1
+ delete_old: True
+ register: dhcp_options
+
+ - assert:
+ that:
+ - dhcp_options.changed
+ - dhcp_options.new_options
+ - dhcp_options.new_options.keys() | list | sort == ['domain-name', 'domain-name-servers', 'netbios-name-servers', 'netbios-node-type', 'ntp-servers']
+ - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2']
+ - dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1']
+ - dhcp_options.new_options['netbios-node-type'] == '1'
+ - dhcp_options.new_options['domain-name'] == ['{{ aws_domain_name }}']
+ - original_dhcp_options_id != dhcp_options.dhcp_options_id
+
+ - set_fact:
+ new_dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}"
+
+ - name: get information about the new DHCP option
+ ec2_vpc_dhcp_option_info:
+ dhcp_options_ids: ["{{ new_dhcp_options_id }}"]
+ register: new_dhcp_options
+
+ - set_fact:
+ new_config: "{{ new_dhcp_options.dhcp_options[0].dhcp_configurations | items2dict(key_name='key', value_name='values') }}"
+
+ - assert:
+ that:
+ - new_config.keys() | list | sort == ['domain-name', 'domain-name-servers', 'netbios-name-servers', 'netbios-node-type', 'ntp-servers']
+ - new_config['domain-name'][0]['value'] == '{{ aws_domain_name }}'
+ - new_config['ntp-servers'] | map(attribute='value') | list | sort == ['10.0.0.2', '10.0.1.2']
+ - new_config['netbios-name-servers'] | map(attribute='value') | list | sort == ['10.0.0.1', '10.0.1.1']
+ - new_config['netbios-node-type'][0]['value'] == '1'
+
+ - name: verify the original set was deleted
+ ec2_vpc_dhcp_option_info:
+ dhcp_options_ids: ["{{ original_dhcp_options_id }}"]
+ register: dhcp_options
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - dhcp_options.failed
+ - '"does not exist" in dhcp_options.error.message'
+
+ - set_fact:
+ original_dhcp_options_id: "{{ new_dhcp_options_id }}"
+
+ # Create a DHCP option set that does not inherit from the old set and deletes the old set
+
+ - name: create a DHCP option set that does not inherit from the default set and deletes the original set (check mode)
+ ec2_vpc_dhcp_option:
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ inherit_existing: False
+ domain_name: '{{ aws_domain_name }}'
+ dns_servers:
+ - AmazonProvidedDNS
+ delete_old: True
+ register: dhcp_options
+ check_mode: true
+
+ - assert:
+ that:
+ - dhcp_options.changed
+
+ - name: create a DHCP option set that does not inherit from the default set and deletes the original set
+ ec2_vpc_dhcp_option:
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ inherit_existing: False
+ domain_name: "{{ aws_domain_name }}"
+ dns_servers:
+ - AmazonProvidedDNS
+ delete_old: True
+ register: dhcp_options
+
+ - assert:
+ that:
+ - dhcp_options.new_options
+ - dhcp_options.new_options.keys() | list | sort is superset(['domain-name', 'domain-name-servers'])
+ - dhcp_options.new_options['domain-name'] == ['{{ aws_domain_name }}']
+ - dhcp_options.new_options['domain-name-servers'] == ['AmazonProvidedDNS']
+ - original_dhcp_options_id != dhcp_options.dhcp_options_id
+
+ - set_fact:
+ new_dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}"
+
+ - name: get information about the new DHCP option
+ ec2_vpc_dhcp_option_info:
+ dhcp_options_ids: ["{{ new_dhcp_options_id }}"]
+ register: new_dhcp_options
+
+ - set_fact:
+ new_config: "{{ new_dhcp_options.dhcp_options[0].dhcp_configurations | items2dict(key_name='key', value_name='values') }}"
+
+ - assert:
+ that:
+ - new_config.keys() | list | sort == ['domain-name', 'domain-name-servers']
+ - new_config['domain-name'][0]['value'] == '{{ aws_domain_name }}'
+ - new_config['domain-name-servers'][0]['value'] == 'AmazonProvidedDNS'
+
+ - name: verify the original set was deleted
+ ec2_vpc_dhcp_option_info:
+ dhcp_options_ids: ["{{ original_dhcp_options_id }}"]
+ register: dhcp_options
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - dhcp_options.failed
+ - '"does not exist" in dhcp_options.error.message'
+
+ - set_fact:
+ original_dhcp_options_id: "{{ new_dhcp_options_id }}"
+
+ # Create a DHCP option set with tags
+
+ - name: create a DHCP option set with tags (check mode)
+ ec2_vpc_dhcp_option:
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ inherit_existing: False
+ delete_old: True
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ tags:
+ CreatedBy: ansible-test
+ Collection: amazon.aws
+ register: dhcp_options
+ check_mode: true
+ ignore_errors: true
+
+ - assert:
+ that:
+ - dhcp_options.changed
+
+ - name: create a DHCP option set with tags
+ ec2_vpc_dhcp_option:
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ inherit_existing: False
+ delete_old: True
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ tags:
+ CreatedBy: ansible-test
+ Collection: amazon.aws
+ register: dhcp_options
+
+ - set_fact:
+ dhcp_options_config: "{{ dhcp_options.dhcp_options.dhcp_configurations | items2dict(key_name='key', value_name='values') }}"
+
+ - assert:
+ that:
+ - dhcp_options.changed
+ - dhcp_options.new_options.keys() | list | sort is superset(['ntp-servers', 'netbios-name-servers'])
+ - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2']
+ - dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1']
+ - original_dhcp_options_id != dhcp_options.dhcp_options_id
+ # We return the list of dicts that boto gives us, in addition to the user-friendly config dict
+ - dhcp_options_config['ntp-servers'] | map(attribute='value') | list | sort == ['10.0.0.2', '10.0.1.2']
+ - dhcp_options_config['netbios-name-servers'] | map(attribute='value') | list | sort == ['10.0.0.1', '10.0.1.1']
+ - dhcp_options.dhcp_options.tags.keys() | length == 2
+ - dhcp_options.dhcp_options.tags['CreatedBy'] == 'ansible-test'
+ - dhcp_options.dhcp_options.tags['Collection'] == 'amazon.aws'
+
+ - set_fact:
+ new_dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}"
+
+ - name: check if the expected tags are associated
+ ec2_vpc_dhcp_option_info:
+ dhcp_options_ids: ["{{ new_dhcp_options_id }}"]
+ register: dhcp_options_info
+
+ - assert:
+ that:
+ - dhcp_options_info.dhcp_options[0].tags is defined
+ - dhcp_options_info.dhcp_options[0].tags | length == 2
+ - dhcp_options_info.dhcp_options[0].tags['Collection'] == 'amazon.aws'
+ - dhcp_options_info.dhcp_options[0].tags['CreatedBy'] == 'ansible-test'
+
+ - name: test no changes with the same tags (check mode)
+ ec2_vpc_dhcp_option:
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ inherit_existing: False
+ delete_old: True
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ tags:
+ CreatedBy: ansible-test
+ Collection: amazon.aws
+ register: dhcp_options
+ check_mode: true
+
+ - assert:
+ that:
+ - not dhcp_options.changed
+ - dhcp_options.new_options.keys() | list | sort == ['netbios-name-servers', 'ntp-servers']
+ - dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1']
+ - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2']
+
+ - name: test no changes with the same tags
+ ec2_vpc_dhcp_option:
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ inherit_existing: False
+ delete_old: True
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ tags:
+ CreatedBy: ansible-test
+ Collection: amazon.aws
+ register: dhcp_options
+
+ - name: check if the expected tags are associated
+ ec2_vpc_dhcp_option_info:
+ dhcp_options_ids: ["{{ dhcp_options.dhcp_options_id }}"]
+ register: dhcp_options_info
+
+ - assert:
+ that:
+ - not dhcp_options.changed
+ - dhcp_options.new_options.keys() | list | sort == ['netbios-name-servers', 'ntp-servers']
+ - dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1']
+ - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2']
+ - new_dhcp_options_id == dhcp_options.dhcp_options_id
+ - dhcp_options.dhcp_options.tags.keys() | length == 2
+ - dhcp_options.dhcp_options.tags['CreatedBy'] == 'ansible-test'
+ - dhcp_options.dhcp_options.tags['Collection'] == 'amazon.aws'
+ - dhcp_options_info.dhcp_options[0].tags is defined
+ - dhcp_options_info.dhcp_options[0].tags.keys() | length == 2
+ - dhcp_options_info.dhcp_options[0].tags['Collection'] == 'amazon.aws'
+ - dhcp_options_info.dhcp_options[0].tags['CreatedBy'] == 'ansible-test'
+
+ - name: test no changes without specifying tags (check mode)
+ ec2_vpc_dhcp_option:
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ inherit_existing: False
+ delete_old: True
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ purge_tags: False
+ register: dhcp_options
+ check_mode: true
+
+ - assert:
+ that:
+ - not dhcp_options.changed
+ - dhcp_options.new_options.keys() | list | sort is superset(['netbios-name-servers', 'ntp-servers'])
+ - dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1']
+ - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2']
+
+ - name: test no changes without specifying tags
+ ec2_vpc_dhcp_option:
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ inherit_existing: False
+ delete_old: True
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ purge_tags: False
+ register: dhcp_options
+
+ - name: check if the expected tags are associated
+ ec2_vpc_dhcp_option_info:
+ dhcp_options_ids: ["{{ dhcp_options.dhcp_options_id }}"]
+ register: dhcp_options_info
+
+ - assert:
+ that:
+ - not dhcp_options.changed
+ - dhcp_options.new_options.keys() | list | sort is superset(['netbios-name-servers', 'ntp-servers'])
+ - dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1']
+ - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2']
+ - new_dhcp_options_id == dhcp_options.dhcp_options_id
+ - dhcp_options_info.dhcp_options[0].tags is defined
+ - dhcp_options_info.dhcp_options[0].tags.keys() | length == 2
+ - dhcp_options_info.dhcp_options[0].tags['Collection'] == 'amazon.aws'
+ - dhcp_options_info.dhcp_options[0].tags['CreatedBy'] == 'ansible-test'
+
+ - name: add a tag without using dhcp_options_id
+ ec2_vpc_dhcp_option:
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ inherit_existing: False
+ delete_old: True
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ tags:
+ CreatedBy: ansible-test
+ Collection: amazon.aws
+ another: tag
+ register: dhcp_options
+
+ - name: check if the expected tags are associated
+ ec2_vpc_dhcp_option_info:
+ dhcp_options_ids: ["{{ dhcp_options.dhcp_options_id }}"]
+ register: dhcp_options_info
+
+ - assert:
+ that:
+ - dhcp_options.changed
+ - dhcp_options.new_options.keys() | list | sort is superset(['netbios-name-servers', 'ntp-servers'])
+ - dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1']
+ - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2']
+ - new_dhcp_options_id == dhcp_options.dhcp_options_id
+ - dhcp_options.dhcp_options.tags.keys() | length == 3
+ - dhcp_options.dhcp_options.tags['another'] == 'tag'
+ - dhcp_options.dhcp_options.tags['CreatedBy'] == 'ansible-test'
+ - dhcp_options.dhcp_options.tags['Collection'] == 'amazon.aws'
+ - dhcp_options_info.dhcp_options[0].tags is defined
+ - dhcp_options_info.dhcp_options[0].tags.keys() | length == 3
+ - dhcp_options_info.dhcp_options[0].tags['another'] == 'tag'
+ - dhcp_options_info.dhcp_options[0].tags['Collection'] == 'amazon.aws'
+ - dhcp_options_info.dhcp_options[0].tags['CreatedBy'] == 'ansible-test'
+
+  - name: add and remove tags (check mode)
+ ec2_vpc_dhcp_option:
+ dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}"
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ inherit_existing: False
+ delete_old: True
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ tags:
+ AnsibleTest: integration
+ Collection: amazon.aws
+ register: dhcp_options
+ check_mode: true
+
+ - assert:
+ that:
+ - dhcp_options.changed
+
+ - name: add and remove tags
+ ec2_vpc_dhcp_option:
+ dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}"
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ inherit_existing: False
+ delete_old: True
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ tags:
+ AnsibleTest: integration
+ Collection: amazon.aws
+ register: dhcp_options
+
+ - name: check if the expected tags are associated
+ ec2_vpc_dhcp_option_info:
+ dhcp_options_ids: ["{{ dhcp_options.dhcp_options_id }}"]
+ register: dhcp_options_info
+
+ - assert:
+ that:
+ - dhcp_options.changed
+ - dhcp_options.dhcp_options.tags.keys() | length == 2
+ - dhcp_options.dhcp_options.tags['AnsibleTest'] == 'integration'
+ - dhcp_options.dhcp_options.tags['Collection'] == 'amazon.aws'
+ - new_dhcp_options_id == dhcp_options.dhcp_options_id
+ - dhcp_options_info.dhcp_options[0].tags is defined
+ - dhcp_options_info.dhcp_options[0].tags.keys() | length == 2
+ - dhcp_options_info.dhcp_options[0].tags['Collection'] == 'amazon.aws'
+ - dhcp_options_info.dhcp_options[0].tags['AnsibleTest'] == 'integration'
+
+ - name: add tags with different cases
+ ec2_vpc_dhcp_option:
+ dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}"
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ inherit_existing: False
+ delete_old: True
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ tags:
+ "lowercase spaced": 'hello cruel world'
+ "Title Case": 'Hello Cruel World'
+ CamelCase: 'SimpleCamelCase'
+ snake_case: 'simple_snake_case'
+ register: dhcp_options
+
+ - name: check if the expected tags are associated
+ ec2_vpc_dhcp_option_info:
+ dhcp_options_ids: ["{{ dhcp_options.dhcp_options_id }}"]
+ register: dhcp_options_info
+
+ - assert:
+ that:
+ - dhcp_options.changed
+ - new_dhcp_options_id == dhcp_options.dhcp_options_id
+ - dhcp_options.dhcp_options.tags.keys() | length == 4
+ - dhcp_options.dhcp_options.tags['lowercase spaced'] == 'hello cruel world'
+ - dhcp_options.dhcp_options.tags['Title Case'] == 'Hello Cruel World'
+ - dhcp_options.dhcp_options.tags['CamelCase'] == 'SimpleCamelCase'
+ - dhcp_options.dhcp_options.tags['snake_case'] == 'simple_snake_case'
+ - dhcp_options_info.dhcp_options[0].tags is defined
+ - dhcp_options_info.dhcp_options[0].tags.keys() | length == 4
+ - dhcp_options_info.dhcp_options[0].tags['lowercase spaced'] == 'hello cruel world'
+ - dhcp_options_info.dhcp_options[0].tags['Title Case'] == 'Hello Cruel World'
+ - dhcp_options_info.dhcp_options[0].tags['CamelCase'] == 'SimpleCamelCase'
+ - dhcp_options_info.dhcp_options[0].tags['snake_case'] == 'simple_snake_case'
+
+ - name: test purging all tags
+ ec2_vpc_dhcp_option:
+ dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}"
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ inherit_existing: False
+ delete_old: True
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ tags: {}
+ register: dhcp_options
+
+ - name: check if the expected tags are associated
+ ec2_vpc_dhcp_option_info:
+ dhcp_options_ids: ["{{ dhcp_options.dhcp_options_id }}"]
+ register: dhcp_options_info
+
+ - assert:
+ that:
+ - dhcp_options.changed
+ - new_dhcp_options_id == dhcp_options.dhcp_options_id
+ - not dhcp_options_info.dhcp_options[0].tags
+
+ - name: test removing all tags
+ ec2_vpc_dhcp_option:
+ dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}"
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ inherit_existing: False
+ delete_old: True
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ tags: {}
+ register: dhcp_options
+
+ - name: check if the expected tags are associated
+ ec2_vpc_dhcp_option_info:
+ dhcp_options_ids: ["{{ dhcp_options.dhcp_options_id }}"]
+ register: dhcp_options_info
+
+ - assert:
+ that:
+ - dhcp_options.changed
+ - new_dhcp_options_id == dhcp_options.dhcp_options_id
+ - not dhcp_options_info.dhcp_options[0].tags
+
+ - name: remove the DHCP option set (check mode)
+ ec2_vpc_dhcp_option:
+ state: absent
+ vpc_id: "{{ vpc_id }}"
+ dhcp_options_id: "{{ new_dhcp_options_id }}"
+ register: dhcp_options
+ check_mode: true
+
+# - assert:
+# that:
+# - dhcp_options.changed
+
+ # FIXME: does nothing - the module should associate "default" with the VPC provided but currently does not
+  - name: remove the DHCP option set
+ ec2_vpc_dhcp_option:
+ state: absent
+ vpc_id: "{{ vpc_id }}"
+ dhcp_options_id: "{{ new_dhcp_options_id }}"
+ register: dhcp_options
+
+# - assert:
+# that:
+# - dhcp_options.changed
+
+ - name: remove the DHCP option set again (check mode)
+ ec2_vpc_dhcp_option:
+ state: absent
+ vpc_id: "{{ vpc_id }}"
+ dhcp_options_id: "{{ new_dhcp_options_id }}"
+ register: dhcp_options
+ check_mode: true
+
+ - assert:
+ that:
+ - not dhcp_options.changed
+
+ - name: remove the DHCP option set again
+ ec2_vpc_dhcp_option:
+ state: absent
+ vpc_id: "{{ vpc_id }}"
+ dhcp_options_id: "{{ new_dhcp_options_id }}"
+ register: dhcp_options
+
+ - assert:
+ that:
+ - not dhcp_options.changed
+
+ always:
+
+ - name: Re-associate the default DHCP options set so that the new one(s) can be deleted
+ ec2_vpc_dhcp_option:
+ vpc_id: '{{ vpc_id }}'
+ dhcp_options_id: '{{ default_options_id }}'
+ state: present
+ register: result
+ when: vpc_id is defined
+ ignore_errors: yes
+
+ - name: Query all option sets created by the test
+ ec2_vpc_dhcp_option_info:
+ filters:
+        "tag:Name": "*{{ resource_prefix }}*"
+ register: option_sets
+
+ - name: clean up DHCP option sets
+ ec2_vpc_dhcp_option:
+ state: absent
+ dhcp_options_id: "{{ original_dhcp_options_id }}"
+ vpc_id: "{{ vpc_id }}"
+ when: original_dhcp_options_id is defined
+ ignore_errors: yes
+
+ - name: clean up DHCP option sets
+ ec2_vpc_dhcp_option:
+ state: absent
+ dhcp_options_id: "{{ new_dhcp_options_id }}"
+ vpc_id: "{{ vpc_id }}"
+ when: new_dhcp_options_id is defined
+ ignore_errors: yes
+
+ - name: Delete the VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}"
+ cidr_block: "{{ vpc_cidr }}"
+ state: absent
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/aliases
new file mode 100644
index 000000000..506820fc1
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+disabled
+ec2_vpc_endpoint_info
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/defaults/main.yml
new file mode 100644
index 000000000..3869e983b
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/defaults/main.yml
@@ -0,0 +1,7 @@
+vpc_name: '{{ resource_prefix }}-vpc'
+vpc_seed: '{{ resource_prefix }}'
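+# seeding random with the resource prefix keeps the CIDR stable within a test run while
+# varying between runs, which reduces collisions in shared accounts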
+vpc_cidr: 10.{{ 256 | random(seed=vpc_seed) }}.22.0/24
+
+# S3 and EC2 should generally be available...
+endpoint_service_a: com.amazonaws.{{ aws_region }}.s3
+endpoint_service_b: com.amazonaws.{{ aws_region }}.ec2
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/tasks/main.yml
new file mode 100644
index 000000000..09e6908b0
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/tasks/main.yml
@@ -0,0 +1,862 @@
+- name: ec2_vpc_endpoint tests
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+ # ============================================================
+ # BEGIN PRE-TEST SETUP
+ - name: create a VPC
+ ec2_vpc_net:
+ state: present
+ name: '{{ vpc_name }}'
+ cidr_block: '{{ vpc_cidr }}'
+ tags:
+ AnsibleTest: ec2_vpc_endpoint
+ AnsibleRun: '{{ resource_prefix }}'
+ register: vpc_creation
+ - name: Assert success
+ assert:
+ that:
+ - vpc_creation is successful
+
+ - name: Create an IGW
+ ec2_vpc_igw:
+ vpc_id: '{{ vpc_creation.vpc.id }}'
+ state: present
+ tags:
+ Name: '{{ resource_prefix }}'
+ AnsibleTest: ec2_vpc_endpoint
+ AnsibleRun: '{{ resource_prefix }}'
+ register: igw_creation
+ - name: Assert success
+ assert:
+ that:
+ - igw_creation is successful
+
+ - name: Create a minimal route table (no routes)
+ ec2_vpc_route_table:
+ vpc_id: '{{ vpc_creation.vpc.id }}'
+ tags:
+ AnsibleTest: ec2_vpc_endpoint
+ AnsibleRun: '{{ resource_prefix }}'
+ Name: '{{ resource_prefix }}-empty'
+ subnets: []
+ routes: []
+ register: rtb_creation_empty
+
+ - name: Create a minimal route table (with IGW)
+ ec2_vpc_route_table:
+ vpc_id: '{{ vpc_creation.vpc.id }}'
+ tags:
+ AnsibleTest: ec2_vpc_endpoint
+ AnsibleRun: '{{ resource_prefix }}'
+ Name: '{{ resource_prefix }}-igw'
+ subnets: []
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: '{{ igw_creation.gateway_id }}'
+ register: rtb_creation_igw
+
+ - name: Save VPC info in a fact
+ set_fact:
+ vpc_id: '{{ vpc_creation.vpc.id }}'
+ rtb_empty_id: '{{ rtb_creation_empty.route_table.id }}'
+ rtb_igw_id: '{{ rtb_creation_igw.route_table.id }}'
+
+ # ============================================================
+ # BEGIN TESTS
+
+ # Minimal check_mode with _info
+ - name: Fetch Endpoints in check_mode
+ ec2_vpc_endpoint_info:
+ query: endpoints
+ register: endpoint_info
+ check_mode: true
+ - name: Assert success
+ assert:
+ that:
+ # May be run in parallel, the only thing we can guarantee is
+ # - we shouldn't error
+ # - we should return 'vpc_endpoints' (even if it's empty)
+ - endpoint_info is successful
+ - '"vpc_endpoints" in endpoint_info'
+
+ - name: Fetch Services in check_mode
+ ec2_vpc_endpoint_info:
+ query: services
+ register: endpoint_info
+ check_mode: true
+ - name: Assert success
+ assert:
+ that:
+ - endpoint_info is successful
+ - '"service_names" in endpoint_info'
+ # This is just 2 arbitrary AWS services that should (generally) be
+ # available. The actual list will vary over time and between regions
+ - endpoint_service_a in endpoint_info.service_names
+ - endpoint_service_b in endpoint_info.service_names
+
+ # Fetch services without check mode
+ # Note: Filters not supported on services via this module, this is all we can test for now
+ - name: Fetch Services
+ ec2_vpc_endpoint_info:
+ query: services
+ register: endpoint_info
+ - name: Assert success
+ assert:
+ that:
+ - endpoint_info is successful
+ - '"service_names" in endpoint_info'
+ # This is just 2 arbitrary AWS services that should (generally) be
+ # available. The actual list will vary over time and between regions
+ - endpoint_service_a in endpoint_info.service_names
+ - endpoint_service_b in endpoint_info.service_names
+
+ # Attempt to create an endpoint
+ - name: Create minimal endpoint (check mode)
+ ec2_vpc_endpoint:
+ state: present
+ vpc_id: '{{ vpc_id }}'
+ service: '{{ endpoint_service_a }}'
+ register: create_endpoint_check
+ check_mode: true
+ - name: Assert changed
+ assert:
+ that:
+ - create_endpoint_check is changed
+
+ - name: Create minimal endpoint
+ ec2_vpc_endpoint:
+ state: present
+ vpc_id: '{{ vpc_id }}'
+ service: '{{ endpoint_service_a }}'
+ wait: true
+ register: create_endpoint
+ - name: Check standard return values
+ assert:
+ that:
+ - create_endpoint is changed
+ - '"result" in create_endpoint'
+ - '"creation_timestamp" in create_endpoint.result'
+ - '"dns_entries" in create_endpoint.result'
+ - '"groups" in create_endpoint.result'
+ - '"network_interface_ids" in create_endpoint.result'
+ - '"owner_id" in create_endpoint.result'
+ - '"policy_document" in create_endpoint.result'
+ - '"private_dns_enabled" in create_endpoint.result'
+ - create_endpoint.result.private_dns_enabled == False
+ - '"requester_managed" in create_endpoint.result'
+ - create_endpoint.result.requester_managed == False
+ - '"service_name" in create_endpoint.result'
+ - create_endpoint.result.service_name == endpoint_service_a
+ - '"state" in create_endpoint.result'
+ - create_endpoint.result.state == "available"
+ - '"vpc_endpoint_id" in create_endpoint.result'
+ - create_endpoint.result.vpc_endpoint_id.startswith("vpce-")
+ - '"vpc_endpoint_type" in create_endpoint.result'
+ - create_endpoint.result.vpc_endpoint_type == "Gateway"
+ - '"vpc_id" in create_endpoint.result'
+ - create_endpoint.result.vpc_id == vpc_id
+
+ - name: Save Endpoint info in a fact
+ set_fact:
+ endpoint_id: '{{ create_endpoint.result.vpc_endpoint_id }}'
+
+ # Pull info about the endpoints
+ - name: Fetch Endpoints (all)
+ ec2_vpc_endpoint_info:
+ query: endpoints
+ register: endpoint_info
+ - name: Assert success
+ assert:
+ that:
+ # We're fetching all endpoints, there's no guarantee what the values
+ # will be
+ - endpoint_info is successful
+ - '"vpc_endpoints" in endpoint_info'
+ - '"creation_timestamp" in first_endpoint'
+ - '"policy_document" in first_endpoint'
+ - '"route_table_ids" in first_endpoint'
+ - '"service_name" in first_endpoint'
+ - '"state" in first_endpoint'
+ - '"vpc_endpoint_id" in first_endpoint'
+ - '"vpc_id" in first_endpoint'
+ # Not yet documented, but returned
+ - '"dns_entries" in first_endpoint'
+ - '"groups" in first_endpoint'
+ - '"network_interface_ids" in first_endpoint'
+ - '"owner_id" in first_endpoint'
+ - '"private_dns_enabled" in first_endpoint'
+ - '"requester_managed" in first_endpoint'
+ - '"subnet_ids" in first_endpoint'
+ - '"tags" in first_endpoint'
+ - '"vpc_endpoint_type" in first_endpoint'
+ # Make sure our endpoint is included
+ - endpoint_id in ( endpoint_info | community.general.json_query("vpc_endpoints[*].vpc_endpoint_id")
+ | list | flatten )
+ vars:
+ first_endpoint: '{{ endpoint_info.vpc_endpoints[0] }}'
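+  # community.general.json_query (used above) needs the jmespath Python
+  # package on the controller. An equivalent check without that dependency
+  # would be something along the lines of:
+  #   - endpoint_id in (endpoint_info.vpc_endpoints | map(attribute='vpc_endpoint_id') | list)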
+
+  - name: Fetch Endpoints (targeted by ID)
+ ec2_vpc_endpoint_info:
+ query: endpoints
+ vpc_endpoint_ids: '{{ endpoint_id }}'
+ register: endpoint_info
+ - name: Assert success
+ assert:
+ that:
+ - endpoint_info is successful
+ - '"vpc_endpoints" in endpoint_info'
+ - '"creation_timestamp" in first_endpoint'
+ - '"policy_document" in first_endpoint'
+ - '"route_table_ids" in first_endpoint'
+ - first_endpoint.route_table_ids | length == 0
+ - '"service_name" in first_endpoint'
+ - first_endpoint.service_name == endpoint_service_a
+ - '"state" in first_endpoint'
+ - first_endpoint.state == "available"
+ - '"vpc_endpoint_id" in first_endpoint'
+ - first_endpoint.vpc_endpoint_id == endpoint_id
+ - '"vpc_id" in first_endpoint'
+ - first_endpoint.vpc_id == vpc_id
+ # Not yet documented, but returned
+ - '"dns_entries" in first_endpoint'
+ - '"groups" in first_endpoint'
+ - '"network_interface_ids" in first_endpoint'
+ - '"owner_id" in first_endpoint'
+ - '"private_dns_enabled" in first_endpoint'
+ - first_endpoint.private_dns_enabled == False
+ - '"requester_managed" in first_endpoint'
+ - first_endpoint.requester_managed == False
+ - '"subnet_ids" in first_endpoint'
+ - '"tags" in first_endpoint'
+ - '"vpc_endpoint_type" in first_endpoint'
+ vars:
+ first_endpoint: '{{ endpoint_info.vpc_endpoints[0] }}'
+
+  - name: Fetch Endpoints (targeted by VPC)
+ ec2_vpc_endpoint_info:
+ query: endpoints
+ filters:
+ vpc-id:
+ - '{{ vpc_id }}'
+ register: endpoint_info
+ - name: Assert success
+ assert:
+ that:
+ - endpoint_info is successful
+ - '"vpc_endpoints" in endpoint_info'
+ - '"creation_timestamp" in first_endpoint'
+ - '"policy_document" in first_endpoint'
+ - '"route_table_ids" in first_endpoint'
+ - '"service_name" in first_endpoint'
+ - first_endpoint.service_name == endpoint_service_a
+ - '"state" in first_endpoint'
+ - first_endpoint.state == "available"
+ - '"vpc_endpoint_id" in first_endpoint'
+ - first_endpoint.vpc_endpoint_id == endpoint_id
+ - '"vpc_id" in first_endpoint'
+ - first_endpoint.vpc_id == vpc_id
+ # Not yet documented, but returned
+ - '"dns_entries" in first_endpoint'
+ - '"groups" in first_endpoint'
+ - '"network_interface_ids" in first_endpoint'
+ - '"owner_id" in first_endpoint'
+ - '"private_dns_enabled" in first_endpoint'
+ - first_endpoint.private_dns_enabled == False
+ - '"requester_managed" in first_endpoint'
+ - first_endpoint.requester_managed == False
+ - '"subnet_ids" in first_endpoint'
+ - '"tags" in first_endpoint'
+ - '"vpc_endpoint_type" in first_endpoint'
+ vars:
+ first_endpoint: '{{ endpoint_info.vpc_endpoints[0] }}'
+
+
+ # matches on parameters without explicitly passing the endpoint ID
+ - name: Create minimal endpoint - idempotency (check mode)
+ ec2_vpc_endpoint:
+ state: present
+ vpc_id: '{{ vpc_id }}'
+ service: '{{ endpoint_service_a }}'
+ register: create_endpoint_idem_check
+ check_mode: true
+ - assert:
+ that:
+ - create_endpoint_idem_check is not changed
+
+ - name: Create minimal endpoint - idempotency
+ ec2_vpc_endpoint:
+ state: present
+ vpc_id: '{{ vpc_id }}'
+ service: '{{ endpoint_service_a }}'
+ register: create_endpoint_idem
+ - assert:
+ that:
+ - create_endpoint_idem is not changed
+
+ - name: Delete minimal endpoint by ID (check_mode)
+ ec2_vpc_endpoint:
+ state: absent
+ vpc_endpoint_id: '{{ endpoint_id }}'
+ check_mode: true
+ register: endpoint_delete_check
+ - assert:
+ that:
+ - endpoint_delete_check is changed
+
+
+ - name: Delete minimal endpoint by ID
+ ec2_vpc_endpoint:
+ state: absent
+ vpc_endpoint_id: '{{ endpoint_id }}'
+ register: endpoint_delete_check
+ - assert:
+ that:
+ - endpoint_delete_check is changed
+
+ - name: Delete minimal endpoint by ID - idempotency (check_mode)
+ ec2_vpc_endpoint:
+ state: absent
+ vpc_endpoint_id: '{{ endpoint_id }}'
+ check_mode: true
+ register: endpoint_delete_check
+ - assert:
+ that:
+ - endpoint_delete_check is not changed
+
+ - name: Delete minimal endpoint by ID - idempotency
+ ec2_vpc_endpoint:
+ state: absent
+ vpc_endpoint_id: '{{ endpoint_id }}'
+ register: endpoint_delete_check
+ - assert:
+ that:
+ - endpoint_delete_check is not changed
+
+ - name: Fetch Endpoints by ID (expect failed)
+ ec2_vpc_endpoint_info:
+ query: endpoints
+ vpc_endpoint_ids: '{{ endpoint_id }}'
+ ignore_errors: true
+ register: endpoint_info
+ - name: Assert endpoint does not exist
+ assert:
+ that:
+ - endpoint_info is successful
+ - '"does not exist" in endpoint_info.msg'
+ - endpoint_info.vpc_endpoints | length == 0
+
+ # Attempt to create an endpoint with a route table
+ - name: Create an endpoint with route table (check mode)
+ ec2_vpc_endpoint:
+ state: present
+ vpc_id: '{{ vpc_id }}'
+ service: '{{ endpoint_service_a }}'
+ route_table_ids:
+ - '{{ rtb_empty_id }}'
+ register: create_endpoint_check
+ check_mode: true
+ - name: Assert changed
+ assert:
+ that:
+ - create_endpoint_check is changed
+
+ - name: Create an endpoint with route table
+ ec2_vpc_endpoint:
+ state: present
+ vpc_id: '{{ vpc_id }}'
+ service: '{{ endpoint_service_a }}'
+ route_table_ids:
+ - '{{ rtb_empty_id }}'
+ wait: true
+ register: create_rtb_endpoint
+ - name: Check standard return values
+ assert:
+ that:
+ - create_rtb_endpoint is changed
+ - '"result" in create_rtb_endpoint'
+ - '"creation_timestamp" in create_rtb_endpoint.result'
+ - '"dns_entries" in create_rtb_endpoint.result'
+ - '"groups" in create_rtb_endpoint.result'
+ - '"network_interface_ids" in create_rtb_endpoint.result'
+ - '"owner_id" in create_rtb_endpoint.result'
+ - '"policy_document" in create_rtb_endpoint.result'
+ - '"private_dns_enabled" in create_rtb_endpoint.result'
+ - '"route_table_ids" in create_rtb_endpoint.result'
+ - create_rtb_endpoint.result.route_table_ids | length == 1
+ - create_rtb_endpoint.result.route_table_ids[0] == '{{ rtb_empty_id }}'
+ - create_rtb_endpoint.result.private_dns_enabled == False
+ - '"requester_managed" in create_rtb_endpoint.result'
+ - create_rtb_endpoint.result.requester_managed == False
+ - '"service_name" in create_rtb_endpoint.result'
+ - create_rtb_endpoint.result.service_name == endpoint_service_a
+ - '"state" in create_endpoint.result'
+ - create_rtb_endpoint.result.state == "available"
+ - '"vpc_endpoint_id" in create_rtb_endpoint.result'
+ - create_rtb_endpoint.result.vpc_endpoint_id.startswith("vpce-")
+ - '"vpc_endpoint_type" in create_rtb_endpoint.result'
+ - create_rtb_endpoint.result.vpc_endpoint_type == "Gateway"
+ - '"vpc_id" in create_rtb_endpoint.result'
+ - create_rtb_endpoint.result.vpc_id == vpc_id
+
+ - name: Save Endpoint info in a fact
+ set_fact:
+ rtb_endpoint_id: '{{ create_rtb_endpoint.result.vpc_endpoint_id }}'
+
+ - name: Create an endpoint with route table - idempotency (check mode)
+ ec2_vpc_endpoint:
+ state: present
+ vpc_id: '{{ vpc_id }}'
+ service: '{{ endpoint_service_a }}'
+ route_table_ids:
+ - '{{ rtb_empty_id }}'
+ register: create_endpoint_check
+ check_mode: true
+  - name: Assert not changed
+ assert:
+ that:
+ - create_endpoint_check is not changed
+
+ - name: Create an endpoint with route table - idempotency
+ ec2_vpc_endpoint:
+ state: present
+ vpc_id: '{{ vpc_id }}'
+ service: '{{ endpoint_service_a }}'
+ route_table_ids:
+ - '{{ rtb_empty_id }}'
+ register: create_endpoint_check
+  - name: Assert not changed
+ assert:
+ that:
+ - create_endpoint_check is not changed
+
+# # Endpoint modifications are not yet supported by the module
+# # A Change the route table for the endpoint
+# - name: Change the route table for the endpoint (check_mode)
+# ec2_vpc_endpoint:
+# state: present
+# vpc_id: '{{ vpc_id }}'
+# vpc_endpoint_id: "{{ rtb_endpoint_id }}"
+# service: '{{ endpoint_service_a }}'
+# route_table_ids:
+# - '{{ rtb_igw_id }}'
+# check_mode: True
+# register: check_two_rtbs_endpoint
+#
+# - name: Assert second route table would be added
+# assert:
+# that:
+# - check_two_rtbs_endpoint.changed
+#
+# - name: Change the route table for the endpoint
+# ec2_vpc_endpoint:
+# state: present
+# vpc_id: '{{ vpc_id }}'
+# vpc_endpoint_id: "{{ rtb_endpoint_id }}"
+# service: '{{ endpoint_service_a }}'
+# route_table_ids:
+# - '{{ rtb_igw_id }}'
+# register: two_rtbs_endpoint
+#
+# - name: Assert second route table would be added
+# assert:
+# that:
+# - check_two_rtbs_endpoint.changed
+# - two_rtbs_endpoint.result.route_table_ids | length == 1
+# - two_rtbs_endpoint.result.route_table_ids[0] == '{{ rtb_igw_id }}'
+#
+# - name: Change the route table for the endpoint - idempotency (check_mode)
+# ec2_vpc_endpoint:
+# state: present
+# vpc_id: '{{ vpc_id }}'
+# vpc_endpoint_id: "{{ rtb_endpoint_id }}"
+# service: '{{ endpoint_service_a }}'
+# route_table_ids:
+# - '{{ rtb_igw_id }}'
+# check_mode: True
+# register: check_two_rtbs_endpoint
+#
+# - name: Assert route table would not change
+# assert:
+# that:
+# - not check_two_rtbs_endpoint.changed
+#
+# - name: Change the route table for the endpoint - idempotency
+# ec2_vpc_endpoint:
+# state: present
+# vpc_id: '{{ vpc_id }}'
+# vpc_endpoint_id: "{{ rtb_endpoint_id }}"
+# service: '{{ endpoint_service_a }}'
+# route_table_ids:
+# - '{{ rtb_igw_id }}'
+# register: two_rtbs_endpoint
+#
+# - name: Assert route table would not change
+# assert:
+# that:
+# - not check_two_rtbs_endpoint.changed
+
+ - name: Tag the endpoint (check_mode)
+ ec2_vpc_endpoint:
+ state: present
+ vpc_id: '{{ vpc_id }}'
+ vpc_endpoint_id: '{{ rtb_endpoint_id }}'
+ service: '{{ endpoint_service_a }}'
+ route_table_ids:
+ - '{{ rtb_empty_id }}'
+ tags:
+ camelCase: helloWorld
+ PascalCase: HelloWorld
+ snake_case: hello_world
+ Title Case: Hello World
+ lowercase spaced: hello world
+ check_mode: true
+ register: check_tag_vpc_endpoint
+
+ - name: Assert tags would have changed
+ assert:
+ that:
+ - check_tag_vpc_endpoint.changed
+
+ - name: Tag the endpoint
+ ec2_vpc_endpoint:
+ state: present
+ vpc_id: '{{ vpc_id }}'
+ vpc_endpoint_id: '{{ rtb_endpoint_id }}'
+ service: '{{ endpoint_service_a }}'
+ route_table_ids:
+ - '{{ rtb_igw_id }}'
+ tags:
+ testPrefix: '{{ resource_prefix }}'
+ camelCase: helloWorld
+ PascalCase: HelloWorld
+ snake_case: hello_world
+ Title Case: Hello World
+ lowercase spaced: hello world
+ register: tag_vpc_endpoint
+
+ - name: Assert tags are successful
+ assert:
+ that:
+ - tag_vpc_endpoint.changed
+ - tag_vpc_endpoint.result.tags | length == 6
+ - endpoint_tags["testPrefix"] == resource_prefix
+ - endpoint_tags["camelCase"] == "helloWorld"
+ - endpoint_tags["PascalCase"] == "HelloWorld"
+ - endpoint_tags["snake_case"] == "hello_world"
+ - endpoint_tags["Title Case"] == "Hello World"
+ - endpoint_tags["lowercase spaced"] == "hello world"
+ vars:
+ endpoint_tags: "{{ tag_vpc_endpoint.result.tags | items2dict(key_name='Key',\
+ \ value_name='Value') }}"
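+  # items2dict flattens the boto3-style tag list into a plain mapping so the
+  # assertions above can index by tag name, roughly:
+  #   [{'Key': 'snake_case', 'Value': 'hello_world'}, ...]
+  #     -> {'snake_case': 'hello_world', ...}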
+
+ - name: Query by tag
+ ec2_vpc_endpoint_info:
+ query: endpoints
+ filters:
+ tag:testPrefix:
+ - '{{ resource_prefix }}'
+ register: tag_result
+
+ - name: Assert tag lookup found endpoint
+ assert:
+ that:
+ - tag_result is successful
+ - '"vpc_endpoints" in tag_result'
+ - first_endpoint.vpc_endpoint_id == rtb_endpoint_id
+ vars:
+ first_endpoint: '{{ tag_result.vpc_endpoints[0] }}'
+
+ - name: Tag the endpoint - idempotency (check_mode)
+ ec2_vpc_endpoint:
+ state: present
+ vpc_id: '{{ vpc_id }}'
+ vpc_endpoint_id: '{{ rtb_endpoint_id }}'
+ service: '{{ endpoint_service_a }}'
+ route_table_ids:
+ - '{{ rtb_igw_id }}'
+ tags:
+ testPrefix: '{{ resource_prefix }}'
+ camelCase: helloWorld
+ PascalCase: HelloWorld
+ snake_case: hello_world
+ Title Case: Hello World
+ lowercase spaced: hello world
+ register: tag_vpc_endpoint_again
+
+ - name: Assert tags would not change
+ assert:
+ that:
+ - not tag_vpc_endpoint_again.changed
+
+ - name: Tag the endpoint - idempotency
+ ec2_vpc_endpoint:
+ state: present
+ vpc_id: '{{ vpc_id }}'
+ vpc_endpoint_id: '{{ rtb_endpoint_id }}'
+ service: '{{ endpoint_service_a }}'
+ route_table_ids:
+ - '{{ rtb_igw_id }}'
+ tags:
+ testPrefix: '{{ resource_prefix }}'
+ camelCase: helloWorld
+ PascalCase: HelloWorld
+ snake_case: hello_world
+ Title Case: Hello World
+ lowercase spaced: hello world
+ register: tag_vpc_endpoint_again
+
+ - name: Assert tags would not change
+ assert:
+ that:
+ - not tag_vpc_endpoint_again.changed
+
+ - name: Add a tag (check_mode)
+ ec2_vpc_endpoint:
+ state: present
+ vpc_id: '{{ vpc_id }}'
+ vpc_endpoint_id: '{{ rtb_endpoint_id }}'
+ service: '{{ endpoint_service_a }}'
+ route_table_ids:
+ - '{{ rtb_igw_id }}'
+ tags:
+ new_tag: ANewTag
+ check_mode: true
+ register: check_tag_vpc_endpoint
+
+ - name: Assert tags would have changed
+ assert:
+ that:
+ - check_tag_vpc_endpoint.changed
+
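+  # Note: purge_tags defaults to true in newer releases of this collection,
+  # so it is set explicitly below; the assertion that follows relies on the
+  # previously applied tags being kept.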
+ - name: Add a tag (purge_tags=False)
+ ec2_vpc_endpoint:
+ state: present
+ vpc_id: '{{ vpc_id }}'
+ vpc_endpoint_id: '{{ rtb_endpoint_id }}'
+ service: '{{ endpoint_service_a }}'
+ route_table_ids:
+ - '{{ rtb_igw_id }}'
+      purge_tags: false
+      tags:
+ new_tag: ANewTag
+ register: add_tag_vpc_endpoint
+
+ - name: Assert tags changed
+ assert:
+ that:
+ - add_tag_vpc_endpoint.changed
+ - add_tag_vpc_endpoint.result.tags | length == 7
+ - endpoint_tags["testPrefix"] == resource_prefix
+ - endpoint_tags["camelCase"] == "helloWorld"
+ - endpoint_tags["PascalCase"] == "HelloWorld"
+ - endpoint_tags["snake_case"] == "hello_world"
+ - endpoint_tags["Title Case"] == "Hello World"
+ - endpoint_tags["lowercase spaced"] == "hello world"
+ - endpoint_tags["new_tag"] == "ANewTag"
+ vars:
+ endpoint_tags: "{{ add_tag_vpc_endpoint.result.tags | items2dict(key_name='Key',\
+ \ value_name='Value') }}"
+
+ - name: Add a tag (purge_tags=True)
+ ec2_vpc_endpoint:
+ state: present
+ vpc_id: '{{ vpc_id }}'
+ vpc_endpoint_id: '{{ rtb_endpoint_id }}'
+ service: '{{ endpoint_service_a }}'
+ route_table_ids:
+ - '{{ rtb_igw_id }}'
+ tags:
+ another_new_tag: AnotherNewTag
+ purge_tags: true
+ register: purge_tag_vpc_endpoint
+
+ - name: Assert tags changed
+ assert:
+ that:
+ - purge_tag_vpc_endpoint.changed
+ - purge_tag_vpc_endpoint.result.tags | length == 1
+ - endpoint_tags["another_new_tag"] == "AnotherNewTag"
+ vars:
+ endpoint_tags: "{{ purge_tag_vpc_endpoint.result.tags | items2dict(key_name='Key',\
+ \ value_name='Value') }}"
+
+ - name: Delete minimal route table (no routes)
+ ec2_vpc_route_table:
+ state: absent
+ lookup: id
+ route_table_id: '{{ rtb_empty_id }}'
+ register: rtb_delete
+ - assert:
+ that:
+ - rtb_delete is changed
+
+ - name: Delete minimal route table (IGW route)
+ ec2_vpc_route_table:
+ state: absent
+ lookup: id
+      route_table_id: '{{ rtb_igw_id }}'
+    register: rtb_delete
+ - assert:
+ that:
+ - rtb_delete is changed
+
+ - name: Delete route table endpoint by ID
+ ec2_vpc_endpoint:
+ state: absent
+ vpc_endpoint_id: '{{ rtb_endpoint_id }}'
+ register: endpoint_delete_check
+ - assert:
+ that:
+ - endpoint_delete_check is changed
+
+ - name: Delete minimal endpoint by ID - idempotency (check_mode)
+ ec2_vpc_endpoint:
+ state: absent
+ vpc_endpoint_id: '{{ rtb_endpoint_id }}'
+ check_mode: true
+ register: endpoint_delete_check
+ - assert:
+ that:
+ - endpoint_delete_check is not changed
+
+ - name: Delete endpoint by ID - idempotency
+ ec2_vpc_endpoint:
+ state: absent
+      vpc_endpoint_id: '{{ rtb_endpoint_id }}'
+ register: endpoint_delete_check
+ - assert:
+ that:
+ - endpoint_delete_check is not changed
+
+ - name: Create interface endpoint
+ ec2_vpc_endpoint:
+ state: present
+ vpc_id: '{{ vpc_id }}'
+ service: '{{ endpoint_service_a }}'
+ vpc_endpoint_type: Interface
+ register: create_interface_endpoint
+ - name: Check that the interface endpoint was created properly
+ assert:
+ that:
+ - create_interface_endpoint is changed
+ - create_interface_endpoint.result.vpc_endpoint_type == "Interface"
+ - name: Delete interface endpoint
+ ec2_vpc_endpoint:
+ state: absent
+ vpc_endpoint_id: '{{ create_interface_endpoint.result.vpc_endpoint_id }}'
+ register: interface_endpoint_delete_check
+ - assert:
+ that:
+ - interface_endpoint_delete_check is changed
+
+ - name: Create a subnet
+ ec2_vpc_subnet:
+ state: present
+ vpc_id: '{{ vpc_id }}'
+ az: "{{ aws_region}}a"
+ cidr: "{{ vpc_cidr }}"
+ register: interface_endpoint_create_subnet_check
+
+ - name: Create a security group
+ ec2_group:
+ name: securitygroup-prodext
+ description: "security group for Ansible interface endpoint"
+ state: present
+ vpc_id: "{{ vpc.vpc.id }}"
+ rules:
+ - proto: tcp
+ from_port: 1
+ to_port: 65535
+ cidr_ip: 0.0.0.0/0
+ register: interface_endpoint_create_sg_check
+
+ - name: Create interface endpoint attached to a subnet
+ ec2_vpc_endpoint:
+ state: present
+ vpc_id: '{{ vpc_id }}'
+ service: '{{ endpoint_service_a }}'
+ vpc_endpoint_type: Interface
+ vpc_endpoint_subnets: "{{ interface_endpoint_create_subnet_check.subnet.id') }}"
+ vpc_endpoint_security_groups: "{{ interface_endpoint_create_sg_check.group_id }}"
+ register: create_interface_endpoint_with_sg_subnets
+ - name: Check that the interface endpoint was created properly
+ assert:
+ that:
+ - create_interface_endpoint_with_sg_subnets is changed
+ - create_interface_endpoint_with_sg_subnets.result.vpc_endpoint_type == "Interface"
+
+ - name: Delete interface endpoint
+ ec2_vpc_endpoint:
+ state: absent
+ vpc_endpoint_id: "{{ create_interface_endpoint_with_sg_subnets.result.vpc_endpoint_id }}"
+ register: create_interface_endpoint_with_sg_subnets_delete_check
+ - assert:
+ that:
+ - create_interface_endpoint_with_sg_subnets_delete_check is changed
+
+ # ============================================================
+ # BEGIN POST-TEST CLEANUP
+ always:
+ # Delete the routes first - you can't delete an endpoint with a route
+ # attached.
+ - name: Delete minimal route table (no routes)
+ ec2_vpc_route_table:
+ state: absent
+ lookup: id
+ route_table_id: '{{ rtb_creation_empty.route_table.id }}'
+ ignore_errors: true
+
+ - name: Delete minimal route table (IGW route)
+ ec2_vpc_route_table:
+ state: absent
+ lookup: id
+ route_table_id: '{{ rtb_creation_igw.route_table.id }}'
+ ignore_errors: true
+
+ - name: Delete endpoint
+ ec2_vpc_endpoint:
+ state: absent
+ vpc_endpoint_id: '{{ create_endpoint.result.vpc_endpoint_id }}'
+ ignore_errors: true
+
+ - name: Delete endpoint
+ ec2_vpc_endpoint:
+ state: absent
+ vpc_endpoint_id: '{{ create_rtb_endpoint.result.vpc_endpoint_id }}'
+ ignore_errors: true
+
+  - name: Query any remaining endpoints we created (idempotency work is ongoing) # FIXME
+ ec2_vpc_endpoint_info:
+ query: endpoints
+ filters:
+ vpc-id:
+ - '{{ vpc_id }}'
+ register: test_endpoints
+
+ - name: Delete all endpoints
+ ec2_vpc_endpoint:
+ state: absent
+ vpc_endpoint_id: '{{ item.vpc_endpoint_id }}'
+ with_items: '{{ test_endpoints.vpc_endpoints }}'
+ ignore_errors: true
+
+ - name: Remove IGW
+ ec2_vpc_igw:
+ state: absent
+ vpc_id: '{{ vpc_id }}'
+ register: igw_deletion
+ retries: 10
+ delay: 5
+ until: igw_deletion is success
+ ignore_errors: yes
+
+ - name: Remove VPC
+ ec2_vpc_net:
+ state: absent
+ name: '{{ vpc_name }}'
+ cidr_block: '{{ vpc_cidr }}'
+ ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/aliases
new file mode 100644
index 000000000..760a04f5d
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+ec2_vpc_endpoint_service_info
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/defaults/main.yml
new file mode 100644
index 000000000..445cc7f3c
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/defaults/main.yml
@@ -0,0 +1,3 @@
+search_service_names:
+- 'com.amazonaws.{{ aws_region }}.s3'
+- 'com.amazonaws.{{ aws_region }}.ec2'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/tasks/main.yml
new file mode 100644
index 000000000..22b290a34
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/tasks/main.yml
@@ -0,0 +1,135 @@
+---
+- module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ collections:
+ - amazon.aws
+ - community.aws
+ block:
+
+ - name: 'List all available services (Check Mode)'
+ ec2_vpc_endpoint_service_info:
+ check_mode: True
+ register: services_check
+
+ - name: 'Verify services (Check Mode)'
+ vars:
+ first_service: '{{ services_check.service_details[0] }}'
+ assert:
+ that:
+ - services_check is successful
+ - services_check is not changed
+ - '"service_names" in services_check'
+ - '"service_details" in services_check'
+ - '"acceptance_required" in first_service'
+ - '"availability_zones" in first_service'
+ - '"base_endpoint_dns_names" in first_service'
+ - '"manages_vpc_endpoints" in first_service'
+ - '"owner" in first_service'
+ - '"private_dns_name" in first_service'
+ - '"private_dns_name_verification_state" in first_service'
+ - '"service_id" in first_service'
+ - '"service_name" in first_service'
+ - '"service_type" in first_service'
+ - '"tags" in first_service'
+ - '"vpc_endpoint_policy_supported" in first_service'
+
+ - name: 'List all available services'
+ ec2_vpc_endpoint_service_info:
+ register: services_info
+
+ - name: 'Verify services'
+ vars:
+ first_service: '{{ services_info.service_details[0] }}'
+ assert:
+ that:
+ - services_info is successful
+ - services_info is not changed
+ - '"service_names" in services_info'
+ - '"service_details" in services_info'
+ - '"acceptance_required" in first_service'
+ - '"availability_zones" in first_service'
+ - '"base_endpoint_dns_names" in first_service'
+ - '"manages_vpc_endpoints" in first_service'
+ - '"owner" in first_service'
+ - '"private_dns_name" in first_service'
+ - '"private_dns_name_verification_state" in first_service'
+ - '"service_id" in first_service'
+ - '"service_name" in first_service'
+ - '"service_type" in first_service'
+ - '"tags" in first_service'
+ - '"vpc_endpoint_policy_supported" in first_service'
+
+ - name: 'Limit services by name'
+ ec2_vpc_endpoint_service_info:
+ service_names: '{{ search_service_names }}'
+ register: services_info
+
+ - name: 'Verify services'
+ vars:
+ first_service: '{{ services_info.service_details[0] }}'
+ # The same service sometimes pop up twice. s3 for example has
+ # s3.us-east-1.amazonaws.com and s3.us-east-1.vpce.amazonaws.com which are
+ # part of com.amazonaws.us-east-1.s3 so we need to run the results through
+ # the unique filter to know if we've got what we think we have
+ unique_names: '{{ services_info.service_names | unique | list }}'
+ unique_detail_names: '{{ services_info.service_details | map(attribute="service_name") | unique | list }}'
+ assert:
+ that:
+ - services_info is successful
+ - services_info is not changed
+ - '"service_names" in services_info'
+ - (unique_names | length) == (search_service_names | length)
+ - (unique_detail_names | length ) == (search_service_names | length)
+ - (unique_names | difference(search_service_names) | length) == 0
+ - (unique_detail_names | difference(search_service_names) | length) == 0
+ - '"service_details" in services_info'
+ - '"acceptance_required" in first_service'
+ - '"availability_zones" in first_service'
+ - '"base_endpoint_dns_names" in first_service'
+ - '"manages_vpc_endpoints" in first_service'
+ - '"owner" in first_service'
+ - '"private_dns_name" in first_service'
+ - '"private_dns_name_verification_state" in first_service'
+ - '"service_id" in first_service'
+ - '"service_name" in first_service'
+ - '"service_type" in first_service'
+ - '"tags" in first_service'
+ - '"vpc_endpoint_policy_supported" in first_service'
+
+ - name: 'Grab single service details to test filters'
+ set_fact:
+ example_service: '{{ services_info.service_details[0] }}'
+
+ - name: 'Limit services by filter'
+ ec2_vpc_endpoint_service_info:
+ filters:
+ service-name: '{{ example_service.service_name }}'
+ register: filtered_service
+
+ - name: 'Verify services'
+ vars:
+ first_service: '{{ filtered_service.service_details[0] }}'
+ assert:
+ that:
+ - filtered_service is successful
+ - filtered_service is not changed
+ - '"service_names" in filtered_service'
+ - filtered_service.service_names | length == 1
+ - '"service_details" in filtered_service'
+ - filtered_service.service_details | length == 1
+ - '"acceptance_required" in first_service'
+ - '"availability_zones" in first_service'
+ - '"base_endpoint_dns_names" in first_service'
+ - '"manages_vpc_endpoints" in first_service'
+ - '"owner" in first_service'
+ - '"private_dns_name" in first_service'
+ - '"private_dns_name_verification_state" in first_service'
+ - '"service_id" in first_service'
+ - '"service_name" in first_service'
+ - '"service_type" in first_service'
+ - '"tags" in first_service'
+ - '"vpc_endpoint_policy_supported" in first_service'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/aliases
new file mode 100644
index 000000000..877a442d7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+
+ec2_vpc_igw_info
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/defaults/main.yml
new file mode 100644
index 000000000..a4590b4c0
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/defaults/main.yml
@@ -0,0 +1,3 @@
+vpc_name: '{{ resource_prefix }}-vpc'
+vpc_seed: '{{ resource_prefix }}'
+vpc_cidr: 10.{{ 256 | random(seed=vpc_seed) }}.0.0/16
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/tasks/main.yml
new file mode 100644
index 000000000..05b15d0b7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/tasks/main.yml
@@ -0,0 +1,550 @@
+- name: ec2_vpc_igw tests
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+ # ============================================================
+ - name: Fetch IGWs in check_mode
+ ec2_vpc_igw_info:
+ register: igw_info
+ check_mode: true
+ - name: Assert success
+ assert:
+ that:
+ - igw_info is successful
+ - '"internet_gateways" in igw_info'
+
+ # ============================================================
+ - name: Create a VPC
+ ec2_vpc_net:
+ name: '{{ vpc_name }}'
+ state: present
+ cidr_block: '{{ vpc_cidr }}'
+ tags:
+ Name: '{{ resource_prefix }}-vpc'
+ Description: Created by ansible-test
+ register: vpc_result
+ - name: Assert success
+ assert:
+ that:
+ - vpc_result is successful
+ - '"vpc" in vpc_result'
+ - '"id" in vpc_result.vpc'
+ - vpc_result.vpc.state == 'available'
+ - '"tags" in vpc_result.vpc'
+ - vpc_result.vpc.tags | length == 2
+ - vpc_result.vpc.tags["Name"] == "{{ resource_prefix }}-vpc"
+ - vpc_result.vpc.tags["Description"] == "Created by ansible-test"
+
+ # ============================================================
+ - name: Search for internet gateway by VPC - no matches
+ ec2_vpc_igw_info:
+ filters:
+ attachment.vpc-id: '{{ vpc_result.vpc.id }}'
+ register: igw_info
+
+ - name: Assert success
+ assert:
+ that:
+ - igw_info is successful
+ - '"internet_gateways" in igw_info'
+ - (igw_info.internet_gateways | length) == 0
+
+ # ============================================================
+ - name: Create internet gateway (expected changed=true) - CHECK_MODE
+ ec2_vpc_igw:
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ tags:
+ tag_one: '{{ resource_prefix }} One'
+ Tag Two: two {{ resource_prefix }}
+ register: vpc_igw_create
+ check_mode: yes
+
+ - name: Assert creation would happen (expected changed=true) - CHECK_MODE
+ assert:
+ that:
+ - vpc_igw_create is changed
+
+ - name: Create internet gateway (expected changed=true)
+ ec2_vpc_igw:
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ tags:
+ tag_one: '{{ resource_prefix }} One'
+ Tag Two: two {{ resource_prefix }}
+ register: vpc_igw_create
+
+ - name: Assert creation happened (expected changed=true)
+ assert:
+ that:
+ - vpc_igw_create is changed
+ - vpc_igw_create.gateway_id.startswith("igw-")
+ - vpc_igw_create.vpc_id == vpc_result.vpc.id
+ - '"tags" in vpc_igw_create'
+ - vpc_igw_create.tags | length == 2
+ - vpc_igw_create.tags["tag_one"] == '{{ resource_prefix }} One'
+ - vpc_igw_create.tags["Tag Two"] == 'two {{ resource_prefix }}'
+ - '"gateway_id" in vpc_igw_create'
+
+ # ============================================================
+ - name: Save IDs for later
+ set_fact:
+ igw_id: '{{ vpc_igw_create.gateway_id }}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+
+ - name: Search for internet gateway by VPC
+ ec2_vpc_igw_info:
+ filters:
+ attachment.vpc-id: '{{ vpc_id }}'
+ convert_tags: false
+ register: igw_info
+
+ - name: Check standard IGW details
+ assert:
+ that:
+ - '"internet_gateways" in igw_info'
+ - igw_info.internet_gateways | length == 1
+ - '"attachments" in current_igw'
+ - current_igw.attachments | length == 1
+ - '"state" in current_igw.attachments[0]'
+ - current_igw.attachments[0].state == "available"
+ - '"vpc_id" in current_igw.attachments[0]'
+ - current_igw.attachments[0].vpc_id == vpc_id
+ - '"internet_gateway_id" in current_igw'
+ - current_igw.internet_gateway_id == igw_id
+ - '"tags" in current_igw'
+ - current_igw.tags | length == 2
+ - '"key" in current_igw.tags[0]'
+ - '"value" in current_igw.tags[0]'
+ - '"key" in current_igw.tags[1]'
+ - '"value" in current_igw.tags[1]'
+ # Order isn't guaranteed in boto3 style, so just check the keys and
+ # values we expect are in there.
+ - current_igw.tags[0].key in ["tag_one", "Tag Two"]
+ - current_igw.tags[1].key in ["tag_one", "Tag Two"]
+ - current_igw.tags[0].value in [resource_prefix + " One", "two " + resource_prefix]
+ - current_igw.tags[1].value in [resource_prefix + " One", "two " + resource_prefix]
+ vars:
+ current_igw: '{{ igw_info.internet_gateways[0] }}'
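+  # With convert_tags: false the tags come back boto3-style as a list of
+  # key/value pairs (checked above); the default behaviour, relied on by the
+  # "Fetch IGW by ID" assertions below, converts them to a plain dict keyed
+  # by tag name.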
+
+ - name: Fetch IGW by ID
+ ec2_vpc_igw_info:
+ internet_gateway_ids: '{{ igw_id }}'
+ register: igw_info
+
+ - name: Check standard IGW details
+ assert:
+ that:
+ - '"internet_gateways" in igw_info'
+ - igw_info.internet_gateways | length == 1
+ - '"attachments" in current_igw'
+ - current_igw.attachments | length == 1
+ - '"state" in current_igw.attachments[0]'
+ - current_igw.attachments[0].state == "available"
+ - '"vpc_id" in current_igw.attachments[0]'
+ - current_igw.attachments[0].vpc_id == vpc_id
+ - '"internet_gateway_id" in current_igw'
+ - current_igw.internet_gateway_id == igw_id
+ - '"tags" in current_igw'
+ - current_igw.tags | length == 2
+ - '"tag_one" in current_igw.tags'
+ - '"Tag Two" in current_igw.tags'
+ - current_igw.tags["tag_one"] == '{{ resource_prefix }} One'
+ - current_igw.tags["Tag Two"] == 'two {{ resource_prefix }}'
+ vars:
+ current_igw: '{{ igw_info.internet_gateways[0] }}'
+
+ - name: Fetch IGW by ID (list)
+ ec2_vpc_igw_info:
+ internet_gateway_ids:
+ - '{{ igw_id }}'
+ register: igw_info
+
+ - name: Check standard IGW details
+ assert:
+ that:
+ - '"internet_gateways" in igw_info'
+ - igw_info.internet_gateways | length == 1
+ - '"attachments" in current_igw'
+ - current_igw.attachments | length == 1
+ - '"state" in current_igw.attachments[0]'
+ - current_igw.attachments[0].state == "available"
+ - '"vpc_id" in current_igw.attachments[0]'
+ - current_igw.attachments[0].vpc_id == vpc_id
+ - '"internet_gateway_id" in current_igw'
+ - current_igw.internet_gateway_id == igw_id
+ - '"tags" in current_igw'
+ vars:
+ current_igw: '{{ igw_info.internet_gateways[0] }}'
+
+ - name: Attempt to recreate internet gateway on VPC (expected changed=false) - CHECK_MODE
+ ec2_vpc_igw:
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ register: vpc_igw_recreate
+ check_mode: yes
+
+ - name: Assert recreation would do nothing (expected changed=false) - CHECK_MODE
+ assert:
+ that:
+ - vpc_igw_recreate is not changed
+ - vpc_igw_recreate.gateway_id == igw_id
+ - vpc_igw_recreate.vpc_id == vpc_id
+ - '"tags" in vpc_igw_create'
+ - vpc_igw_create.tags | length == 2
+ - vpc_igw_create.tags["tag_one"] == '{{ resource_prefix }} One'
+ - vpc_igw_create.tags["Tag Two"] == 'two {{ resource_prefix }}'
+
+ - name: Attempt to recreate internet gateway on VPC (expected changed=false)
+ ec2_vpc_igw:
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ register: vpc_igw_recreate
+
+ - name: Assert recreation did nothing (expected changed=false)
+ assert:
+ that:
+ - vpc_igw_recreate is not changed
+ - vpc_igw_recreate.gateway_id == igw_id
+ - vpc_igw_recreate.vpc_id == vpc_id
+ - '"tags" in vpc_igw_create'
+ - vpc_igw_create.tags | length == 2
+ - vpc_igw_create.tags["tag_one"] == '{{ resource_prefix }} One'
+ - vpc_igw_create.tags["Tag Two"] == 'two {{ resource_prefix }}'
+
+ # ============================================================
+ - name: Update the tags (no change) - CHECK_MODE
+ ec2_vpc_igw:
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ tags:
+ tag_one: '{{ resource_prefix }} One'
+ Tag Two: two {{ resource_prefix }}
+ register: vpc_igw_recreate
+ check_mode: yes
+
+ - name: Assert tag update would do nothing (expected changed=false) - CHECK_MODE
+ assert:
+ that:
+ - vpc_igw_recreate is not changed
+ - vpc_igw_recreate.gateway_id == igw_id
+ - vpc_igw_recreate.vpc_id == vpc_id
+ - '"tags" in vpc_igw_recreate'
+ - vpc_igw_recreate.tags | length == 2
+ - vpc_igw_recreate.tags["tag_one"] == '{{ resource_prefix }} One'
+ - vpc_igw_recreate.tags["Tag Two"] == 'two {{ resource_prefix }}'
+
+ - name: Update the tags (no change)
+ ec2_vpc_igw:
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ tags:
+ tag_one: '{{ resource_prefix }} One'
+ Tag Two: two {{ resource_prefix }}
+ register: vpc_igw_recreate
+
+ - name: Assert tag update did nothing (expected changed=false)
+ assert:
+ that:
+ - vpc_igw_recreate is not changed
+ - vpc_igw_recreate.gateway_id == igw_id
+ - vpc_igw_recreate.vpc_id == vpc_id
+ - '"tags" in vpc_igw_recreate'
+ - vpc_igw_recreate.tags | length == 2
+ - vpc_igw_recreate.tags["tag_one"] == '{{ resource_prefix }} One'
+ - vpc_igw_recreate.tags["Tag Two"] == 'two {{ resource_prefix }}'
+
+ # ============================================================
+ - name: Update the tags (remove and add) - CHECK_MODE
+ ec2_vpc_igw:
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ tags:
+ tag_three: '{{ resource_prefix }} Three'
+ Tag Two: two {{ resource_prefix }}
+ register: vpc_igw_update
+ check_mode: yes
+
+ - name: Assert tag update would happen (expected changed=true) - CHECK_MODE
+ assert:
+ that:
+ - vpc_igw_update is changed
+ - vpc_igw_update.gateway_id == igw_id
+ - vpc_igw_update.vpc_id == vpc_id
+ - '"tags" in vpc_igw_update'
+ - vpc_igw_update.tags | length == 2
+
+ - name: Update the tags (remove and add)
+ ec2_vpc_igw:
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ tags:
+ tag_three: '{{ resource_prefix }} Three'
+ Tag Two: two {{ resource_prefix }}
+ register: vpc_igw_update
+
+ - name: Assert tags are updated (expected changed=true)
+ assert:
+ that:
+ - vpc_igw_update is changed
+ - vpc_igw_update.gateway_id == igw_id
+ - vpc_igw_update.vpc_id == vpc_id
+ - '"tags" in vpc_igw_update'
+ - vpc_igw_update.tags | length == 2
+ - vpc_igw_update.tags["tag_three"] == '{{ resource_prefix }} Three'
+ - vpc_igw_update.tags["Tag Two"] == 'two {{ resource_prefix }}'
+
+ # ============================================================
+  - name: Update the tags - add without purge - CHECK_MODE
+ ec2_vpc_igw:
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ purge_tags: no
+ tags:
+ tag_one: '{{ resource_prefix }} One'
+ register: vpc_igw_update
+ check_mode: yes
+
+ - name: Assert tags would be added - CHECK_MODE
+ assert:
+ that:
+ - vpc_igw_update is changed
+ - vpc_igw_update.gateway_id == igw_id
+ - vpc_igw_update.vpc_id == vpc_id
+ - '"tags" in vpc_igw_update'
+
+  - name: Update the tags - add without purge
+ ec2_vpc_igw:
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ purge_tags: no
+ tags:
+ tag_one: '{{ resource_prefix }} One'
+ register: vpc_igw_update
+
+ - name: Assert tags added
+ assert:
+ that:
+ - vpc_igw_update is changed
+ - vpc_igw_update.gateway_id == igw_id
+ - vpc_igw_update.vpc_id == vpc_id
+ - '"tags" in vpc_igw_update'
+ - vpc_igw_update.tags | length == 3
+ - vpc_igw_update.tags["tag_one"] == '{{ resource_prefix }} One'
+ - vpc_igw_update.tags["tag_three"] == '{{ resource_prefix }} Three'
+ - vpc_igw_update.tags["Tag Two"] == 'two {{ resource_prefix }}'
+
+
+ # ============================================================
+ - name: Update with CamelCase tags - CHECK_MODE
+ ec2_vpc_igw:
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ tags:
+ lowercase spaced: "hello cruel world"
+ Title Case: "Hello Cruel World"
+ CamelCase: "SimpleCamelCase"
+ snake_case: "simple_snake_case"
+ register: vpc_igw_update
+ check_mode: yes
+
+ - name: Assert tag update would happen (expected changed=true) - CHECK_MODE
+ assert:
+ that:
+ - vpc_igw_update is changed
+ - vpc_igw_update.gateway_id == igw_id
+ - vpc_igw_update.vpc_id == vpc_id
+ - '"tags" in vpc_igw_update'
+
+  - name: Update with CamelCase tags
+ ec2_vpc_igw:
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ tags:
+ lowercase spaced: "hello cruel world"
+ Title Case: "Hello Cruel World"
+ CamelCase: "SimpleCamelCase"
+ snake_case: "simple_snake_case"
+ register: vpc_igw_update
+
+  - name: Assert tags are updated (expected changed=true)
+ assert:
+ that:
+ - vpc_igw_update is changed
+ - vpc_igw_update.gateway_id == igw_id
+ - vpc_igw_update.vpc_id == vpc_id
+ - '"tags" in vpc_igw_update'
+ - vpc_igw_update.tags | length == 4
+ - vpc_igw_update.tags["lowercase spaced"] == 'hello cruel world'
+ - vpc_igw_update.tags["Title Case"] == 'Hello Cruel World'
+ - vpc_igw_update.tags["CamelCase"] == 'SimpleCamelCase'
+ - vpc_igw_update.tags["snake_case"] == 'simple_snake_case'
+
+ # ============================================================
+ - name: Gather information about a filtered list of Internet Gateways using tags
+ ec2_vpc_igw_info:
+ filters:
+ tag:Title Case: "Hello Cruel World"
+ register: igw_info
+
+ - name: Assert success
+ assert:
+ that:
+ - igw_info is successful
+ - '"internet_gateways" in igw_info'
+      - igw_id in (igw_info.internet_gateways | map(attribute="internet_gateway_id") | list)
+
+ - name: Gather information about a filtered list of Internet Gateways using tags - CHECK_MODE
+ ec2_vpc_igw_info:
+ filters:
+ tag:Title Case: "Hello Cruel World"
+ register: igw_info
+ check_mode: yes
+
+ - name: Assert success - CHECK_MODE
+ assert:
+ that:
+ - igw_info is successful
+ - '"internet_gateways" in igw_info'
+      - igw_id in (igw_info.internet_gateways | map(attribute="internet_gateway_id") | list)
+
+ # ============================================================
+ - name: Gather information about a filtered list of Internet Gateways using tags (no match)
+ ec2_vpc_igw_info:
+ filters:
+ tag:tag_one: '{{ resource_prefix }} One'
+ register: igw_info
+
+ - name: Assert success
+ assert:
+ that:
+ - igw_info is successful
+ - '"internet_gateways" in igw_info'
+ - igw_info.internet_gateways | length == 0
+
+ - name: Gather information about a filtered list of Internet Gateways using tags (no match) - CHECK_MODE
+ ec2_vpc_igw_info:
+ filters:
+ tag:tag_one: '{{ resource_prefix }} One'
+ register: igw_info
+ check_mode: yes
+
+ - name: Assert success - CHECK_MODE
+ assert:
+ that:
+ - igw_info is successful
+ - '"internet_gateways" in igw_info'
+ - igw_info.internet_gateways | length == 0
+
+ # ============================================================
+ - name: Remove all tags - CHECK_MODE
+ ec2_vpc_igw:
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ tags: {}
+ register: vpc_igw_update
+ check_mode: yes
+
+ - name: Assert tags would be removed - CHECK_MODE
+ assert:
+ that:
+ - vpc_igw_update is changed
+
+ - name: Remove all tags
+ ec2_vpc_igw:
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ tags: {}
+ register: vpc_igw_update
+
+ - name: Assert tags removed
+ assert:
+ that:
+ - vpc_igw_update is changed
+ - vpc_igw_update.gateway_id == igw_id
+ - vpc_igw_update.vpc_id == vpc_id
+ - '"tags" in vpc_igw_update'
+ - vpc_igw_update.tags | length == 0
+
+ # ============================================================
+ - name: Test state=absent (expected changed=true) - CHECK_MODE
+ ec2_vpc_igw:
+ state: absent
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ register: vpc_igw_delete
+ check_mode: yes
+
+ - name: Assert state=absent (expected changed=true) - CHECK_MODE
+ assert:
+ that:
+ - vpc_igw_delete is changed
+
+ - name: Test state=absent (expected changed=true)
+ ec2_vpc_igw:
+ state: absent
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ register: vpc_igw_delete
+
+ - name: Assert state=absent (expected changed=true)
+ assert:
+ that:
+ - vpc_igw_delete is changed
+
+ # ============================================================
+ - name: Fetch IGW by ID (list)
+ ec2_vpc_igw_info:
+ internet_gateway_ids:
+ - '{{ igw_id }}'
+ register: igw_info
+ ignore_errors: true
+
+ - name: Check IGW does not exist
+ assert:
+ that:
+      # Deliberate choice not to change behaviour when searching by ID
+ - igw_info is failed
+
+ # ============================================================
+ - name: Test state=absent when already deleted (expected changed=false) - CHECK_MODE
+ ec2_vpc_igw:
+ state: absent
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ register: vpc_igw_delete
+ check_mode: yes
+
+ - name: Assert state=absent (expected changed=false) - CHECK_MODE
+ assert:
+ that:
+ - vpc_igw_delete is not changed
+
+ - name: Test state=absent when already deleted (expected changed=false)
+ ec2_vpc_igw:
+ state: absent
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ register: vpc_igw_delete
+
+ - name: Assert state=absent (expected changed=false)
+ assert:
+ that:
+ - vpc_igw_delete is not changed
+
+ always:
+ # ============================================================
+ - name: Tidy up IGW
+ ec2_vpc_igw:
+ state: absent
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ ignore_errors: true
+
+ - name: Tidy up VPC
+ ec2_vpc_net:
+ name: '{{ vpc_name }}'
+ state: absent
+ cidr_block: '{{ vpc_cidr }}'
+ ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/aliases
new file mode 100644
index 000000000..5a9dd5bcd
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/aliases
@@ -0,0 +1,5 @@
+time=10m
+
+cloud/aws
+
+ec2_vpc_nat_gateway_info
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/defaults/main.yml
new file mode 100644
index 000000000..3794da102
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/defaults/main.yml
@@ -0,0 +1,4 @@
+vpc_name: '{{ resource_prefix }}-vpc'
+vpc_seed: '{{ resource_prefix }}'
+vpc_cidr: 10.0.0.0/16
+subnet_cidr: 10.0.{{ 256 | random(seed=vpc_seed) }}.0/24
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/tasks/main.yml
new file mode 100644
index 000000000..501cccaf9
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/tasks/main.yml
@@ -0,0 +1,978 @@
+- name: ec2_vpc_nat_gateway tests
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+ # ============================================================
+ - name: Create a VPC
+ ec2_vpc_net:
+ name: '{{ vpc_name }}'
+ state: present
+ cidr_block: '{{ vpc_cidr }}'
+ register: vpc_result
+
+ - name: Assert success
+ assert:
+ that:
+ - vpc_result is successful
+ - '"vpc" in vpc_result'
+ - '"cidr_block" in vpc_result.vpc'
+ - vpc_result.vpc.cidr_block == vpc_cidr
+ - '"id" in vpc_result.vpc'
+ - vpc_result.vpc.id.startswith("vpc-")
+ - '"state" in vpc_result.vpc'
+ - vpc_result.vpc.state == 'available'
+ - '"tags" in vpc_result.vpc'
+
+ - name: 'Set fact: VPC ID'
+ set_fact:
+ vpc_id: '{{ vpc_result.vpc.id }}'
+
+
+ # ============================================================
+ - name: Allocate a new EIP
+ ec2_eip:
+ in_vpc: true
+ reuse_existing_ip_allowed: true
+ tag_name: FREE
+ register: eip_result
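+  # reuse_existing_ip_allowed lets the module hand back an already allocated,
+  # unassociated address (here limited to ones tagged "FREE") rather than
+  # allocating a new one, which presumably helps keep the test inside the
+  # per-region EIP quota.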
+
+ - name: Assert success
+ assert:
+ that:
+ - eip_result is successful
+ - '"allocation_id" in eip_result'
+ - eip_result.allocation_id.startswith("eipalloc-")
+ - '"public_ip" in eip_result'
+
+ - name: 'set fact: EIP allocation ID and EIP public IP'
+ set_fact:
+ eip_address: '{{ eip_result.public_ip }}'
+ allocation_id: '{{ eip_result.allocation_id }}'
+
+
+ # ============================================================
+ - name: Create subnet and associate to the VPC
+ ec2_vpc_subnet:
+ state: present
+ vpc_id: '{{ vpc_id }}'
+ cidr: '{{ subnet_cidr }}'
+ register: subnet_result
+
+ - name: Assert success
+ assert:
+ that:
+ - subnet_result is successful
+ - '"subnet" in subnet_result'
+ - '"cidr_block" in subnet_result.subnet'
+ - subnet_result.subnet.cidr_block == subnet_cidr
+ - '"id" in subnet_result.subnet'
+ - subnet_result.subnet.id.startswith("subnet-")
+ - '"state" in subnet_result.subnet'
+ - subnet_result.subnet.state == 'available'
+ - '"tags" in subnet_result.subnet'
+ - subnet_result.subnet.vpc_id == vpc_id
+
+ - name: 'set fact: VPC subnet ID'
+ set_fact:
+ subnet_id: '{{ subnet_result.subnet.id }}'
+
+
+ # ============================================================
+ - name: Search for NAT gateways by subnet (no matches) - CHECK_MODE
+ ec2_vpc_nat_gateway_info:
+ filters:
+ subnet-id: '{{ subnet_id }}'
+ state: [available]
+ register: existing_ngws
+ check_mode: yes
+
+ - name: Assert no NAT gateway found - CHECK_MODE
+ assert:
+ that:
+ - existing_ngws is successful
+ - (existing_ngws.result|length) == 0
+
+ - name: Search for NAT gateways by subnet - no matches
+ ec2_vpc_nat_gateway_info:
+ filters:
+ subnet-id: '{{ subnet_id }}'
+ state: [available]
+ register: existing_ngws
+
+ - name: Assert no NAT gateway found
+ assert:
+ that:
+ - existing_ngws is successful
+ - (existing_ngws.result|length) == 0
+
+
+ # ============================================================
+ - name: Create IGW
+ ec2_vpc_igw:
+ vpc_id: '{{ vpc_id }}'
+ register: create_igw
+
+ - name: Assert success
+ assert:
+ that:
+ - create_igw is successful
+ - create_igw.gateway_id.startswith("igw-")
+ - create_igw.vpc_id == vpc_id
+ - '"gateway_id" in create_igw'
+
+
+ # ============================================================
+ - name: Create new NAT gateway with eip allocation-id - CHECK_MODE
+ ec2_vpc_nat_gateway:
+ subnet_id: '{{ subnet_id }}'
+ allocation_id: '{{ allocation_id }}'
+ wait: yes
+ register: create_ngw
+ check_mode: yes
+
+  - name: Assert creation would happen (expected changed=true) - CHECK_MODE
+ assert:
+ that:
+ - create_ngw.changed
+
+ - name: Create new NAT gateway with eip allocation-id
+ ec2_vpc_nat_gateway:
+ subnet_id: '{{ subnet_id }}'
+ allocation_id: '{{ allocation_id }}'
+ wait: yes
+ register: create_ngw
+
+ - name: Assert creation happened (expected changed=true)
+ assert:
+ that:
+ - create_ngw.changed
+ - '"create_time" in create_ngw'
+ - '"nat_gateway_addresses" in create_ngw'
+ - '"nat_gateway_id" in create_ngw'
+ - create_ngw.nat_gateway_addresses[0].allocation_id == allocation_id
+ - create_ngw.nat_gateway_id.startswith("nat-")
+ - '"state" in create_ngw'
+ - create_ngw.state == 'available'
+ - '"subnet_id" in create_ngw'
+ - create_ngw.subnet_id == subnet_id
+ - '"tags" in create_ngw'
+ - '"vpc_id" in create_ngw'
+ - create_ngw.vpc_id == vpc_id
+
+ - name: 'set facts: NAT gateway ID'
+ set_fact:
+ nat_gateway_id: '{{ create_ngw.nat_gateway_id }}'
+ network_interface_id: '{{ create_ngw.nat_gateway_addresses[0].network_interface_id }}'
+
+
+ # ============================================================
+ - name: Get NAT gateway with specific filters (state and subnet)
+ ec2_vpc_nat_gateway_info:
+ filters:
+ subnet-id: '{{ subnet_id }}'
+ state: [available]
+    register: available_ngws
+
+ - name: Assert success
+ assert:
+ that:
+      - available_ngws is successful
+      - available_ngws.result | length == 1
+ - '"create_time" in first_ngw'
+ - '"nat_gateway_addresses" in first_ngw'
+ - '"nat_gateway_id" in first_ngw'
+ - first_ngw.nat_gateway_id == nat_gateway_id
+ - '"state" in first_ngw'
+ - first_ngw.state == 'available'
+ - '"subnet_id" in first_ngw'
+ - first_ngw.subnet_id == subnet_id
+ - '"tags" in first_ngw'
+ - '"vpc_id" in first_ngw'
+ - first_ngw.vpc_id == vpc_id
+ vars:
+ first_ngw: '{{ available_ngws.result[0] }}'
+
+ # ============================================================
+ - name: Trying this again for idempotency - create new NAT gateway with eip allocation-id - CHECK_MODE
+ ec2_vpc_nat_gateway:
+ subnet_id: '{{ subnet_id }}'
+ allocation_id: '{{ allocation_id }}'
+ wait: yes
+ register: create_ngw
+ check_mode: yes
+
+ - name: Assert recreation would do nothing (expected changed=false) - CHECK_MODE
+ assert:
+ that:
+ - not create_ngw.changed
+ - '"create_time" in create_ngw'
+ - '"nat_gateway_addresses" in create_ngw'
+ - '"nat_gateway_id" in create_ngw'
+ - create_ngw.nat_gateway_addresses[0].allocation_id == allocation_id
+ - create_ngw.nat_gateway_id.startswith("nat-")
+ - '"state" in create_ngw'
+ - create_ngw.state == 'available'
+ - '"subnet_id" in create_ngw'
+ - create_ngw.subnet_id == subnet_id
+ - '"tags" in create_ngw'
+ - '"vpc_id" in create_ngw'
+ - create_ngw.vpc_id == vpc_id
+
+ - name: Trying this again for idempotency - create new NAT gateway with eip allocation-id
+ ec2_vpc_nat_gateway:
+ subnet_id: '{{ subnet_id }}'
+ allocation_id: '{{ allocation_id }}'
+ wait: yes
+ register: create_ngw
+
+ - name: Assert recreation would do nothing (expected changed=false)
+ assert:
+ that:
+ - not create_ngw.changed
+ - '"create_time" in create_ngw'
+ - '"nat_gateway_addresses" in create_ngw'
+ - '"nat_gateway_id" in create_ngw'
+ - create_ngw.nat_gateway_addresses[0].allocation_id == allocation_id
+ - create_ngw.nat_gateway_id.startswith("nat-")
+ - '"state" in create_ngw'
+ - create_ngw.state == 'available'
+ - '"subnet_id" in create_ngw'
+ - create_ngw.subnet_id == subnet_id
+ - '"tags" in create_ngw'
+ - '"vpc_id" in create_ngw'
+ - create_ngw.vpc_id == vpc_id
+
+
+ # ============================================================
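+ # With if_exist_do_not_create the module reuses the gateway that already
+ # exists in the subnet, so no allocation_id needs to be passed here.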
+ - name: Create new NAT gateway only if one does not exist already - CHECK_MODE
+ ec2_vpc_nat_gateway:
+ if_exist_do_not_create: yes
+ subnet_id: '{{ subnet_id }}'
+ wait: yes
+ register: create_ngw
+ check_mode: yes
+
+ - name: Assert recreation would do nothing (expected changed=false) - CHECK_MODE
+ assert:
+ that:
+ - not create_ngw.changed
+ - '"create_time" in create_ngw'
+ - '"nat_gateway_addresses" in create_ngw'
+ - '"nat_gateway_id" in create_ngw'
+ - create_ngw.nat_gateway_addresses[0].allocation_id == allocation_id
+ - create_ngw.nat_gateway_id.startswith("nat-")
+ - '"state" in create_ngw'
+ - create_ngw.state == 'available'
+ - '"subnet_id" in create_ngw'
+ - create_ngw.subnet_id == subnet_id
+ - '"tags" in create_ngw'
+ - '"vpc_id" in create_ngw'
+ - create_ngw.vpc_id == vpc_id
+
+ - name: Create new NAT gateway only if one does not exist already
+ ec2_vpc_nat_gateway:
+ if_exist_do_not_create: yes
+ subnet_id: '{{ subnet_id }}'
+ wait: yes
+ register: create_ngw
+
+ - name: Assert recreation would do nothing (expected changed=false)
+ assert:
+ that:
+ - not create_ngw.changed
+ - '"create_time" in create_ngw'
+ - '"nat_gateway_addresses" in create_ngw'
+ - '"nat_gateway_id" in create_ngw'
+ - create_ngw.nat_gateway_addresses[0].allocation_id == allocation_id
+ - create_ngw.nat_gateway_id.startswith("nat-")
+ - '"state" in create_ngw'
+ - create_ngw.state == 'available'
+ - '"subnet_id" in create_ngw'
+ - create_ngw.subnet_id == subnet_id
+ - '"tags" in create_ngw'
+ - '"vpc_id" in create_ngw'
+ - create_ngw.vpc_id == vpc_id
+
+
+ # ============================================================
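+ # Allocate a second EIP so that requesting a gateway with a different address
+ # in the same subnet forces a brand new NAT gateway to be created.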
+ - name: Allocate a new EIP
+ ec2_eip:
+ in_vpc: true
+ reuse_existing_ip_allowed: true
+ tag_name: FREE
+ register: eip_result
+
+ - name: Assert success
+ assert:
+ that:
+ - eip_result is successful
+ - '"allocation_id" in eip_result'
+ - eip_result.allocation_id.startswith("eipalloc-")
+ - '"public_ip" in eip_result'
+
+ - name: 'Set fact: EIP allocation ID and EIP public IP'
+ set_fact:
+ second_eip_address: '{{ eip_result.public_ip }}'
+ second_allocation_id: '{{ eip_result.allocation_id }}'
+
+
+ # ============================================================
+ - name: Create new nat gateway with eip address - CHECK_MODE
+ ec2_vpc_nat_gateway:
+ subnet_id: '{{ subnet_id }}'
+ eip_address: '{{ second_eip_address }}'
+ wait: yes
+ register: create_ngw
+ check_mode: yes
+
+ - name: Assert creation happened (expected changed=true) - CHECK_MODE
+ assert:
+ that:
+ - create_ngw.changed
+
+ - name: Create new NAT gateway with eip address
+ ec2_vpc_nat_gateway:
+ subnet_id: '{{ subnet_id }}'
+ eip_address: '{{ second_eip_address }}'
+ wait: yes
+ register: create_ngw
+
+ - name: Assert creation happened (expected changed=true)
+ assert:
+ that:
+ - create_ngw.changed
+ - '"create_time" in create_ngw'
+ - '"nat_gateway_addresses" in create_ngw'
+ - '"nat_gateway_id" in create_ngw'
+ - create_ngw.nat_gateway_addresses[0].allocation_id == second_allocation_id
+ - create_ngw.nat_gateway_id.startswith("nat-")
+ - '"state" in create_ngw'
+ - create_ngw.state == 'available'
+ - '"subnet_id" in create_ngw'
+ - create_ngw.subnet_id == subnet_id
+ - '"tags" in create_ngw'
+ - '"vpc_id" in create_ngw'
+ - create_ngw.vpc_id == vpc_id
+
+
+ # ============================================================
+ - name: Trying this again for idempotency - create new NAT gateway with eip address - CHECK_MODE
+ ec2_vpc_nat_gateway:
+ subnet_id: '{{ subnet_id }}'
+ eip_address: '{{ second_eip_address }}'
+ wait: yes
+ register: create_ngw
+ check_mode: yes
+
+ - name: Assert recreation would do nothing (expected changed=false) - CHECK_MODE
+ assert:
+ that:
+ - not create_ngw.changed
+ - '"create_time" in create_ngw'
+ - '"nat_gateway_addresses" in create_ngw'
+ - '"nat_gateway_id" in create_ngw'
+ - create_ngw.nat_gateway_addresses[0].allocation_id == second_allocation_id
+ - create_ngw.nat_gateway_id.startswith("nat-")
+ - '"state" in create_ngw'
+ - create_ngw.state == 'available'
+ - '"subnet_id" in create_ngw'
+ - create_ngw.subnet_id == subnet_id
+ - '"tags" in create_ngw'
+ - '"vpc_id" in create_ngw'
+ - create_ngw.vpc_id == vpc_id
+
+ - name: Trying this again for idempotency - create new NAT gateway with eip address
+ ec2_vpc_nat_gateway:
+ subnet_id: '{{ subnet_id }}'
+ eip_address: '{{ second_eip_address }}'
+ wait: yes
+ register: create_ngw
+
+ - name: Assert recreation would do nothing (expected changed=false)
+ assert:
+ that:
+ - not create_ngw.changed
+ - '"create_time" in create_ngw'
+ - '"nat_gateway_addresses" in create_ngw'
+ - '"nat_gateway_id" in create_ngw'
+ - create_ngw.nat_gateway_addresses[0].allocation_id == second_allocation_id
+ - create_ngw.nat_gateway_id.startswith("nat-")
+ - '"state" in create_ngw'
+ - create_ngw.state == 'available'
+ - '"subnet_id" in create_ngw'
+ - create_ngw.subnet_id == subnet_id
+ - '"tags" in create_ngw'
+ - '"vpc_id" in create_ngw'
+ - create_ngw.vpc_id == vpc_id
+
+
+ # ============================================================
+ - name: Fetch NAT gateway by ID (list)
+ ec2_vpc_nat_gateway_info:
+ nat_gateway_ids:
+ - '{{ nat_gateway_id }}'
+ register: ngw_info
+
+ - name: Check NAT gateway exists
+ assert:
+ that:
+ - ngw_info is successful
+ - ngw_info.result | length == 1
+ - '"create_time" in first_ngw'
+ - '"nat_gateway_addresses" in first_ngw'
+ - '"nat_gateway_id" in first_ngw'
+ - first_ngw.nat_gateway_id == nat_gateway_id
+ - '"state" in first_ngw'
+ - first_ngw.state == 'available'
+ - '"subnet_id" in first_ngw'
+ - first_ngw.subnet_id == subnet_id
+ - '"tags" in first_ngw'
+ - '"vpc_id" in first_ngw'
+ - first_ngw.vpc_id == vpc_id
+ vars:
+ first_ngw: '{{ ngw_info.result[0] }}'
+
+
+ # ============================================================
+ - name: Delete NAT gateway - CHECK_MODE
+ ec2_vpc_nat_gateway:
+ nat_gateway_id: '{{ nat_gateway_id }}'
+ state: absent
+ wait: yes
+ register: delete_nat_gateway
+ check_mode: yes
+
+ - name: Assert state=absent (expected changed=true) - CHECK_MODE
+ assert:
+ that:
+ - delete_nat_gateway.changed
+
+ - name: Delete NAT gateway
+ ec2_vpc_nat_gateway:
+ nat_gateway_id: '{{ nat_gateway_id }}'
+ state: absent
+ wait: yes
+ register: delete_nat_gateway
+
+ - name: Assert state=absent (expected changed=true)
+ assert:
+ that:
+ - delete_nat_gateway.changed
+ - '"delete_time" in delete_nat_gateway'
+ - '"nat_gateway_addresses" in delete_nat_gateway'
+ - '"nat_gateway_id" in delete_nat_gateway'
+ - delete_nat_gateway.nat_gateway_id == nat_gateway_id
+ - '"state" in delete_nat_gateway'
+ - delete_nat_gateway.state in ['deleted', 'deleting']
+ - '"subnet_id" in delete_nat_gateway'
+ - delete_nat_gateway.subnet_id == subnet_id
+ - '"tags" in delete_nat_gateway'
+ - '"vpc_id" in delete_nat_gateway'
+ - delete_nat_gateway.vpc_id == vpc_id
+
+
+ # ============================================================
+ - name: Create new NAT gateway with eip allocation-id and tags - CHECK_MODE
+ ec2_vpc_nat_gateway:
+ subnet_id: '{{ subnet_id }}'
+ allocation_id: '{{ allocation_id }}'
+ tags:
+ tag_one: '{{ resource_prefix }} One'
+ Tag Two: two {{ resource_prefix }}
+ wait: yes
+ register: create_ngw
+ check_mode: yes
+
+ - name: Assert creation happened (expected changed=true) - CHECK_MODE
+ assert:
+ that:
+ - create_ngw.changed
+
+ - name: Create new NAT gateway with eip allocation-id and tags
+ ec2_vpc_nat_gateway:
+ subnet_id: '{{ subnet_id }}'
+ allocation_id: '{{ allocation_id }}'
+ tags:
+ tag_one: '{{ resource_prefix }} One'
+ Tag Two: two {{ resource_prefix }}
+ wait: yes
+ register: create_ngw
+
+ - name: Assert creation happened (expected changed=true)
+ assert:
+ that:
+ - create_ngw.changed
+ - '"create_time" in create_ngw'
+ - create_ngw.nat_gateway_addresses[0].allocation_id == allocation_id
+ - '"nat_gateway_id" in create_ngw'
+ - create_ngw.nat_gateway_id.startswith("nat-")
+ - '"state" in create_ngw'
+ - create_ngw.state == 'available'
+ - '"subnet_id" in create_ngw'
+ - create_ngw.subnet_id == subnet_id
+ - '"tags" in create_ngw'
+ - create_ngw.tags | length == 2
+ - create_ngw.tags["tag_one"] == '{{ resource_prefix }} One'
+ - create_ngw.tags["Tag Two"] == 'two {{ resource_prefix }}'
+ - '"vpc_id" in create_ngw'
+ - create_ngw.vpc_id == vpc_id
+ - create_ngw.connectivity_type == 'public'
+
+ - name: 'Set facts: NAT gateway ID'
+ set_fact:
+ ngw_id: '{{ create_ngw.nat_gateway_id }}'
+
+
+ # ============================================================
+ - name: Update the tags (no change) - CHECK_MODE
+ ec2_vpc_nat_gateway:
+ subnet_id: '{{ subnet_id }}'
+ allocation_id: '{{ allocation_id }}'
+ tags:
+ tag_one: '{{ resource_prefix }} One'
+ Tag Two: two {{ resource_prefix }}
+ wait: yes
+ register: update_tags_ngw
+ check_mode: yes
+
+ - name: Assert tag update would do nothing (expected changed=false) - CHECK_MODE
+ assert:
+ that:
+ - not update_tags_ngw.changed
+ - '"nat_gateway_id" in update_tags_ngw'
+ - update_tags_ngw.nat_gateway_id == ngw_id
+ - '"subnet_id" in update_tags_ngw'
+ - update_tags_ngw.subnet_id == subnet_id
+ - '"tags" in update_tags_ngw'
+ - update_tags_ngw.tags | length == 2
+ - update_tags_ngw.tags["tag_one"] == '{{ resource_prefix }} One'
+ - update_tags_ngw.tags["Tag Two"] == 'two {{ resource_prefix }}'
+ - '"vpc_id" in update_tags_ngw'
+ - update_tags_ngw.vpc_id == vpc_id
+
+ - name: Update the tags (no change)
+ ec2_vpc_nat_gateway:
+ subnet_id: '{{ subnet_id }}'
+ allocation_id: '{{ allocation_id }}'
+ tags:
+ tag_one: '{{ resource_prefix }} One'
+ Tag Two: two {{ resource_prefix }}
+ wait: yes
+ register: update_tags_ngw
+
+ - name: Assert tag update would do nothing (expected changed=false)
+ assert:
+ that:
+ - not update_tags_ngw.changed
+ - '"nat_gateway_id" in update_tags_ngw'
+ - update_tags_ngw.nat_gateway_id == ngw_id
+ - '"subnet_id" in update_tags_ngw'
+ - update_tags_ngw.subnet_id == subnet_id
+ - '"tags" in update_tags_ngw'
+ - update_tags_ngw.tags | length == 2
+ - update_tags_ngw.tags["tag_one"] == '{{ resource_prefix }} One'
+ - update_tags_ngw.tags["Tag Two"] == 'two {{ resource_prefix }}'
+ - '"vpc_id" in update_tags_ngw'
+ - update_tags_ngw.vpc_id == vpc_id
+
+
+ # ============================================================
+ - name: Gather information about a filtered list of NAT Gateways using tags and state - CHECK_MODE
+ ec2_vpc_nat_gateway_info:
+ filters:
+ tag:Tag Two: two {{ resource_prefix }}
+ state: [available]
+ register: ngw_info
+ check_mode: yes
+
+ - name: Assert success - CHECK_MODE
+ assert:
+ that:
+ - ngw_info is successful
+ - ngw_info.result | length == 1
+ - '"create_time" in second_ngw'
+ - '"nat_gateway_addresses" in second_ngw'
+ - '"nat_gateway_id" in second_ngw'
+ - second_ngw.nat_gateway_id == ngw_id
+ - '"state" in second_ngw'
+ - second_ngw.state == 'available'
+ - '"subnet_id" in second_ngw'
+ - second_ngw.subnet_id == subnet_id
+ - '"tags" in second_ngw'
+ - second_ngw.tags | length == 2
+ - '"tag_one" in second_ngw.tags'
+ - '"Tag Two" in second_ngw.tags'
+ - second_ngw.tags["tag_one"] == '{{ resource_prefix }} One'
+ - second_ngw.tags["Tag Two"] == 'two {{ resource_prefix }}'
+ - '"vpc_id" in second_ngw'
+ - second_ngw.vpc_id == vpc_id
+ vars:
+ second_ngw: '{{ ngw_info.result[0] }}'
+
+ - name: Gather information about a filtered list of NAT Gateways using tags and state
+ ec2_vpc_nat_gateway_info:
+ filters:
+ tag:Tag Two: two {{ resource_prefix }}
+ state: [available]
+ register: ngw_info
+
+ - name: Assert success
+ assert:
+ that:
+ - ngw_info is successful
+ - ngw_info.result | length == 1
+ - '"create_time" in second_ngw'
+ - '"nat_gateway_addresses" in second_ngw'
+ - '"nat_gateway_id" in second_ngw'
+ - second_ngw.nat_gateway_id == ngw_id
+ - '"state" in second_ngw'
+ - second_ngw.state == 'available'
+ - '"subnet_id" in second_ngw'
+ - second_ngw.subnet_id == subnet_id
+ - '"tags" in second_ngw'
+ - second_ngw.tags | length == 2
+ - '"tag_one" in second_ngw.tags'
+ - '"Tag Two" in second_ngw.tags'
+ - second_ngw.tags["tag_one"] == '{{ resource_prefix }} One'
+ - second_ngw.tags["Tag Two"] == 'two {{ resource_prefix }}'
+ - '"vpc_id" in second_ngw'
+ - second_ngw.vpc_id == vpc_id
+ vars:
+ second_ngw: '{{ ngw_info.result[0] }}'
+
+
+ # ============================================================
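+ # purge_tags defaults to true, so swapping tag_one for tag_three should drop
+ # the old key and leave exactly two tags on the gateway.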
+ - name: Update the tags (remove and add) - CHECK_MODE
+ ec2_vpc_nat_gateway:
+ subnet_id: '{{ subnet_id }}'
+ allocation_id: '{{ allocation_id }}'
+ tags:
+ tag_three: '{{ resource_prefix }} Three'
+ Tag Two: two {{ resource_prefix }}
+ wait: yes
+ register: update_tags_ngw
+ check_mode: yes
+
+ - name: Assert tag update would happen (expected changed=true) - CHECK_MODE
+ assert:
+ that:
+ - update_tags_ngw.changed
+ - '"nat_gateway_id" in update_tags_ngw'
+ - update_tags_ngw.nat_gateway_id == ngw_id
+ - '"subnet_id" in update_tags_ngw'
+ - update_tags_ngw.subnet_id == subnet_id
+ - '"tags" in update_tags_ngw'
+ - '"vpc_id" in update_tags_ngw'
+ - update_tags_ngw.vpc_id == vpc_id
+
+ - name: Update the tags (remove and add)
+ ec2_vpc_nat_gateway:
+ subnet_id: '{{ subnet_id }}'
+ allocation_id: '{{ allocation_id }}'
+ tags:
+ tag_three: '{{ resource_prefix }} Three'
+ Tag Two: two {{ resource_prefix }}
+ wait: yes
+ register: update_tags_ngw
+
+ - name: Assert tag update would happen (expected changed=true)
+ assert:
+ that:
+ - update_tags_ngw.changed
+ - '"nat_gateway_id" in update_tags_ngw'
+ - update_tags_ngw.nat_gateway_id == ngw_id
+ - '"subnet_id" in update_tags_ngw'
+ - update_tags_ngw.subnet_id == subnet_id
+ - '"tags" in update_tags_ngw'
+ - update_tags_ngw.tags | length == 2
+ - update_tags_ngw.tags["tag_three"] == '{{ resource_prefix }} Three'
+ - update_tags_ngw.tags["Tag Two"] == 'two {{ resource_prefix }}'
+ - '"vpc_id" in update_tags_ngw'
+ - update_tags_ngw.vpc_id == vpc_id
+
+
+ # ============================================================
+ - name: Gather information about a filtered list of NAT Gateways using tags and state (no match) - CHECK_MODE
+ ec2_vpc_nat_gateway_info:
+ filters:
+ tag:tag_one: '{{ resource_prefix }} One'
+ state: [available]
+ register: ngw_info
+ check_mode: yes
+
+ - name: Assert success - CHECK_MODE
+ assert:
+ that:
+ - ngw_info is successful
+ - ngw_info.result | length == 0
+
+ - name: Gather information about a filtered list of NAT Gateways using tags and state (no match)
+ ec2_vpc_nat_gateway_info:
+ filters:
+ tag:tag_one: '{{ resource_prefix }} One'
+ state: [available]
+ register: ngw_info
+
+ - name: Assert success
+ assert:
+ that:
+ - ngw_info is successful
+ - ngw_info.result | length == 0
+
+
+ # ============================================================
+ - name: Update the tags add without purge - CHECK_MODE
+ ec2_vpc_nat_gateway:
+ if_exist_do_not_create: yes
+ subnet_id: '{{ subnet_id }}'
+ allocation_id: '{{ allocation_id }}'
+ purge_tags: no
+ tags:
+ tag_one: '{{ resource_prefix }} One'
+ wait: yes
+ register: update_tags_ngw
+ check_mode: yes
+
+ - name: Assert tags would be added - CHECK_MODE
+ assert:
+ that:
+ - update_tags_ngw.changed
+ - '"nat_gateway_id" in update_tags_ngw'
+ - update_tags_ngw.nat_gateway_id == ngw_id
+ - '"subnet_id" in update_tags_ngw'
+ - update_tags_ngw.subnet_id == subnet_id
+ - '"tags" in update_tags_ngw'
+ - '"vpc_id" in update_tags_ngw'
+ - update_tags_ngw.vpc_id == vpc_id
+
+ - name: Update the tags add without purge
+ ec2_vpc_nat_gateway:
+ if_exist_do_not_create: yes
+ subnet_id: '{{ subnet_id }}'
+ allocation_id: '{{ allocation_id }}'
+ purge_tags: no
+ tags:
+ tag_one: '{{ resource_prefix }} One'
+ wait: yes
+ register: update_tags_ngw
+
+ - name: Assert tags would be added
+ assert:
+ that:
+ - update_tags_ngw.changed
+ - '"nat_gateway_id" in update_tags_ngw'
+ - update_tags_ngw.nat_gateway_id == ngw_id
+ - '"subnet_id" in update_tags_ngw'
+ - update_tags_ngw.subnet_id == subnet_id
+ - '"tags" in update_tags_ngw'
+ - update_tags_ngw.tags | length == 3
+ - update_tags_ngw.tags["tag_one"] == '{{ resource_prefix }} One'
+ - update_tags_ngw.tags["tag_three"] == '{{ resource_prefix }} Three'
+ - update_tags_ngw.tags["Tag Two"] == 'two {{ resource_prefix }}'
+ - '"vpc_id" in update_tags_ngw'
+ - update_tags_ngw.vpc_id == vpc_id
+
+
+ # ============================================================
+ - name: Remove all tags - CHECK_MODE
+ ec2_vpc_nat_gateway:
+ subnet_id: '{{ subnet_id }}'
+ allocation_id: '{{ allocation_id }}'
+ tags: {}
+ register: delete_tags_ngw
+ check_mode: yes
+
+ - name: Assert tags would be removed - CHECK_MODE
+ assert:
+ that:
+ - delete_tags_ngw.changed
+ - '"nat_gateway_id" in delete_tags_ngw'
+ - delete_tags_ngw.nat_gateway_id == ngw_id
+ - '"subnet_id" in delete_tags_ngw'
+ - delete_tags_ngw.subnet_id == subnet_id
+ - '"tags" in delete_tags_ngw'
+ - '"vpc_id" in delete_tags_ngw'
+ - delete_tags_ngw.vpc_id == vpc_id
+
+ - name: Remove all tags
+ ec2_vpc_nat_gateway:
+ subnet_id: '{{ subnet_id }}'
+ allocation_id: '{{ allocation_id }}'
+ tags: {}
+ register: delete_tags_ngw
+
+ - name: Assert tags would be removed
+ assert:
+ that:
+ - delete_tags_ngw.changed
+ - '"nat_gateway_id" in delete_tags_ngw'
+ - delete_tags_ngw.nat_gateway_id == ngw_id
+ - '"subnet_id" in delete_tags_ngw'
+ - delete_tags_ngw.subnet_id == subnet_id
+ - '"tags" in delete_tags_ngw'
+ - delete_tags_ngw.tags | length == 0
+ - '"vpc_id" in delete_tags_ngw'
+ - delete_tags_ngw.vpc_id == vpc_id
+
+
+ # ============================================================
+ - name: Update with CamelCase tags - CHECK_MODE
+ ec2_vpc_nat_gateway:
+ if_exist_do_not_create: yes
+ subnet_id: '{{ subnet_id }}'
+ allocation_id: '{{ allocation_id }}'
+ purge_tags: no
+ tags:
+ lowercase spaced: "hello cruel world"
+ Title Case: "Hello Cruel World"
+ CamelCase: "SimpleCamelCase"
+ snake_case: "simple_snake_case"
+ wait: yes
+ register: update_tags_ngw
+ check_mode: yes
+
+ - name: Assert tags would be added - CHECK_MODE
+ assert:
+ that:
+ - update_tags_ngw.changed
+ - '"nat_gateway_id" in update_tags_ngw'
+ - update_tags_ngw.nat_gateway_id == ngw_id
+ - '"subnet_id" in update_tags_ngw'
+ - update_tags_ngw.subnet_id == subnet_id
+ - '"tags" in update_tags_ngw'
+ - '"vpc_id" in update_tags_ngw'
+ - update_tags_ngw.vpc_id == vpc_id
+
+ - name: Update with CamelCase tags
+ ec2_vpc_nat_gateway:
+ if_exist_do_not_create: yes
+ subnet_id: '{{ subnet_id }}'
+ allocation_id: '{{ allocation_id }}'
+ purge_tags: no
+ tags:
+ lowercase spaced: "hello cruel world"
+ Title Case: "Hello Cruel World"
+ CamelCase: "SimpleCamelCase"
+ snake_case: "simple_snake_case"
+ wait: yes
+ register: update_tags_ngw
+
+ - name: Assert tags would be added
+ assert:
+ that:
+ - update_tags_ngw.changed
+ - '"nat_gateway_id" in update_tags_ngw'
+ - update_tags_ngw.nat_gateway_id == ngw_id
+ - '"subnet_id" in update_tags_ngw'
+ - update_tags_ngw.subnet_id == subnet_id
+ - '"tags" in update_tags_ngw'
+ - update_tags_ngw.tags | length == 4
+ - update_tags_ngw.tags["lowercase spaced"] == 'hello cruel world'
+ - update_tags_ngw.tags["Title Case"] == 'Hello Cruel World'
+ - update_tags_ngw.tags["CamelCase"] == 'SimpleCamelCase'
+ - update_tags_ngw.tags["snake_case"] == 'simple_snake_case'
+ - '"vpc_id" in update_tags_ngw'
+ - update_tags_ngw.vpc_id == vpc_id
+
+
+ # ============================================================
+
+ - name: Delete NAT gateway
+ ec2_vpc_nat_gateway:
+ nat_gateway_id: '{{ nat_gateway_id }}'
+ state: absent
+ wait: yes
+ register: delete_nat_gateway
+
+ # ============================================================
+
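+ # Private NAT gateways route within the VPC only and do not use an Elastic IP,
+ # so neither eip_address nor allocation_id is supplied here.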
+ - name: Create new NAT gateway with connectivity_type = private - CHECK_MODE
+ ec2_vpc_nat_gateway:
+ subnet_id: '{{ subnet_id }}'
+ connectivity_type: 'private'
+ wait: yes
+ register: create_ngw
+ check_mode: yes
+
+ - name: Assert creation happened (expected changed=true) - CHECK_MODE
+ assert:
+ that:
+ - create_ngw.changed
+ - '"ec2:CreateNatGateway" not in create_ngw.resource_actions'
+
+ - name: Create new NAT gateway with connectivity_type = private
+ ec2_vpc_nat_gateway:
+ subnet_id: '{{ subnet_id }}'
+ connectivity_type: 'private'
+ wait: yes
+ register: create_ngw
+
+ - name: Assert creation happened (expected changed=true)
+ assert:
+ that:
+ - create_ngw.changed
+ - create_ngw.connectivity_type == 'private'
+ - '"create_time" in create_ngw'
+
+ - name: 'set facts: NAT gateway ID'
+ set_fact:
+ nat_gateway_id: '{{ create_ngw.nat_gateway_id }}'
+ network_interface_id: '{{ create_ngw.nat_gateway_addresses[0].network_interface_id }}'
+
+ # ============================================================
+
+
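+ # Clean up every resource created above even if a task failed, making sure any
+ # EIPs still attached to leftover NAT gateways are released first.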
+ always:
+ - name: Get NAT gateways
+ ec2_vpc_nat_gateway_info:
+ filters:
+ vpc-id: '{{ vpc_id }}'
+ state: [available]
+ register: existing_ngws
+ ignore_errors: true
+
+ - name: Tidy up NAT gateway
+ ec2_vpc_nat_gateway:
+ subnet_id: '{{ item.subnet_id }}'
+ nat_gateway_id: '{{ item.nat_gateway_id }}'
+ connectivity_type: '{{ item.connectivity_type }}'
+ release_eip: yes
+ state: absent
+ wait: yes
+ with_items: '{{ existing_ngws.result }}'
+ ignore_errors: true
+
+ - name: Delete IGW
+ ec2_vpc_igw:
+ vpc_id: '{{ vpc_id }}'
+ state: absent
+ ignore_errors: true
+
+ - name: Remove subnet
+ ec2_vpc_subnet:
+ state: absent
+ cidr: '{{ subnet_cidr }}'
+ vpc_id: '{{ vpc_id }}'
+ ignore_errors: true
+
+ - name: Ensure EIP is actually released
+ ec2_eip:
+ state: absent
+ device_id: '{{ item.nat_gateway_addresses[0].network_interface_id }}'
+ in_vpc: yes
+ with_items: '{{ existing_ngws.result }}'
+ ignore_errors: yes
+
+ - name: Delete VPC
+ ec2_vpc_net:
+ name: '{{ vpc_name }}'
+ cidr_block: '{{ vpc_cidr }}'
+ state: absent
+ purge_cidrs: yes
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/aliases
new file mode 100644
index 000000000..92bd4d57f
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/aliases
@@ -0,0 +1,2 @@
+ec2_vpc_net_info
+cloud/aws
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/defaults/main.yml
new file mode 100644
index 000000000..f35d4cb87
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/defaults/main.yml
@@ -0,0 +1,8 @@
+---
+# defaults file for ec2_vpc_net
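+# All three CIDRs use the same resource_prefix seed, so they share the same
+# randomly chosen second octet and only differ in the third octet.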
+vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/24'
+vpc_cidr_a: '10.{{ 256 | random(seed=resource_prefix) }}.1.0/24'
+vpc_cidr_b: '10.{{ 256 | random(seed=resource_prefix) }}.2.0/24'
+
+vpc_name: '{{ resource_prefix }}-vpc-net'
+vpc_name_updated: '{{ resource_prefix }}-updated-vpc-net'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/tasks/main.yml
new file mode 100644
index 000000000..da40c16f6
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/tasks/main.yml
@@ -0,0 +1,1560 @@
+---
+- name: Setup AWS Environment
+ module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ vars:
+ first_tags:
+ 'Key with Spaces': Value with spaces
+ CamelCaseKey: CamelCaseValue
+ pascalCaseKey: pascalCaseValue
+ snake_case_key: snake_case_value
+ second_tags:
+ 'New Key with Spaces': Value with spaces
+ NewCamelCaseKey: CamelCaseValue
+ newPascalCaseKey: pascalCaseValue
+ new_snake_case_key: snake_case_value
+ third_tags:
+ 'Key with Spaces': Value with spaces
+ CamelCaseKey: CamelCaseValue
+ pascalCaseKey: pascalCaseValue
+ snake_case_key: snake_case_value
+ 'New Key with Spaces': Updated Value with spaces
+ final_tags:
+ 'Key with Spaces': Value with spaces
+ CamelCaseKey: CamelCaseValue
+ pascalCaseKey: pascalCaseValue
+ snake_case_key: snake_case_value
+ 'New Key with Spaces': Updated Value with spaces
+ NewCamelCaseKey: CamelCaseValue
+ newPascalCaseKey: pascalCaseValue
+ new_snake_case_key: snake_case_value
+ name_tags:
+ Name: "{{ vpc_name }}"
+ block:
+
+ # ============================================================
+
+ - name: Get the current caller identity facts
+ aws_caller_info:
+ register: caller_facts
+
+ - name: run the module without parameters
+ ec2_vpc_net:
+ ignore_errors: yes
+ register: result
+
+ - name: assert failure
+ assert:
+ that:
+ - result is failed
+ #- result.msg.startswith("missing required arguments")
+ - result.msg.startswith("one of")
+
+ # ============================================================
+
+ - name: Fetch existing VPC info
+ ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+ - name: Check no-one is using the Prefix before we start
+ assert:
+ that:
+ - vpc_info.vpcs | length == 0
+
+ - name: test check mode creating a VPC
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ check_mode: true
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: check for a change
+ assert:
+ that:
+ - result is changed
+ - vpc_info.vpcs | length == 0
+
+ # ============================================================
+
+ - name: create a VPC
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ ipv6_cidr: True
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert the VPC was created successfully
+ assert:
+ that:
+ - result is successful
+ - result is changed
+ - vpc_info.vpcs | length == 1
+
+ - name: assert the output
+ assert:
+ that:
+ - '"cidr_block" in result.vpc'
+ - result.vpc.cidr_block == vpc_cidr
+ - result.vpc.cidr_block_association_set | length == 1
+ - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[0].cidr_block == vpc_cidr
+ - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - '"classic_link_enabled" in result.vpc'
+ - result.vpc.dhcp_options_id.startswith("dopt-")
+ - result.vpc.id.startswith("vpc-")
+ - '"instance_tenancy" in result.vpc'
+ - result.vpc.ipv6_cidr_block_association_set | length == 1
+ - result.vpc.ipv6_cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block | ansible.utils.ipv6
+ - result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block_state.state in ["associated", "associating"]
+ - '"is_default" in result.vpc'
+ - '"state" in result.vpc'
+ - result.vpc.tags.keys() | length == 1
+ - result.vpc.tags.Name == vpc_name
+
+ - name: set the first VPC's details as facts for comparison and cleanup
+ set_fact:
+ vpc_1_result: "{{ result }}"
+ vpc_1: "{{ result.vpc.id }}"
+ vpc_1_ipv6_cidr: "{{ result.vpc.ipv6_cidr_block_association_set.0.ipv6_cidr_block }}"
+ default_dhcp_options_id: "{{ result.vpc.dhcp_options_id }}"
+
+ - name: create a VPC (retry)
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ ipv6_cidr: True
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert nothing changed
+ assert:
+ that:
+ - result is successful
+ - result is not changed
+ - vpc_info.vpcs | length == 1
+ - '"cidr_block" in result.vpc'
+ - result.vpc.cidr_block == vpc_cidr
+ - result.vpc.cidr_block_association_set | length == 1
+ - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[0].cidr_block == vpc_cidr
+ - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - '"classic_link_enabled" in result.vpc'
+ - result.vpc.dhcp_options_id.startswith("dopt-")
+ - result.vpc.id.startswith("vpc-")
+ - '"instance_tenancy" in result.vpc'
+ - result.vpc.ipv6_cidr_block_association_set | length == 1
+ - result.vpc.ipv6_cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block | ansible.utils.ipv6
+ - result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block_state.state in ["associated", "associating"]
+ - '"is_default" in result.vpc'
+ - '"state" in result.vpc'
+ - result.vpc.tags.keys() | length == 1
+ - result.vpc.tags.Name == vpc_name
+ - result.vpc.id == vpc_1
+
+ - name: No-op VPC configuration, missing ipv6_cidr property
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ # Intentionally commenting out 'ipv6_cidr'.
+ # When the 'ipv6_cidr' property is missing, the VPC should retain its configuration;
+ # the module must not fall back to a default of 'false' and disassociate the IPv6 block.
+ #ipv6_cidr: True
+ register: result
+ - name: assert configuration did not change
+ assert:
+ that:
+ - result is successful
+ - result is not changed
+
+ # ============================================================
+
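+ # The unfiltered listing can transiently fail with InvalidVpcID.NotFound while
+ # recently created or deleted VPCs propagate, hence the retry loop below.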
+ - name: VPC info (no filters)
+ ec2_vpc_net_info:
+ register: vpc_info
+ retries: 3
+ delay: 3
+ until: '"InvalidVpcID.NotFound" not in ( vpc_info.msg | default("") )'
+
+ - name: Test that our new VPC shows up in the results
+ assert:
+ that:
+ - vpc_1 in ( vpc_info.vpcs | map(attribute="vpc_id") | list )
+
+ - name: VPC info (Simple tag filter)
+ ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: Test vpc_info results
+ assert:
+ that:
+ - vpc_info.vpcs[0].cidr_block == vpc_cidr
+ - vpc_info.vpcs[0].cidr_block_association_set | length == 1
+ - vpc_info.vpcs[0].cidr_block_association_set[0].association_id == result.vpc.cidr_block_association_set[0].association_id
+ - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block == result.vpc.cidr_block_association_set[0].cidr_block
+ - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - '"classic_link_dns_supported" in vpc_info.vpcs[0]'
+ - '"classic_link_enabled" in vpc_info.vpcs[0]'
+ - vpc_info.vpcs[0].dhcp_options_id == result.vpc.dhcp_options_id
+ - ( vpc_info.vpcs[0].enable_dns_hostnames | bool ) == True
+ - ( vpc_info.vpcs[0].enable_dns_support | bool ) == True
+ - vpc_info.vpcs[0].id == result.vpc.id
+ - '"instance_tenancy" in vpc_info.vpcs[0]'
+ - vpc_info.vpcs[0].ipv6_cidr_block_association_set | length == 1
+ - vpc_info.vpcs[0].ipv6_cidr_block_association_set[0].association_id == result.vpc.ipv6_cidr_block_association_set[0].association_id
+ - vpc_info.vpcs[0].ipv6_cidr_block_association_set[0].ipv6_cidr_block == result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block
+ - vpc_info.vpcs[0].ipv6_cidr_block_association_set[0].ipv6_cidr_block_state.state in ["associated", "associating"]
+ - '"is_default" in vpc_info.vpcs[0]'
+ - vpc_info.vpcs[0].owner_id == caller_facts.account
+ - '"state" in vpc_info.vpcs[0]'
+ - vpc_info.vpcs[0].vpc_id == result.vpc.id
+
+ # ============================================================
+
+ - name: Try to add IPv6 CIDR when one already exists
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ ipv6_cidr: True
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: Assert no changes made
+ assert:
+ that:
+ - result is not changed
+ - vpc_info.vpcs | length == 1
+
+ # ============================================================
+
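+ # multi_ok permits a second VPC with the same name and CIDR; in check mode the
+ # module should report a change without actually creating anything.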
+ - name: test check mode creating an identical VPC (multi_ok)
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ ipv6_cidr: True
+ multi_ok: yes
+ check_mode: true
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert a change would be made
+ assert:
+ that:
+ - result is changed
+ - name: assert a change was not actually made
+ assert:
+ that:
+ - vpc_info.vpcs | length == 1
+
+ # ============================================================
+
+ - name: create a VPC with a dedicated tenancy using the same CIDR and name
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ ipv6_cidr: True
+ tenancy: dedicated
+ multi_ok: yes
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert a new VPC was created
+ assert:
+ that:
+ - result is successful
+ - result is changed
+ - result.vpc.instance_tenancy == "dedicated"
+ - result.vpc.id != vpc_1
+ - vpc_info.vpcs | length == 2
+
+ - name: set the second VPC's details as facts for comparison and cleanup
+ set_fact:
+ vpc_2_result: "{{ result }}"
+ vpc_2: "{{ result.vpc.id }}"
+
+ # ============================================================
+
+ - name: VPC info (Simple VPC-ID filter)
+ ec2_vpc_net_info:
+ filters:
+ "vpc-id": "{{ vpc_2 }}"
+ register: vpc_info
+
+ - name: Test vpc_info results
+ assert:
+ that:
+ - vpc_info.vpcs[0].cidr_block == vpc_cidr
+ - vpc_info.vpcs[0].cidr_block_association_set | length == 1
+ - vpc_info.vpcs[0].cidr_block_association_set[0].association_id == result.vpc.cidr_block_association_set[0].association_id
+ - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block == result.vpc.cidr_block_association_set[0].cidr_block
+ - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - '"classic_link_dns_supported" in vpc_info.vpcs[0]'
+ - '"classic_link_enabled" in vpc_info.vpcs[0]'
+ - vpc_info.vpcs[0].dhcp_options_id == result.vpc.dhcp_options_id
+ - ( vpc_info.vpcs[0].enable_dns_hostnames | bool ) == True
+ - ( vpc_info.vpcs[0].enable_dns_support | bool ) == True
+ - vpc_info.vpcs[0].id == vpc_2
+ - '"instance_tenancy" in vpc_info.vpcs[0]'
+ - vpc_info.vpcs[0].ipv6_cidr_block_association_set | length == 1
+ - vpc_info.vpcs[0].ipv6_cidr_block_association_set[0].association_id == result.vpc.ipv6_cidr_block_association_set[0].association_id
+ - vpc_info.vpcs[0].ipv6_cidr_block_association_set[0].ipv6_cidr_block == result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block
+ - vpc_info.vpcs[0].ipv6_cidr_block_association_set[0].ipv6_cidr_block_state.state in ["associated", "associating"]
+ - '"is_default" in vpc_info.vpcs[0]'
+ - vpc_info.vpcs[0].owner_id == caller_facts.account
+ - '"state" in vpc_info.vpcs[0]'
+ - vpc_info.vpcs[0].vpc_id == vpc_2
+
+ # ============================================================
+
+ # This will only fail if there are already *2* VPCs; otherwise ec2_vpc_net
+ # assumes you want to update your existing VPC...
+ - name: attempt to create another VPC with the same CIDR and name without multi_ok
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ ipv6_cidr: True
+ tenancy: dedicated
+ multi_ok: no
+ register: new_result
+ ignore_errors: yes
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert failure
+ assert:
+ that:
+ - new_result is failed
+ - '"If you would like to create the VPC anyway please pass True to the multi_ok param" in new_result.msg'
+ - vpc_info.vpcs | length == 2
+
+ # ============================================================
+
+ - name: Set new name for second VPC
+ ec2_vpc_net:
+ state: present
+ vpc_id: "{{ vpc_2 }}"
+ name: "{{ vpc_name_updated }}"
+ cidr_block: "{{ vpc_cidr }}"
+ register: result
+
+ - name: assert name changed
+ assert:
+ that:
+ - '"cidr_block" in result.vpc'
+ - result.vpc.cidr_block == vpc_cidr
+ - result.vpc.cidr_block_association_set | length == 1
+ - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[0].cidr_block == vpc_cidr
+ - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - '"classic_link_enabled" in result.vpc'
+ - result.vpc.dhcp_options_id.startswith("dopt-")
+ - '"instance_tenancy" in result.vpc'
+ - result.vpc.ipv6_cidr_block_association_set | length == 1
+ - result.vpc.ipv6_cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block | ansible.utils.ipv6
+ - result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block_state.state in ["associated", "associating"]
+ - '"is_default" in result.vpc'
+ - '"state" in result.vpc'
+ - result.vpc.tags.keys() | length == 1
+ - result.vpc.tags.Name == vpc_name_updated
+ - result.vpc.id == vpc_2
+
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert success
+ assert:
+ that:
+ - result is changed
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].vpc_id == vpc_1
+
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name_updated }}"
+ register: vpc_info
+
+ - name: assert success
+ assert:
+ that:
+ - result is changed
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].vpc_id == vpc_2
+
+ - name: delete second VPC (by id)
+ ec2_vpc_net:
+ vpc_id: "{{ vpc_2 }}"
+ state: absent
+ cidr_block: "{{ vpc_cidr }}"
+ register: result
+
+ # ============================================================
+
+ - name: attempt to delete a VPC that doesn't exist
+ ec2_vpc_net:
+ state: absent
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}-does-not-exist"
+ register: result
+
+ - name: assert no changes were made
+ assert:
+ that:
+ - result is not changed
+ - not result.vpc
+
+ # ============================================================
+
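+ # Create a separate DHCP options set so the following tasks can verify that
+ # dhcp_opts_id changes are applied and are idempotent on retry.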
+ - name: create a DHCP option set to use in next test
+ ec2_vpc_dhcp_option:
+ dns_servers:
+ - 4.4.4.4
+ - 8.8.8.8
+ tags:
+ Name: "{{ vpc_name }}"
+ register: new_dhcp
+ - name: assert the DHCP option set was successfully created
+ assert:
+ that:
+ - new_dhcp is changed
+
+ - name: modify the DHCP options set for a VPC (check_mode)
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ dhcp_opts_id: "{{ new_dhcp.dhcp_options_id }}"
+ register: result
+ check_mode: True
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert a change was reported but the DHCP option set was not updated
+ assert:
+ that:
+ - result is changed
+ - result.vpc.id == vpc_1
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].dhcp_options_id == default_dhcp_options_id
+
+ - name: modify the DHCP options set for a VPC
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ dhcp_opts_id: "{{ new_dhcp.dhcp_options_id }}"
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert the DHCP option set changed
+ assert:
+ that:
+ - result is changed
+ - result.vpc.id == vpc_1
+ - default_dhcp_options_id != result.vpc.dhcp_options_id
+ - result.vpc.dhcp_options_id == new_dhcp.dhcp_options_id
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].dhcp_options_id == new_dhcp.dhcp_options_id
+
+ - name: modify the DHCP options set for a VPC (retry)
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ dhcp_opts_id: "{{ new_dhcp.dhcp_options_id }}"
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert the DHCP option set was not changed
+ assert:
+ that:
+ - result is not changed
+ - result.vpc.id == vpc_1
+ - result.vpc.dhcp_options_id == new_dhcp.dhcp_options_id
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].dhcp_options_id == new_dhcp.dhcp_options_id
+
+ # ============================================================
+
+ - name: disable dns_hostnames (check mode)
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ dns_hostnames: False
+ register: result
+ check_mode: True
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert a change was reported but not made
+ assert:
+ that:
+ - result is successful
+ - result is changed
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].enable_dns_hostnames | bool == True
+ - vpc_info.vpcs[0].enable_dns_support | bool == True
+
+ - name: disable dns_hostnames
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ dns_hostnames: False
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert a change was made
+ assert:
+ that:
+ - result is successful
+ - result is changed
+ - result.vpc.id == vpc_1
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].enable_dns_hostnames | bool == False
+ - vpc_info.vpcs[0].enable_dns_support | bool == True
+
+ - name: disable dns_hostnames (retry)
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ dns_hostnames: False
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert a change was made
+ assert:
+ that:
+ - result is successful
+ - result is not changed
+ - result.vpc.id == vpc_1
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].enable_dns_hostnames | bool == False
+ - vpc_info.vpcs[0].enable_dns_support | bool == True
+
+ - name: disable dns_support (check mode)
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ dns_hostnames: False
+ dns_support: False
+ check_mode: True
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert a change was reported but not made
+ assert:
+ that:
+ - result is successful
+ - result is changed
+ - result.vpc.id == vpc_1
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].enable_dns_hostnames | bool == False
+ - vpc_info.vpcs[0].enable_dns_support | bool == True
+
+ - name: disable dns_support
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ dns_hostnames: False
+ dns_support: False
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert a change was made
+ assert:
+ that:
+ - result is successful
+ - result is changed
+ - result.vpc.id == vpc_1
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].enable_dns_hostnames | bool == False
+ - vpc_info.vpcs[0].enable_dns_support | bool == False
+
+ - name: disable dns_support (retry)
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ dns_hostnames: False
+ dns_support: False
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert a change was not made
+ assert:
+ that:
+ - result is successful
+ - result is not changed
+ - result.vpc.id == vpc_1
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].enable_dns_hostnames | bool == False
+ - vpc_info.vpcs[0].enable_dns_support | bool == False
+
+ - name: re-enable dns_support (check mode)
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ dns_hostnames: True
+ dns_support: True
+ register: result
+ check_mode: True
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert a change would be made but has not been
+ assert:
+ that:
+ - result is successful
+ - result is changed
+ - result.vpc.id == vpc_1
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].enable_dns_hostnames | bool == False
+ - vpc_info.vpcs[0].enable_dns_support | bool == False
+
+ - name: re-enable dns_support
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ dns_hostnames: True
+ dns_support: True
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert a change was made
+ assert:
+ that:
+ - result is successful
+ - result is changed
+ - result.vpc.id == vpc_1
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].enable_dns_hostnames | bool == True
+ - vpc_info.vpcs[0].enable_dns_support | bool == True
+
+ - name: re-enable dns_support (retry)
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ dns_hostnames: True
+ dns_support: True
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert a change was not made
+ assert:
+ that:
+ - result is successful
+ - result is not changed
+ - result.vpc.id == vpc_1
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].enable_dns_hostnames | bool == True
+ - vpc_info.vpcs[0].enable_dns_support | bool == True
+
+ # ============================================================
+
+ - name: add tags (check mode)
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ tags: "{{ first_tags }}"
+ check_mode: true
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert the VPC has Name but not Ansible tag
+ assert:
+ that:
+ - result is successful
+ - result is changed
+ - result.vpc.id == vpc_1
+ - result.vpc.tags | length == 1
+ - result.vpc.tags.Name == vpc_name
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].tags == name_tags
+
+ - name: add tags
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ tags: "{{ first_tags }}"
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert the VPC has Name and Ansible tags
+ assert:
+ that:
+ - result is successful
+ - result is changed
+ - result.vpc.id == vpc_1
+ - result.vpc.tags == (first_tags | combine(name_tags))
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].tags == (first_tags | combine(name_tags))
+
+ - name: add tags (no change)
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ tags: "{{ first_tags }}"
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert the VPC has Name and Ansible tags
+ assert:
+ that:
+ - result is successful
+ - result is not changed
+ - result.vpc.id == vpc_1
+ - result.vpc.tags == (first_tags | combine(name_tags))
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].tags == (first_tags | combine(name_tags))
+
+ # ============================================================
+
+ - name: modify tags with purge (check mode)
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ tags: "{{ second_tags }}"
+ purge_tags: true
+ check_mode: true
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert a change was reported but the tags were not updated
+ assert:
+ that:
+ - result is successful
+ - result is changed
+ - result.vpc.id == vpc_1
+ - result.vpc.tags == (first_tags | combine(name_tags))
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].tags == (first_tags | combine(name_tags))
+
+ - name: modify tags with purge
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ tags: "{{ second_tags }}"
+ purge_tags: true
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert the VPC has Name and Ansible tags
+ assert:
+ that:
+ - result is successful
+ - result is changed
+ - result.vpc.id == vpc_1
+ - result.vpc.tags == (second_tags | combine(name_tags))
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].tags == (second_tags | combine(name_tags))
+
+ - name: modify tags with purge (no change)
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ tags: "{{ second_tags }}"
+ purge_tags: true
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert the VPC has Name and Ansible tags
+ assert:
+ that:
+ - result is successful
+ - result is not changed
+ - result.vpc.id == vpc_1
+ - result.vpc.tags == (second_tags | combine(name_tags))
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].tags == (second_tags | combine(name_tags))
+
+ # ============================================================
+
+ - name: modify tags without purge (check mode)
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ tags: "{{ third_tags }}"
+ purge_tags: false
+ check_mode: true
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert a change was reported but the tags were not updated
+ assert:
+ that:
+ - result is successful
+ - result is changed
+ - result.vpc.id == vpc_1
+ - result.vpc.tags == (second_tags | combine(name_tags))
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].tags == (second_tags | combine(name_tags))
+
+ - name: modify tags without purge
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ tags: "{{ third_tags }}"
+ purge_tags: false
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert the VPC has Name and Ansible tags
+ assert:
+ that:
+ - result is successful
+ - result is changed
+ - result.vpc.id == vpc_1
+ - result.vpc.tags == (final_tags | combine(name_tags))
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].tags == (final_tags | combine(name_tags))
+
+ - name: modify tags without purge (no change)
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ tags: "{{ third_tags }}"
+ purge_tags: false
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert the VPC has Name and Ansible tags
+ assert:
+ that:
+ - result is successful
+ - result is not changed
+ - result.vpc.id == vpc_1
+ - result.vpc.tags == (final_tags | combine(name_tags))
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].tags == (final_tags | combine(name_tags))
+
+ # ============================================================
+
+ - name: modify CIDR (check mode)
+ ec2_vpc_net:
+ state: present
+ cidr_block:
+ - "{{ vpc_cidr }}"
+ - "{{ vpc_cidr_a }}"
+ name: "{{ vpc_name }}"
+ check_mode: true
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: Check the CIDRs weren't changed
+ assert:
+ that:
+ - result is successful
+ - result is changed
+ - result.vpc.id == vpc_1
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].cidr_block == vpc_cidr
+ - vpc_cidr in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_a not in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_b not in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_info.vpcs[0].cidr_block_association_set | length == 1
+ - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - vpc_cidr in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_a not in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_b not in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list)
+
+ - name: modify CIDR
+ ec2_vpc_net:
+ state: present
+ cidr_block:
+ - "{{ vpc_cidr }}"
+ - "{{ vpc_cidr_a }}"
+ name: "{{ vpc_name }}"
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert the CIDRs changed
+ assert:
+ that:
+ - result is successful
+ - result is changed
+ - result.vpc.id == vpc_1
+ - vpc_info.vpcs | length == 1
+ - result.vpc.cidr_block == vpc_cidr
+ - vpc_info.vpcs[0].cidr_block == vpc_cidr
+ - result.vpc.cidr_block_association_set | length == 2
+ - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - result.vpc.cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ - vpc_cidr in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_a in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_b not in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_info.vpcs[0].cidr_block_association_set | length == 2
+ - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ - vpc_cidr in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_a in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_b not in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list)
+
+ - name: modify CIDR (no change)
+ ec2_vpc_net:
+ state: present
+ cidr_block:
+ - "{{ vpc_cidr }}"
+ - "{{ vpc_cidr_a }}"
+ name: "{{ vpc_name }}"
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert the CIDRs didn't change
+ assert:
+ that:
+ - result is successful
+ - result is not changed
+ - result.vpc.id == vpc_1
+ - vpc_info.vpcs | length == 1
+ - result.vpc.cidr_block == vpc_cidr
+ - vpc_info.vpcs[0].cidr_block == vpc_cidr
+ - result.vpc.cidr_block_association_set | length == 2
+ - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - result.vpc.cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ - vpc_cidr in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_a in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_b not in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_info.vpcs[0].cidr_block_association_set | length == 2
+ - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ - vpc_cidr in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_a in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_b not in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list)
+
+ - name: modify CIDR - no purge (check mode)
+ ec2_vpc_net:
+ state: present
+ cidr_block:
+ - "{{ vpc_cidr }}"
+ - "{{ vpc_cidr_b }}"
+ name: "{{ vpc_name }}"
+ check_mode: true
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: Check the CIDRs weren't changed
+ assert:
+ that:
+ - result is successful
+ - result is changed
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].cidr_block == vpc_cidr
+ - vpc_cidr in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_a in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_b not in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_info.vpcs[0].cidr_block_association_set | length == 2
+ - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ - vpc_cidr in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_a in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_b not in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list)
+
+ - name: modify CIDR - no purge
+ ec2_vpc_net:
+ state: present
+ cidr_block:
+ - "{{ vpc_cidr }}"
+ - "{{ vpc_cidr_b }}"
+ name: "{{ vpc_name }}"
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert the CIDRs changed
+ assert:
+ that:
+ - result is successful
+ - result is changed
+ - result.vpc.id == vpc_1
+ - vpc_info.vpcs | length == 1
+ - result.vpc.cidr_block == vpc_cidr
+ - vpc_info.vpcs[0].cidr_block == vpc_cidr
+ - result.vpc.cidr_block_association_set | length == 3
+ - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - result.vpc.cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ - result.vpc.cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
+ - vpc_cidr in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_a in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_b in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_info.vpcs[0].cidr_block_association_set | length == 3
+ - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ - vpc_info.vpcs[0].cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
+ - vpc_cidr in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_a in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_b in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list)
+
+ - name: modify CIDR - no purge (no change)
+ ec2_vpc_net:
+ state: present
+ cidr_block:
+ - "{{ vpc_cidr }}"
+ - "{{ vpc_cidr_b }}"
+ name: "{{ vpc_name }}"
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert the CIDRs didn't change
+ assert:
+ that:
+ - result is successful
+ - result is not changed
+ - vpc_info.vpcs | length == 1
+ - result.vpc.cidr_block == vpc_cidr
+ - vpc_info.vpcs[0].cidr_block == vpc_cidr
+ - result.vpc.cidr_block_association_set | length == 3
+ - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - result.vpc.cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ - result.vpc.cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
+ - vpc_cidr in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_a in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_b in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_info.vpcs[0].cidr_block_association_set | length == 3
+ - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ - vpc_info.vpcs[0].cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
+ - vpc_cidr in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_a in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_b in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list)
+
+ - name: modify CIDR - no purge (no change - list all - check mode)
+ ec2_vpc_net:
+ state: present
+ cidr_block:
+ - "{{ vpc_cidr }}"
+ - "{{ vpc_cidr_a }}"
+ - "{{ vpc_cidr_b }}"
+ name: "{{ vpc_name }}"
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert the CIDRs didn't change
+ assert:
+ that:
+ - result is successful
+ - result is not changed
+ - vpc_info.vpcs | length == 1
+ - result.vpc.cidr_block == vpc_cidr
+ - vpc_info.vpcs[0].cidr_block == vpc_cidr
+ - result.vpc.cidr_block_association_set | length == 3
+ - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - result.vpc.cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ - result.vpc.cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
+ - vpc_cidr in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_a in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_b in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_info.vpcs[0].cidr_block_association_set | length == 3
+ - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ - vpc_info.vpcs[0].cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
+ - vpc_cidr in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_a in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_b in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list)
+
+ - name: modify CIDR - no purge (no change - list all)
+ ec2_vpc_net:
+ state: present
+ cidr_block:
+ - "{{ vpc_cidr }}"
+ - "{{ vpc_cidr_a }}"
+ - "{{ vpc_cidr_b }}"
+ name: "{{ vpc_name }}"
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert the CIDRs didn't change
+ assert:
+ that:
+ - result is successful
+ - result is not changed
+ - vpc_info.vpcs | length == 1
+ - result.vpc.cidr_block == vpc_cidr
+ - vpc_info.vpcs[0].cidr_block == vpc_cidr
+ - result.vpc.cidr_block_association_set | length == 3
+ - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - result.vpc.cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ - result.vpc.cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
+ - vpc_cidr in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_a in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_b in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_info.vpcs[0].cidr_block_association_set | length == 3
+ - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ - vpc_info.vpcs[0].cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
+ - vpc_cidr in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_a in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_b in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list)
+
+ - name: modify CIDR - no purge (no change - different order - check mode)
+ ec2_vpc_net:
+ state: present
+ cidr_block:
+ - "{{ vpc_cidr }}"
+ - "{{ vpc_cidr_b }}"
+ - "{{ vpc_cidr_a }}"
+ name: "{{ vpc_name }}"
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert the CIDRs didn't change
+ assert:
+ that:
+ - result is successful
+ - result is not changed
+ - vpc_info.vpcs | length == 1
+ - result.vpc.cidr_block == vpc_cidr
+ - vpc_info.vpcs[0].cidr_block == vpc_cidr
+ - result.vpc.cidr_block_association_set | length == 3
+ - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - result.vpc.cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ - result.vpc.cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
+ - vpc_cidr in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_a in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_b in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_info.vpcs[0].cidr_block_association_set | length == 3
+ - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ - vpc_info.vpcs[0].cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
+ - vpc_cidr in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_a in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_b in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list)
+
+ - name: modify CIDR - no purge (no change - different order)
+ ec2_vpc_net:
+ state: present
+ cidr_block:
+ - "{{ vpc_cidr }}"
+ - "{{ vpc_cidr_b }}"
+ - "{{ vpc_cidr_a }}"
+ name: "{{ vpc_name }}"
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert the CIDRs didn't change
+ assert:
+ that:
+ - result is successful
+ - result is not changed
+ - vpc_info.vpcs | length == 1
+ - result.vpc.cidr_block == vpc_cidr
+ - vpc_info.vpcs[0].cidr_block == vpc_cidr
+ - result.vpc.cidr_block_association_set | length == 3
+ - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - result.vpc.cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ - result.vpc.cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
+ - vpc_cidr in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_a in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_b in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_info.vpcs[0].cidr_block_association_set | length == 3
+ - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ - vpc_info.vpcs[0].cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
+ - vpc_cidr in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_a in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_b in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list)
+
+ - name: modify CIDR - purge (check mode)
+ ec2_vpc_net:
+ state: present
+ cidr_block:
+ - "{{ vpc_cidr }}"
+ - "{{ vpc_cidr_b }}"
+ name: "{{ vpc_name }}"
+ purge_cidrs: yes
+ check_mode: true
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: Check the CIDRs weren't changed
+ assert:
+ that:
+ - result is successful
+ - result is changed
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].cidr_block == vpc_cidr
+ - vpc_info.vpcs[0].cidr_block_association_set | length == 3
+ - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ - vpc_info.vpcs[0].cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
+ - vpc_cidr in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_a in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list)
+ - vpc_cidr_b in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list)
+
+ - name: modify CIDR - purge
+ ec2_vpc_net:
+ state: present
+ cidr_block:
+ - "{{ vpc_cidr }}"
+ - "{{ vpc_cidr_b }}"
+ name: "{{ vpc_name }}"
+ purge_cidrs: yes
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert the CIDRs changed
+ assert:
+ that:
+ - result is successful
+ - result is changed
+ - result.vpc.id == vpc_1
+ - vpc_info.vpcs | length == 1
+ - result.vpc.cidr_block == vpc_cidr
+ - vpc_info.vpcs[0].cidr_block == vpc_cidr
+ - result.vpc.cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list | length == 2
+ - vpc_cidr in (result.vpc.cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list)
+ - vpc_cidr_a not in (result.vpc.cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block'))
+ - vpc_cidr_b in (result.vpc.cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block'))
+ - vpc_info.vpcs[0].cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list | length == 2
+ - vpc_cidr in (vpc_info.vpcs[0].cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list)
+ - vpc_cidr_a not in (vpc_info.vpcs[0].cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list)
+ - vpc_cidr_b in (vpc_info.vpcs[0].cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list)
+
+ - name: modify CIDR - purge (no change)
+ ec2_vpc_net:
+ state: present
+ cidr_block:
+ - "{{ vpc_cidr }}"
+ - "{{ vpc_cidr_b }}"
+ name: "{{ vpc_name }}"
+ purge_cidrs: yes
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ vpc_name }}"
+ register: vpc_info
+
+ - name: assert the CIDRs didn't change
+ assert:
+ that:
+ - result is successful
+ - result is not changed
+ - result.vpc.id == vpc_1
+ - vpc_info.vpcs | length == 1
+ - result.vpc.cidr_block == vpc_cidr
+ - vpc_info.vpcs[0].cidr_block == vpc_cidr
+ - result.vpc.cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list | length == 2
+ - vpc_cidr in (result.vpc.cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list)
+ - vpc_cidr_a not in (result.vpc.cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list)
+ - vpc_cidr_b in (result.vpc.cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list)
+ - vpc_info.vpcs[0].cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list | length == 2
+ - vpc_cidr in (vpc_info.vpcs[0].cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list)
+ - vpc_cidr_a not in (vpc_info.vpcs[0].cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list)
+ - vpc_cidr_b in (vpc_info.vpcs[0].cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list)
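+ # Note: after a CIDR is purged, EC2 keeps the old entry in cidr_block_association_set
+ # with cidr_block_state.state == "disassociated" for a while. That is why the purge
+ # assertions above filter the set first, e.g.
+ #   cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list
+ # instead of comparing the raw length of the association set.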
+
+ # ============================================================
+
+ - name: Remove IPv6 CIDR association from VPC in check mode
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ ipv6_cidr: False
+ check_mode: true
+ register: result
+ - name: assert configuration would change
+ assert:
+ that:
+ - result is successful
+ - result is changed
+
+ - name: Set IPv6 CIDR association to VPC, no change expected
+ # I.e. assert the previous ec2_vpc_net task in check_mode did not
+ # mistakenly modify the VPC configuration.
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ ipv6_cidr: True
+ register: result
+ - name: assert configuration did not change
+ assert:
+ that:
+ - result is successful
+ - result is not changed
+
+ - name: Remove IPv6 CIDR association from VPC
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ ipv6_cidr: False
+ register: result
+ - name: assert IPv6 CIDR association removed from VPC
+ assert:
+ that:
+ - result is successful
+ - result is changed
+ - result.vpc.ipv6_cidr_block_association_set | length == 1
+ - result.vpc.ipv6_cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block | ansible.netcommon.ipv6
+ - result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block_state.state in ["disassociated"]
+
+ - name: Add IPv6 CIDR association to VPC again
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ ipv6_cidr: True
+ register: result
+ - name: assert configuration change
+ assert:
+ that:
+ - result is successful
+ - result is changed
+ # Because the IPv6 CIDR was associated, then disassociated, then reassociated,
+ # there should now be one disassociated block and one associated block.
+ - result.vpc.ipv6_cidr_block_association_set | length == 2
+ - result.vpc.ipv6_cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block | ansible.netcommon.ipv6
+ - result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block_state.state in ["disassociated", "disassociating"]
+ - result.vpc.ipv6_cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.ipv6_cidr_block_association_set[1].ipv6_cidr_block | ansible.netcommon.ipv6
+ - result.vpc.ipv6_cidr_block_association_set[1].ipv6_cidr_block_state.state in ["associated", "associating"]
+
+
+ # ============================================================
+
+ - name: test check mode to delete a VPC
+ ec2_vpc_net:
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ state: absent
+ check_mode: true
+ register: result
+
+ - name: assert that a change would have been made
+ assert:
+ that:
+ - result is changed
+
+ # ============================================================
+
+ always:
+
+ - name: Describe VPCs before deleting them (for debugging)
+ ec2_vpc_net_info:
+ ignore_errors: true
+
+ - name: replace the DHCP options set so the new one can be deleted
+ ec2_vpc_net:
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ state: present
+ multi_ok: no
+ dhcp_opts_id: "{{ default_dhcp_options_id }}"
+ ignore_errors: true
+
+ - name: remove the DHCP option set
+ ec2_vpc_dhcp_option:
+ dhcp_options_id: "{{ new_dhcp.dhcp_options_id }}"
+ state: absent
+ ignore_errors: true
+
+ - name: remove the VPC
+ ec2_vpc_net:
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ vpc_name }}"
+ state: absent
+ ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/aliases
new file mode 100644
index 000000000..4b396a8bb
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/aliases
@@ -0,0 +1,5 @@
+time=15m
+
+cloud/aws
+
+ec2_vpc_route_table_info
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/defaults/main.yml
new file mode 100644
index 000000000..111510850
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+availability_zone_a: '{{ ec2_availability_zone_names[0] }}'
+availability_zone_b: '{{ ec2_availability_zone_names[1] }}'
+vpc_cidr: 10.228.224.0/21
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/meta/main.yml
new file mode 100644
index 000000000..1d40168d0
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+- setup_ec2_facts
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/tasks/main.yml
new file mode 100644
index 000000000..f5fa7c740
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/tasks/main.yml
@@ -0,0 +1,1499 @@
+- name: ec2_vpc_route_table integration tests
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+
+ - name: create VPC
+ ec2_vpc_net:
+ cidr_block: '{{ vpc_cidr }}'
+ name: '{{ resource_prefix }}_vpc'
+ state: present
+ register: vpc
+ - name: assert that VPC has an id
+ assert:
+ that:
+ - vpc.vpc.id is defined
+ - vpc.changed
+ - name: Assign IPv6 CIDR block to existing VPC, check mode
+ ec2_vpc_net:
+ cidr_block: '{{ vpc_cidr }}'
+ name: '{{ resource_prefix }}_vpc'
+ ipv6_cidr: true
+ check_mode: true
+ register: vpc_update
+ - name: assert that the VPC would be changed
+ assert:
+ that:
+ - vpc_update.changed
+ - name: Assign Amazon-provided IPv6 CIDR block to existing VPC
+ ec2_vpc_net:
+ cidr_block: '{{ vpc_cidr }}'
+ name: '{{ resource_prefix }}_vpc'
+ ipv6_cidr: true
+ register: vpc_update
+ - name: assert that VPC was changed, IPv6 CIDR is configured
+ assert:
+ that:
+ - vpc_update.vpc.id == vpc.vpc.id
+ - vpc_update.changed
+ - vpc_update.vpc.ipv6_cidr_block_association_set | length == 1
+ - name: Fetch existing VPC info
+ ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}_vpc"
+ register: vpc_info
+ - name: assert vpc net info after configuring IPv6 CIDR
+ assert:
+ that:
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].id == vpc.vpc.id
+ - vpc_info.vpcs[0].ipv6_cidr_block_association_set | length == 1
+ - vpc_info.vpcs[0].ipv6_cidr_block_association_set[0].ipv6_cidr_block_state.state == "associated"
+ - name: get Amazon-provided IPv6 CIDR associated with the VPC
+ set_fact:
+ # Example value: 2600:1f1c:1b3:8f00::/56
+ vpc_ipv6_cidr_block: '{{ vpc_info.vpcs[0].ipv6_cidr_block_association_set[0].ipv6_cidr_block }}'
+ - name: create subnets
+ ec2_vpc_subnet:
+ cidr: '{{ item.cidr }}'
+ az: '{{ item.zone }}'
+ assign_instances_ipv6: '{{ item.assign_instances_ipv6 }}'
+ ipv6_cidr: '{{ item.ipv6_cidr }}'
+ vpc_id: '{{ vpc.vpc.id }}'
+ state: present
+ tags:
+ Public: '{{ item.public | string }}'
+ Name: "{{ (item.public | bool) | ternary('public', 'private') }}-{{ item.zone }}"
+ with_items:
+ - cidr: 10.228.224.0/24
+ zone: '{{ availability_zone_a }}'
+ public: 'True'
+ assign_instances_ipv6: false
+ ipv6_cidr: null
+ - cidr: 10.228.225.0/24
+ zone: '{{ availability_zone_b }}'
+ public: 'True'
+ assign_instances_ipv6: false
+ ipv6_cidr: null
+ - cidr: 10.228.226.0/24
+ zone: '{{ availability_zone_a }}'
+ public: 'False'
+ assign_instances_ipv6: false
+ ipv6_cidr: null
+ - cidr: 10.228.227.0/24
+ zone: '{{ availability_zone_b }}'
+ public: 'False'
+ assign_instances_ipv6: false
+ ipv6_cidr: null
+ - cidr: 10.228.228.0/24
+ zone: '{{ availability_zone_a }}'
+ public: 'False'
+ assign_instances_ipv6: true
+ # Carve a dedicated /64 for this subnet out of the Amazon-provided VPC CIDR (see the note after this task).
+ ipv6_cidr: "{{ vpc_ipv6_cidr_block | ansible.netcommon.ipsubnet(64, 1) }}"
+ - cidr: 10.228.229.0/24
+ zone: '{{ availability_zone_a }}'
+ public: 'True'
+ assign_instances_ipv6: true
+ ipv6_cidr: "{{ vpc_ipv6_cidr_block | ansible.netcommon.ipsubnet(64, 2) }}"
+ - cidr: 10.228.230.0/24
+ zone: '{{ availability_zone_b }}'
+ public: 'False'
+ assign_instances_ipv6: true
+ ipv6_cidr: "{{ vpc_ipv6_cidr_block | ansible.netcommon.ipsubnet(64, 3) }}"
+ register: subnets
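+ # Note: ansible.netcommon.ipsubnet(64, N) selects one /64 out of the Amazon-provided /56
+ # VPC block, so each IPv6-enabled subnet above gets its own non-overlapping range.
+ # Illustrative only (the real /56 is assigned by AWS):
+ #   "{{ '2600:1f1c:1b3:8f00::/56' | ansible.netcommon.ipsubnet(64, 2) }}"
+ # evaluates to a single /64 network inside that /56.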
+ - ec2_vpc_subnet_info:
+ filters:
+ vpc-id: '{{ vpc.vpc.id }}'
+ register: vpc_subnets
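+ # Split the subnets created above into ID/CIDR lists based on their Public tag;
+ # these lists drive the public and private route table tests below.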
+ - set_fact:
+ public_subnets: "{{ (vpc_subnets.subnets | selectattr('tags.Public', 'equalto',\
+ \ 'True') | map(attribute='id') | list) }}"
+ public_cidrs: "{{ (vpc_subnets.subnets | selectattr('tags.Public', 'equalto',\
+ \ 'True') | map(attribute='cidr_block') | list) }}"
+ private_subnets: "{{ (vpc_subnets.subnets | selectattr('tags.Public', 'equalto',\
+ \ 'False') | map(attribute='id') | list) }}"
+ - name: create IGW
+ ec2_vpc_igw:
+ vpc_id: '{{ vpc.vpc.id }}'
+ register: vpc_igw
+ - name: create NAT GW
+ ec2_vpc_nat_gateway:
+ if_exist_do_not_create: yes
+ wait: yes
+ subnet_id: '{{ subnets.results[0].subnet.id }}'
+ register: nat_gateway
+ - name: CHECK MODE - route table should be created
+ ec2_vpc_route_table:
+ vpc_id: '{{ vpc.vpc.id }}'
+ tags:
+ Public: 'true'
+ Name: Public route table
+ check_mode: true
+ register: check_mode_results
+ - name: assert that the public route table would be created
+ assert:
+ that:
+ - check_mode_results.changed
+
+ - name: create public route table
+ ec2_vpc_route_table:
+ vpc_id: '{{ vpc.vpc.id }}'
+ tags:
+ Public: 'true'
+ Name: Public route table
+ register: create_public_table
+ - name: assert that public route table has an id
+ assert:
+ that:
+ - create_public_table.changed
+ - create_public_table.route_table.id.startswith('rtb-')
+ - "'Public' in create_public_table.route_table.tags"
+ - create_public_table.route_table.tags['Public'] == 'true'
+ - create_public_table.route_table.associations | length == 0
+ - create_public_table.route_table.vpc_id == vpc.vpc.id
+ - create_public_table.route_table.propagating_vgws | length == 0
+ # Two implicit local routes: one for the VPC's IPv4 CIDR, one for its IPv6 CIDR
+ - create_public_table.route_table.routes | length == 2
+
+ - name: CHECK MODE - route table should already exist
+ ec2_vpc_route_table:
+ vpc_id: '{{ vpc.vpc.id }}'
+ tags:
+ Public: 'true'
+ Name: Public route table
+ check_mode: true
+ register: check_mode_results
+ - name: assert the table already exists
+ assert:
+ that:
+ - not check_mode_results.changed
+
+ - name: recreate public route table
+ ec2_vpc_route_table:
+ vpc_id: '{{ vpc.vpc.id }}'
+ tags:
+ Public: 'true'
+ Name: Public route table
+ register: recreate_public_route_table
+ - name: assert that public route table did not change
+ assert:
+ that:
+ - not recreate_public_route_table.changed
+ - create_public_table.route_table.id.startswith('rtb-')
+ - "'Public' in create_public_table.route_table.tags"
+ - create_public_table.route_table.tags['Public'] == 'true'
+ - create_public_table.route_table.associations | length == 0
+ - create_public_table.route_table.vpc_id == vpc.vpc.id
+ - create_public_table.route_table.propagating_vgws | length == 0
+ - create_public_table.route_table.routes | length == 2
+
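+ # In the routes below, gateway_id: igw is a shorthand accepted by ec2_vpc_route_table
+ # that resolves to the internet gateway attached to the VPC, so the IGW ID does not
+ # have to be looked up explicitly.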
+ - name: CHECK MODE - add route to public route table
+ ec2_vpc_route_table:
+ vpc_id: '{{ vpc.vpc.id }}'
+ tags:
+ Public: 'true'
+ Name: Public route table
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: igw
+ - dest: ::/0
+ gateway_id: igw
+ check_mode: true
+ register: check_mode_results
+ - name: assert a route would be added
+ assert:
+ that:
+ - check_mode_results.changed
+
+ - name: add a route to public route table
+ ec2_vpc_route_table:
+ vpc_id: '{{ vpc.vpc.id }}'
+ tags:
+ Public: 'true'
+ Name: Public route table
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: igw
+ - dest: ::/0
+ gateway_id: igw
+ register: add_routes
+ - name: assert route table contains new route
+ assert:
+ that:
+ - add_routes.changed
+ - add_routes.route_table.id.startswith('rtb-')
+ - "'Public' in add_routes.route_table.tags"
+ - add_routes.route_table.tags['Public'] == 'true'
+ # 10.228.224.0/21
+ # 0.0.0.0/0
+ # ::/0
+ # Amazon-provided IPv6 block
+ - add_routes.route_table.routes | length == 4
+ - add_routes.route_table.associations | length == 0
+ - add_routes.route_table.vpc_id == vpc.vpc.id
+ - add_routes.route_table.propagating_vgws | length == 0
+
+ - name: CHECK MODE - re-add route to public route table
+ ec2_vpc_route_table:
+ vpc_id: '{{ vpc.vpc.id }}'
+ tags:
+ Public: 'true'
+ Name: Public route table
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: igw
+ check_mode: true
+ register: check_mode_results
+ - name: assert a route would not be added
+ assert:
+ that:
+ - check_mode_results is not changed
+
+ - name: re-add a route to public route table
+ ec2_vpc_route_table:
+ vpc_id: '{{ vpc.vpc.id }}'
+ tags:
+ Public: 'true'
+ Name: Public route table
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: igw
+ register: add_routes
+ - name: assert route table contains route
+ assert:
+ that:
+ - add_routes is not changed
+ - add_routes.route_table.routes | length == 4
+
+ - name: CHECK MODE - add subnets to public route table
+ ec2_vpc_route_table:
+ vpc_id: '{{ vpc.vpc.id }}'
+ tags:
+ Public: 'true'
+ Name: Public route table
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: igw
+ subnets: '{{ public_subnets }}'
+ check_mode: true
+ register: check_mode_results
+ - name: assert the subnets would be added to the route table
+ assert:
+ that:
+ - check_mode_results.changed
+
+ - name: add subnets to public route table
+ ec2_vpc_route_table:
+ vpc_id: '{{ vpc.vpc.id }}'
+ tags:
+ Public: 'true'
+ Name: Public route table
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: igw
+ subnets: '{{ public_subnets }}'
+ register: add_subnets
+ - name: assert route table contains subnets
+ assert:
+ that:
+ - add_subnets.changed
+ - add_subnets.route_table.associations | length == 3
+
+ - name: CHECK MODE - no routes but purge_routes set to false
+ ec2_vpc_route_table:
+ vpc_id: '{{ vpc.vpc.id }}'
+ tags:
+ Public: 'true'
+ Name: Public route table
+ purge_routes: no
+ subnets: '{{ public_subnets }}'
+ check_mode: true
+ register: check_mode_results
+ - name: assert no routes would be removed
+ assert:
+ that:
+ - not check_mode_results.changed
+
+ - name: rerun with purge_routes set to false
+ ec2_vpc_route_table:
+ vpc_id: '{{ vpc.vpc.id }}'
+ tags:
+ Public: 'true'
+ Name: Public route table
+ purge_routes: no
+ subnets: '{{ public_subnets }}'
+ register: no_purge_routes
+ - name: assert route table still has routes
+ assert:
+ that:
+ - not no_purge_routes.changed
+ - no_purge_routes.route_table.routes | length == 4
+ - no_purge_routes.route_table.associations | length == 3
+
+ - name: rerun with purge_subnets set to false
+ ec2_vpc_route_table:
+ vpc_id: '{{ vpc.vpc.id }}'
+ tags:
+ Public: 'true'
+ Name: Public route table
+ purge_subnets: no
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: igw
+ register: no_purge_subnets
+ - name: assert route table still has subnets
+ assert:
+ that:
+ - not no_purge_subnets.changed
+ - no_purge_subnets.route_table.routes | length == 4
+ - no_purge_subnets.route_table.associations | length == 3
+
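+ # lookup: id matches the route table by route_table_id instead of by its tags,
+ # so the Public/Name tags do not need to be repeated here.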
+ - name: rerun with purge_tags not set (implicitly false)
+ ec2_vpc_route_table:
+ vpc_id: '{{ vpc.vpc.id }}'
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: igw
+ lookup: id
+ route_table_id: '{{ create_public_table.route_table.id }}'
+ subnets: '{{ public_subnets }}'
+ register: no_purge_tags
+ - name: assert route table still has tags
+ assert:
+ that:
+ - not no_purge_tags.changed
+ - "'Public' in no_purge_tags.route_table.tags"
+ - no_purge_tags.route_table.tags['Public'] == 'true'
+
+ - name: CHECK MODE - purge subnets
+ ec2_vpc_route_table:
+ vpc_id: '{{ vpc.vpc.id }}'
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: igw
+ subnets: []
+ tags:
+ Public: 'true'
+ Name: Public route table
+ check_mode: true
+ register: check_mode_results
+ - name: assert subnets would be removed
+ assert:
+ that:
+ - check_mode_results.changed
+
+ - name: purge subnets
+ ec2_vpc_route_table:
+ vpc_id: '{{ vpc.vpc.id }}'
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: igw
+ subnets: []
+ tags:
+ Public: 'true'
+ Name: Public route table
+ register: purge_subnets
+ - name: assert purge subnets worked
+ assert:
+ that:
+ - purge_subnets.changed
+ - purge_subnets.route_table.associations | length == 0
+ - purge_subnets.route_table.id == create_public_table.route_table.id
+
+ - name: CHECK MODE - purge routes
+ ec2_vpc_route_table:
+ vpc_id: '{{ vpc.vpc.id }}'
+ tags:
+ Public: 'true'
+ Name: Public route table
+ routes: []
+ check_mode: true
+ register: check_mode_results
+ - name: assert routes would be removed
+ assert:
+ that:
+ - check_mode_results.changed
+
+ - name: add subnets by cidr to public route table
+ ec2_vpc_route_table:
+ vpc_id: '{{ vpc.vpc.id }}'
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: igw
+ subnets: '{{ public_cidrs }}'
+ lookup: id
+ route_table_id: '{{ create_public_table.route_table.id }}'
+ register: add_subnets_cidr
+ - name: assert route table contains subnets added by cidr
+ assert:
+ that:
+ - add_subnets_cidr.changed
+ - add_subnets_cidr.route_table.associations | length == 3
+
+ - name: purge subnets added by cidr
+ ec2_vpc_route_table:
+ vpc_id: '{{ vpc.vpc.id }}'
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: igw
+ subnets: []
+ lookup: id
+ route_table_id: '{{ create_public_table.route_table.id }}'
+ register: purge_subnets_cidr
+ - name: assert purge subnets added by cidr worked
+ assert:
+ that:
+ - purge_subnets_cidr.changed
+ - purge_subnets_cidr.route_table.associations | length == 0
+
+ - name: add subnets by name to public route table
+ ec2_vpc_route_table:
+ vpc_id: '{{ vpc.vpc.id }}'
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: igw
+ subnets: '{{ public_subnets }}'
+ lookup: id
+ route_table_id: '{{ create_public_table.route_table.id }}'
+ register: add_subnets_name
+ - name: assert route table contains subnets added by name
+ assert:
+ that:
+ - add_subnets_name.changed
+ - add_subnets_name.route_table.associations | length == 3
+
+ - name: purge subnets added by name
+ ec2_vpc_route_table:
+ vpc_id: '{{ vpc.vpc.id }}'
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: igw
+ subnets: []
+ lookup: id
+ route_table_id: '{{ create_public_table.route_table.id }}'
+ register: purge_subnets_name
+ - name: assert purge subnets added by name worked
+ assert:
+ that:
+ - purge_subnets_name.changed
+ - purge_subnets_name.route_table.associations | length == 0
+
+ - name: purge routes
+ ec2_vpc_route_table:
+ vpc_id: '{{ vpc.vpc.id }}'
+ tags:
+ Public: 'true'
+ Name: Public route table
+ routes: []
+ register: purge_routes
+ - name: assert purge routes worked
+ assert:
+ that:
+ - purge_routes.changed
+ - purge_routes.route_table.routes | length == 3
+ - purge_routes.route_table.id == create_public_table.route_table.id
+
+ - name: CHECK MODE - update tags
+ ec2_vpc_route_table:
+ vpc_id: '{{ vpc.vpc.id }}'
+ route_table_id: '{{ create_public_table.route_table.id }}'
+ lookup: id
+ purge_tags: yes
+ tags:
+ Name: Public route table
+ Updated: new_tag
+ check_mode: true
+ register: check_mode_results
+ - name: assert tags would be changed
+ assert:
+ that:
+ - check_mode_results.changed
+
+ - name: update tags
+ ec2_vpc_route_table:
+ vpc_id: '{{ vpc.vpc.id }}'
+ route_table_id: '{{ create_public_table.route_table.id }}'
+ lookup: id
+ purge_tags: yes
+ tags:
+ Name: Public route table
+ Updated: new_tag
+ register: update_tags
+ - name: assert update tags worked
+ assert:
+ that:
+ - update_tags.changed
+ - "'Updated' in update_tags.route_table.tags"
+ - update_tags.route_table.tags['Updated'] == 'new_tag'
+ - "'Public' not in update_tags.route_table.tags"
+
+ - name: create NAT GW
+ ec2_vpc_nat_gateway:
+ if_exist_do_not_create: yes
+ wait: yes
+ subnet_id: '{{ subnets.results[0].subnet.id }}'
+ register: nat_gateway
+ - name: CHECK MODE - create private route table
+ ec2_vpc_route_table:
+ vpc_id: '{{ vpc.vpc.id }}'
+ tags:
+ Public: 'false'
+ Name: Private route table
+ routes:
+ - gateway_id: '{{ nat_gateway.nat_gateway_id }}'
+ dest: 0.0.0.0/0
+ subnets: '{{ private_subnets }}'
+ check_mode: true
+ register: check_mode_results
+ - name: assert the route table would be created
+ assert:
+ that:
+ - check_mode_results.changed
+
+ - name: create private route table
+ ec2_vpc_route_table:
+ vpc_id: '{{ vpc.vpc.id }}'
+ tags:
+ Public: 'false'
+ Name: Private route table
+ routes:
+ - gateway_id: '{{ nat_gateway.nat_gateway_id }}'
+ dest: 0.0.0.0/0
+ subnets: '{{ private_subnets }}'
+ register: create_private_table
+ - name: assert creating private route table worked
+ assert:
+ that:
+ - create_private_table.changed
+ - create_private_table.route_table.id != create_public_table.route_table.id
+ - "'Public' in create_private_table.route_table.tags"
+
+ - name: CHECK MODE - destroy public route table by tags
+ ec2_vpc_route_table:
+ vpc_id: '{{ vpc.vpc.id }}'
+ state: absent
+ tags:
+ Updated: new_tag
+ Name: Public route table
+ check_mode: true
+ register: check_mode_results
+ - name: assert the route table would be deleted
+ assert:
+ that: check_mode_results.changed
+ - name: destroy public route table by tags
+ ec2_vpc_route_table:
+ vpc_id: '{{ vpc.vpc.id }}'
+ state: absent
+ tags:
+ Updated: new_tag
+ Name: Public route table
+ register: destroy_table
+ - name: assert destroy table worked
+ assert:
+ that:
+ - destroy_table.changed
+
+ - name: CHECK MODE - redestroy public route table
+ ec2_vpc_route_table:
+ route_table_id: '{{ create_public_table.route_table.id }}'
+ lookup: id
+ state: absent
+ check_mode: true
+ register: check_mode_results
+ - name: assert the public route table does not exist
+ assert:
+ that:
+ - not check_mode_results.changed
+
+ - name: redestroy public route table
+ ec2_vpc_route_table:
+ route_table_id: '{{ create_public_table.route_table.id }}'
+ lookup: id
+ state: absent
+ register: redestroy_table
+ - name: assert redestroy table worked
+ assert:
+ that:
+ - not redestroy_table.changed
+
+ - name: destroy NAT GW
+ ec2_vpc_nat_gateway:
+ state: absent
+ wait: yes
+ release_eip: yes
+ subnet_id: '{{ subnets.results[0].subnet.id }}'
+ nat_gateway_id: '{{ nat_gateway.nat_gateway_id }}'
+ register: nat_gateway
+ - name: show route table info, get table using route-table-id
+ ec2_vpc_route_table_info:
+ filters:
+ route-table-id: '{{ create_private_table.route_table.id }}'
+ register: route_table_info
+ - name: assert route_table_info has correct attributes
+ assert:
+ that:
+ - '"route_tables" in route_table_info'
+ - route_table_info.route_tables | length == 1
+ - '"id" in route_table_info.route_tables[0]'
+ - '"routes" in route_table_info.route_tables[0]'
+ - '"associations" in route_table_info.route_tables[0]'
+ - '"tags" in route_table_info.route_tables[0]'
+ - '"vpc_id" in route_table_info.route_tables[0]'
+ - route_table_info.route_tables[0].id == create_private_table.route_table.id
+ - '"propagating_vgws" in route_table_info.route_tables[0]'
+
+ - name: show route table info, get table using tags
+ ec2_vpc_route_table_info:
+ filters:
+ tag:Public: 'false'
+ tag:Name: Private route table
+ vpc-id: '{{ vpc.vpc.id }}'
+ register: route_table_info
+ - name: assert route_table_info has correct tags
+ assert:
+ that:
+ - route_table_info.route_tables | length == 1
+ - '"tags" in route_table_info.route_tables[0]'
+ - '"Public" in route_table_info.route_tables[0].tags'
+ - route_table_info.route_tables[0].tags["Public"] == "false"
+ - '"Name" in route_table_info.route_tables[0].tags'
+ - route_table_info.route_tables[0].tags["Name"] == "Private route table"
+
+ - name: create NAT GW
+ ec2_vpc_nat_gateway:
+ if_exist_do_not_create: yes
+ wait: yes
+ subnet_id: '{{ subnets.results[0].subnet.id }}'
+ register: nat_gateway
+ - name: show route table info
+ ec2_vpc_route_table_info:
+ filters:
+ route-table-id: '{{ create_private_table.route_table.id }}'
+ - name: recreate private route table with new NAT GW
+ ec2_vpc_route_table:
+ vpc_id: '{{ vpc.vpc.id }}'
+ tags:
+ Public: 'false'
+ Name: Private route table
+ routes:
+ - nat_gateway_id: '{{ nat_gateway.nat_gateway_id }}'
+ dest: 0.0.0.0/0
+ subnets: '{{ private_subnets }}'
+ register: recreate_private_table
+ - name: assert creating private route table worked
+ assert:
+ that:
+ - recreate_private_table.changed
+ - recreate_private_table.route_table.id != create_public_table.route_table.id
+
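+ # Gateway VPC endpoints (such as S3) install an AWS-managed prefix-list route in the
+ # route tables they are associated with; the tasks below check that ec2_vpc_route_table
+ # leaves that managed route alone even when purge_routes is enabled.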
+ - name: create a VPC endpoint to test ec2_vpc_route_table ignores it
+ ec2_vpc_endpoint:
+ state: present
+ vpc_id: '{{ vpc.vpc.id }}'
+ service: com.amazonaws.{{ aws_region }}.s3
+ route_table_ids:
+ - '{{ recreate_private_table.route_table.route_table_id }}'
+ wait: True
+ register: vpc_endpoint
+ - name: purge routes
+ ec2_vpc_route_table:
+ vpc_id: '{{ vpc.vpc.id }}'
+ tags:
+ Public: 'false'
+ Name: Private route table
+ routes:
+ - nat_gateway_id: '{{ nat_gateway.nat_gateway_id }}'
+ dest: 0.0.0.0/0
+ subnets: '{{ private_subnets }}'
+ purge_routes: true
+ register: result
+ - name: Get endpoint info to verify that its route wasn't purged from the route table
+ ec2_vpc_endpoint_info:
+ query: endpoints
+ vpc_endpoint_ids:
+ - '{{ vpc_endpoint.result.vpc_endpoint_id }}'
+ register: endpoint_details
+ - name: assert the route table is associated with the VPC endpoint
+ assert:
+ that:
+ - endpoint_details.vpc_endpoints[0].route_table_ids[0] == recreate_private_table.route_table.route_table_id
+
+ # ------------------------------------------------------------------------------------------
+
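+ # Associating a route table directly with an internet gateway creates a "gateway
+ # route table" (edge association), which AWS applies to traffic entering the VPC
+ # through that gateway rather than to traffic leaving subnets.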
+ - name: Create gateway route table - check_mode
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: 'true'
+ Name: Gateway route table
+ gateway_id: "{{ vpc_igw.gateway_id }}"
+ register: create_gateway_table
+ check_mode: yes
+
+ - assert:
+ that:
+ - create_gateway_table is changed
+
+ - name: Create gateway route table
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: 'true'
+ Name: Gateway route table
+ gateway_id: "{{ vpc_igw.gateway_id }}"
+ register: create_gateway_table
+
+ - assert:
+ that:
+ - create_gateway_table is changed
+ - create_gateway_table.route_table.id.startswith('rtb-')
+ - "'Public' in create_gateway_table.route_table.tags"
+ - create_gateway_table.route_table.tags['Public'] == 'true'
+ - create_gateway_table.route_table.routes | length == 2
+ - create_gateway_table.route_table.associations | length == 1
+ - create_gateway_table.route_table.vpc_id == vpc.vpc.id
+ - create_gateway_table.route_table.propagating_vgws | length == 0
+
+ - name: Create gateway route table (idempotence) - check_mode
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: 'true'
+ Name: Gateway route table
+ gateway_id: "{{ vpc_igw.gateway_id }}"
+ register: create_gateway_table
+ check_mode: yes
+
+ - assert:
+ that:
+ - create_gateway_table is not changed
+
+ - name: Create gateway route table (idempotence)
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: 'true'
+ Name: Gateway route table
+ gateway_id: "{{ vpc_igw.gateway_id }}"
+ register: create_gateway_table
+
+ - assert:
+ that:
+ - create_gateway_table is not changed
+ - create_gateway_table.route_table.id.startswith('rtb-')
+ - "'Public' in create_gateway_table.route_table.tags"
+ - create_gateway_table.route_table.tags['Public'] == 'true'
+ - create_gateway_table.route_table.routes | length == 2
+ - create_gateway_table.route_table.associations | length == 1
+ - create_gateway_table.route_table.vpc_id == vpc.vpc.id
+ - create_gateway_table.route_table.propagating_vgws | length == 0
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Create ENI for gateway route table
+ ec2_eni:
+ subnet_id: '{{ public_subnets[0] }}'
+ register: eni
+
+ - name: Replace route to gateway route table - check_mode
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: 'true'
+ Name: Gateway route table
+ gateway_id: "{{ vpc_igw.gateway_id }}"
+ routes:
+ - dest: "{{ vpc_cidr }}"
+ network_interface_id: "{{ eni.interface.id }}"
+ purge_routes: no
+ register: create_gateway_table
+ check_mode: yes
+
+ - assert:
+ that:
+ - create_gateway_table is changed
+
+ - name: Replace route to gateway route table
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: 'true'
+ Name: Gateway route table
+ gateway_id: "{{ vpc_igw.gateway_id }}"
+ routes:
+ - dest: "{{ vpc_cidr }}"
+ network_interface_id: "{{ eni.interface.id }}"
+ purge_routes: no
+ register: create_gateway_table
+
+ - assert:
+ that:
+ - create_gateway_table is changed
+ - create_gateway_table.route_table.id.startswith('rtb-')
+ - "'Public' in create_gateway_table.route_table.tags"
+ - create_gateway_table.route_table.tags['Public'] == 'true'
+ - create_gateway_table.route_table.routes | length == 2
+ - create_gateway_table.route_table.associations | length == 1
+ - create_gateway_table.route_table.vpc_id == vpc.vpc.id
+ - create_gateway_table.route_table.propagating_vgws | length == 0
+ - create_gateway_table.route_table.routes[0].destination_cidr_block == vpc_cidr
+ - create_gateway_table.route_table.routes[0].network_interface_id == eni.interface.id
+
+ - name: Replace route to gateway route table (idempotence) - check_mode
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: 'true'
+ Name: Gateway route table
+ gateway_id: "{{ vpc_igw.gateway_id }}"
+ routes:
+ - dest: "{{ vpc_cidr }}"
+ network_interface_id: "{{ eni.interface.id }}"
+ purge_routes: no
+ register: create_gateway_table
+ check_mode: yes
+
+ - assert:
+ that:
+ - create_gateway_table is not changed
+
+ - name: Replace route to gateway route table (idempotence)
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: 'true'
+ Name: Gateway route table
+ gateway_id: "{{ vpc_igw.gateway_id }}"
+ routes:
+ - dest: "{{ vpc_cidr }}"
+ network_interface_id: "{{ eni.interface.id }}"
+ purge_routes: no
+ register: create_gateway_table
+
+ - assert:
+ that:
+ - create_gateway_table is not changed
+ - create_gateway_table.route_table.id.startswith('rtb-')
+ - "'Public' in create_gateway_table.route_table.tags"
+ - create_gateway_table.route_table.tags['Public'] == 'true'
+ - create_gateway_table.route_table.routes | length == 2
+ - create_gateway_table.route_table.associations | length == 1
+ - create_gateway_table.route_table.vpc_id == vpc.vpc.id
+ - create_gateway_table.route_table.propagating_vgws | length == 0
+ - create_gateway_table.route_table.routes[0].destination_cidr_block == vpc_cidr
+ - create_gateway_table.route_table.routes[0].network_interface_id == eni.interface.id
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Add route to gateway route table - check_mode
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: 'true'
+ Name: Gateway route table
+ gateway_id: "{{ vpc_igw.gateway_id }}"
+ routes:
+ - dest: "10.228.228.0/24"
+ network_interface_id: "{{ eni.interface.id }}"
+ purge_routes: no
+ register: create_gateway_table
+ check_mode: yes
+
+ - assert:
+ that:
+ - create_gateway_table is changed
+
+ - name: Add route to gateway route table
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: 'true'
+ Name: Gateway route table
+ gateway_id: "{{ vpc_igw.gateway_id }}"
+ routes:
+ - dest: "10.228.228.0/24"
+ network_interface_id: "{{ eni.interface.id }}"
+ purge_routes: no
+ register: create_gateway_table
+
+ - assert:
+ that:
+ - create_gateway_table is changed
+ - create_gateway_table.route_table.id.startswith('rtb-')
+ - "'Public' in create_gateway_table.route_table.tags"
+ - create_gateway_table.route_table.tags['Public'] == 'true'
+ - create_gateway_table.route_table.routes | length == 3
+ - create_gateway_table.route_table.associations | length == 1
+ - create_gateway_table.route_table.vpc_id == vpc.vpc.id
+ - create_gateway_table.route_table.propagating_vgws | length == 0
+
+ - name: Add route to gateway route table (idempotence) - check_mode
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: 'true'
+ Name: Gateway route table
+ gateway_id: "{{ vpc_igw.gateway_id }}"
+ routes:
+ - dest: "10.228.228.0/24"
+ network_interface_id: "{{ eni.interface.id }}"
+ purge_routes: no
+ register: create_gateway_table
+ check_mode: yes
+
+ - assert:
+ that:
+ - create_gateway_table is not changed
+
+ - name: Add route to gateway route table (idempotence)
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: 'true'
+ Name: Gateway route table
+ gateway_id: "{{ vpc_igw.gateway_id }}"
+ routes:
+ - dest: "10.228.228.0/24"
+ network_interface_id: "{{ eni.interface.id }}"
+ purge_routes: no
+ register: create_gateway_table
+
+ - assert:
+ that:
+ - create_gateway_table is not changed
+ - create_gateway_table.route_table.id.startswith('rtb-')
+ - "'Public' in create_gateway_table.route_table.tags"
+ - create_gateway_table.route_table.tags['Public'] == 'true'
+ - create_gateway_table.route_table.routes | length == 3
+ - create_gateway_table.route_table.associations | length == 1
+ - create_gateway_table.route_table.vpc_id == vpc.vpc.id
+ - create_gateway_table.route_table.propagating_vgws | length == 0
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Ensure gateway doesn't disassociate when not passed in - check_mode
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: 'true'
+ Name: Gateway route table
+ routes:
+ - dest: "10.228.228.0/24"
+ network_interface_id: "{{ eni.interface.id }}"
+ purge_routes: no
+ register: create_gateway_table
+ check_mode: yes
+
+ - assert:
+ that:
+ - create_gateway_table is not changed
+
+ - name: Ensure gateway doesn't disassociate when not passed in
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: 'true'
+ Name: Gateway route table
+ routes:
+ - dest: "10.228.228.0/24"
+ network_interface_id: "{{ eni.interface.id }}"
+ purge_routes: no
+ register: create_gateway_table
+
+ - assert:
+ that:
+ - create_gateway_table is not changed
+ - create_gateway_table.route_table.id.startswith('rtb-')
+ - "'Public' in create_gateway_table.route_table.tags"
+ - create_gateway_table.route_table.tags['Public'] == 'true'
+ - create_gateway_table.route_table.routes | length == 3
+ - "{{ create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 1 }}"
+ - create_gateway_table.route_table.vpc_id == vpc.vpc.id
+ - create_gateway_table.route_table.propagating_vgws | length == 0
+
+ # ------------------------------------------------------------------------------------------
+
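+ # The string 'None' (rather than YAML null) is passed deliberately; the module treats it as a request to disassociate the gateway while keeping the routes and tags in place.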
+ - name: Disassociate gateway when gateway_id is 'None' - check_mode
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: 'true'
+ Name: Gateway route table
+ gateway_id: None
+ routes:
+ - dest: "10.228.228.0/24"
+ network_interface_id: "{{ eni.interface.id }}"
+ purge_routes: no
+ register: create_gateway_table
+ check_mode: yes
+
+ - assert:
+ that:
+ - create_gateway_table is changed
+
+ - name: Disassociate gateway when gateway_id is 'None'
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: 'true'
+ Name: Gateway route table
+ gateway_id: None
+ routes:
+ - dest: "10.228.228.0/24"
+ network_interface_id: "{{ eni.interface.id }}"
+ purge_routes: no
+ register: create_gateway_table
+
+ - assert:
+ that:
+ - create_gateway_table is changed
+ - create_gateway_table.route_table.id.startswith('rtb-')
+ - "'Public' in create_gateway_table.route_table.tags"
+ - create_gateway_table.route_table.tags['Public'] == 'true'
+ - create_gateway_table.route_table.routes | length == 3
+ - "{{ create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 0 }}"
+ - create_gateway_table.route_table.vpc_id == vpc.vpc.id
+ - create_gateway_table.route_table.propagating_vgws | length == 0
+
+ - name: Disassociate gateway when gateway_id is 'None' (idempotence) - check_mode
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: 'true'
+ Name: Gateway route table
+ gateway_id: None
+ routes:
+ - dest: "10.228.228.0/24"
+ network_interface_id: "{{ eni.interface.id }}"
+ purge_routes: no
+ register: create_gateway_table
+ check_mode: yes
+
+ - assert:
+ that:
+ - create_gateway_table is not changed
+
+ - name: Disassociate gateway when gateway_id is 'None' (idempotence)
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: 'true'
+ Name: Gateway route table
+ gateway_id: None
+ routes:
+ - dest: "10.228.228.0/24"
+ network_interface_id: "{{ eni.interface.id }}"
+ purge_routes: no
+ register: create_gateway_table
+
+ - assert:
+ that:
+ - create_gateway_table is not changed
+ - create_gateway_table.route_table.id.startswith('rtb-')
+ - "'Public' in create_gateway_table.route_table.tags"
+ - create_gateway_table.route_table.tags['Public'] == 'true'
+ - create_gateway_table.route_table.routes | length == 3
+ - "{{ create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 0 }}"
+ - create_gateway_table.route_table.vpc_id == vpc.vpc.id
+ - create_gateway_table.route_table.propagating_vgws | length == 0
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Associate gateway with route table - check_mode
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: 'true'
+ Name: Gateway route table
+ gateway_id: "{{ vpc_igw.gateway_id }}"
+ purge_routes: no
+ register: create_gateway_table
+ check_mode: yes
+
+ - assert:
+ that:
+ - create_gateway_table is changed
+
+ - name: Associate gateway with route table
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: 'true'
+ Name: Gateway route table
+ gateway_id: "{{ vpc_igw.gateway_id }}"
+ purge_routes: no
+ register: create_gateway_table
+
+ - assert:
+ that:
+ - create_gateway_table is changed
+ - create_gateway_table.route_table.id.startswith('rtb-')
+ - "'Public' in create_gateway_table.route_table.tags"
+ - create_gateway_table.route_table.tags['Public'] == 'true'
+ - create_gateway_table.route_table.routes | length == 3
+ - create_gateway_table.route_table.associations | length == 1
+ - "{{ create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 1 }}"
+ - create_gateway_table.route_table.vpc_id == vpc.vpc.id
+ - create_gateway_table.route_table.propagating_vgws | length == 0
+
+ - name: Associate gateway with route table (idempotence) - check_mode
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: 'true'
+ Name: Gateway route table
+ gateway_id: "{{ vpc_igw.gateway_id }}"
+ purge_routes: no
+ register: create_gateway_table
+ check_mode: yes
+
+ - assert:
+ that:
+ - create_gateway_table is not changed
+
+ - name: Associate gateway with route table (idempotence)
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: 'true'
+ Name: Gateway route table
+ gateway_id: "{{ vpc_igw.gateway_id }}"
+ purge_routes: no
+ register: create_gateway_table
+
+ - assert:
+ that:
+ - create_gateway_table is not changed
+ - create_gateway_table.route_table.id.startswith('rtb-')
+ - "'Public' in create_gateway_table.route_table.tags"
+ - create_gateway_table.route_table.tags['Public'] == 'true'
+ - create_gateway_table.route_table.routes | length == 3
+ - create_gateway_table.route_table.associations | length == 1
+ - "{{ create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 1 }}"
+ - create_gateway_table.route_table.vpc_id == vpc.vpc.id
+ - create_gateway_table.route_table.propagating_vgws | length == 0
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Disassociate gateway when gateway_id is '' - check_mode
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: 'true'
+ Name: Gateway route table
+ gateway_id: ''
+ routes:
+ - dest: "10.228.228.0/24"
+ network_interface_id: "{{ eni.interface.id }}"
+ purge_routes: no
+ register: create_gateway_table
+ check_mode: yes
+
+ - assert:
+ that:
+ - create_gateway_table is changed
+
+ - name: Disassociate gateway when gateway_id is ''
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: 'true'
+ Name: Gateway route table
+ gateway_id: ''
+ routes:
+ - dest: "10.228.228.0/24"
+ network_interface_id: "{{ eni.interface.id }}"
+ purge_routes: no
+ register: create_gateway_table
+
+ - assert:
+ that:
+ - create_gateway_table is changed
+ - create_gateway_table.route_table.id.startswith('rtb-')
+ - "'Public' in create_gateway_table.route_table.tags"
+ - create_gateway_table.route_table.tags['Public'] == 'true'
+ - create_gateway_table.route_table.routes | length == 3
+ - "{{ create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 0 }}"
+ - create_gateway_table.route_table.vpc_id == vpc.vpc.id
+ - create_gateway_table.route_table.propagating_vgws | length == 0
+
+ - name: Disassociate gateway when gateway_id is '' (idempotence) - check_mode
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: 'true'
+ Name: Gateway route table
+ gateway_id: ''
+ routes:
+ - dest: "10.228.228.0/24"
+ network_interface_id: "{{ eni.interface.id }}"
+ purge_routes: no
+ register: create_gateway_table
+ check_mode: yes
+
+ - assert:
+ that:
+ - create_gateway_table is not changed
+
+ - name: Disassociate gateway when gateway_id is '' (idempotence)
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: 'true'
+ Name: Gateway route table
+ gateway_id: ''
+ routes:
+ - dest: "10.228.228.0/24"
+ network_interface_id: "{{ eni.interface.id }}"
+ purge_routes: no
+ register: create_gateway_table
+
+ - assert:
+ that:
+ - create_gateway_table is not changed
+ - create_gateway_table.route_table.id.startswith('rtb-')
+ - "'Public' in create_gateway_table.route_table.tags"
+ - create_gateway_table.route_table.tags['Public'] == 'true'
+ - create_gateway_table.route_table.routes | length == 3
+ - "{{ create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 0 }}"
+ - create_gateway_table.route_table.vpc_id == vpc.vpc.id
+ - create_gateway_table.route_table.propagating_vgws | length == 0
+
+ # ------------------------------------------------------------------------------------------
+
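+ # Repeat the association flow using a virtual private gateway instead of the internet gateway.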
+ - name: Create vgw for gateway route table
+ ec2_vpc_vgw:
+ state: present
+ vpc_id: "{{ vpc.vpc.id }}"
+ type: ipsec.1
+ name: '{{ resource_prefix }}_vpc'
+ register: vgw
+
+ - name: Associate vgw with route table - check_mode
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: 'true'
+ Name: Gateway route table
+ gateway_id: "{{ vgw.vgw.id }}"
+ purge_routes: no
+ register: create_gateway_table
+ check_mode: yes
+
+ - assert:
+ that:
+ - create_gateway_table is changed
+
+ - name: Associate vgw with route table
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: 'true'
+ Name: Gateway route table
+ gateway_id: "{{ vgw.vgw.id }}"
+ purge_routes: no
+ register: create_gateway_table
+
+ - assert:
+ that:
+ - create_gateway_table is changed
+ - create_gateway_table.route_table.id.startswith('rtb-')
+ - "'Public' in create_gateway_table.route_table.tags"
+ - create_gateway_table.route_table.tags['Public'] == 'true'
+ - create_gateway_table.route_table.routes | length == 3
+ - create_gateway_table.route_table.associations | length == 2
+ - "{{ create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 1 }}"
+ - create_gateway_table.route_table.vpc_id == vpc.vpc.id
+ - create_gateway_table.route_table.propagating_vgws | length == 0
+
+ - name: Associate vgw with route table (idempotence) - check_mode
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: 'true'
+ Name: Gateway route table
+ gateway_id: "{{ vgw.vgw.id }}"
+ purge_routes: no
+ register: create_gateway_table
+ check_mode: yes
+
+ - assert:
+ that:
+ - create_gateway_table is not changed
+
+ - name: Associate vgw with route table (idempotence)
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: 'true'
+ Name: Gateway route table
+ gateway_id: "{{ vgw.vgw.id }}"
+ purge_routes: no
+ register: create_gateway_table
+
+ - assert:
+ that:
+ - create_gateway_table is not changed
+ - create_gateway_table.route_table.id.startswith('rtb-')
+ - "'Public' in create_gateway_table.route_table.tags"
+ - create_gateway_table.route_table.tags['Public'] == 'true'
+ - create_gateway_table.route_table.routes | length == 3
+ - create_gateway_table.route_table.associations | length == 2
+ - "{{ create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 1 }}"
+ - create_gateway_table.route_table.vpc_id == vpc.vpc.id
+ - create_gateway_table.route_table.propagating_vgws | length == 0
+
+ # ------------------------------------------------------------------------------------------
+
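+ # Verify the table is visible via ec2_vpc_route_table_info before deleting it, then confirm deletion and idempotent re-deletion.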
+ - name: Get route table info
+ ec2_vpc_route_table_info:
+ filters:
+ route-table-id: "{{ create_gateway_table.route_table.id }}"
+ register: rt_info
+
+ - name: Assert route table exists prior to deletion
+ assert:
+ that:
+ - rt_info.route_tables | length == 1
+
+ - name: Delete gateway route table - check_mode
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: 'true'
+ Name: Gateway route table
+ state: absent
+ register: delete_gateway_table
+ check_mode: yes
+
+ - assert:
+ that:
+ - delete_gateway_table is changed
+
+ - name: Delete gateway route table
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: 'true'
+ Name: Gateway route table
+ state: absent
+ register: delete_gateway_table
+
+ - name: Get route table info
+ ec2_vpc_route_table_info:
+ filters:
+ route-table-id: "{{ create_gateway_table.route_table.id }}"
+ register: rt_info
+
+ - name: Assert route table was deleted
+ assert:
+ that:
+ - delete_gateway_table is changed
+ - rt_info.route_tables | length == 0
+
+ - name: Delete gateway route table (idempotence) - check_mode
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: 'true'
+ Name: Gateway route table
+ state: absent
+ register: delete_gateway_table
+ check_mode: yes
+
+ - assert:
+ that:
+ - delete_gateway_table is not changed
+
+ - name: Delete gateway route table (idempotence)
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc.vpc.id }}"
+ tags:
+ Public: 'true'
+ Name: Gateway route table
+ state: absent
+ register: delete_gateway_table
+
+ - name: Get route table info
+ ec2_vpc_route_table_info:
+ filters:
+ route-table-id: "{{ create_gateway_table.route_table.id }}"
+ register: rt_info
+
+ - name: Assert route table was deleted
+ assert:
+ that:
+ - delete_gateway_table is not changed
+ - rt_info.route_tables | length == 0
+
+ always:
+ #############################################################################
+ # TEAR DOWN STARTS HERE
+ #############################################################################
+ - name: remove the VPC endpoint
+ ec2_vpc_endpoint:
+ state: absent
+ vpc_endpoint_id: '{{ vpc_endpoint.result.vpc_endpoint_id }}'
+ when: vpc_endpoint is defined
+ ignore_errors: yes
+ - name: destroy route tables
+ ec2_vpc_route_table:
+ route_table_id: '{{ item.route_table.id }}'
+ lookup: id
+ state: absent
+ with_items:
+ - '{{ create_public_table | default() }}'
+ - '{{ create_private_table | default() }}'
+ - '{{ create_gateway_table | default() }}'
+ when: item and not item.failed
+ ignore_errors: yes
+ - name: destroy NAT GW
+ ec2_vpc_nat_gateway:
+ state: absent
+ wait: yes
+ release_eip: yes
+ subnet_id: '{{ subnets.results[0].subnet.id }}'
+ nat_gateway_id: '{{ nat_gateway.nat_gateway_id }}'
+ ignore_errors: yes
+ - name: destroy IGW
+ ec2_vpc_igw:
+ vpc_id: '{{ vpc.vpc.id }}'
+ state: absent
+ ignore_errors: yes
+ - name: destroy VGW
+ ec2_vpc_vgw:
+ state: absent
+ type: ipsec.1
+ name: '{{ resource_prefix }}_vpc'
+ vpc_id: "{{ vpc.vpc.id }}"
+ ignore_errors: yes
+ - name: destroy ENI
+ ec2_eni:
+ state: absent
+ eni_id: '{{ eni.interface.id }}'
+ ignore_errors: yes
+ - name: destroy subnets
+ ec2_vpc_subnet:
+ cidr: '{{ item.cidr }}'
+ vpc_id: '{{ vpc.vpc.id }}'
+ state: absent
+ with_items:
+ - cidr: 10.228.224.0/24
+ - cidr: 10.228.225.0/24
+ - cidr: 10.228.226.0/24
+ - cidr: 10.228.227.0/24
+ - cidr: 10.228.228.0/24
+ - cidr: 10.228.229.0/24
+ - cidr: 10.228.230.0/24
+ ignore_errors: yes
+ - name: destroy VPC
+ ec2_vpc_net:
+ cidr_block: 10.228.224.0/21
+ name: '{{ resource_prefix }}_vpc'
+ state: absent
+ ignore_errors: yes
\ No newline at end of file
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/aliases
new file mode 100644
index 000000000..5dcc9055d
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+ec2_vpc_subnet_info
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/defaults/main.yml
new file mode 100644
index 000000000..75ff93f1b
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/defaults/main.yml
@@ -0,0 +1,9 @@
+---
+availability_zone: '{{ ec2_availability_zone_names[0] }}'
+
+# defaults file for ec2_vpc_subnet
+ec2_vpc_subnet_name: '{{resource_prefix}}'
+ec2_vpc_subnet_description: 'Created by ansible integration tests'
+vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/16'
+subnet_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.1.0/24'
+subnet_cidr_b: '10.{{ 256 | random(seed=resource_prefix) }}.2.0/24'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/meta/main.yml
new file mode 100644
index 000000000..1471b11f6
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_ec2_facts
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/tasks/main.yml
new file mode 100644
index 000000000..fd367f0c3
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/tasks/main.yml
@@ -0,0 +1,683 @@
+---
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ # ============================================================
+ - name: create a VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ ipv6_cidr: True
+ tags:
+ Name: "{{ resource_prefix }}-vpc"
+ Description: "Created by ansible-test"
+ register: vpc_result
+
+ - set_fact:
+ vpc_ipv6_cidr: "{{ vpc_result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block }}"
+
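+ # Carve a /64 for the subnet out of the VPC's Amazon-provided IPv6 block (AWS allocates a larger prefix, typically a /56, to the VPC).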
+ - set_fact:
+ subnet_ipv6_cidr: "{{ vpc_ipv6_cidr | regex_replace('::/.*', '::/64') }}"
+
+ # ============================================================
+ - name: check subnet does not exist
+ ec2_vpc_subnet_info:
+ filters:
+ "tag:Name": '{{ec2_vpc_subnet_name}}'
+ register: vpc_subnet_info
+
+ - name: Assert info result is zero
+ assert:
+ that:
+ - (vpc_subnet_info.subnets|length) == 0
+
+ - name: create subnet (expected changed=true) (CHECK MODE)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ az: "{{ availability_zone }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags:
+ Name: '{{ec2_vpc_subnet_name}}'
+ Description: '{{ec2_vpc_subnet_description}}'
+ state: present
+ check_mode: true
+ register: vpc_subnet_create
+
+ - name: assert creation would happen
+ assert:
+ that:
+ - vpc_subnet_create is changed
+
+ - name: create subnet (expected changed=true)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ az: "{{ availability_zone }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags:
+ Name: '{{ec2_vpc_subnet_name}}'
+ Description: '{{ec2_vpc_subnet_description}}'
+ state: present
+ register: vpc_subnet_create
+
+ - name: assert creation happened (expected changed=true)
+ assert:
+ that:
+ - vpc_subnet_create is changed
+ - 'vpc_subnet_create.subnet.id.startswith("subnet-")'
+ - '"Name" in vpc_subnet_create.subnet.tags and vpc_subnet_create.subnet.tags["Name"] == ec2_vpc_subnet_name'
+ - '"Description" in vpc_subnet_create.subnet.tags and vpc_subnet_create.subnet.tags["Description"] == ec2_vpc_subnet_description'
+
+ - name: get info about the subnet
+ ec2_vpc_subnet_info:
+ subnet_ids: '{{ vpc_subnet_create.subnet.id }}'
+ register: vpc_subnet_info
+
+ - name: Assert info result matches create result
+ assert:
+ that:
+ - 'vpc_subnet_info.subnets | length == 1'
+ - '"assign_ipv6_address_on_creation" in subnet_info'
+ - 'subnet_info.assign_ipv6_address_on_creation == False'
+ - '"availability_zone" in subnet_info'
+ - 'subnet_info.availability_zone == availability_zone'
+ - '"available_ip_address_count" in subnet_info'
+ - '"cidr_block" in subnet_info'
+ - 'subnet_info.cidr_block == subnet_cidr'
+ - '"default_for_az" in subnet_info'
+ - '"id" in subnet_info'
+ - 'subnet_info.id == vpc_subnet_create.subnet.id'
+ - '"map_public_ip_on_launch" in subnet_info'
+ - 'subnet_info.map_public_ip_on_launch == False'
+ - '"state" in subnet_info'
+ - '"subnet_id" in subnet_info'
+ - 'subnet_info.subnet_id == vpc_subnet_create.subnet.id'
+ - '"tags" in subnet_info'
+ - 'subnet_info.tags["Description"] == ec2_vpc_subnet_description'
+ - 'subnet_info.tags["Name"] == vpc_subnet_create.subnet.tags["Name"]'
+ - '"vpc_id" in subnet_info'
+ - 'subnet_info.vpc_id == vpc_result.vpc.id'
+ vars:
+ subnet_info: '{{ vpc_subnet_info.subnets[0] }}'
+
+ # ============================================================
+ - name: recreate subnet (expected changed=false) (CHECK MODE)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ az: "{{ availability_zone }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags:
+ Name: '{{ec2_vpc_subnet_name}}'
+ Description: '{{ec2_vpc_subnet_description}}'
+ state: present
+ check_mode: true
+ register: vpc_subnet_recreate
+
+ - name: assert recreation changed nothing (expected changed=false)
+ assert:
+ that:
+ - vpc_subnet_recreate is not changed
+
+ - name: recreate subnet (expected changed=false)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ az: "{{ availability_zone }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags:
+ Name: '{{ec2_vpc_subnet_name}}'
+ Description: '{{ec2_vpc_subnet_description}}'
+ state: present
+ register: vpc_subnet_recreate
+
+ - name: assert recreation changed nothing (expected changed=false)
+ assert:
+ that:
+ - vpc_subnet_recreate is not changed
+ - 'vpc_subnet_recreate.subnet == vpc_subnet_create.subnet'
+
+ # ============================================================
+ - name: update subnet so instances launched in it are assigned an IP (CHECK MODE)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ az: "{{ availability_zone }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags:
+ Name: '{{ec2_vpc_subnet_name}}'
+ Description: '{{ec2_vpc_subnet_description}}'
+ state: present
+ map_public: true
+ check_mode: true
+ register: vpc_subnet_modify
+
+ - name: assert subnet changed
+ assert:
+ that:
+ - vpc_subnet_modify is changed
+
+ - name: update subnet so instances launched in it are assigned an IP
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ az: "{{ availability_zone }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags:
+ Name: '{{ec2_vpc_subnet_name}}'
+ Description: '{{ec2_vpc_subnet_description}}'
+ state: present
+ map_public: true
+ register: vpc_subnet_modify
+
+ - name: assert subnet changed
+ assert:
+ that:
+ - vpc_subnet_modify is changed
+ - vpc_subnet_modify.subnet.map_public_ip_on_launch
+
+ # ============================================================
+ - name: add invalid ipv6 block to subnet (expected failed)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ az: "{{ availability_zone }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ ipv6_cidr: 2001:db8::/64
+ tags:
+ Name: '{{ec2_vpc_subnet_name}}'
+ Description: '{{ec2_vpc_subnet_description}}'
+ state: present
+ register: vpc_subnet_ipv6_failed
+ ignore_errors: yes
+
+ - name: assert failure happened (expected failed)
+ assert:
+ that:
+ - vpc_subnet_ipv6_failed is failed
+ - "'Couldn\\'t associate ipv6 cidr' in vpc_subnet_ipv6_failed.msg"
+
+ # ============================================================
+ - name: add a tag (expected changed=true) (CHECK MODE)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ az: "{{ availability_zone }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags:
+ Name: '{{ec2_vpc_subnet_name}}'
+ Description: '{{ec2_vpc_subnet_description}}'
+ AnotherTag: SomeValue
+ state: present
+ check_mode: true
+ register: vpc_subnet_add_a_tag
+
+ - name: assert tag addition happened (expected changed=true)
+ assert:
+ that:
+ - vpc_subnet_add_a_tag is changed
+
+ - name: add a tag (expected changed=true)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ az: "{{ availability_zone }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags:
+ Name: '{{ec2_vpc_subnet_name}}'
+ Description: '{{ec2_vpc_subnet_description}}'
+ AnotherTag: SomeValue
+ state: present
+ register: vpc_subnet_add_a_tag
+
+ - name: assert tag addition happened (expected changed=true)
+ assert:
+ that:
+ - vpc_subnet_add_a_tag is changed
+ - '"Name" in vpc_subnet_add_a_tag.subnet.tags and vpc_subnet_add_a_tag.subnet.tags["Name"] == ec2_vpc_subnet_name'
+ - '"Description" in vpc_subnet_add_a_tag.subnet.tags and vpc_subnet_add_a_tag.subnet.tags["Description"] == ec2_vpc_subnet_description'
+ - '"AnotherTag" in vpc_subnet_add_a_tag.subnet.tags and vpc_subnet_add_a_tag.subnet.tags["AnotherTag"] == "SomeValue"'
+
+ - name: Get info by tag
+ ec2_vpc_subnet_info:
+ filters:
+ "tag:Name": '{{ec2_vpc_subnet_name}}'
+ register: vpc_subnet_info_by_tag
+
+ - name: assert info matches expected output
+ assert:
+ that:
+ - 'vpc_subnet_info_by_tag.subnets[0].id == vpc_subnet_add_a_tag.subnet.id'
+ - (vpc_subnet_info_by_tag.subnets[0].tags|length) == 3
+ - '"Description" in vpc_subnet_info_by_tag.subnets[0].tags and vpc_subnet_info_by_tag.subnets[0].tags["Description"] == ec2_vpc_subnet_description'
+ - '"AnotherTag" in vpc_subnet_info_by_tag.subnets[0].tags and vpc_subnet_info_by_tag.subnets[0].tags["AnotherTag"] == "SomeValue"'
+
+ # ============================================================
+ - name: remove tags with default purge_tags=true (expected changed=true) (CHECK MODE)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ az: "{{ availability_zone }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags:
+ AnotherTag: SomeValue
+ state: present
+ check_mode: true
+ register: vpc_subnet_remove_tags
+
+ - name: assert tag removal happened (expected changed=true)
+ assert:
+ that:
+ - vpc_subnet_remove_tags is changed
+
+ - name: remove tags with default purge_tags=true (expected changed=true)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ az: "{{ availability_zone }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags:
+ AnotherTag: SomeValue
+ state: present
+ register: vpc_subnet_remove_tags
+
+ - name: assert tag removal happened (expected changed=true)
+ assert:
+ that:
+ - vpc_subnet_remove_tags is changed
+ - '"Name" not in vpc_subnet_remove_tags.subnet.tags'
+ - '"Description" not in vpc_subnet_remove_tags.subnet.tags'
+ - '"AnotherTag" in vpc_subnet_remove_tags.subnet.tags and vpc_subnet_remove_tags.subnet.tags["AnotherTag"] == "SomeValue"'
+
+ - name: Check tags by info
+ ec2_vpc_subnet_info:
+ subnet_id: '{{ vpc_subnet_remove_tags.subnet.id }}'
+ register: vpc_subnet_info_removed_tags
+
+ - name: assert info matches expected output
+ assert:
+ that:
+ - '"Name" not in vpc_subnet_info_removed_tags.subnets[0].tags'
+ - '"Description" not in vpc_subnet_info_removed_tags.subnets[0].tags'
+ - '"AnotherTag" in vpc_subnet_info_removed_tags.subnets[0].tags and vpc_subnet_info_removed_tags.subnets[0].tags["AnotherTag"] == "SomeValue"'
+
+
+ # ============================================================
+ - name: change tags with purge_tags=false (expected changed=true) (CHECK MODE)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ az: "{{ availability_zone }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags:
+ Name: '{{ec2_vpc_subnet_name}}'
+ Description: '{{ec2_vpc_subnet_description}}'
+ state: present
+ purge_tags: false
+ check_mode: true
+ register: vpc_subnet_change_tags
+
+ - name: assert tag addition happened (expected changed=true)
+ assert:
+ that:
+ - vpc_subnet_change_tags is changed
+
+ - name: change tags with purge_tags=false (expected changed=true)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ az: "{{ availability_zone }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags:
+ Name: '{{ec2_vpc_subnet_name}}'
+ Description: '{{ec2_vpc_subnet_description}}'
+ state: present
+ purge_tags: false
+ register: vpc_subnet_change_tags
+
+ - name: assert tag addition happened (expected changed=true)
+ assert:
+ that:
+ - vpc_subnet_change_tags is changed
+ - '"Name" in vpc_subnet_change_tags.subnet.tags and vpc_subnet_change_tags.subnet.tags["Name"] == ec2_vpc_subnet_name'
+ - '"Description" in vpc_subnet_change_tags.subnet.tags and vpc_subnet_change_tags.subnet.tags["Description"] == ec2_vpc_subnet_description'
+ - '"AnotherTag" in vpc_subnet_change_tags.subnet.tags and vpc_subnet_change_tags.subnet.tags["AnotherTag"] == "SomeValue"'
+
+ # ============================================================
+ - name: test state=absent (expected changed=true) (CHECK MODE)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ state: absent
+ check_mode: true
+ register: result
+
+ - name: assert state=absent (expected changed=true)
+ assert:
+ that:
+ - result is changed
+
+ - name: test state=absent (expected changed=true)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ state: absent
+ register: result
+
+ - name: assert state=absent (expected changed=true)
+ assert:
+ that:
+ - result is changed
+
+ # ============================================================
+ - name: test state=absent (expected changed=false) (CHECK MODE)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ state: absent
+ check_mode: true
+ register: result
+
+ - name: assert state=absent (expected changed=false)
+ assert:
+ that:
+ - result is not changed
+
+ - name: test state=absent (expected changed=false)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ state: absent
+ register: result
+
+ - name: assert state=absent (expected changed=false)
+ assert:
+ that:
+ - result is not changed
+
+ # ============================================================
+ - name: create subnet without AZ (CHECK MODE)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ state: present
+ check_mode: true
+ register: subnet_without_az
+
+ - name: check that subnet without AZ works fine
+ assert:
+ that:
+ - subnet_without_az is changed
+
+ - name: create subnet without AZ
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ state: present
+ register: subnet_without_az
+
+ - name: check that subnet without AZ works fine
+ assert:
+ that:
+ - subnet_without_az is changed
+
+ # ============================================================
+ - name: remove subnet without AZ (CHECK MODE)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ state: absent
+ check_mode: true
+ register: result
+
+ - name: assert state=absent (expected changed=true)
+ assert:
+ that:
+ - result is changed
+
+ - name: remove subnet without AZ
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ state: absent
+ register: result
+
+ - name: assert state=absent (expected changed=true)
+ assert:
+ that:
+ - result is changed
+
+
+ # ============================================================
+ - name: create subnet with IPv6 (expected changed=true) (CHECK MODE)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ ipv6_cidr: "{{ subnet_ipv6_cidr }}"
+ assign_instances_ipv6: true
+ state: present
+ tags:
+ Name: '{{ec2_vpc_subnet_name}}'
+ Description: '{{ec2_vpc_subnet_description}}'
+ check_mode: true
+ register: vpc_subnet_ipv6_create
+
+ - name: assert creation with IPv6 happened (expected changed=true)
+ assert:
+ that:
+ - vpc_subnet_ipv6_create is changed
+
+ - name: create subnet with IPv6 (expected changed=true)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ ipv6_cidr: "{{ subnet_ipv6_cidr }}"
+ assign_instances_ipv6: true
+ state: present
+ tags:
+ Name: '{{ec2_vpc_subnet_name}}'
+ Description: '{{ec2_vpc_subnet_description}}'
+ register: vpc_subnet_ipv6_create
+
+ - name: assert creation with IPv6 happened (expected changed=true)
+ assert:
+ that:
+ - vpc_subnet_ipv6_create is changed
+ - 'vpc_subnet_ipv6_create.subnet.id.startswith("subnet-")'
+ - "vpc_subnet_ipv6_create.subnet.ipv6_cidr_block == subnet_ipv6_cidr"
+ - '"Name" in vpc_subnet_ipv6_create.subnet.tags and vpc_subnet_ipv6_create.subnet.tags["Name"] == ec2_vpc_subnet_name'
+ - '"Description" in vpc_subnet_ipv6_create.subnet.tags and vpc_subnet_ipv6_create.subnet.tags["Description"] == ec2_vpc_subnet_description'
+ - 'vpc_subnet_ipv6_create.subnet.assign_ipv6_address_on_creation'
+
+ # ============================================================
+ - name: recreate subnet (expected changed=false) (CHECK MODE)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ ipv6_cidr: "{{ subnet_ipv6_cidr }}"
+ assign_instances_ipv6: true
+ state: present
+ tags:
+ Name: '{{ec2_vpc_subnet_name}}'
+ Description: '{{ec2_vpc_subnet_description}}'
+ check_mode: true
+ register: vpc_subnet_ipv6_recreate
+
+ - name: assert recreation changed nothing (expected changed=false)
+ assert:
+ that:
+ - vpc_subnet_ipv6_recreate is not changed
+
+ - name: recreate subnet (expected changed=false)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ ipv6_cidr: "{{ subnet_ipv6_cidr }}"
+ assign_instances_ipv6: true
+ state: present
+ tags:
+ Name: '{{ec2_vpc_subnet_name}}'
+ Description: '{{ec2_vpc_subnet_description}}'
+ register: vpc_subnet_ipv6_recreate
+
+ - name: assert recreation changed nothing (expected changed=false)
+ assert:
+ that:
+ - vpc_subnet_ipv6_recreate is not changed
+ - 'vpc_subnet_ipv6_recreate.subnet == vpc_subnet_ipv6_create.subnet'
+
+ # ============================================================
+ - name: change subnet assign_instances_ipv6 attribute (expected changed=true) (CHECK MODE)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ ipv6_cidr: "{{ subnet_ipv6_cidr }}"
+ assign_instances_ipv6: false
+ state: present
+ purge_tags: false
+ check_mode: true
+ register: vpc_change_attribute
+
+ - name: assert assign_instances_ipv6 attribute changed (expected changed=true)
+ assert:
+ that:
+ - vpc_change_attribute is changed
+
+ - name: change subnet assign_instances_ipv6 attribute (expected changed=true)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ ipv6_cidr: "{{ subnet_ipv6_cidr }}"
+ assign_instances_ipv6: false
+ state: present
+ purge_tags: false
+ register: vpc_change_attribute
+
+ - name: assert assign_instances_ipv6 attribute changed (expected changed=true)
+ assert:
+ that:
+ - vpc_change_attribute is changed
+ - 'not vpc_change_attribute.subnet.assign_ipv6_address_on_creation'
+
+ # ============================================================
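+ # Re-using the same /64 on a second subnet must fail; the module is expected to surface the CIDR conflict in its error message.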
+ - name: add second subnet with duplicate ipv6 cidr (expected failure)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr_b }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ ipv6_cidr: "{{ subnet_ipv6_cidr }}"
+ state: present
+ purge_tags: false
+ register: vpc_add_duplicate_ipv6
+ ignore_errors: true
+
+ - name: assert graceful failure (expected failed)
+ assert:
+ that:
+ - vpc_add_duplicate_ipv6 is failed
+ - "'The IPv6 CIDR \\'{{ subnet_ipv6_cidr }}\\' conflicts with another subnet' in vpc_add_duplicate_ipv6.msg"
+
+ # ============================================================
+ - name: remove subnet ipv6 cidr (expected changed=true) (CHECK MODE)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ state: present
+ purge_tags: false
+ check_mode: true
+ register: vpc_remove_ipv6_cidr
+
+ - name: assert subnet ipv6 cidr removed (expected changed=true)
+ assert:
+ that:
+ - vpc_remove_ipv6_cidr is changed
+
+ - name: remove subnet ipv6 cidr (expected changed=true)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ state: present
+ purge_tags: false
+ register: vpc_remove_ipv6_cidr
+
+ - name: assert subnet ipv6 cidr removed (expected changed=true)
+ assert:
+ that:
+ - vpc_remove_ipv6_cidr is changed
+ - "vpc_remove_ipv6_cidr.subnet.ipv6_cidr_block == ''"
+ - 'not vpc_remove_ipv6_cidr.subnet.assign_ipv6_address_on_creation'
+
+ # ============================================================
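+ # EC2 stores tag values as strings, so a YAML boolean true comes back as the string "True"; the assertions below depend on that conversion.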
+ - name: test adding a tag that looks like a boolean to the subnet (CHECK MODE)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ state: present
+ purge_tags: false
+ tags:
+ looks_like_boolean: true
+ check_mode: true
+ register: vpc_subnet_info
+
+ - name: assert a tag was added
+ assert:
+ that:
+ - vpc_subnet_info is changed
+
+ - name: test adding a tag that looks like a boolean to the subnet
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ state: present
+ purge_tags: false
+ tags:
+ looks_like_boolean: true
+ register: vpc_subnet_info
+
+ - name: assert a tag was added
+ assert:
+ that:
+ - vpc_subnet_info is changed
+ - 'vpc_subnet_info.subnet.tags.looks_like_boolean == "True"'
+
+ # ============================================================
+ - name: test idempotence adding a tag that looks like a boolean (CHECK MODE)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ state: present
+ purge_tags: false
+ tags:
+ looks_like_boolean: true
+ check_mode: true
+ register: vpc_subnet_info
+
+ - name: assert tags haven't changed
+ assert:
+ that:
+ - vpc_subnet_info is not changed
+
+ - name: test idempotence adding a tag that looks like a boolean
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ state: present
+ purge_tags: false
+ tags:
+ looks_like_boolean: true
+ register: vpc_subnet_info
+
+ - name: assert tags haven't changed
+ assert:
+ that:
+ - vpc_subnet_info is not changed
+
+ always:
+
+ ################################################
+ # TEARDOWN STARTS HERE
+ ################################################
+
+ - name: tidy up subnet
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ state: absent
+
+ - name: tidy up VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ state: absent
+ cidr_block: "{{ vpc_cidr }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/aliases b/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/aliases
new file mode 100644
index 000000000..948352f20
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+slow
+elb_application_lb_info
\ No newline at end of file
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/defaults/main.yml
new file mode 100644
index 000000000..719851924
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/defaults/main.yml
@@ -0,0 +1,28 @@
+# defaults file for elb_application_lb
+
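+# resource_short is a deterministic 8-character hex suffix (random seeded with resource_prefix) so generated names stay stable across reruns.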
+resource_short: "{{ '%0.8x'%((16**8) | random(seed=resource_prefix)) }}"
+alb_name: alb-test-{{ resource_short }}
+alb_2_name: alb-test-2-{{ resource_short }}
+tg_name: alb-test-{{ resource_short }}
+tg_2_name: alb-test-2-{{ resource_short }}
+vpc_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.0.0/16
+private_subnet_cidr_1: 10.{{ 256 | random(seed=resource_prefix) }}.1.0/24
+private_subnet_cidr_2: 10.{{ 256 | random(seed=resource_prefix) }}.2.0/24
+public_subnet_cidr_1: 10.{{ 256 | random(seed=resource_prefix) }}.3.0/24
+public_subnet_cidr_2: 10.{{ 256 | random(seed=resource_prefix) }}.4.0/24
+s3_bucket_name: alb-test-{{ resource_short }}
+
+# Amazon's SDKs don't provide the list of account IDs; Amazon only publishes a
+# web page. If you want to run these tests outside the US regions you'll need to
+# update this map.
+# https://docs.aws.amazon.com/elasticloadbalancing/latest/application/enable-access-logging.html
+elb_access_log_account_id_map:
+ us-east-1: "127311923021"
+ us-east-2: "033677994240"
+ us-west-1: "027434742980"
+ us-west-2: "797873946194"
+ us-gov-east-1: "190560391635"
+ us-gov-west-1: "048591011584"
+
+
+elb_account_id: '{{ elb_access_log_account_id_map[aws_region] }}'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/tasks/main.yml
new file mode 100644
index 000000000..2a0cab761
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/tasks/main.yml
@@ -0,0 +1,1558 @@
+- name: elb_application_lb integration tests
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+ - name: Create a test VPC
+ ec2_vpc_net:
+ cidr_block: '{{ vpc_cidr }}'
+ name: '{{ resource_prefix }}_vpc'
+ state: present
+ ipv6_cidr: true
+ tags:
+ Name: elb_application_lb testing
+ ResourcePrefix: '{{ resource_prefix }}'
+ register: vpc
+ - name: 'Set fact: VPC ID'
+ set_fact:
+ vpc_id: '{{ vpc.vpc.id }}'
+ - name: Get VPC's default security group
+ ec2_group_info:
+ filters:
+ vpc-id: '{{ vpc_id }}'
+ register: default_sg
+ - name: Create an internet gateway
+ ec2_vpc_igw:
+ vpc_id: '{{ vpc_id }}'
+ state: present
+ tags:
+ Name: '{{ resource_prefix }}'
+ register: igw
+ - name: Create private subnets
+ ec2_vpc_subnet:
+ cidr: '{{ item.cidr }}'
+ az: '{{ aws_region }}{{ item.az }}'
+ vpc_id: '{{ vpc_id }}'
+ state: present
+ tags:
+ Public: 'False'
+ Name: private-{{ item.az }}
+ with_items:
+ - cidr: '{{ private_subnet_cidr_1 }}'
+ az: a
+ - cidr: '{{ private_subnet_cidr_2 }}'
+ az: b
+ register: private_subnets
+ - name: Create public subnets with ipv6
+ ec2_vpc_subnet:
+ cidr: '{{ item.cidr }}'
+ az: '{{ aws_region }}{{ item.az }}'
+ vpc_id: '{{ vpc_id }}'
+ state: present
+ ipv6_cidr: '{{ item.vpc_ipv6_cidr }}'
+ tags:
+ Public: 'True'
+ Name: public-{{ item.az }}
+ with_items:
+ - cidr: '{{ public_subnet_cidr_1 }}'
+ az: a
+ vpc_ipv6_cidr: "{{ vpc.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block\
+ \ | replace('0::/56','0::/64') }}"
+ - cidr: '{{ public_subnet_cidr_2 }}'
+ az: b
+ vpc_ipv6_cidr: "{{ vpc.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block\
+ \ | replace('0::/56','1::/64') }}"
+ register: public_subnets
+ - name: Create list of subnet ids
+ set_fact:
+ public_subnets: "{{ public_subnets.results | map(attribute='subnet') | map(attribute='id')\
+ \ }}"
+ private_subnets: "{{ private_subnets.results | map(attribute='subnet') | map(attribute='id')\
+ \ }}"
+ - name: Create a route table
+ ec2_vpc_route_table:
+ vpc_id: '{{ vpc_id }}'
+ tags:
+ Name: igw-route
+ Created: '{{ resource_prefix }}'
+ subnets: '{{ public_subnets + private_subnets }}'
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: '{{ igw.gateway_id }}'
+ register: route_table
+ - name: Create a security group for Ansible ALB integration tests
+ ec2_group:
+ name: '{{ resource_prefix }}'
+ description: security group for Ansible ALB integration tests
+ state: present
+ vpc_id: '{{ vpc_id }}'
+ rules:
+ - proto: tcp
+ from_port: 1
+ to_port: 65535
+ cidr_ip: 0.0.0.0/0
+ register: sec_group
+ - name: Create another security group for Ansible ALB integration tests
+ ec2_group:
+ name: '{{ resource_prefix }}-2'
+ description: security group for Ansible ALB integration tests
+ state: present
+ vpc_id: '{{ vpc_id }}'
+ rules:
+ - proto: tcp
+ from_port: 1
+ to_port: 65535
+ cidr_ip: 0.0.0.0/0
+ register: sec_group2
+ - name: Create a target group for testing
+ elb_target_group:
+ name: '{{ tg_name }}'
+ protocol: http
+ port: 80
+ vpc_id: '{{ vpc_id }}'
+ state: present
+ register: tg
+ - name: Create a second target group for testing
+ community.aws.elb_target_group:
+ name: '{{ tg_2_name }}'
+ protocol: http
+ port: 80
+ vpc_id: '{{ vpc_id }}'
+ state: present
+ register: tg_2
+ - name: Get ARN of calling user
+ amazon.aws.aws_caller_info:
+ register: aws_caller_info
+ - name: Register account id
+ ansible.builtin.set_fact:
+ aws_account: "{{ aws_caller_info.account }}"
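+ # The bucket policy rendered from templates/policy.json is assumed to grant the regional ELB log-delivery account (elb_account_id) permission to write access logs.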
+ - name: Create S3 bucket for testing
+ amazon.aws.s3_bucket:
+ name: "{{ s3_bucket_name }}"
+ state: present
+ encryption: "aws:kms"
+ policy: "{{ lookup('template', 'policy.json') }}"
+
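+ # Negative tests: each of the following create attempts is expected to fail parameter validation, so no ALB should exist yet.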
+ - name: Create an ALB (invalid - SslPolicy is required when Protocol == HTTPS)
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners:
+ - Protocol: HTTPS
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: '{{ tg_name }}'
+ ignore_errors: yes
+ register: alb
+ - assert:
+ that:
+ - alb is failed
+ - alb.msg is match("'SslPolicy' is a required listener dict key when Protocol
+ = HTTPS")
+
+ - name: Create an ALB (invalid - didn't provide required listener options)
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners:
+ - Port: 80
+ ignore_errors: yes
+ register: alb
+ - assert:
+ that:
+ - alb is failed
+ - alb.msg is match("missing required arguments:\ DefaultActions, Protocol found
+ in listeners")
+
+ - name: Create an ALB (invalid - invalid listener option type)
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: bad type
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: '{{ tg_name }}'
+ ignore_errors: yes
+ register: alb
+ - assert:
+ that:
+ - alb is failed
+ - "'unable to convert to int' in alb.msg"
+
+ - name: Create an ALB (invalid - invalid ip address type)
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: '{{ tg_name }}'
+ ip_address_type: ip_addr_v4_v6
+ ignore_errors: yes
+ register: alb
+ - assert:
+ that:
+ - alb is failed
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Create an ALB with defaults - check_mode
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: []
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: '{{ tg_name }}'
+ register: alb
+ check_mode: yes
+ - assert:
+ that:
+ - alb is changed
+ - alb.msg is match('Would have created ALB if not in check mode.')
+
+ - name: Create an ALB with defaults
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: []
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: '{{ tg_name }}'
+ register: alb
+ - assert:
+ that:
+ - alb is changed
+ - alb.listeners[0].rules | length == 1
+ - alb.security_groups | length == 1
+ - alb.security_groups[0] == default_sg.security_groups[0].group_id
+
+ - name: Create an ALB with defaults (idempotence) - check_mode
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: []
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: '{{ tg_name }}'
+ register: alb
+ check_mode: yes
+ - assert:
+ that:
+ - alb is not changed
+ - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.')
+
+ - name: Create an ALB with defaults (idempotence)
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: []
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: '{{ tg_name }}'
+ register: alb
+ - assert:
+ that:
+ - alb is not changed
+ - alb.listeners[0].rules | length == 1
+ - alb.security_groups[0] == default_sg.security_groups[0].group_id
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Create an ALB with attributes - check_mode
+ amazon.aws.elb_application_lb:
+ name: '{{ alb_2_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: '{{ tg_2_name }}'
+ access_logs_enabled: true
+ access_logs_s3_bucket: "{{ s3_bucket_name }}"
+ access_logs_s3_prefix: "alb-logs"
+ ip_address_type: dualstack
+ http2: false
+ http_desync_mitigation_mode: monitor
+ http_drop_invalid_header_fields: true
+ http_x_amzn_tls_version_and_cipher_suite: true
+ http_xff_client_port: true
+ waf_fail_open: true
+ register: alb_2
+ check_mode: true
+
+ - name: Verify check mode response
+ ansible.builtin.assert:
+ that:
+ - alb_2 is changed
+ - alb_2.msg is match('Would have created ALB if not in check mode.')
+
+ - name: Create an ALB with attributes
+ amazon.aws.elb_application_lb:
+ name: '{{ alb_2_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: '{{ tg_2_name }}'
+ access_logs_enabled: true
+ access_logs_s3_bucket: "{{ s3_bucket_name }}"
+ access_logs_s3_prefix: "alb-logs"
+ http2: false
+ http_desync_mitigation_mode: monitor
+ http_drop_invalid_header_fields: true
+ http_x_amzn_tls_version_and_cipher_suite: true
+ http_xff_client_port: true
+ idle_timeout: 120
+ ip_address_type: dualstack
+ waf_fail_open: true
+ register: alb_2
+
+ - name: Verify ALB was created with correct attributes
+ ansible.builtin.assert:
+ that:
+ - alb_2 is changed
+ - alb_2.listeners[0].rules | length == 1
+ - alb_2.security_groups | length == 1
+ - alb_2.security_groups[0] == sec_group.group_id
+ - alb_2.ip_address_type == 'dualstack'
+ - alb_2.access_logs_s3_enabled | bool
+ - alb_2.access_logs_s3_bucket == s3_bucket_name
+ - alb_2.access_logs_s3_prefix == "alb-logs"
+ - not alb_2.routing_http2_enabled | bool
+ - alb_2.routing_http_desync_mitigation_mode == 'monitor'
+ - alb_2.routing_http_drop_invalid_header_fields_enabled | bool
+ - alb_2.routing_http_x_amzn_tls_version_and_cipher_suite_enabled | bool
+ - alb_2.routing_http_xff_client_port_enabled | bool
+ - alb_2.idle_timeout_timeout_seconds == "120"
+ - alb_2.waf_fail_open_enabled | bool
+
+ - name: Create an ALB with attributes (idempotence) - check_mode
+ amazon.aws.elb_application_lb:
+ name: '{{ alb_2_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: '{{ tg_2_name }}'
+ access_logs_enabled: true
+ access_logs_s3_bucket: "{{ s3_bucket_name }}"
+ access_logs_s3_prefix: "alb-logs"
+ ip_address_type: dualstack
+ http2: false
+ http_desync_mitigation_mode: monitor
+ http_drop_invalid_header_fields: true
+ http_x_amzn_tls_version_and_cipher_suite: true
+ http_xff_client_port: true
+ waf_fail_open: true
+ register: alb_2
+ check_mode: true
+
+ - name: Verify idempotence check mode response
+ ansible.builtin.assert:
+ that:
+ - alb_2 is not changed
+ - alb_2.msg is match('IN CHECK MODE - no changes to make to ALB specified.')
+
+ - name: Create an ALB with attributes (idempotence)
+ amazon.aws.elb_application_lb:
+ name: '{{ alb_2_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: '{{ tg_2_name }}'
+ access_logs_enabled: true
+ access_logs_s3_bucket: "{{ s3_bucket_name }}"
+ access_logs_s3_prefix: "alb-logs"
+ ip_address_type: dualstack
+ http2: false
+ http_desync_mitigation_mode: monitor
+ http_drop_invalid_header_fields: true
+ http_x_amzn_tls_version_and_cipher_suite: true
+ http_xff_client_port: true
+ waf_fail_open: true
+ register: alb_2
+
+ - name: Verify ALB was not changed
+ ansible.builtin.assert:
+ that:
+ - alb_2 is not changed
+ - alb_2.listeners[0].rules | length == 1
+ - alb_2.security_groups | length == 1
+ - alb_2.security_groups[0] == sec_group.group_id
+ - alb_2.ip_address_type == 'dualstack'
+ - alb_2.access_logs_s3_enabled | bool
+ - alb_2.access_logs_s3_bucket == s3_bucket_name
+ - alb_2.access_logs_s3_prefix == "alb-logs"
+ - not alb_2.routing_http2_enabled | bool
+ - alb_2.routing_http_desync_mitigation_mode == 'monitor'
+ - alb_2.routing_http_drop_invalid_header_fields_enabled | bool
+ - alb_2.routing_http_x_amzn_tls_version_and_cipher_suite_enabled | bool
+ - alb_2.routing_http_xff_client_port_enabled | bool
+ - alb_2.idle_timeout_timeout_seconds == "120"
+ - alb_2.waf_fail_open_enabled | bool
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Update an ALB with ip address type - check_mode
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: '{{ tg_name }}'
+ ip_address_type: dualstack
+ register: alb
+ check_mode: yes
+ - assert:
+ that:
+ - alb is changed
+ - alb.msg is match('Would have updated ALB if not in check mode.')
+
+ - name: Update an ALB with ip address type
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: '{{ tg_name }}'
+ ip_address_type: dualstack
+ register: alb
+ - assert:
+ that:
+ - alb is changed
+ - alb.ip_address_type == 'dualstack'
+ - alb.listeners[0].rules | length == 1
+ - alb.routing_http2_enabled | bool
+ - alb.routing_http_desync_mitigation_mode == 'defensive'
+ - not alb.routing_http_drop_invalid_header_fields_enabled | bool
+ - not alb.routing_http_x_amzn_tls_version_and_cipher_suite_enabled | bool
+ - not alb.routing_http_xff_client_port_enabled | bool
+ - not alb.waf_fail_open_enabled | bool
+
+ - name: Update an ALB with ip address type (idempotence) - check_mode
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: '{{ tg_name }}'
+ ip_address_type: dualstack
+ register: alb
+ check_mode: yes
+ - assert:
+ that:
+ - alb is not changed
+ - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.')
+
+ - name: Update an ALB with ip address type (idempotence)
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: '{{ tg_name }}'
+ ip_address_type: dualstack
+ register: alb
+ - assert:
+ that:
+ - alb is not changed
+ - alb.ip_address_type == 'dualstack'
+ - alb.routing_http2_enabled | bool
+ - alb.routing_http_desync_mitigation_mode == 'defensive'
+ - not alb.routing_http_drop_invalid_header_fields_enabled | bool
+ - not alb.routing_http_x_amzn_tls_version_and_cipher_suite_enabled | bool
+ - not alb.routing_http_xff_client_port_enabled | bool
+ - not alb.waf_fail_open_enabled | bool
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Update an ALB with different attributes - check_mode
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: '{{ tg_name }}'
+ ip_address_type: dualstack
+ http2: no
+ http_desync_mitigation_mode: monitor
+ http_drop_invalid_header_fields: yes
+ http_x_amzn_tls_version_and_cipher_suite: yes
+ http_xff_client_port: yes
+ waf_fail_open: yes
+ register: alb
+ check_mode: yes
+ - assert:
+ that:
+ - alb is changed
+ - alb.msg is match('Would have updated ALB if not in check mode.')
+
+ - name: Update an ALB with different attributes
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: '{{ tg_name }}'
+ ip_address_type: dualstack
+ http2: no
+ http_desync_mitigation_mode: monitor
+ http_drop_invalid_header_fields: yes
+ http_x_amzn_tls_version_and_cipher_suite: yes
+ http_xff_client_port: yes
+ waf_fail_open: yes
+ register: alb
+ - assert:
+ that:
+ - alb is changed
+ - alb.ip_address_type == 'dualstack'
+ - not alb.routing_http2_enabled | bool
+ - alb.routing_http_desync_mitigation_mode == 'monitor'
+ - alb.routing_http_drop_invalid_header_fields_enabled | bool
+ - alb.routing_http_x_amzn_tls_version_and_cipher_suite_enabled | bool
+ - alb.routing_http_xff_client_port_enabled | bool
+ - alb.waf_fail_open_enabled | bool
+
+ - name: Update an ALB with different attributes (idempotence) - check_mode
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: '{{ tg_name }}'
+ ip_address_type: dualstack
+ http2: no
+ http_desync_mitigation_mode: monitor
+ http_drop_invalid_header_fields: yes
+ http_x_amzn_tls_version_and_cipher_suite: yes
+ http_xff_client_port: yes
+ waf_fail_open: yes
+ register: alb
+ check_mode: yes
+ - assert:
+ that:
+ - alb is not changed
+ - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.')
+
+ - name: Update an ALB with different attributes (idempotence)
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: '{{ tg_name }}'
+ ip_address_type: dualstack
+ http2: no
+ http_desync_mitigation_mode: monitor
+ http_drop_invalid_header_fields: yes
+ http_x_amzn_tls_version_and_cipher_suite: yes
+ http_xff_client_port: yes
+ waf_fail_open: yes
+ register: alb
+ - assert:
+ that:
+ - alb is not changed
+ - alb.ip_address_type == 'dualstack'
+ - not alb.routing_http2_enabled | bool
+ - alb.routing_http_desync_mitigation_mode == 'monitor'
+ - alb.routing_http_drop_invalid_header_fields_enabled | bool
+ - alb.routing_http_x_amzn_tls_version_and_cipher_suite_enabled | bool
+ - alb.routing_http_xff_client_port_enabled | bool
+ - alb.waf_fail_open_enabled | bool
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Update an ALB with different ip address type - check_mode
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: '{{ tg_name }}'
+ ip_address_type: ipv4
+ http2: no
+ http_desync_mitigation_mode: monitor
+ http_drop_invalid_header_fields: yes
+ http_x_amzn_tls_version_and_cipher_suite: yes
+ http_xff_client_port: yes
+ waf_fail_open: yes
+ register: alb
+ check_mode: yes
+ - assert:
+ that:
+ - alb is changed
+ - alb.msg is match('Would have updated ALB if not in check mode.')
+
+ - name: Update an ALB with different ip address type
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: '{{ tg_name }}'
+ ip_address_type: ipv4
+ http2: no
+ http_desync_mitigation_mode: monitor
+ http_drop_invalid_header_fields: yes
+ http_x_amzn_tls_version_and_cipher_suite: yes
+ http_xff_client_port: yes
+ waf_fail_open: yes
+ register: alb
+ - assert:
+ that:
+ - alb is changed
+ - alb.ip_address_type == 'ipv4'
+ - not alb.routing_http2_enabled | bool
+ - alb.routing_http_desync_mitigation_mode == 'monitor'
+ - alb.routing_http_drop_invalid_header_fields_enabled | bool
+ - alb.routing_http_x_amzn_tls_version_and_cipher_suite_enabled | bool
+ - alb.routing_http_xff_client_port_enabled | bool
+ - alb.waf_fail_open_enabled | bool
+
+ - name: Update an ALB with different ip address type (idempotence) - check_mode
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: '{{ tg_name }}'
+ ip_address_type: ipv4
+ http2: no
+ http_desync_mitigation_mode: monitor
+ http_drop_invalid_header_fields: yes
+ http_x_amzn_tls_version_and_cipher_suite: yes
+ http_xff_client_port: yes
+ waf_fail_open: yes
+ register: alb
+ check_mode: yes
+ - assert:
+ that:
+ - alb is not changed
+ - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.')
+
+ - name: Update an ALB with different ip address type (idempotence)
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: '{{ tg_name }}'
+ ip_address_type: ipv4
+ http2: no
+ http_desync_mitigation_mode: monitor
+ http_drop_invalid_header_fields: yes
+ http_x_amzn_tls_version_and_cipher_suite: yes
+ http_xff_client_port: yes
+ waf_fail_open: yes
+ register: alb
+ - assert:
+ that:
+ - alb is not changed
+ - alb.ip_address_type == 'ipv4'
+ - not alb.routing_http2_enabled | bool
+ - alb.routing_http_desync_mitigation_mode == 'monitor'
+ - alb.routing_http_drop_invalid_header_fields_enabled | bool
+ - alb.routing_http_x_amzn_tls_version_and_cipher_suite_enabled | bool
+ - alb.routing_http_xff_client_port_enabled | bool
+ - alb.waf_fail_open_enabled | bool
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Update an ALB with different listener by adding rule - check_mode
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: '{{ tg_name }}'
+ Rules:
+ - Conditions:
+ - Field: path-pattern
+ Values:
+ - /test
+ Priority: '1'
+ Actions:
+ - TargetGroupName: '{{ tg_name }}'
+ Type: forward
+ register: alb
+ check_mode: yes
+ - assert:
+ that:
+ - alb is changed
+ - alb.msg is match('Would have updated ALB if not in check mode.')
+
+ - name: Update an ALB with different listener by adding rule
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: '{{ tg_name }}'
+ Rules:
+ - Conditions:
+ - Field: path-pattern
+ Values:
+ - /test
+ Priority: '1'
+ Actions:
+ - TargetGroupName: '{{ tg_name }}'
+ Type: forward
+ register: alb
+ - assert:
+ that:
+ - alb is changed
+ - alb.listeners[0].rules | length == 2
+ - "'1' in {{ alb.listeners[0].rules | map(attribute='priority') }}"
+
+ - name: Update an ALB with different listener by adding rule (idempotence) - check_mode
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: '{{ tg_name }}'
+ Rules:
+ - Conditions:
+ - Field: path-pattern
+ Values:
+ - /test
+ Priority: '1'
+ Actions:
+ - TargetGroupName: '{{ tg_name }}'
+ Type: forward
+ register: alb
+ check_mode: yes
+ - assert:
+ that:
+ - alb is not changed
+ - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.')
+
+ - name: Update an ALB with different listener by adding rule (idempotence)
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: '{{ tg_name }}'
+ Rules:
+ - Conditions:
+ - Field: path-pattern
+ Values:
+ - /test
+ Priority: '1'
+ Actions:
+ - TargetGroupName: '{{ tg_name }}'
+ Type: forward
+ register: alb
+ - assert:
+ that:
+ - alb is not changed
+ - alb.listeners[0].rules | length == 2
+ - "'1' in {{ alb.listeners[0].rules | map(attribute='priority') }}"
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Update an ALB with different listener by modifying rule - check_mode
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: '{{ tg_name }}'
+ Rules:
+ - Conditions:
+ - Field: path-pattern
+ Values:
+ - /test
+ Priority: '2'
+ Actions:
+ - TargetGroupName: '{{ tg_name }}'
+ Type: forward
+ register: alb
+ check_mode: yes
+ - assert:
+ that:
+ - alb is changed
+ - alb.msg is match('Would have updated ALB if not in check mode.')
+
+ - name: Update an ALB with different listener by modifying rule
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: '{{ tg_name }}'
+ Rules:
+ - Conditions:
+ - Field: path-pattern
+ Values:
+ - /test
+ Priority: '2'
+ Actions:
+ - TargetGroupName: '{{ tg_name }}'
+ Type: forward
+ register: alb
+ - assert:
+ that:
+ - alb is changed
+ - alb.listeners[0].rules | length == 2
+ - "'2' in {{ alb.listeners[0].rules | map(attribute='priority') }}"
+
+  - name: Update an ALB with different listener by modifying rule (idempotence) - check_mode
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: '{{ tg_name }}'
+ Rules:
+ - Conditions:
+ - Field: path-pattern
+ Values:
+ - /test
+ Priority: '2'
+ Actions:
+ - TargetGroupName: '{{ tg_name }}'
+ Type: forward
+ register: alb
+ check_mode: yes
+ - assert:
+ that:
+ - alb is not changed
+ - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.')
+
+ - name: Update an ALB with different listener by modifying rule (idempotence)
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: '{{ tg_name }}'
+ Rules:
+ - Conditions:
+ - Field: path-pattern
+ Values:
+ - /test
+ Priority: '2'
+ Actions:
+ - TargetGroupName: '{{ tg_name }}'
+ Type: forward
+ register: alb
+ - assert:
+ that:
+ - alb is not changed
+ - alb.listeners[0].rules | length == 2
+ - "'2' in {{ alb.listeners[0].rules | map(attribute='priority') }}"
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Update an ALB with different listener by deleting rule - check_mode
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: '{{ tg_name }}'
+ Rules: []
+ register: alb
+ check_mode: yes
+ - assert:
+ that:
+ - alb is changed
+ - alb.msg is match('Would have updated ALB if not in check mode.')
+
+ - name: Update an ALB with different listener by deleting rule
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: '{{ tg_name }}'
+ Rules: []
+ register: alb
+ - assert:
+ that:
+ - alb is changed
+ - alb.listeners[0].rules | length == 1
+ - "'2' not in {{ alb.listeners[0].rules | map(attribute='priority') }}"
+
+ - name: Update an ALB with different listener by deleting rule (idempotence) - check_mode
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: '{{ tg_name }}'
+ Rules: []
+ register: alb
+ check_mode: yes
+ - assert:
+ that:
+ - alb is not changed
+ - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.')
+
+ - name: Update an ALB with different listener by deleting rule (idempotence)
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners:
+ - Protocol: HTTP
+ Port: 80
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: '{{ tg_name }}'
+ Rules: []
+ register: alb
+ - assert:
+ that:
+ - alb is not changed
+ - alb.listeners[0].rules | length == 1
+ - "'2' not in {{ alb.listeners[0].rules | map(attribute='priority') }}"
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Update an ALB by deleting listener - check_mode
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners: []
+ register: alb
+ check_mode: yes
+ - assert:
+ that:
+ - alb is changed
+ - alb.msg is match('Would have updated ALB if not in check mode.')
+
+ - name: Update an ALB by deleting listener
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners: []
+ register: alb
+ - assert:
+ that:
+ - alb is changed
+ - not alb.listeners
+
+ - name: Update an ALB by deleting listener (idempotence) - check_mode
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners: []
+ register: alb
+ check_mode: yes
+ - assert:
+ that:
+ - alb is not changed
+ - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.')
+
+ - name: Update an ALB by deleting listener (idempotence)
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ listeners: []
+ register: alb
+ - assert:
+ that:
+ - alb is not changed
+ - not alb.listeners
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Update an ALB by adding tags - check_mode
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ tags:
+ created_by: ALB test {{ resource_prefix }}
+ register: alb
+ check_mode: yes
+ - assert:
+ that:
+ - alb is changed
+ - alb.msg is match('Would have updated ALB if not in check mode.')
+
+ - name: Update an ALB by adding tags
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ tags:
+ created_by: ALB test {{ resource_prefix }}
+ register: alb
+ - assert:
+ that:
+ - alb is changed
+ - 'alb.tags == {"created_by": "ALB test {{ resource_prefix }}"}'
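+  # A minimal equivalent check, assuming the 'alb' result registered above: building
+  # the expected dict with vars avoids nesting Jinja2 delimiters inside the assertion
+  # string, which newer ansible-core releases warn about.
+  - name: Compare tags via a vars-built dict (equivalent check)
+    vars:
+      expected_tags:
+        created_by: ALB test {{ resource_prefix }}
+    assert:
+      that:
+      - alb.tags == expected_tags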
+
+ - name: Update an ALB by adding tags (idempotence) - check_mode
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ tags:
+ created_by: ALB test {{ resource_prefix }}
+ register: alb
+ check_mode: yes
+ - assert:
+ that:
+ - alb is not changed
+ - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.')
+
+ - name: Update an ALB by adding tags (idempotence)
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ tags:
+ created_by: ALB test {{ resource_prefix }}
+ register: alb
+ - assert:
+ that:
+ - alb is not changed
+ - 'alb.tags == {"created_by": "ALB test {{ resource_prefix }}"}'
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Update an ALB by modifying tags - check_mode
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ tags:
+ created_by: ALB test {{ resource_prefix }}-2
+ register: alb
+ check_mode: yes
+ - assert:
+ that:
+ - alb is changed
+ - alb.msg is match('Would have updated ALB if not in check mode.')
+
+ - name: Update an ALB by modifying tags
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ tags:
+ created_by: ALB test {{ resource_prefix }}-2
+ register: alb
+ - assert:
+ that:
+ - alb is changed
+ - 'alb.tags == {"created_by": "ALB test {{ resource_prefix }}-2"}'
+
+ - name: Update an ALB by modifying tags (idempotence) - check_mode
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ tags:
+ created_by: ALB test {{ resource_prefix }}-2
+ register: alb
+ check_mode: yes
+ - assert:
+ that:
+ - alb is not changed
+ - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.')
+
+ - name: Update an ALB by modifying tags (idempotence)
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ tags:
+ created_by: ALB test {{ resource_prefix }}-2
+ register: alb
+ - assert:
+ that:
+ - alb is not changed
+ - 'alb.tags == {"created_by": "ALB test {{ resource_prefix }}-2"}'
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Update an ALB by removing tags - check_mode
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ tags: {}
+ register: alb
+ check_mode: yes
+ - assert:
+ that:
+ - alb is changed
+ - alb.msg is match('Would have updated ALB if not in check mode.')
+
+ - name: Update an ALB by removing tags
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ tags: {}
+ register: alb
+ - assert:
+ that:
+ - alb is changed
+ - not alb.tags
+
+ - name: Update an ALB by removing tags (idempotence) - check_mode
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ tags: {}
+ register: alb
+ check_mode: yes
+ - assert:
+ that:
+ - alb is not changed
+ - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.')
+
+ - name: Update an ALB by removing tags (idempotence)
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group.group_id }}'
+ state: present
+ tags: {}
+ register: alb
+ - assert:
+ that:
+ - alb is not changed
+ - not alb.tags
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Update an ALB by changing security group - check_mode
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group2.group_id }}'
+ state: present
+ register: alb
+ check_mode: yes
+ - assert:
+ that:
+ - alb is changed
+ - alb.msg is match('Would have updated ALB if not in check mode.')
+
+ - name: Update an ALB by changing security group
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group2.group_id }}'
+ state: present
+ register: alb
+ - assert:
+ that:
+ - alb is changed
+ - alb.security_groups[0] == sec_group2.group_id
+
+ - name: Update an ALB by changing security group (idempotence) - check_mode
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group2.group_id }}'
+ state: present
+ register: alb
+ check_mode: yes
+ - assert:
+ that:
+ - alb is not changed
+ - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.')
+
+ - name: Update an ALB by changing security group (idempotence)
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ subnets: '{{ public_subnets }}'
+ security_groups: '{{ sec_group2.group_id }}'
+ state: present
+ register: alb
+ - assert:
+ that:
+ - alb is not changed
+ - alb.security_groups[0] == sec_group2.group_id
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Ensure elb_application_lb_info supports check_mode
+ elb_application_lb_info:
+ register: alb_info
+ check_mode: yes
+ - assert:
+ that:
+ - alb_info.load_balancers | length > 0
+
+ - name: Get ALB application info using no args
+ elb_application_lb_info:
+ register: alb_info
+ - assert:
+ that:
+ - alb_info.load_balancers | length > 0
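+  # A minimal sketch using the unfiltered 'alb_info' registered just above: a single
+  # load balancer can also be picked out of the full listing with selectattr, which
+  # keeps assertions readable when the account holds several ALBs.
+  - assert:
+      that:
+      - alb_info.load_balancers | selectattr('load_balancer_name', 'equalto', alb_name) | list | length == 1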
+
+ - name: Get ALB application info using load balancer arn
+ elb_application_lb_info:
+ load_balancer_arns:
+ - '{{ alb.load_balancer_arn }}'
+ register: alb_info
+ - assert:
+ that:
+ - alb_info.load_balancers[0].security_groups[0] == sec_group2.group_id
+
+ - name: Get ALB application info using load balancer name
+ elb_application_lb_info:
+ names:
+ - '{{ alb.load_balancer_name }}'
+ register: alb_info
+ - assert:
+ that:
+ - alb_info.load_balancers[0].security_groups[0] == sec_group2.group_id
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Delete an ALB - check_mode
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ state: absent
+ register: alb
+ check_mode: yes
+ - assert:
+ that:
+ - alb is changed
+ - alb.msg is match('Would have deleted ALB if not in check mode.')
+
+ - name: Delete an ALB
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ state: absent
+ register: alb
+ - assert:
+ that:
+ - alb is changed
+
+ - name: Delete an ALB (idempotence) - check_mode
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ state: absent
+ register: alb
+ check_mode: yes
+ - assert:
+ that:
+ - alb is not changed
+ - alb.msg is match('IN CHECK MODE - ALB already absent.')
+
+ - name: Delete an ALB (idempotence)
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ state: absent
+ register: alb
+ - assert:
+ that:
+ - alb is not changed
+
+ # ----- Cleanup ------------------------------------------------------------------------------
+
+ always:
+ - name: Destroy ALB
+ elb_application_lb:
+ name: '{{ alb_name }}'
+ state: absent
+ wait: true
+ wait_timeout: 600
+ ignore_errors: true
+ - name: Destroy ALB 2
+ amazon.aws.elb_application_lb:
+ name: '{{ alb_2_name }}'
+ state: absent
+ wait: true
+ wait_timeout: 600
+ ignore_errors: true
+ - name: Destroy target group if it was created
+ elb_target_group:
+ name: '{{ tg_name }}'
+ protocol: http
+ port: 80
+ vpc_id: '{{ vpc_id }}'
+ state: absent
+ wait: true
+ wait_timeout: 600
+ register: remove_tg
+ retries: 5
+ delay: 3
+ until: remove_tg is success
+ when: tg is defined
+ ignore_errors: true
+ - name: Destroy target group 2 if it was created
+ community.aws.elb_target_group:
+ name: '{{ tg_2_name }}'
+ protocol: http
+ port: 80
+ vpc_id: '{{ vpc_id }}'
+ state: absent
+ wait: true
+ wait_timeout: 600
+ register: remove_tg_2
+ retries: 5
+ delay: 3
+ until: remove_tg_2 is success
+ when: tg_2 is defined
+ ignore_errors: true
+ - name: Destroy sec groups
+ ec2_group:
+ name: '{{ item }}'
+ description: security group for Ansible ALB integration tests
+ state: absent
+ vpc_id: '{{ vpc_id }}'
+ register: remove_sg
+ retries: 10
+ delay: 5
+ until: remove_sg is success
+ ignore_errors: true
+ with_items:
+ - '{{ resource_prefix }}'
+ - '{{ resource_prefix }}-2'
+
+ - name: Destroy route table
+ ec2_vpc_route_table:
+ vpc_id: '{{ vpc_id }}'
+ route_table_id: '{{ route_table.route_table.route_table_id }}'
+ lookup: id
+ state: absent
+ register: remove_rt
+ retries: 10
+ delay: 5
+ until: remove_rt is success
+ ignore_errors: true
+ - name: Destroy subnets
+ ec2_vpc_subnet:
+ cidr: '{{ item }}'
+ vpc_id: '{{ vpc_id }}'
+ state: absent
+ register: remove_subnet
+ retries: 10
+ delay: 5
+ until: remove_subnet is success
+ with_items:
+ - '{{ private_subnet_cidr_1 }}'
+ - '{{ private_subnet_cidr_2 }}'
+ - '{{ public_subnet_cidr_1 }}'
+ - '{{ public_subnet_cidr_2 }}'
+ ignore_errors: true
+ - name: Destroy internet gateway
+ ec2_vpc_igw:
+ vpc_id: '{{ vpc_id }}'
+ tags:
+ Name: '{{ resource_prefix }}'
+ state: absent
+ register: remove_igw
+ retries: 10
+ delay: 5
+ until: remove_igw is success
+ ignore_errors: true
+ - name: Destroy VPC
+ ec2_vpc_net:
+ cidr_block: '{{ vpc_cidr }}'
+ name: '{{ resource_prefix }}_vpc'
+ state: absent
+ register: remove_vpc
+ retries: 10
+ delay: 5
+ until: remove_vpc is success
+ ignore_errors: true
+  - name: Destroy ELB access log test file
+ amazon.aws.s3_object:
+ bucket: "{{ s3_bucket_name }}"
+ mode: delobj
+ object: "alb-logs/AWSLogs/{{ aws_account }}/ELBAccessLogTestFile"
+ - name: Destroy S3 bucket
+ amazon.aws.s3_bucket:
+ name: "{{ s3_bucket_name }}"
+ state: absent
+ force: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/templates/policy.json b/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/templates/policy.json
new file mode 100644
index 000000000..aa6ebf9b6
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/templates/policy.json
@@ -0,0 +1,13 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": "arn:aws:iam::{{ elb_account_id}}:root"
+ },
+ "Action": "s3:PutObject",
+ "Resource": "arn:aws:s3:::{{ s3_bucket_name }}/alb-logs/AWSLogs/{{ aws_account }}/*"
+ }
+ ]
+}
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/aliases b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/aliases
new file mode 100644
index 000000000..8e0974e45
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/aliases
@@ -0,0 +1,4 @@
+# 20+ minutes
+slow
+
+cloud/aws
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/defaults/main.yml
new file mode 100644
index 000000000..42339f0b8
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/defaults/main.yml
@@ -0,0 +1,170 @@
+---
+# defaults file for elb_classic_lb
+elb_name: 'ansible-test-{{ tiny_prefix }}'
+
+vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/16'
+subnet_cidr_1: '10.{{ 256 | random(seed=resource_prefix) }}.1.0/24'
+subnet_cidr_2: '10.{{ 256 | random(seed=resource_prefix) }}.2.0/24'
+subnet_cidr_3: '10.{{ 256 | random(seed=resource_prefix) }}.3.0/24'
+subnet_cidr_4: '10.{{ 256 | random(seed=resource_prefix) }}.4.0/24'
+
+default_tags:
+ snake_case_key: snake_case_value
+ camelCaseKey: camelCaseValue
+ PascalCaseKey: PascalCaseValue
+ "key with spaces": value with spaces
+ "Upper With Spaces": Upper With Spaces
+
+partial_tags:
+ snake_case_key: snake_case_value
+ camelCaseKey: camelCaseValue
+
+updated_tags:
+ updated_snake_case_key: updated_snake_case_value
+ updatedCamelCaseKey: updatedCamelCaseValue
+ UpdatedPascalCaseKey: UpdatedPascalCaseValue
+ "updated key with spaces": updated value with spaces
+ "updated Upper With Spaces": Updated Upper With Spaces
+
+default_listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ - protocol: http
+ load_balancer_port: 8080
+ instance_port: 8080
+ instance_protocol: http
+default_listener_tuples:
+ - [80, 80, "HTTP", "HTTP"]
+ - [8080, 8080, "HTTP", "HTTP"]
+
+purged_listeners:
+ - protocol: http
+ load_balancer_port: 8080
+ instance_port: 8080
+ instance_protocol: http
+purged_listener_tuples:
+ - [8080, 8080, "HTTP", "HTTP"]
+
+updated_listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 8181
+ - protocol: http
+ load_balancer_port: 8080
+ instance_port: 8080
+ instance_protocol: http
+updated_listener_tuples:
+ - [80, 8181, "HTTP", "HTTP"]
+ - [8080, 8080, "HTTP", "HTTP"]
+
+unproxied_listener:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 8181
+ proxy_protocol: False
+unproxied_listener_tuples:
+ - [80, 8181, "HTTP", "HTTP"]
+
+proxied_listener:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 8181
+ proxy_protocol: True
+proxied_listener_tuples:
+ - [80, 8181, "HTTP", "HTTP"]
+
+ssh_listeners:
+ - protocol: tcp
+ load_balancer_port: 22
+ instance_port: 22
+ instance_protocol: tcp
+ssh_listener_tuples:
+ - [22, 22, "TCP", "TCP"]
+
+default_health_check:
+ ping_protocol: http
+ ping_port: 80
+ ping_path: "/index.html"
+ response_timeout: 5
+ interval: 30
+ unhealthy_threshold: 2
+ healthy_threshold: 10
+default_health_check_target: "HTTP:80/index.html"
+
+updated_health_check:
+ ping_protocol: http
+ ping_port: 8181
+ ping_path: "/healthz"
+ response_timeout: 15
+ interval: 42
+ unhealthy_threshold: 7
+ healthy_threshold: 6
+updated_health_check_target: "HTTP:8181/healthz"
+
+nonhttp_health_check:
+ ping_protocol: tcp
+ ping_port: 8282
+ response_timeout: 16
+ interval: 43
+ unhealthy_threshold: 8
+ healthy_threshold: 2
+nonhttp_health_check_target: "TCP:8282"
+
+ssh_health_check:
+ ping_protocol: tcp
+ ping_port: 22
+ response_timeout: 5
+ interval: 10
+ unhealthy_threshold: 2
+ healthy_threshold: 2
+ssh_health_check_target: "TCP:22"
+
+default_idle_timeout: 25
+updated_idle_timeout: 50
+default_drain_timeout: 15
+updated_drain_timeout: 25
+
+app_stickiness:
+ type: application
+ cookie: MyCookie
+ enabled: true
+
+updated_app_stickiness:
+ type: application
+ cookie: AnotherCookie
+
+lb_stickiness:
+ type: loadbalancer
+
+updated_lb_stickiness:
+ type: loadbalancer
+ expiration: 600
+
+# Amazon's SDKs don't provide the list of account IDs; Amazon only provides a
+# web page. If you want to run the tests outside the US regions, you'll need to
+# update this map.
+# https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-access-logs.html
+access_log_account_id_map:
+ us-east-1: '127311923021'
+ us-east-2: '033677994240'
+ us-west-1: '027434742980'
+ us-west-2: '797873946194'
+ us-gov-west-1: '048591011584'
+ us-gov-east-1: '190560391635'
+
+access_log_account_id: '{{ access_log_account_id_map[aws_region] }}'
+
+s3_logging_bucket_a: 'ansible-test-{{ tiny_prefix }}-a'
+s3_logging_bucket_b: 'ansible-test-{{ tiny_prefix }}-b'
+default_logging_prefix: 'logs'
+updated_logging_prefix: 'mylogs'
+default_logging_interval: 5
+updated_logging_interval: 60
+
+local_certs:
+ - priv_key: "{{ remote_tmp_dir }}/private-1.pem"
+ cert: "{{ remote_tmp_dir }}/public-1.pem"
+ csr: "{{ remote_tmp_dir }}/csr-1.csr"
+ domain: "elb-classic.{{ tiny_prefix }}.ansible.test"
+ name: "{{ resource_prefix }}_{{ resource_prefix }}_1"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/meta/main.yml
new file mode 100644
index 000000000..fd89b0e4f
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - setup_ec2_facts
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/basic_internal.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/basic_internal.yml
new file mode 100644
index 000000000..28207ba69
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/basic_internal.yml
@@ -0,0 +1,292 @@
+---
+- block:
+  # For creation, test some basic behaviour
+ - module_defaults:
+ elb_classic_lb:
+ # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}']
+ listeners: '{{ default_listeners }}'
+ wait: true
+ scheme: 'internal'
+ subnets: ['{{ subnet_a }}', '{{ subnet_b }}']
+ block:
+ # ============================================================
+ # create test elb with listeners, certificate, and health check
+
+ - name: Create internal ELB (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ register: result
+ check_mode: true
+
+ - assert:
+ that:
+ - result is changed
+ - result.elb.status == "created"
+
+ - name: Create ELB
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - result.elb.status == "created"
+ - availability_zone_a in result.elb.zones
+ - availability_zone_b in result.elb.zones
+ - subnet_a in result.elb.subnets
+ - subnet_b in result.elb.subnets
+
+ - name: Create internal ELB idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ register: result
+ check_mode: true
+
+ - assert:
+ that:
+ - result is not changed
+ - result.elb.status == "exists"
+
+ - name: Create internal ELB idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+ - result.elb.status == "exists"
+ - availability_zone_a in result.elb.zones
+ - availability_zone_b in result.elb.zones
+ - subnet_a in result.elb.subnets
+ - subnet_b in result.elb.subnets
+
+ - ec2_eni_info:
+ filters:
+ description: 'ELB {{ elb_name }}'
+ register: info
+
+ - assert:
+ that:
+ - info.network_interfaces | length > 0
+
+ - elb_classic_lb_info:
+ names: ['{{ elb_name }}']
+ register: info
+
+ - assert:
+ that:
+ - info.elbs | length > 0
+
+ # ============================================================
+    # Now that we're outside of the creation, we drop the defaults
+ # ============================================================
+
+ - name: Add a subnet - no purge (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ subnets: ['{{ subnet_c }}']
+ register: result
+ check_mode: true
+
+ - assert:
+ that:
+ - result is changed
+ - result.elb.status == "exists"
+
+ - name: Add a subnet - no purge
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ subnets: ['{{ subnet_c }}']
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - result.elb.status == "exists"
+ - availability_zone_a in result.elb.zones
+ - availability_zone_b in result.elb.zones
+ - availability_zone_c in result.elb.zones
+ - subnet_a in result.elb.subnets
+ - subnet_b in result.elb.subnets
+ - subnet_c in result.elb.subnets
+
+ - name: Add a subnet - no purge - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ subnets: ['{{ subnet_c }}']
+ register: result
+ check_mode: true
+
+ - assert:
+ that:
+ - result is not changed
+ - result.elb.status == "exists"
+
+ - name: Add a subnet - no purge - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ subnets: ['{{ subnet_c }}']
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+ - result.elb.status == "exists"
+ - availability_zone_a in result.elb.zones
+ - availability_zone_b in result.elb.zones
+ - availability_zone_c in result.elb.zones
+ - subnet_a in result.elb.subnets
+ - subnet_b in result.elb.subnets
+ - subnet_c in result.elb.subnets
+
+    # While purging, also add a subnet from the same AZ as one we're purging.
+    # This matters because an LB can't have two subnets from the same AZ attached
+    # at the same time.
+ - name: Add a subnet - purge (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ subnets: ['{{ subnet_c }}', '{{ subnet_a2 }}']
+ purge_subnets: true
+ register: result
+ check_mode: true
+
+ - assert:
+ that:
+ - result is changed
+ - result.elb.status == "exists"
+
+ - name: Add a subnet - purge
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ subnets: ['{{ subnet_c }}', '{{ subnet_a2 }}']
+ purge_subnets: true
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - result.elb.status == "exists"
+ - availability_zone_a in result.elb.zones
+ - availability_zone_b not in result.elb.zones
+ - availability_zone_c in result.elb.zones
+ - subnet_a not in result.elb.subnets
+ - subnet_b not in result.elb.subnets
+ - subnet_c in result.elb.subnets
+ - subnet_a2 in result.elb.subnets
+
+ - name: Add a subnet - purge - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ subnets: ['{{ subnet_c }}', '{{ subnet_a2 }}']
+ purge_subnets: true
+ register: result
+ check_mode: true
+
+ - assert:
+ that:
+ - result is not changed
+ - result.elb.status == "exists"
+
+ - name: Add a subnet - purge - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ subnets: ['{{ subnet_c }}', '{{ subnet_a2 }}']
+ purge_subnets: true
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+ - result.elb.status == "exists"
+ - availability_zone_a in result.elb.zones
+ - availability_zone_b not in result.elb.zones
+ - availability_zone_c in result.elb.zones
+ - subnet_a not in result.elb.subnets
+ - subnet_b not in result.elb.subnets
+ - subnet_c in result.elb.subnets
+ - subnet_a2 in result.elb.subnets
+
+ # ============================================================
+
+ - name: remove the test load balancer completely (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: absent
+ wait: true
+ register: result
+ check_mode: true
+
+ - name: assert the load balancer would be removed
+ assert:
+ that:
+ - result is changed
+ - 'result.elb.name == "{{ elb_name }}"'
+ - 'result.elb.status == "deleted"'
+
+ - name: remove the test load balancer completely
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: absent
+ wait: true
+ register: result
+
+ - name: assert the load balancer was removed
+ assert:
+ that:
+ - result is changed
+ - 'result.elb.name == "{{ elb_name }}"'
+ - 'result.elb.status == "deleted"'
+
+ - name: remove the test load balancer completely (idempotency) (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: absent
+ wait: true
+ register: result
+ check_mode: true
+
+ - name: assert the load balancer is gone
+ assert:
+ that:
+ - result is not changed
+ - 'result.elb.name == "{{ elb_name }}"'
+ - 'result.elb.status == "gone"'
+
+ - name: remove the test load balancer completely (idempotency)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: absent
+ wait: true
+ register: result
+
+ - name: assert the load balancer is gone
+ assert:
+ that:
+ - result is not changed
+ - 'result.elb.name == "{{ elb_name }}"'
+ - 'result.elb.status == "gone"'
+
+ always:
+
+ # ============================================================
+ - name: remove the test load balancer
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: absent
+ wait: true
+ register: result
+ ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/basic_public.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/basic_public.yml
new file mode 100644
index 000000000..d76f62be8
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/basic_public.yml
@@ -0,0 +1,273 @@
+---
+- block:
+  # For creation, test some basic behaviour
+ - module_defaults:
+ elb_classic_lb:
+ zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}']
+ listeners: '{{ default_listeners }}'
+ wait: true
+ scheme: 'internet-facing'
+ # subnets: ['{{ subnet_a }}', '{{ subnet_b }}']
+ block:
+ # ============================================================
+ # create test elb with listeners, certificate, and health check
+
+ - name: Create public ELB (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ register: result
+ check_mode: true
+
+ - assert:
+ that:
+ - result is changed
+ - result.elb.status == "created"
+
+ - name: Create public ELB
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - result.elb.status == "created"
+ - availability_zone_a in result.elb.zones
+ - availability_zone_b in result.elb.zones
+
+ - name: Create public ELB idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ register: result
+ check_mode: true
+
+ - assert:
+ that:
+ - result is not changed
+ - result.elb.status == "exists"
+
+ - name: Create public ELB idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+ - result.elb.status == "exists"
+ - availability_zone_a in result.elb.zones
+ - availability_zone_b in result.elb.zones
+
+ - ec2_eni_info:
+ filters:
+ description: 'ELB {{ elb_name }}'
+ register: info
+
+ - assert:
+ that:
+ - info.network_interfaces | length > 0
+
+ - elb_classic_lb_info:
+ names: ['{{ elb_name }}']
+ register: info
+
+ - assert:
+ that:
+ - info.elbs | length > 0
+
+ # ============================================================
+    # Now that we're outside of the creation, we drop the defaults
+ # ============================================================
+
+ - name: Add a zone - no purge (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ zones: ['{{ availability_zone_c }}']
+ register: result
+ check_mode: true
+
+ - assert:
+ that:
+ - result is changed
+ - result.elb.status == "exists"
+
+ - name: Add a zone - no purge
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ zones: ['{{ availability_zone_c }}']
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - result.elb.status == "exists"
+ - availability_zone_a in result.elb.zones
+ - availability_zone_b in result.elb.zones
+ - availability_zone_c in result.elb.zones
+
+ - name: Add a zone - no purge - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ zones: ['{{ availability_zone_c }}']
+ register: result
+ check_mode: true
+
+ - assert:
+ that:
+ - result is not changed
+ - result.elb.status == "exists"
+
+ - name: Add a zone - no purge - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ zones: ['{{ availability_zone_c }}']
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+ - result.elb.status == "exists"
+ - availability_zone_a in result.elb.zones
+ - availability_zone_b in result.elb.zones
+ - availability_zone_c in result.elb.zones
+
+ # ============================================================
+
+ - name: Remove a zone - purge (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ zones: ['{{ availability_zone_c }}']
+ purge_zones: true
+ register: result
+ check_mode: true
+
+ - assert:
+ that:
+ - result is changed
+ - result.elb.status == "exists"
+
+ - name: Remove a zone - purge
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ zones: ['{{ availability_zone_c }}']
+ purge_zones: true
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - result.elb.status == "exists"
+ - availability_zone_a not in result.elb.zones
+ - availability_zone_b not in result.elb.zones
+ - availability_zone_c in result.elb.zones
+
+ - name: Remove a zone - purge - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ zones: ['{{ availability_zone_c }}']
+ purge_zones: true
+ register: result
+ check_mode: true
+
+ - assert:
+ that:
+ - result is not changed
+ - result.elb.status == "exists"
+
+ - name: Remove a zone - purge - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ zones: ['{{ availability_zone_c }}']
+ purge_zones: true
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+ - result.elb.status == "exists"
+ - availability_zone_a not in result.elb.zones
+ - availability_zone_b not in result.elb.zones
+ - availability_zone_c in result.elb.zones
+
+ # ============================================================
+
+ - name: remove the test load balancer completely (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: absent
+ wait: true
+ register: result
+ check_mode: true
+
+ - name: assert the load balancer would be removed
+ assert:
+ that:
+ - result is changed
+ - 'result.elb.name == "{{ elb_name }}"'
+ - 'result.elb.status == "deleted"'
+
+ - name: remove the test load balancer completely
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: absent
+ wait: true
+ register: result
+
+ - name: assert the load balancer was removed
+ assert:
+ that:
+ - result is changed
+ - 'result.elb.name == "{{ elb_name }}"'
+ - 'result.elb.status == "deleted"'
+
+ - name: remove the test load balancer completely (idempotency) (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: absent
+ wait: true
+ register: result
+ check_mode: true
+
+ - name: assert the load balancer is gone
+ assert:
+ that:
+ - result is not changed
+ - 'result.elb.name == "{{ elb_name }}"'
+ - 'result.elb.status == "gone"'
+
+ - name: remove the test load balancer completely (idempotency)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: absent
+ wait: true
+ register: result
+
+ - name: assert the load balancer is gone
+ assert:
+ that:
+ - result is not changed
+ - 'result.elb.name == "{{ elb_name }}"'
+ - 'result.elb.status == "gone"'
+
+ always:
+
+ # ============================================================
+ - name: remove the test load balancer
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: absent
+ wait: true
+ register: result
+ ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_instances.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_instances.yml
new file mode 100644
index 000000000..92f253959
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_instances.yml
@@ -0,0 +1,9 @@
+---
+- name: Delete instance
+ ec2_instance:
+ instance_ids:
+ - '{{ instance_a }}'
+ - '{{ instance_b }}'
+ state: absent
+ wait: true
+ ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_s3.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_s3.yml
new file mode 100644
index 000000000..955f3da62
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_s3.yml
@@ -0,0 +1,32 @@
+---
+- name: Create empty temporary directory
+ tempfile:
+ state: directory
+ register: tmpdir
+ ignore_errors: true
+
+- name: Empty S3 buckets before deletion
+ s3_sync:
+ bucket: '{{ item }}'
+ delete: true
+ file_root: '{{ tmpdir.path }}'
+ ignore_errors: true
+ loop:
+ - '{{ s3_logging_bucket_a }}'
+ - '{{ s3_logging_bucket_b }}'
+
+- name: Delete S3 bucket for access logs
+ s3_bucket:
+ name: '{{ item }}'
+ state: absent
+ register: logging_bucket
+ ignore_errors: true
+ loop:
+ - '{{ s3_logging_bucket_a }}'
+ - '{{ s3_logging_bucket_b }}'
+
+- name: Remove temporary directory
+ file:
+ state: absent
+ path: "{{ tmpdir.path }}"
+  ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_vpc.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_vpc.yml
new file mode 100644
index 000000000..fd7ee965f
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_vpc.yml
@@ -0,0 +1,29 @@
+---
+- name: delete security groups
+ ec2_group:
+ name: '{{ item }}'
+ state: absent
+ ignore_errors: true
+ loop:
+ - '{{ resource_prefix }}-a'
+ - '{{ resource_prefix }}-b'
+ - '{{ resource_prefix }}-c'
+
+- name: delete subnets
+ ec2_vpc_subnet:
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ cidr: '{{ item }}'
+ state: absent
+ ignore_errors: true
+ loop:
+ - '{{ subnet_cidr_1 }}'
+ - '{{ subnet_cidr_2 }}'
+ - '{{ subnet_cidr_3 }}'
+ - '{{ subnet_cidr_4 }}'
+
+- name: delete VPC
+ ec2_vpc_net:
+ cidr_block: '{{ vpc_cidr }}'
+ state: absent
+ name: '{{ resource_prefix }}'
+ ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/complex_changes.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/complex_changes.yml
new file mode 100644
index 000000000..5f75f84d3
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/complex_changes.yml
@@ -0,0 +1,330 @@
+---
+- block:
+ - name: Create ELB for testing complex updates (CHECK)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}']
+ listeners: '{{ default_listeners }}'
+ health_check: '{{ default_health_check }}'
+ wait: true
+ scheme: 'internal'
+ subnets: ['{{ subnet_a }}', '{{ subnet_b }}']
+ security_group_names: ['{{ resource_prefix }}-a', '{{ resource_prefix }}-b']
+ tags: '{{ default_tags }}'
+ cross_az_load_balancing: True
+ idle_timeout: '{{ default_idle_timeout }}'
+ connection_draining_timeout: '{{ default_drain_timeout }}'
+ access_logs:
+ interval: '{{ default_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_a }}'
+ s3_prefix: '{{ default_logging_prefix }}'
+ enabled: true
+ register: result
+ check_mode: True
+
+ - name: Verify that we expect to change
+ assert:
+ that:
+ - result is changed
+
+ - name: Create ELB for testing complex updates
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}']
+ listeners: '{{ default_listeners }}'
+ health_check: '{{ default_health_check }}'
+ wait: true
+ scheme: 'internal'
+ subnets: ['{{ subnet_a }}', '{{ subnet_b }}']
+ security_group_names: ['{{ resource_prefix }}-a', '{{ resource_prefix }}-b']
+ tags: '{{ default_tags }}'
+ cross_az_load_balancing: True
+ idle_timeout: '{{ default_idle_timeout }}'
+ connection_draining_timeout: '{{ default_drain_timeout }}'
+ access_logs:
+ interval: '{{ default_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_a }}'
+ s3_prefix: '{{ default_logging_prefix }}'
+ enabled: true
+ register: result
+
+ - name: Verify that simple parameters were set
+ assert:
+ that:
+ - result is changed
+ - result.elb.status == "created"
+ - availability_zone_a in result.elb.zones
+ - availability_zone_b in result.elb.zones
+ - subnet_a in result.elb.subnets
+ - subnet_b in result.elb.subnets
+ - default_listener_tuples[0] in result.elb.listeners
+ - default_listener_tuples[1] in result.elb.listeners
+ - sg_a in result.elb.security_group_ids
+ - sg_b in result.elb.security_group_ids
+ - sg_c not in result.elb.security_group_ids
+ - result.elb.health_check.healthy_threshold == default_health_check['healthy_threshold']
+ - result.elb.health_check.interval == default_health_check['interval']
+ - result.elb.health_check.target == default_health_check_target
+ - result.elb.health_check.timeout == default_health_check['response_timeout']
+ - result.elb.health_check.unhealthy_threshold == default_health_check['unhealthy_threshold']
+ - result.elb.tags == default_tags
+ - result.elb.cross_az_load_balancing == 'yes'
+ - result.elb.idle_timeout == default_idle_timeout
+ - result.elb.connection_draining_timeout == default_drain_timeout
+ - result.elb.proxy_policy == None
+ - result.load_balancer.load_balancer_attributes.access_log.emit_interval == default_logging_interval
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_a
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == default_logging_prefix
+ - result.load_balancer.load_balancer_attributes.access_log.enabled == True
+
+ - name: Create ELB for testing complex updates - idempotency (CHECK)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}']
+ listeners: '{{ default_listeners }}'
+ health_check: '{{ default_health_check }}'
+ wait: true
+ scheme: 'internal'
+ subnets: ['{{ subnet_a }}', '{{ subnet_b }}']
+ security_group_names: ['{{ resource_prefix }}-a', '{{ resource_prefix }}-b']
+ tags: '{{ default_tags }}'
+ cross_az_load_balancing: True
+ idle_timeout: '{{ default_idle_timeout }}'
+ connection_draining_timeout: '{{ default_drain_timeout }}'
+ access_logs:
+ interval: '{{ default_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_a }}'
+ s3_prefix: '{{ default_logging_prefix }}'
+ enabled: true
+ register: result
+ check_mode: True
+
+ - name: Verify that we expect to not change
+ assert:
+ that:
+ - result is not changed
+
+ - name: Create ELB for testing complex updates - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}']
+ listeners: '{{ default_listeners }}'
+ health_check: '{{ default_health_check }}'
+ wait: true
+ scheme: 'internal'
+ subnets: ['{{ subnet_a }}', '{{ subnet_b }}']
+ security_group_names: ['{{ resource_prefix }}-a', '{{ resource_prefix }}-b']
+ tags: '{{ default_tags }}'
+ cross_az_load_balancing: True
+ idle_timeout: '{{ default_idle_timeout }}'
+ connection_draining_timeout: '{{ default_drain_timeout }}'
+ access_logs:
+ interval: '{{ default_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_a }}'
+ s3_prefix: '{{ default_logging_prefix }}'
+ enabled: true
+ register: result
+
+ - name: Verify that simple parameters were set
+ assert:
+ that:
+ - result is not changed
+ - result.elb.status == "exists"
+ - availability_zone_a in result.elb.zones
+ - availability_zone_b in result.elb.zones
+ - subnet_a in result.elb.subnets
+ - subnet_b in result.elb.subnets
+ - default_listener_tuples[0] in result.elb.listeners
+ - default_listener_tuples[1] in result.elb.listeners
+ - sg_a in result.elb.security_group_ids
+ - sg_b in result.elb.security_group_ids
+ - sg_c not in result.elb.security_group_ids
+ - result.elb.health_check.healthy_threshold == default_health_check['healthy_threshold']
+ - result.elb.health_check.interval == default_health_check['interval']
+ - result.elb.health_check.target == default_health_check_target
+ - result.elb.health_check.timeout == default_health_check['response_timeout']
+ - result.elb.health_check.unhealthy_threshold == default_health_check['unhealthy_threshold']
+ - result.elb.tags == default_tags
+ - result.elb.cross_az_load_balancing == 'yes'
+ - result.elb.idle_timeout == default_idle_timeout
+ - result.elb.connection_draining_timeout == default_drain_timeout
+ - result.elb.proxy_policy == None
+ - result.load_balancer.load_balancer_attributes.access_log.emit_interval == default_logging_interval
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_a
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == default_logging_prefix
+ - result.load_balancer.load_balancer_attributes.access_log.enabled == True
+
+ ###
+
+ - name: Perform complex update (CHECK)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}']
+ listeners: '{{ updated_listeners }}'
+ health_check: '{{ updated_health_check }}'
+ wait: true
+ scheme: 'internal'
+ subnets: ['{{ subnet_a }}', '{{ subnet_b }}']
+ security_group_names: ['{{ resource_prefix }}-c', '{{ resource_prefix }}-b']
+ tags: '{{ updated_tags }}'
+ cross_az_load_balancing: False
+ idle_timeout: '{{ updated_idle_timeout }}'
+ connection_draining_timeout: '{{ default_drain_timeout }}'
+ access_logs:
+ interval: '{{ updated_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_a }}'
+ s3_prefix: '{{ default_logging_prefix }}'
+ enabled: true
+ register: result
+ check_mode: True
+
+ - name: Verify that we expect to change
+ assert:
+ that:
+ - result is changed
+
+ - name: Perform complex update
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}']
+ listeners: '{{ updated_listeners }}'
+ health_check: '{{ updated_health_check }}'
+ wait: true
+ scheme: 'internal'
+ subnets: ['{{ subnet_a }}', '{{ subnet_b }}']
+ security_group_names: ['{{ resource_prefix }}-c', '{{ resource_prefix }}-b']
+ tags: '{{ updated_tags }}'
+ cross_az_load_balancing: False
+ idle_timeout: '{{ updated_idle_timeout }}'
+ connection_draining_timeout: '{{ default_drain_timeout }}'
+ access_logs:
+ interval: '{{ updated_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_a }}'
+ s3_prefix: '{{ default_logging_prefix }}'
+ enabled: true
+ register: result
+
+ - name: Verify that simple parameters were set
+ assert:
+ that:
+ - result is changed
+ - result.elb.status == "exists"
+ - availability_zone_a in result.elb.zones
+ - availability_zone_b in result.elb.zones
+ - subnet_a in result.elb.subnets
+ - subnet_b in result.elb.subnets
+ - updated_listener_tuples[0] in result.elb.listeners
+ - updated_listener_tuples[1] in result.elb.listeners
+ - sg_a not in result.elb.security_group_ids
+ - sg_b in result.elb.security_group_ids
+ - sg_c in result.elb.security_group_ids
+ - result.elb.health_check.healthy_threshold == updated_health_check['healthy_threshold']
+ - result.elb.health_check.interval == updated_health_check['interval']
+ - result.elb.health_check.target == updated_health_check_target
+ - result.elb.health_check.timeout == updated_health_check['response_timeout']
+ - result.elb.health_check.unhealthy_threshold == updated_health_check['unhealthy_threshold']
+ - result.elb.tags == updated_tags
+ - result.elb.cross_az_load_balancing == 'no'
+ - result.elb.idle_timeout == updated_idle_timeout
+ - result.elb.connection_draining_timeout == default_drain_timeout
+ - result.elb.proxy_policy == None
+ - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_a
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == default_logging_prefix
+ - result.load_balancer.load_balancer_attributes.access_log.enabled == True
+
+ - name: Perform complex update idempotency (CHECK)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}']
+ listeners: '{{ updated_listeners }}'
+ health_check: '{{ updated_health_check }}'
+ wait: true
+ scheme: 'internal'
+ subnets: ['{{ subnet_a }}', '{{ subnet_b }}']
+ security_group_names: ['{{ resource_prefix }}-c', '{{ resource_prefix }}-b']
+ tags: '{{ updated_tags }}'
+ cross_az_load_balancing: False
+ idle_timeout: '{{ updated_idle_timeout }}'
+ connection_draining_timeout: '{{ default_drain_timeout }}'
+ access_logs:
+ interval: '{{ updated_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_a }}'
+ s3_prefix: '{{ default_logging_prefix }}'
+ enabled: true
+ register: result
+ check_mode: True
+
+ - name: Verify that we expect no change
+ assert:
+ that:
+ - result is not changed
+
+ - name: Perform complex update - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}']
+ listeners: '{{ updated_listeners }}'
+ health_check: '{{ updated_health_check }}'
+ wait: true
+ scheme: 'internal'
+ subnets: ['{{ subnet_a }}', '{{ subnet_b }}']
+ security_group_names: ['{{ resource_prefix }}-c', '{{ resource_prefix }}-b']
+ tags: '{{ updated_tags }}'
+ cross_az_load_balancing: False
+ idle_timeout: '{{ updated_idle_timeout }}'
+ connection_draining_timeout: '{{ default_drain_timeout }}'
+ access_logs:
+ interval: '{{ updated_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_a }}'
+ s3_prefix: '{{ default_logging_prefix }}'
+ enabled: true
+ register: result
+
+ - name: Verify that the complex update is idempotent
+ assert:
+ that:
+ - result is not changed
+ - result.elb.status == "exists"
+ - availability_zone_a in result.elb.zones
+ - availability_zone_b in result.elb.zones
+ - subnet_a in result.elb.subnets
+ - subnet_b in result.elb.subnets
+ - updated_listener_tuples[0] in result.elb.listeners
+ - updated_listener_tuples[1] in result.elb.listeners
+ - sg_a not in result.elb.security_group_ids
+ - sg_b in result.elb.security_group_ids
+ - sg_c in result.elb.security_group_ids
+ - result.elb.health_check.healthy_threshold == updated_health_check['healthy_threshold']
+ - result.elb.health_check.interval == updated_health_check['interval']
+ - result.elb.health_check.target == updated_health_check_target
+ - result.elb.health_check.timeout == updated_health_check['response_timeout']
+ - result.elb.health_check.unhealthy_threshold == updated_health_check['unhealthy_threshold']
+ - result.elb.tags == updated_tags
+ - result.elb.cross_az_load_balancing == 'no'
+ - result.elb.idle_timeout == updated_idle_timeout
+ - result.elb.connection_draining_timeout == default_drain_timeout
+ - result.elb.proxy_policy == None
+ - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_a
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == default_logging_prefix
+ - result.load_balancer.load_balancer_attributes.access_log.enabled == True
+
+ always:
+
+ # ============================================================
+ - name: remove the test load balancer
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: absent
+ wait: true
+ register: result
+ ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/describe_region.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/describe_region.yml
new file mode 100644
index 000000000..50679a8c1
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/describe_region.yml
@@ -0,0 +1,10 @@
+---
+- name: list available AZs
+ aws_az_info:
+ register: region_azs
+
+- name: pick AZs for testing
+ set_fact:
+ availability_zone_a: "{{ region_azs.availability_zones[0].zone_name }}"
+ availability_zone_b: "{{ region_azs.availability_zones[1].zone_name }}"
+ availability_zone_c: "{{ region_azs.availability_zones[2].zone_name }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/https_listeners.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/https_listeners.yml
new file mode 100644
index 000000000..1b29347f4
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/https_listeners.yml
@@ -0,0 +1,132 @@
+# Create an SSL certificate to use in the test
+
+- name: Generate private key for local certs
+ with_items: '{{ local_certs }}'
+ community.crypto.openssl_privatekey:
+ path: '{{ item.priv_key }}'
+ type: RSA
+ size: 2048
+
+- name: Generate an OpenSSL Certificate Signing Request for own certs
+ with_items: '{{ local_certs }}'
+ community.crypto.openssl_csr:
+ path: '{{ item.csr }}'
+ privatekey_path: '{{ item.priv_key }}'
+ common_name: '{{ item.domain }}'
+
+- name: Generate a Self Signed OpenSSL certificate for own certs
+ with_items: '{{ local_certs }}'
+ community.crypto.x509_certificate:
+ provider: selfsigned
+ path: '{{ item.cert }}'
+ csr_path: '{{ item.csr }}'
+ privatekey_path: '{{ item.priv_key }}'
+ selfsigned_digest: sha256
+ register: cert_create_result
+
+- name: upload certificates first time
+ aws_acm:
+ name_tag: '{{ item.name }}'
+ certificate: '{{ lookup(''file'', item.cert ) }}'
+ private_key: '{{ lookup(''file'', item.priv_key ) }}'
+ state: present
+ tags:
+ Application: search
+ Environment: development
+ purge_tags: false
+ register: upload
+ with_items: '{{ local_certs }}'
+ until: upload is succeeded
+ retries: 5
+ delay: 10
+
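+# Keep the uploaded certificate ARN for use in the HTTPS listener definition below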
+- set_fact:
+ cert_arn: '{{ upload.results[0].certificate.arn }}'
+
+# Create ELB definition
+
+- name: Create elb definition
+ set_fact:
+ elb_definition:
+ connection_draining_timeout: 5
+ listeners:
+ - instance_port: 8080
+ instance_protocol: http
+ load_balancer_port: 443
+ protocol: https
+ ssl_certificate_id: "{{ cert_arn }}"
+ zones: ['{{ availability_zone_a }}']
+ name: "{{ tiny_prefix }}-integration-test-lb"
+ region: "{{ aws_region }}"
+ state: present
+ tags:
+ TestId: "{{ tiny_prefix }}"
+
+# Test creating ELB
+
+- name: Create a classic ELB with https method listeners - check_mode
+ amazon.aws.elb_classic_lb: "{{ elb_definition }}"
+ register: elb_create_result
+ check_mode: true
+- assert:
+ that:
+ - elb_create_result is changed
+ - elb_create_result.elb.status == "created"
+ - elb_create_result.load_balancer | length == 0
+ - "'elasticloadbalancing:CreateLoadBalancer' not in {{ elb_create_result.resource_actions }}"
+
+- name: Create a classic ELB with https method listeners
+ amazon.aws.elb_classic_lb: "{{ elb_definition }}"
+ register: elb_create_result
+- assert:
+ that:
+ - elb_create_result is changed
+ - elb_create_result.elb.status == "created"
+ - elb_create_result.load_balancer | length != 0
+ - "'elasticloadbalancing:CreateLoadBalancer' in {{ elb_create_result.resource_actions }}"
+
+- name: Create a classic ELB with https method listeners - idempotency - check_mode
+ amazon.aws.elb_classic_lb: "{{ elb_definition }}"
+ register: elb_create_result
+ check_mode: true
+- assert:
+ that:
+ - elb_create_result is not changed
+ - elb_create_result.elb.status != "created"
+ - elb_create_result.elb.status == "exists"
+ - elb_create_result.load_balancer | length != 0
+ - "'elasticloadbalancing:CreateLoadBalancer' not in {{ elb_create_result.resource_actions }}"
+
+- name: Create a classic ELB with https method listeners - idempotency
+ amazon.aws.elb_classic_lb: "{{ elb_definition }}"
+ register: elb_create_result
+- assert:
+ that:
+ - elb_create_result is not changed
+ - elb_create_result.elb.status != "created"
+ - elb_create_result.elb.status == "exists"
+ - elb_create_result.load_balancer | length != 0
+ - "'elasticloadbalancing:CreateLoadBalancer' not in {{ elb_create_result.resource_actions }}"
+
+# Remove ELB and certificate created during this test
+
+- name: Delete the ELB created during the test
+ amazon.aws.elb_classic_lb:
+ name: "{{ tiny_prefix }}-integration-test-lb"
+ state: absent
+
+- name: Delete the certificate created in this test
+ community.aws.aws_acm:
+ certificate_arn: '{{ cert_arn }}'
+ state: absent
+ # AWS doesn't always cleanup the associations properly
+ # https://repost.aws/questions/QU63csgGNEQl2M--xCdy-oxw/cant-delete-certificate-because-there-are-dangling-load-balancer-resources
+ ignore_errors: True
+ register: delete_result
+- assert:
+ that:
+ - delete_result is changed
+ - delete_result is not failed
+ # AWS doesn't always cleanup the associations properly
+ # https://repost.aws/questions/QU63csgGNEQl2M--xCdy-oxw/cant-delete-certificate-because-there-are-dangling-load-balancer-resources
+ ignore_errors: True
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/main.yml
new file mode 100644
index 000000000..e8acba10e
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/main.yml
@@ -0,0 +1,58 @@
+---
+# __Test Info__
+# Create a self-signed cert and upload it to AWS
+# http://www.akadia.com/services/ssh_test_certificate.html
+# http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/ssl-server-cert.html
+
+# __Test Outline__
+#
+# __elb_classic_lb__
+# create test elb with listeners and certificate
+# change AZs
+# change listeners
+# remove listeners
+# remove elb
+
+- module_defaults:
+ group/aws:
+ region: "{{ aws_region }}"
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ collections:
+ - amazon.aws
+ - community.aws
+ - community.crypto
+ block:
+
+ - include_tasks: missing_params.yml
+
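+ # Set up the shared test environment: AZs, VPC/subnets, EC2 instances, S3 logging buckets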
+ - include_tasks: describe_region.yml
+ - include_tasks: setup_vpc.yml
+ - include_tasks: setup_instances.yml
+ - include_tasks: setup_s3.yml
+
+ - include_tasks: basic_public.yml
+ - include_tasks: basic_internal.yml
+ - include_tasks: schema_change.yml
+
+ - include_tasks: https_listeners.yml
+
+ - include_tasks: simple_changes.yml
+ - include_tasks: complex_changes.yml
+
+ always:
+
+ # ============================================================
+ # ELB should already be gone, but double-check
+ - name: remove the test load balancer
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: absent
+ wait: true
+ register: result
+ ignore_errors: true
+
+ - include_tasks: cleanup_s3.yml
+ - include_tasks: cleanup_instances.yml
+ - include_tasks: cleanup_vpc.yml
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/missing_params.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/missing_params.yml
new file mode 100644
index 000000000..74779e32c
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/missing_params.yml
@@ -0,0 +1,203 @@
+---
+# Test behaviour when mandatory params aren't passed
+- block:
+ # ============================================================
+
+ - name: test with no name
+ elb_classic_lb:
+ state: present
+ register: result
+ ignore_errors: true
+
+ - name: assert failure when called with no parameters
+ assert:
+ that:
+ - 'result.failed'
+ - '"missing required arguments" in result.msg'
+ - '"name" in result.msg'
+
+ - name: test with only name (state missing)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ register: result
+ ignore_errors: true
+
+ - name: assert failure when called with only name
+ assert:
+ that:
+ - 'result.failed'
+ - '"missing required arguments" in result.msg'
+ - '"state" in result.msg'
+
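+ # Creation without subnets or zones should fail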
+ - elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ scheme: 'internal'
+ listeners:
+ - load_balancer_port: 80
+ instance_port: 80
+ protocol: http
+ register: result
+ ignore_errors: true
+
+ - name: assert failure when neither subnets nor AZs are provided on creation
+ assert:
+ that:
+ - 'result.failed'
+ - '"subnets" in result.msg'
+ - '"zones" in result.msg'
+
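+ # Creation without any listeners should fail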
+ - elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ scheme: 'internal'
+ subnets: ['subnet-123456789']
+ register: result
+ ignore_errors: true
+
+ - name: assert failure when listeners not provided on creation
+ assert:
+ that:
+ - 'result.failed'
+ - '"listeners" in result.msg'
+
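+ # A listener with an invalid protocol should be rejected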
+ - elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ scheme: 'internal'
+ subnets: ['subnet-123456789']
+ listeners:
+ - load_balancer_port: 80
+ instance_port: 80
+ protocol: junk
+ register: result
+ ignore_errors: true
+
+ - name: assert failure when listeners contains invalid protocol
+ assert:
+ that:
+ - 'result.failed'
+ - '"protocol" in result.msg'
+ - '"junk" in result.msg'
+
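+ # A listener with an invalid instance_protocol should be rejected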
+ - elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ scheme: 'internal'
+ subnets: ['subnet-123456789']
+ listeners:
+ - load_balancer_port: 80
+ instance_port: 80
+ protocol: http
+ instance_protocol: junk
+ register: result
+ ignore_errors: true
+
+ - name: assert failure when listeners contains invalid instance_protocol
+ assert:
+ that:
+ - 'result.failed'
+ - '"protocol" in result.msg'
+ - '"junk" in result.msg'
+
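+ # A health check with an invalid ping_protocol should be rejected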
+ - elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ scheme: 'internal'
+ subnets: ['subnet-123456789']
+ listeners:
+ - load_balancer_port: 80
+ instance_port: 80
+ protocol: http
+ health_check:
+ ping_protocol: junk
+ ping_port: 80
+ interval: 5
+ timeout: 5
+ unhealthy_threshold: 5
+ healthy_threshold: 5
+ register: result
+ ignore_errors: true
+
+ - name: assert failure when healthcheck ping_protocol is invalid
+ assert:
+ that:
+ - 'result.failed'
+ - '"protocol" in result.msg'
+ - '"junk" in result.msg'
+
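+ # An HTTP health check without a ping_path should be rejected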
+ - elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ scheme: 'internal'
+ subnets: ['subnet-123456789']
+ listeners:
+ - load_balancer_port: 80
+ instance_port: 80
+ protocol: http
+ health_check:
+ ping_protocol: http
+ ping_port: 80
+ interval: 5
+ timeout: 5
+ unhealthy_threshold: 5
+ healthy_threshold: 5
+ register: result
+ ignore_errors: true
+
+ - name: assert failure when HTTP healthcheck missing a ping_path
+ assert:
+ that:
+ - 'result.failed'
+ - '"ping_path" in result.msg'
+
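+ # An application stickiness policy without a cookie name should be rejected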
+ - elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ scheme: 'internal'
+ subnets: ['subnet-123456789']
+ listeners:
+ - load_balancer_port: 80
+ instance_port: 80
+ protocol: http
+ stickiness:
+ type: application
+ register: result
+ ignore_errors: true
+
+ - name: assert failure when app stickiness policy missing cookie name
+ assert:
+ that:
+ - 'result.failed'
+ - '"cookie" in result.msg'
+
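+ # Access logs without an s3_location should be rejected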
+ - elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ scheme: 'internal'
+ subnets: ['subnet-123456789']
+ listeners:
+ - load_balancer_port: 80
+ instance_port: 80
+ protocol: http
+ access_logs:
+ interval: 60
+ register: result
+ ignore_errors: true
+
+ - name: assert failure when access log is missing a bucket
+ assert:
+ that:
+ - 'result.failed'
+ - '"s3_location" in result.msg'
+
+ always:
+
+ # ============================================================
+ - name: remove the test load balancer
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: absent
+ wait: true
+ register: result
+ ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/schema_change.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/schema_change.yml
new file mode 100644
index 000000000..cc667bef2
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/schema_change.yml
@@ -0,0 +1,189 @@
+---
+- block:
+ # For creation, test some basic behaviour
+ - module_defaults:
+ elb_classic_lb:
+ zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}']
+ listeners: '{{ default_listeners }}'
+ wait: true
+ scheme: 'internet-facing'
+ # subnets: ['{{ subnet_a }}', '{{ subnet_b }}']
+ block:
+ - name: Create ELB
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - result.elb.status == 'created'
+ - result.elb.scheme == 'internet-facing'
+
+ - module_defaults:
+ elb_classic_lb:
+ # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}']
+ listeners: '{{ default_listeners }}'
+ wait: true
+ scheme: 'internal'
+ subnets: ['{{ subnet_a }}', '{{ subnet_b }}']
+ block:
+
+ - name: Change scheme to internal (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ register: result
+ check_mode: true
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: Change scheme to internal
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - result.elb.scheme == 'internal'
+
+ - name: Change scheme to internal idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ register: result
+ check_mode: true
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: Change scheme to internal idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+ - result.elb.scheme == 'internal'
+
+ - name: No scheme specified (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ scheme: '{{ omit }}'
+ register: result
+ check_mode: true
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: No scheme specified
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ scheme: '{{ omit }}'
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+ - result.elb.scheme == 'internal'
+
+ # Switch back to internet-facing and re-test the basic behaviour
+ - module_defaults:
+ elb_classic_lb:
+ zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}']
+ listeners: '{{ default_listeners }}'
+ health_check: '{{ default_health_check }}'
+ wait: true
+ scheme: 'internet-facing'
+ # subnets: ['{{ subnet_a }}', '{{ subnet_b }}']
+ block:
+
+ - name: Change scheme to internet-facing (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ register: result
+ check_mode: true
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: Change scheme to internet-facing
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - result.elb.scheme == 'internet-facing'
+
+ - name: Change scheme to internet-facing idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ register: result
+ check_mode: true
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: Change scheme to internet-facing idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+ - result.elb.scheme == 'internet-facing'
+
+ - name: No scheme specified (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ scheme: '{{ omit }}'
+ register: result
+ check_mode: true
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: No scheme specified
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ scheme: '{{ omit }}'
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+ - result.elb.scheme == 'internet-facing'
+
+ always:
+
+ # ============================================================
+ - name: remove the test load balancer
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: absent
+ wait: true
+ register: result
+ ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_instances.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_instances.yml
new file mode 100644
index 000000000..712ba351d
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_instances.yml
@@ -0,0 +1,25 @@
+---
+- name: Create instance a
+ ec2_instance:
+ name: "ansible-test-{{ tiny_prefix }}-elb-a"
+ image_id: "{{ ec2_ami_id }}"
+ vpc_subnet_id: "{{ subnet_a }}"
+ instance_type: t2.micro
+ wait: false
+ security_group: "{{ sg_a }}"
+ register: ec2_instance_a
+
+- name: Create instance b
+ ec2_instance:
+ name: "ansible-test-{{ tiny_prefix }}-elb-b"
+ image_id: "{{ ec2_ami_id }}"
+ vpc_subnet_id: "{{ subnet_b }}"
+ instance_type: t2.micro
+ wait: false
+ security_group: "{{ sg_b }}"
+ register: ec2_instance_b
+
+- name: store the Instance IDs
+ set_fact:
+ instance_a: "{{ ec2_instance_a.instance_ids[0] }}"
+ instance_b: "{{ ec2_instance_b.instance_ids[0] }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_s3.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_s3.yml
new file mode 100644
index 000000000..60e9c73cc
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_s3.yml
@@ -0,0 +1,26 @@
+---
+- name: Create S3 bucket for access logs (bucket a)
+ vars:
+ s3_logging_bucket: '{{ s3_logging_bucket_a }}'
+ s3_bucket:
+ name: '{{ s3_logging_bucket_a }}'
+ state: present
+ policy: "{{ lookup('template','s3_policy.j2') }}"
+ register: logging_bucket
+
+- assert:
+ that:
+ - logging_bucket is changed
+
+- name: Create S3 bucket for access logs (bucket b)
+ vars:
+ s3_logging_bucket: '{{ s3_logging_bucket_b }}'
+ s3_bucket:
+ name: '{{ s3_logging_bucket_b }}'
+ state: present
+ policy: "{{ lookup('template','s3_policy.j2') }}"
+ register: logging_bucket
+
+- assert:
+ that:
+ - logging_bucket is changed
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_vpc.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_vpc.yml
new file mode 100644
index 000000000..7e35e1d9e
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_vpc.yml
@@ -0,0 +1,103 @@
+---
+# SETUP: vpc, subnet, security group
+- name: create a VPC to work in
+ ec2_vpc_net:
+ cidr_block: '{{ vpc_cidr }}'
+ state: present
+ name: '{{ resource_prefix }}'
+ resource_tags:
+ Name: '{{ resource_prefix }}'
+ register: setup_vpc
+
+- name: create a subnet
+ ec2_vpc_subnet:
+ az: '{{ availability_zone_a }}'
+ tags: '{{ resource_prefix }}'
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ cidr: '{{ subnet_cidr_1 }}'
+ state: present
+ resource_tags:
+ Name: '{{ resource_prefix }}-a'
+ register: setup_subnet_1
+
+- name: create a subnet
+ ec2_vpc_subnet:
+ az: '{{ availability_zone_b }}'
+ tags: '{{ resource_prefix }}'
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ cidr: '{{ subnet_cidr_2 }}'
+ state: present
+ resource_tags:
+ Name: '{{ resource_prefix }}-b'
+ register: setup_subnet_2
+
+- name: create a subnet
+ ec2_vpc_subnet:
+ az: '{{ availability_zone_c }}'
+ tags: '{{ resource_prefix }}'
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ cidr: '{{ subnet_cidr_3 }}'
+ state: present
+ resource_tags:
+ Name: '{{ resource_prefix }}-c'
+ register: setup_subnet_3
+
+- name: create a subnet
+ ec2_vpc_subnet:
+ az: '{{ availability_zone_a }}'
+ tags: '{{ resource_prefix }}'
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ cidr: '{{ subnet_cidr_4 }}'
+ state: present
+ resource_tags:
+ Name: '{{ resource_prefix }}-a2'
+ register: setup_subnet_4
+
+- name: create a security group
+ ec2_group:
+ name: '{{ resource_prefix }}-a'
+ description: 'created by Ansible integration tests'
+ state: present
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ rules:
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: '{{ vpc_cidr }}'
+ register: setup_sg_1
+
+- name: create a security group
+ ec2_group:
+ name: '{{ resource_prefix }}-b'
+ description: 'created by Ansible integration tests'
+ state: present
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ rules:
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: '{{ vpc_cidr }}'
+ register: setup_sg_2
+
+- name: create a security group
+ ec2_group:
+ name: '{{ resource_prefix }}-c'
+ description: 'created by Ansible integration tests'
+ state: present
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ rules:
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: '{{ vpc_cidr }}'
+ register: setup_sg_3
+
+- name: store the IDs
+ set_fact:
+ subnet_a: "{{ setup_subnet_1.subnet.id }}"
+ subnet_b: "{{ setup_subnet_2.subnet.id }}"
+ subnet_c: "{{ setup_subnet_3.subnet.id }}"
+ subnet_a2: "{{ setup_subnet_4.subnet.id }}"
+ sg_a: "{{ setup_sg_1.group_id }}"
+ sg_b: "{{ setup_sg_2.group_id }}"
+ sg_c: "{{ setup_sg_3.group_id }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_changes.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_changes.yml
new file mode 100644
index 000000000..6644cf983
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_changes.yml
@@ -0,0 +1,79 @@
+---
+- block:
+ ## Setup an ELB for testing changing one thing at a time
+ - name: Create ELB
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}']
+ listeners: '{{ default_listeners }}'
+ health_check: '{{ default_health_check }}'
+ wait: true
+ scheme: 'internal'
+ subnets: ['{{ subnet_a }}', '{{ subnet_b }}']
+ security_group_ids: ['{{ sg_a }}']
+ tags: '{{ default_tags }}'
+ cross_az_load_balancing: True
+ idle_timeout: '{{ default_idle_timeout }}'
+ connection_draining_timeout: '{{ default_drain_timeout }}'
+ access_logs:
+ interval: '{{ default_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_a }}'
+ s3_prefix: '{{ default_logging_prefix }}'
+ enabled: true
+ register: result
+
+ - name: Verify that simple parameters were set
+ assert:
+ that:
+ - result is changed
+ - result.elb.status == "created"
+ - availability_zone_a in result.elb.zones
+ - availability_zone_b in result.elb.zones
+ - subnet_a in result.elb.subnets
+ - subnet_b in result.elb.subnets
+ - default_listener_tuples[0] in result.elb.listeners
+ - default_listener_tuples[1] in result.elb.listeners
+ - sg_a in result.elb.security_group_ids
+ - sg_b not in result.elb.security_group_ids
+ - sg_c not in result.elb.security_group_ids
+ - result.elb.health_check.healthy_threshold == default_health_check['healthy_threshold']
+ - result.elb.health_check.interval == default_health_check['interval']
+ - result.elb.health_check.target == default_health_check_target
+ - result.elb.health_check.timeout == default_health_check['response_timeout']
+ - result.elb.health_check.unhealthy_threshold == default_health_check['unhealthy_threshold']
+ - result.elb.tags == default_tags
+ - result.elb.cross_az_load_balancing == 'yes'
+ - result.elb.idle_timeout == default_idle_timeout
+ - result.elb.connection_draining_timeout == default_drain_timeout
+ - result.elb.proxy_policy == None
+ - result.load_balancer.load_balancer_attributes.access_log.emit_interval == default_logging_interval
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_a
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == default_logging_prefix
+ - result.load_balancer.load_balancer_attributes.access_log.enabled == True
+
+ ## AZ / Subnet changes are tested in the public/internal tests
+ ## because they depend on the scheme of the LB
+
+ - include_tasks: 'simple_securitygroups.yml'
+ - include_tasks: 'simple_listeners.yml'
+ - include_tasks: 'simple_healthcheck.yml'
+ - include_tasks: 'simple_tags.yml'
+ - include_tasks: 'simple_cross_az.yml'
+ - include_tasks: 'simple_idle_timeout.yml'
+ - include_tasks: 'simple_draining_timeout.yml'
+ - include_tasks: 'simple_proxy_policy.yml'
+ - include_tasks: 'simple_stickiness.yml'
+ - include_tasks: 'simple_instances.yml'
+ - include_tasks: 'simple_logging.yml'
+
+ always:
+
+ # ============================================================
+ - name: remove the test load balancer
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: absent
+ wait: true
+ register: result
+ ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_cross_az.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_cross_az.yml
new file mode 100644
index 000000000..104b0afb5
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_cross_az.yml
@@ -0,0 +1,100 @@
+---
+# ===========================================================
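+# Disable cross-zone load balancing and verify idempotency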
+
+- name: disable cross-az balancing on ELB (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ cross_az_load_balancing: False
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: disable cross-az balancing on ELB
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ cross_az_load_balancing: False
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.elb.cross_az_load_balancing == 'no'
+
+- name: disable cross-az balancing on ELB - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ cross_az_load_balancing: False
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: disable cross-az balancing on ELB - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ cross_az_load_balancing: False
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.elb.cross_az_load_balancing == 'no'
+
+# ===========================================================
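+# Re-enable cross-zone load balancing and verify idempotency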
+
+- name: re-enable cross-az balancing on ELB (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ cross_az_load_balancing: True
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: re-enable cross-az balancing on ELB
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ cross_az_load_balancing: True
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.elb.cross_az_load_balancing == 'yes'
+
+- name: re-enable cross-az balancing on ELB - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ cross_az_load_balancing: True
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: re-enable cross-az balancing on ELB - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ cross_az_load_balancing: True
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.elb.cross_az_load_balancing == 'yes'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_draining_timeout.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_draining_timeout.yml
new file mode 100644
index 000000000..825ce2185
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_draining_timeout.yml
@@ -0,0 +1,148 @@
+---
+# ===========================================================
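+# Disable connection draining (timeout of 0) and verify idempotency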
+
+- name: disable connection draining on ELB (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ connection_draining_timeout: 0
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: disable connection draining on ELB
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ connection_draining_timeout: 0
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: disable connection draining on ELB - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ connection_draining_timeout: 0
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: disable connection draining on ELB - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ connection_draining_timeout: 0
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+
+# ===========================================================
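+# Re-enable connection draining with the default timeout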
+
+- name: re-enable connection draining on ELB (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ connection_draining_timeout: '{{ default_drain_timeout }}'
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: re-enable connection draining on ELB
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ connection_draining_timeout: '{{ default_drain_timeout }}'
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.elb.connection_draining_timeout == default_drain_timeout
+
+- name: re-enable connection draining on ELB - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ connection_draining_timeout: '{{ default_drain_timeout }}'
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: re-enable connection draining on ELB - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ connection_draining_timeout: '{{ default_drain_timeout }}'
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.elb.connection_draining_timeout == default_drain_timeout
+
+# ===========================================================
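+# Update the connection draining timeout and verify idempotency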
+
+- name: update connection draining timeout on ELB (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ connection_draining_timeout: '{{ updated_drain_timeout }}'
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: update connection draining timeout on ELB
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ connection_draining_timeout: '{{ updated_drain_timeout }}'
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.elb.connection_draining_timeout == updated_drain_timeout
+
+- name: update connection draining timeout on ELB - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ connection_draining_timeout: '{{ updated_drain_timeout }}'
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: update connection draining timeout on ELB - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ connection_draining_timeout: '{{ updated_drain_timeout }}'
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.elb.connection_draining_timeout == updated_drain_timeout
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_healthcheck.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_healthcheck.yml
new file mode 100644
index 000000000..179e8cb80
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_healthcheck.yml
@@ -0,0 +1,116 @@
+---
+# Note: AWS doesn't support disabling health checks
+# ==============================================================
+- name: Non-HTTP Healthcheck (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ health_check: '{{ nonhttp_health_check }}'
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: Non-HTTP Healthcheck
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ health_check: '{{ nonhttp_health_check }}'
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.elb.health_check.healthy_threshold == nonhttp_health_check['healthy_threshold']
+ - result.elb.health_check.interval == nonhttp_health_check['interval']
+ - result.elb.health_check.target == nonhttp_health_check_target
+ - result.elb.health_check.timeout == nonhttp_health_check['response_timeout']
+ - result.elb.health_check.unhealthy_threshold == nonhttp_health_check['unhealthy_threshold']
+
+- name: Non-HTTP Healthcheck - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ health_check: '{{ nonhttp_health_check }}'
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Non-HTTP Healthcheck - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ health_check: '{{ nonhttp_health_check }}'
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.elb.health_check.healthy_threshold == nonhttp_health_check['healthy_threshold']
+ - result.elb.health_check.interval == nonhttp_health_check['interval']
+ - result.elb.health_check.target == nonhttp_health_check_target
+ - result.elb.health_check.timeout == nonhttp_health_check['response_timeout']
+ - result.elb.health_check.unhealthy_threshold == nonhttp_health_check['unhealthy_threshold']
+
+# ==============================================================
+
+- name: Update Healthcheck (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ health_check: '{{ updated_health_check }}'
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: Update Healthcheck
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ health_check: '{{ updated_health_check }}'
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.elb.health_check.healthy_threshold == updated_health_check['healthy_threshold']
+ - result.elb.health_check.interval == updated_health_check['interval']
+ - result.elb.health_check.target == updated_health_check_target
+ - result.elb.health_check.timeout == updated_health_check['response_timeout']
+ - result.elb.health_check.unhealthy_threshold == updated_health_check['unhealthy_threshold']
+
+- name: Update Healthcheck - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ health_check: '{{ updated_health_check }}'
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Update Healthcheck - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ health_check: '{{ updated_health_check }}'
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.elb.health_check.healthy_threshold == updated_health_check['healthy_threshold']
+ - result.elb.health_check.interval == updated_health_check['interval']
+ - result.elb.health_check.target == updated_health_check_target
+ - result.elb.health_check.timeout == updated_health_check['response_timeout']
+ - result.elb.health_check.unhealthy_threshold == updated_health_check['unhealthy_threshold']
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_idle_timeout.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_idle_timeout.yml
new file mode 100644
index 000000000..e89dd25f1
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_idle_timeout.yml
@@ -0,0 +1,50 @@
+---
+# ===========================================================
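+# Update the idle connection timeout and verify idempotency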
+
+- name: update idle connection timeout on ELB (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ idle_timeout: "{{ updated_idle_timeout }}"
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: update idle connection timeout on ELB
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ idle_timeout: "{{ updated_idle_timeout }}"
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.elb.idle_timeout == updated_idle_timeout
+
+- name: update idle connection timeout on ELB - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ idle_timeout: "{{ updated_idle_timeout }}"
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: update idle connection timeout on ELB - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ idle_timeout: "{{ updated_idle_timeout }}"
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.elb.idle_timeout == updated_idle_timeout
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_instances.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_instances.yml
new file mode 100644
index 000000000..8c27bc27f
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_instances.yml
@@ -0,0 +1,415 @@
+---
+- name: Add SSH listener and health check to ELB
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ listeners: "{{ ssh_listeners }}"
+ health_check: "{{ ssh_health_check }}"
+ purge_listeners: false
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - ssh_listener_tuples[0] in result.elb.listeners
+
+# Make sure that the instances are 'OK'
+
+- name: Wait for instance a
+ ec2_instance:
+ name: "ansible-test-{{ tiny_prefix }}-elb-a"
+ instance_ids:
+ - "{{ instance_a }}"
+ vpc_subnet_id: "{{ subnet_a }}"
+ instance_type: t2.micro
+ wait: true
+ security_group: "{{ sg_a }}"
+ register: ec2_instance_a
+
+- name: Wait for instance b
+ ec2_instance:
+ name: "ansible-test-{{ tiny_prefix }}-elb-b"
+ instance_ids:
+ - "{{ instance_b }}"
+ vpc_subnet_id: "{{ subnet_b }}"
+ instance_type: t2.micro
+ wait: true
+ security_group: "{{ sg_b }}"
+ register: ec2_instance_b
+
+- assert:
+ that:
+ - ec2_instance_a is successful
+ - ec2_instance_b is successful
+
+# ==============================================================
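+# Register the first instance with the load balancer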
+
+- name: Add an instance to the LB (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ instance_ids:
+ - '{{ instance_a }}'
+ wait: true
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: Add an instance to the LB
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ instance_ids:
+ - '{{ instance_a }}'
+ wait: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - instance_a in result.elb.instances
+ - instance_b not in result.elb.instances
+
+- name: Add an instance to the LB - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ instance_ids:
+ - '{{ instance_a }}'
+ wait: true
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Add an instance to the LB - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ instance_ids:
+ - '{{ instance_a }}'
+ wait: true
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - instance_a in result.elb.instances
+ - instance_b not in result.elb.instances
+
+# ==============================================================
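+# Add the second instance without purging the first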
+
+- name: Add second instance to the LB without purge (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ instance_ids:
+ - '{{ instance_b }}'
+ purge_instance_ids: false
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: Add second instance to the LB without purge
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ instance_ids:
+ - '{{ instance_b }}'
+ purge_instance_ids: false
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - instance_a in result.elb.instances
+ - instance_b in result.elb.instances
+
+- name: Add second instance to the LB without purge - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ instance_ids:
+ - '{{ instance_b }}'
+ purge_instance_ids: false
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Add second instance to the LB without purge - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ instance_ids:
+ - '{{ instance_b }}'
+ purge_instance_ids: false
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - instance_a in result.elb.instances
+ - instance_b in result.elb.instances
+
+# ==============================================================
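+# Passing both instances with purge enabled should be a no-op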
+
+- name: Both instances with purge - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ instance_ids:
+ - '{{ instance_a }}'
+ - '{{ instance_b }}'
+ purge_instance_ids: true
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Both instances with purge - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ instance_ids:
+ - '{{ instance_a }}'
+ - '{{ instance_b }}'
+ purge_instance_ids: true
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - instance_a in result.elb.instances
+ - instance_b in result.elb.instances
+
+- name: Both instances with purge - different order - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ instance_ids:
+ - '{{ instance_b }}'
+ - '{{ instance_a }}'
+ purge_instance_ids: true
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Both instances with purge - different order - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ instance_ids:
+ - '{{ instance_b }}'
+ - '{{ instance_a }}'
+ purge_instance_ids: true
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - instance_a in result.elb.instances
+ - instance_b in result.elb.instances
+
+# ==============================================================
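+# Purge down to just the second instance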
+
+- name: Remove first instance from LB (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ instance_ids:
+ - '{{ instance_b }}'
+ purge_instance_ids: true
+ wait: true
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: Remove first instance from LB
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ instance_ids:
+ - '{{ instance_b }}'
+ purge_instance_ids: true
+ wait: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - instance_a not in result.elb.instances
+ - instance_b in result.elb.instances
+
+- name: Remove first instance from LB - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ instance_ids:
+ - '{{ instance_b }}'
+ purge_instance_ids: true
+ wait: true
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Remove first instance from LB - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ instance_ids:
+ - '{{ instance_b }}'
+ purge_instance_ids: true
+ wait: true
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - instance_a not in result.elb.instances
+ - instance_b in result.elb.instances
+
+# ==============================================================
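+# Swap the registered instance back to the first one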
+
+- name: Switch instances in LB (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ instance_ids:
+ - '{{ instance_a }}'
+ purge_instance_ids: true
+ wait: true
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: Switch instances in LB
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ instance_ids:
+ - '{{ instance_a }}'
+ purge_instance_ids: true
+ wait: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - instance_a in result.elb.instances
+ - instance_b not in result.elb.instances
+
+- name: Switch instances in LB - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ instance_ids:
+ - '{{ instance_a }}'
+ purge_instance_ids: true
+ wait: true
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Switch instances in LB - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ instance_ids:
+ - '{{ instance_a }}'
+ purge_instance_ids: true
+ wait: true
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - instance_a in result.elb.instances
+ - instance_b not in result.elb.instances
+
+# ==============================================================
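+# Swap instances again, this time without waiting for registration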
+
+- name: Switch instances in LB - no wait (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ instance_ids:
+ - '{{ instance_b }}'
+ purge_instance_ids: true
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: Switch instances in LB - no wait
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ instance_ids:
+ - '{{ instance_b }}'
+ purge_instance_ids: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - instance_a not in result.elb.instances
+ - instance_b in result.elb.instances
+
+- name: Switch instances in LB - no wait - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ instance_ids:
+ - '{{ instance_b }}'
+ purge_instance_ids: true
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Switch instances in LB - no wait - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ instance_ids:
+ - '{{ instance_b }}'
+ purge_instance_ids: true
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - instance_a not in result.elb.instances
+ - instance_b in result.elb.instances
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_listeners.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_listeners.yml
new file mode 100644
index 000000000..8edb96543
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_listeners.yml
@@ -0,0 +1,196 @@
+---
+# ===========================================================
+# remove a listener (no purge)
+# remove a listener (purge)
+# add a listener
+# update a listener (same port)
+# ===========================================================
+# Test passing only one of the listeners
+# Without purge
+- name: Test partial Listener to ELB (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ listeners: "{{ purged_listeners }}"
+ purge_listeners: false
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Test partial Listener to ELB
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ listeners: "{{ purged_listeners }}"
+ purge_listeners: false
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - default_listener_tuples[0] in result.elb.listeners
+ - default_listener_tuples[1] in result.elb.listeners
+
+# With purge
+- name: Test partial Listener with purge to ELB (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ listeners: "{{ purged_listeners }}"
+ purge_listeners: true
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: Test partial Listener with purge to ELB
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ listeners: "{{ purged_listeners }}"
+ purge_listeners: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - purged_listener_tuples[0] in result.elb.listeners
+
+- name: Test partial Listener with purge to ELB - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ listeners: "{{ purged_listeners }}"
+ purge_listeners: true
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Test partial Listener with purge to ELB - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ listeners: "{{ purged_listeners }}"
+ purge_listeners: true
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - purged_listener_tuples[0] in result.elb.listeners
+
+# ===========================================================
+# Test re-adding a listener
+- name: Test re-adding listener to ELB (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ listeners: "{{ default_listeners }}"
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: Test re-adding listener to ELB
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ listeners: "{{ default_listeners }}"
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - default_listener_tuples[0] in result.elb.listeners
+ - default_listener_tuples[1] in result.elb.listeners
+
+- name: Test re-adding listener to ELB - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ listeners: "{{ default_listeners }}"
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Test re-adding listener to ELB - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ listeners: "{{ default_listeners }}"
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - default_listener_tuples[0] in result.elb.listeners
+ - default_listener_tuples[1] in result.elb.listeners
+
+# ===========================================================
+# Test passing an updated listener
+- name: Test updated listener to ELB (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ listeners: "{{ updated_listeners }}"
+ purge_listeners: false
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: Test updated listener to ELB
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ listeners: "{{ updated_listeners }}"
+ purge_listeners: false
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - updated_listener_tuples[0] in result.elb.listeners
+ - updated_listener_tuples[1] in result.elb.listeners
+
+- name: Test updated listener to ELB - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ listeners: "{{ updated_listeners }}"
+ purge_listeners: false
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Test updated listener to ELB - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ listeners: "{{ updated_listeners }}"
+ purge_listeners: false
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - updated_listener_tuples[0] in result.elb.listeners
+ - updated_listener_tuples[1] in result.elb.listeners
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_logging.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_logging.yml
new file mode 100644
index 000000000..5e489eaf0
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_logging.yml
@@ -0,0 +1,587 @@
+---
+# ===========================================================
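+# Omitting 'enabled' implies access logging stays enabled (no change expected)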
+
+- name: S3 logging for ELB - implied enabled (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ access_logs:
+ interval: '{{ default_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_a }}'
+ s3_prefix: '{{ default_logging_prefix }}'
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: S3 logging for ELB - implied enabled
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ access_logs:
+ interval: '{{ default_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_a }}'
+ s3_prefix: '{{ default_logging_prefix }}'
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.load_balancer.load_balancer_attributes.access_log.emit_interval == default_logging_interval
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_a
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == default_logging_prefix
+ - result.load_balancer.load_balancer_attributes.access_log.enabled == True
+
+# ===========================================================
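+# Disable S3 access logging and verify idempotency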
+
+- name: Disable S3 logging for ELB (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ access_logs:
+ enabled: false
+ interval: '{{ default_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_a }}'
+ s3_prefix: '{{ default_logging_prefix }}'
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: Disable S3 logging for ELB
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ access_logs:
+ enabled: false
+ interval: '{{ default_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_a }}'
+ s3_prefix: '{{ default_logging_prefix }}'
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.load_balancer.load_balancer_attributes.access_log.enabled == False
+
+- name: Disable S3 logging for ELB - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ access_logs:
+ enabled: false
+ interval: '{{ default_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_a }}'
+ s3_prefix: '{{ default_logging_prefix }}'
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Disable S3 logging for ELB - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ access_logs:
+ enabled: false
+ interval: '{{ default_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_a }}'
+ s3_prefix: '{{ default_logging_prefix }}'
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.load_balancer.load_balancer_attributes.access_log.enabled == False
+
+# ===========================================================
+
+- name: Disable S3 logging for ELB - ignore extras (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ access_logs:
+ enabled: false
+ interval: '{{ updated_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_b }}'
+ s3_prefix: '{{ updated_logging_prefix }}'
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Disable S3 logging for ELB - ignore extras
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ access_logs:
+ enabled: false
+ interval: '{{ default_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_a }}'
+ s3_prefix: '{{ default_logging_prefix }}'
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.load_balancer.load_balancer_attributes.access_log.enabled == False
+
+- name: Disable S3 logging for ELB - no extras (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ access_logs:
+ enabled: false
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Disable S3 logging for ELB - no extras
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ access_logs:
+ enabled: false
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.load_balancer.load_balancer_attributes.access_log.enabled == False
+
+# ===========================================================
+
+- name: Re-enable S3 logging for ELB (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ access_logs:
+ enabled: true
+ interval: '{{ default_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_a }}'
+ s3_prefix: '{{ default_logging_prefix }}'
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: Re-enable S3 logging for ELB
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ access_logs:
+ enabled: true
+ interval: '{{ default_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_a }}'
+ s3_prefix: '{{ default_logging_prefix }}'
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.load_balancer.load_balancer_attributes.access_log.emit_interval == default_logging_interval
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_a
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == default_logging_prefix
+ - result.load_balancer.load_balancer_attributes.access_log.enabled == True
+
+- name: Re-enable S3 logging for ELB - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ access_logs:
+ enabled: true
+ interval: '{{ default_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_a }}'
+ s3_prefix: '{{ default_logging_prefix }}'
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Re-enable S3 logging for ELB - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ access_logs:
+ enabled: true
+ interval: '{{ default_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_a }}'
+ s3_prefix: '{{ default_logging_prefix }}'
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.load_balancer.load_balancer_attributes.access_log.emit_interval == default_logging_interval
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_a
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == default_logging_prefix
+ - result.load_balancer.load_balancer_attributes.access_log.enabled == True
+
+# ===========================================================
+
+- name: Update ELB Log delivery interval for ELB (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ access_logs:
+ enabled: true
+ interval: '{{ updated_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_a }}'
+ s3_prefix: '{{ default_logging_prefix }}'
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: Update ELB Log delivery interval for ELB
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ access_logs:
+ enabled: true
+ interval: '{{ updated_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_a }}'
+ s3_prefix: '{{ default_logging_prefix }}'
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_a
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == default_logging_prefix
+ - result.load_balancer.load_balancer_attributes.access_log.enabled == True
+
+- name: Update ELB Log delivery interval for ELB - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ access_logs:
+ enabled: true
+ interval: '{{ updated_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_a }}'
+ s3_prefix: '{{ default_logging_prefix }}'
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Update ELB Log delivery interval for ELB - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ access_logs:
+ enabled: true
+ interval: '{{ updated_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_a }}'
+ s3_prefix: '{{ default_logging_prefix }}'
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_a
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == default_logging_prefix
+ - result.load_balancer.load_balancer_attributes.access_log.enabled == True
+
+# ===========================================================
+
+- name: Update S3 Logging Location for ELB (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ access_logs:
+ enabled: true
+ interval: '{{ updated_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_b }}'
+ s3_prefix: '{{ default_logging_prefix }}'
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: Update S3 Logging Location for ELB
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ access_logs:
+ enabled: true
+ interval: '{{ updated_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_b }}'
+ s3_prefix: '{{ default_logging_prefix }}'
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_b
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == default_logging_prefix
+ - result.load_balancer.load_balancer_attributes.access_log.enabled == True
+
+- name: Update S3 Logging Location for ELB - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ access_logs:
+ enabled: true
+ interval: '{{ updated_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_b }}'
+ s3_prefix: '{{ default_logging_prefix }}'
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Update S3 Logging Location for ELB - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ access_logs:
+ enabled: true
+ interval: '{{ updated_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_b }}'
+ s3_prefix: '{{ default_logging_prefix }}'
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_b
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == default_logging_prefix
+ - result.load_balancer.load_balancer_attributes.access_log.enabled == True
+
+# ===========================================================
+
+- name: Update S3 Logging Prefix for ELB (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ access_logs:
+ enabled: true
+ interval: '{{ updated_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_b }}'
+ s3_prefix: '{{ updated_logging_prefix }}'
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: Update S3 Logging Prefix for ELB
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ access_logs:
+ enabled: true
+ interval: '{{ updated_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_b }}'
+ s3_prefix: '{{ updated_logging_prefix }}'
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_b
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == updated_logging_prefix
+ - result.load_balancer.load_balancer_attributes.access_log.enabled == True
+
+- name: Update S3 Logging Prefix for ELB - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ access_logs:
+ enabled: true
+ interval: '{{ updated_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_b }}'
+ s3_prefix: '{{ updated_logging_prefix }}'
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Update S3 Logging Prefix for ELB - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ access_logs:
+ enabled: true
+ interval: '{{ updated_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_b }}'
+ s3_prefix: '{{ updated_logging_prefix }}'
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_b
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == updated_logging_prefix
+ - result.load_balancer.load_balancer_attributes.access_log.enabled == True
+
+# ===========================================================
+
+- name: Empty S3 Logging Prefix for ELB (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ access_logs:
+ enabled: true
+ interval: '{{ updated_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_b }}'
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: Empty S3 Logging Prefix for ELB
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ access_logs:
+ enabled: true
+ interval: '{{ updated_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_b }}'
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_b
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == ''
+ - result.load_balancer.load_balancer_attributes.access_log.enabled == True
+
+- name: Empty S3 Logging Prefix for ELB - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ access_logs:
+ enabled: true
+ interval: '{{ updated_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_b }}'
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Empty S3 Logging Prefix for ELB - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ access_logs:
+ enabled: true
+ interval: '{{ updated_logging_interval }}'
+ s3_location: '{{ s3_logging_bucket_b }}'
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_b
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == ''
+ - result.load_balancer.load_balancer_attributes.access_log.enabled == True
+
+- name: Empty string S3 Logging Prefix for ELB - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ access_logs:
+ enabled: true
+ interval: '{{ updated_logging_interval }}'
+ s3_prefix: ''
+ s3_location: '{{ s3_logging_bucket_b }}'
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Empty string S3 Logging Prefix for ELB - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ access_logs:
+ enabled: true
+ interval: '{{ updated_logging_interval }}'
+ s3_prefix: ''
+ s3_location: '{{ s3_logging_bucket_b }}'
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_b
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == ''
+ - result.load_balancer.load_balancer_attributes.access_log.enabled == True
+
+# ===========================================================
+
+- name: Update S3 Logging interval for ELB - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ access_logs:
+ enabled: true
+ s3_location: '{{ s3_logging_bucket_b }}'
+ s3_prefix: ''
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Update S3 Logging interval for ELB - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ access_logs:
+ enabled: true
+ s3_location: '{{ s3_logging_bucket_b }}'
+ s3_prefix: ''
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.load_balancer.load_balancer_attributes.access_log.emit_interval == 60
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_b
+ - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == ''
+ - result.load_balancer.load_balancer_attributes.access_log.enabled == True
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_proxy_policy.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_proxy_policy.yml
new file mode 100644
index 000000000..50c5ce519
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_proxy_policy.yml
@@ -0,0 +1,141 @@
+---
+# ===========================================================
+- name: Enable proxy protocol on a listener (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ listeners: "{{ proxied_listener }}"
+ purge_listeners: false
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: Enable proxy protocol on a listener
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ listeners: "{{ proxied_listener }}"
+ purge_listeners: false
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.elb.proxy_policy == "ProxyProtocol-policy"
+ - result.load_balancer.backend_server_descriptions | length == 1
+ - result.load_balancer.backend_server_descriptions[0].policy_names == ["ProxyProtocol-policy"]
+
+- name: Enable proxy protocol on a listener - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ listeners: "{{ proxied_listener }}"
+ purge_listeners: false
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Enable proxy protocol on a listener - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ listeners: "{{ proxied_listener }}"
+ purge_listeners: false
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.elb.proxy_policy == "ProxyProtocol-policy"
+ - result.load_balancer.backend_server_descriptions | length == 1
+ - result.load_balancer.backend_server_descriptions[0].policy_names == ["ProxyProtocol-policy"]
+
+# ===========================================================
+
+- name: Disable proxy protocol on a listener (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ listeners: "{{ unproxied_listener }}"
+ purge_listeners: false
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: Disable proxy protocol on a listener
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ listeners: "{{ unproxied_listener }}"
+ purge_listeners: false
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.load_balancer.backend_server_descriptions | length == 0
+
+- name: Disable proxy protocol on a listener - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ listeners: "{{ unproxied_listener }}"
+ purge_listeners: false
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Disable proxy protocol on a listener - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ listeners: "{{ unproxied_listener }}"
+ purge_listeners: false
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.load_balancer.backend_server_descriptions | length == 0
+
+# ===========================================================
+
+- name: Re-enable proxy protocol on a listener (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ listeners: "{{ proxied_listener }}"
+ purge_listeners: false
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: Re-enable proxy protocol on a listener
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ listeners: "{{ proxied_listener }}"
+ purge_listeners: false
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.elb.proxy_policy == "ProxyProtocol-policy"
+ - result.load_balancer.backend_server_descriptions | length == 1
+ - result.load_balancer.backend_server_descriptions[0].policy_names == ["ProxyProtocol-policy"]
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_securitygroups.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_securitygroups.yml
new file mode 100644
index 000000000..21a56d792
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_securitygroups.yml
@@ -0,0 +1,106 @@
+---
+- name: Assign Security Groups to ELB (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ security_group_ids: ['{{ sg_b }}']
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: Assign Security Groups to ELB
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ security_group_ids: ['{{ sg_b }}']
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - sg_a not in result.elb.security_group_ids
+ - sg_b in result.elb.security_group_ids
+ - sg_c not in result.elb.security_group_ids
+
+- name: Assign Security Groups to ELB - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ security_group_ids: ['{{ sg_b }}']
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Assign Security Groups to ELB - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ security_group_ids: ['{{ sg_b }}']
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - sg_a not in result.elb.security_group_ids
+ - sg_b in result.elb.security_group_ids
+ - sg_c not in result.elb.security_group_ids
+
+# ===========================================================
+
+- name: Assign Security Groups to ELB by name (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ security_group_names: ['{{ resource_prefix }}-a', '{{ resource_prefix }}-c']
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: Assign Security Groups to ELB by name
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ security_group_names: ['{{ resource_prefix }}-a', '{{ resource_prefix }}-c']
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - sg_a in result.elb.security_group_ids
+ - sg_b not in result.elb.security_group_ids
+ - sg_c in result.elb.security_group_ids
+
+- name: Assign Security Groups to ELB by name - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ security_group_names: ['{{ resource_prefix }}-a', '{{ resource_prefix }}-c']
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Assign Security Groups to ELB by name - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ security_group_names: ['{{ resource_prefix }}-a', '{{ resource_prefix }}-c']
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - sg_a in result.elb.security_group_ids
+ - sg_b not in result.elb.security_group_ids
+ - sg_c in result.elb.security_group_ids
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_stickiness.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_stickiness.yml
new file mode 100644
index 000000000..9c0f925ec
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_stickiness.yml
@@ -0,0 +1,390 @@
+---
+# ==============================================================
+- name: App Cookie Stickiness (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ stickiness: "{{ app_stickiness }}"
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: App Cookie Stickiness
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ stickiness: "{{ app_stickiness }}"
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: App Cookie Stickiness - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ stickiness: "{{ app_stickiness }}"
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: App Cookie Stickiness - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ stickiness: "{{ app_stickiness }}"
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+
+# ==============================================================
+- name: Update App Cookie Stickiness (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ stickiness: "{{ updated_app_stickiness }}"
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: Update App Cookie Stickiness
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ stickiness: "{{ updated_app_stickiness }}"
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: Update App Cookie Stickiness - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ stickiness: "{{ updated_app_stickiness }}"
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Update App Cookie Stickiness - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ stickiness: "{{ updated_app_stickiness }}"
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+
+
+# ==============================================================
+
+- name: Disable Stickiness (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ stickiness:
+ enabled: false
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: Disable Stickiness
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ stickiness:
+ enabled: false
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: Disable Stickiness - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ stickiness:
+ enabled: false
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Disable Stickiness - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ stickiness:
+ enabled: false
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+
+# ==============================================================
+
+- name: Re-enable App Stickiness (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ stickiness: "{{ app_stickiness }}"
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: Re-enable App Stickiness
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ stickiness: "{{ app_stickiness }}"
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: Re-enable App Stickiness - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ stickiness: "{{ app_stickiness }}"
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Re-enable App Stickiness - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ stickiness: "{{ app_stickiness }}"
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+
+# ==============================================================
+- name: LB Stickiness (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ stickiness: "{{ lb_stickiness }}"
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: LB Stickiness
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ stickiness: "{{ lb_stickiness }}"
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: LB Stickiness - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ stickiness: "{{ lb_stickiness }}"
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: LB Stickiness - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ stickiness: "{{ lb_stickiness }}"
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+
+# ==============================================================
+- name: Update LB Stickiness (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ stickiness: "{{ updated_lb_stickiness }}"
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: Update LB Stickiness
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ stickiness: "{{ updated_lb_stickiness }}"
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: Update LB Stickiness - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ stickiness: "{{ updated_lb_stickiness }}"
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Update LB Stickiness - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ stickiness: "{{ updated_lb_stickiness }}"
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+
+
+# ==============================================================
+
+- name: Disable Stickiness (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ stickiness:
+ enabled: false
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: Disable Stickiness
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ stickiness:
+ enabled: false
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: Disable Stickiness - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ stickiness:
+ enabled: false
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Disable Stickiness - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ stickiness:
+ enabled: false
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+
+# ==============================================================
+
+- name: Re-enable LB Stickiness (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ stickiness: "{{ updated_lb_stickiness }}"
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: Re-enable LB Stickiness
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ stickiness: "{{ updated_lb_stickiness }}"
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: Re-enable LB Stickiness - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ stickiness: "{{ updated_lb_stickiness }}"
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Re-enable LB Stickiness - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ stickiness: "{{ updated_lb_stickiness }}"
+ register: result
+
+- assert:
+ that:
+ - result is not changed
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_tags.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_tags.yml
new file mode 100644
index 000000000..b78eb1c58
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_tags.yml
@@ -0,0 +1,141 @@
+---
+# ===========================================================
+# partial tags (no purge)
+# update tags (no purge)
+# update tags (with purge)
+# ===========================================================
+- name: Pass partial tags to ELB (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ tags: "{{ partial_tags }}"
+ purge_tags: false
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Pass partial tags to ELB
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ tags: "{{ partial_tags }}"
+ purge_tags: false
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.elb.tags == default_tags
+
+# ===========================================================
+
+- name: Add tags to ELB (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ tags: "{{ updated_tags }}"
+ purge_tags: false
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: Add tags to ELB
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ tags: "{{ updated_tags }}"
+ purge_tags: false
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.elb.tags == ( default_tags | combine(updated_tags) )
+
+- name: Add tags to ELB - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ tags: "{{ updated_tags }}"
+ purge_tags: false
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Add tags to ELB - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ tags: "{{ updated_tags }}"
+ purge_tags: false
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.elb.tags == ( default_tags | combine(updated_tags) )
+
+# ===========================================================
+
+- name: Purge tags from ELB (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ tags: "{{ updated_tags }}"
+ purge_tags: true
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+
+- name: Purge tags from ELB
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ tags: "{{ updated_tags }}"
+ purge_tags: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.elb.tags == updated_tags
+
+- name: Purge tags from ELB - idempotency (check_mode)
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ tags: "{{ updated_tags }}"
+ purge_tags: true
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Purge tags from ELB - idempotency
+ elb_classic_lb:
+ name: "{{ elb_name }}"
+ state: present
+ tags: "{{ updated_tags }}"
+ purge_tags: true
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.elb.tags == updated_tags
+
+# ===========================================================
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/templates/s3_policy.j2 b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/templates/s3_policy.j2
new file mode 100644
index 000000000..ee69dae33
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/templates/s3_policy.j2
@@ -0,0 +1,15 @@
+{
+ "Version": "2012-10-17",
+ "Id": "ELB-Logging-Policy",
+ "Statement": [
+ {
+ "Sid": "ELB-Logging",
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": "arn:aws:iam::{{ access_log_account_id }}:root"
+ },
+ "Action": "s3:PutObject",
+ "Resource": "arn:aws:s3:::{{ s3_logging_bucket }}/*"
+ }
+ ]
+}
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/vars/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/vars/main.yml
new file mode 100644
index 000000000..79194af1e
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/vars/main.yml
@@ -0,0 +1,2 @@
+---
+# vars file for test_ec2_elb_lb
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/aliases b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/aliases
new file mode 100644
index 000000000..9b3bde40b
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/aliases
@@ -0,0 +1,8 @@
+# reason: missing-policy
+# It's not possible to control what permissions are granted to a policy.
+# This makes securely testing iam_policy very difficult
+unsupported
+
+cloud/aws
+
+iam_policy_info
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/defaults/main.yml
new file mode 100644
index 000000000..caf40aebd
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/defaults/main.yml
@@ -0,0 +1,5 @@
+iam_name: '{{resource_prefix}}'
+iam_policy_name_a: '{{resource_prefix}}-document-a'
+iam_policy_name_b: '{{resource_prefix}}-document-b'
+iam_policy_name_c: '{{resource_prefix}}-json-a'
+iam_policy_name_d: '{{resource_prefix}}-json-b'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/files/no_access.json b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/files/no_access.json
new file mode 100644
index 000000000..a2f299757
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/files/no_access.json
@@ -0,0 +1,10 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Deny",
+ "Action": "*",
+ "Resource": "*"
+ }
+ ]
+}
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/files/no_access_with_id.json b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/files/no_access_with_id.json
new file mode 100644
index 000000000..9d40dd54a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/files/no_access_with_id.json
@@ -0,0 +1,11 @@
+{
+ "Id": "MyId",
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Deny",
+ "Action": "*",
+ "Resource": "*"
+ }
+ ]
+}
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/files/no_access_with_second_id.json b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/files/no_access_with_second_id.json
new file mode 100644
index 000000000..0efbc31d4
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/files/no_access_with_second_id.json
@@ -0,0 +1,11 @@
+{
+ "Id": "MyOtherId",
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Deny",
+ "Action": "*",
+ "Resource": "*"
+ }
+ ]
+}
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/files/no_trust.json b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/files/no_trust.json
new file mode 100644
index 000000000..c36616187
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/files/no_trust.json
@@ -0,0 +1,10 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Deny",
+ "Principal": {"AWS": "*"},
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/tasks/main.yml
new file mode 100644
index 000000000..0894490af
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/tasks/main.yml
@@ -0,0 +1,70 @@
+- name: Run integration tests for IAM (inline) Policy management
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+ # ============================================================
+ - name: Create user for tests
+ iam_user:
+ state: present
+ name: '{{ iam_name }}'
+ register: result
+ - name: Ensure user was created
+ assert:
+ that:
+ - result is changed
+
+ - name: Create role for tests
+ iam_role:
+ state: present
+ name: '{{ iam_name }}'
+ assume_role_policy_document: "{{ lookup('file','no_trust.json') }}"
+ register: result
+ - name: Ensure role was created
+ assert:
+ that:
+ - result is changed
+
+ - name: Create group for tests
+ iam_group:
+ state: present
+ name: '{{ iam_name }}'
+ register: result
+ - name: Ensure group was created
+ assert:
+ that:
+ - result is changed
+
+ # ============================================================
+
+ - name: Run tests for each type of object
+ include_tasks: object.yml
+ loop_control:
+ loop_var: iam_type
+ with_items:
+ - user
+ - group
+ - role
+
+ # ============================================================
+
+ always:
+ # ============================================================
+ - name: Remove user
+ iam_user:
+ state: absent
+ name: '{{ iam_name }}'
+ ignore_errors: yes
+ - name: Remove role
+ iam_role:
+ state: absent
+ name: '{{ iam_name }}'
+ ignore_errors: yes
+ - name: Remove group
+ iam_group:
+ state: absent
+ name: '{{ iam_name }}'
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/tasks/object.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/tasks/object.yml
new file mode 100644
index 000000000..75eb5a167
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/tasks/object.yml
@@ -0,0 +1,1169 @@
+- name: Run integration tests for IAM (inline) Policy management on {{ iam_type }}s
+ vars:
+ iam_object_key: '{{ iam_type }}_name'
+ block:
+ # ============================================================
+ - name: Fetch policies from {{ iam_type }} before making changes
+ iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ register: iam_policy_info
+ - name: Assert empty policy list
+ assert:
+ that:
+ - iam_policy_info is succeeded
+ - iam_policy_info.policies | length == 0
+ - iam_policy_info.all_policy_names | length == 0
+ - iam_policy_info.policy_names | length == 0
+
+ - name: Fetch policies from non-existent {{ iam_type }}
+ iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}-junk'
+ register: iam_policy_info
+ - name: Assert not failed
+ assert:
+ that:
+ - iam_policy_info is succeeded
+
+ # ============================================================
+ - name: Invalid creation of policy for {{ iam_type }} - missing required parameters
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_a }}'
+ skip_duplicates: yes
+ register: result
+ ignore_errors: yes
+ - name: Assert task failed with correct error message
+ assert:
+ that:
+ - result.failed
+ - "'state is present but any of the following are missing: policy_json' in result.msg"
+
+ - name: Create policy using document for {{ iam_type }} (check mode)
+ check_mode: yes
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_a }}'
+ policy_json: '{{ lookup("file", "no_access.json") }}'
+ skip_duplicates: yes
+ register: result
+ - name: Assert policy would be added for {{ iam_type }}
+ assert:
+ that:
+ - result is changed
+
+ - name: Create policy using document for {{ iam_type }}
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_a }}'
+ policy_json: '{{ lookup("file", "no_access.json") }}'
+ skip_duplicates: yes
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ register: iam_policy_info
+ - name: Assert policy was added for {{ iam_type }}
+ assert:
+ that:
+ - result is changed
+ - result.policies | length == 1
+ - iam_policy_name_a in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_name_a in iam_policy_info.policy_names
+ - iam_policy_info.policy_names | length == 1
+ - iam_policy_info.policies | length == 1
+ - iam_policy_name_a in iam_policy_info.all_policy_names
+ - iam_policy_info.all_policy_names | length == 1
+ - iam_policy_info.policies[0].policy_name == iam_policy_name_a
+ - '"Id" not in iam_policy_info.policies[0].policy_document'
+
+ - name: Create policy using document for {{ iam_type }} (idempotency - check mode)
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_a }}'
+ policy_json: '{{ lookup("file", "no_access.json") }}'
+ skip_duplicates: yes
+ register: result
+ check_mode: yes
+ - name: Assert no change would occur
+ assert:
+ that:
+ - result is not changed
+
+ - name: Create policy using document for {{ iam_type }} (idempotency)
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_a }}'
+ policy_json: '{{ lookup("file", "no_access.json") }}'
+ skip_duplicates: yes
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ register: iam_policy_info
+ - name: Assert no change
+ assert:
+ that:
+ - result is not changed
+ - result.policies | length == 1
+ - iam_policy_name_a in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_info.policies | length == 1
+ - iam_policy_info.all_policy_names | length == 1
+ - iam_policy_name_a in iam_policy_info.all_policy_names
+ - iam_policy_info.policies[0].policy_name == iam_policy_name_a
+ - '"Id" not in iam_policy_info.policies[0].policy_document'
+
+ # ============================================================
+ - name: Create policy using document for {{ iam_type }} (check mode) (skip_duplicates)
+ check_mode: yes
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ policy_json: '{{ lookup("file", "no_access.json") }}'
+ skip_duplicates: yes
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ register: iam_policy_info
+ - name: Assert policy would be added for {{ iam_type }}
+ assert:
+ that:
+ - result is not changed
+ - iam_policy_info.all_policy_names | length == 1
+ - '"policies" not in iam_policy_info'
+ - iam_policy_name_b not in iam_policy_info.all_policy_names
+
+ - name: Create policy using document for {{ iam_type }} (skip_duplicates)
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ policy_json: '{{ lookup("file", "no_access.json") }}'
+ skip_duplicates: yes
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ register: iam_policy_info
+ - name: Assert policy was not added for {{ iam_type }} (skip_duplicates)
+ assert:
+ that:
+ - result is not changed
+ - result.policies | length == 1
+ - iam_policy_name_b not in result.policies
+ - result[iam_object_key] == iam_name
+ - '"policies" not in iam_policy_info'
+ - '"policy_names" not in iam_policy_info'
+ - iam_policy_info.all_policy_names | length == 1
+ - iam_policy_name_b not in iam_policy_info.all_policy_names
+
+ - name: Create policy using document for {{ iam_type }} (check mode) (skip_duplicates
+ = no)
+ check_mode: yes
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ policy_json: '{{ lookup("file", "no_access.json") }}'
+ skip_duplicates: no
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ register: iam_policy_info
+ - name: Assert policy would be added for {{ iam_type }}
+ assert:
+ that:
+ - result.changed == True
+ - '"policies" not in iam_policy_info'
+ - iam_policy_info.all_policy_names | length == 1
+ - iam_policy_name_a in iam_policy_info.all_policy_names
+ - iam_policy_name_b not in iam_policy_info.all_policy_names
+
+ - name: Create policy using document for {{ iam_type }} (skip_duplicates = no)
+ iam_policy:
+ state: present
+ skip_duplicates: no
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ policy_json: '{{ lookup("file", "no_access.json") }}'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ register: iam_policy_info
+ - name: Assert policy was added for {{ iam_type }}
+ assert:
+ that:
+ - result is changed
+ - result.policies | length == 2
+ - iam_policy_name_b in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_info.policies | length == 1
+ - iam_policy_info.all_policy_names | length == 2
+ - iam_policy_name_a in iam_policy_info.all_policy_names
+ - iam_policy_name_b in iam_policy_info.all_policy_names
+ - iam_policy_info.policies[0].policy_name == iam_policy_name_b
+ - '"Id" not in iam_policy_info.policies[0].policy_document'
+
+ - name: Create policy using document for {{ iam_type }} (idempotency - check mode)
+ (skip_duplicates = no)
+ iam_policy:
+ state: present
+ skip_duplicates: no
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ policy_json: '{{ lookup("file", "no_access.json") }}'
+ register: result
+ check_mode: yes
+ - name: Assert no change would occur
+ assert:
+ that:
+ - result is not changed
+
+ - name: Create policy using document for {{ iam_type }} (idempotency) (skip_duplicates
+ = no)
+ iam_policy:
+ state: present
+ skip_duplicates: no
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ policy_json: '{{ lookup("file", "no_access.json") }}'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ register: iam_policy_info
+ - name: Assert no change
+ assert:
+ that:
+ - result is not changed
+ - result.policies | length == 2
+ - iam_policy_name_b in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_info.policies | length == 1
+ - iam_policy_name_a in iam_policy_info.all_policy_names
+ - iam_policy_name_b in iam_policy_info.all_policy_names
+ - iam_policy_info.all_policy_names | length == 2
+ - iam_policy_info.policies[0].policy_name == iam_policy_name_b
+ - '"Id" not in iam_policy_info.policies[0].policy_document'
+
+ # ============================================================
+ - name: Create policy using json for {{ iam_type }} (check mode)
+ check_mode: yes
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ policy_json: '{{ lookup("file", "no_access_with_id.json") }}'
+ skip_duplicates: yes
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ register: iam_policy_info
+ - name: Assert policy would be added for {{ iam_type }}
+ assert:
+ that:
+ - result is changed
+ - '"policies" not in iam_policy_info'
+ - iam_policy_info.all_policy_names | length == 2
+ - iam_policy_name_c not in iam_policy_info.all_policy_names
+ - iam_policy_name_a in iam_policy_info.all_policy_names
+ - iam_policy_name_b in iam_policy_info.all_policy_names
+
+ - name: Create policy using json for {{ iam_type }}
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ policy_json: '{{ lookup("file", "no_access_with_id.json") }}'
+ skip_duplicates: yes
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ register: iam_policy_info
+ - name: Assert policy was added for {{ iam_type }}
+ assert:
+ that:
+ - result is changed
+ - result.policies | length == 3
+ - iam_policy_name_c in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_info.policies | length == 1
+ - iam_policy_name_a in iam_policy_info.all_policy_names
+ - iam_policy_name_b in iam_policy_info.all_policy_names
+ - iam_policy_name_c in iam_policy_info.all_policy_names
+ - iam_policy_info.all_policy_names | length == 3
+ - iam_policy_info.policies[0].policy_name == iam_policy_name_c
+ - iam_policy_info.policies[0].policy_document.Id == 'MyId'
+
+ - name: Create policy using json for {{ iam_type }} (idempotency - check mode)
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ policy_json: '{{ lookup("file", "no_access_with_id.json") }}'
+ skip_duplicates: yes
+ register: result
+ check_mode: yes
+ - name: Assert no change would occur
+ assert:
+ that:
+ - result is not changed
+
+ - name: Create policy using json for {{ iam_type }} (idempotency)
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ policy_json: '{{ lookup("file", "no_access_with_id.json") }}'
+ skip_duplicates: yes
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ register: iam_policy_info
+ - name: Assert no change
+ assert:
+ that:
+ - result is not changed
+ - result.policies | length == 3
+ - iam_policy_name_c in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_name_a in iam_policy_info.all_policy_names
+ - iam_policy_name_b in iam_policy_info.all_policy_names
+ - iam_policy_name_c in iam_policy_info.all_policy_names
+ - iam_policy_info.all_policy_names | length == 3
+ - iam_policy_info.policies[0].policy_name == iam_policy_name_c
+ - iam_policy_info.policies[0].policy_document.Id == 'MyId'
+
+ # ============================================================
+ - name: Create policy using json for {{ iam_type }} (check mode) (skip_duplicates)
+ check_mode: yes
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ policy_json: '{{ lookup("file", "no_access_with_id.json") }}'
+ skip_duplicates: yes
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ register: iam_policy_info
+ - name: Assert policy would not be added for {{ iam_type }}
+ assert:
+ that:
+ - result is not changed
+ - iam_policy_name_a in iam_policy_info.all_policy_names
+ - iam_policy_name_b in iam_policy_info.all_policy_names
+ - iam_policy_name_c in iam_policy_info.all_policy_names
+ - iam_policy_name_d not in iam_policy_info.all_policy_names
+ - iam_policy_info.all_policy_names | length == 3
+ - '"policies" not in iam_policy_info'
+
+ - name: Create policy using json for {{ iam_type }} (skip_duplicates)
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ policy_json: '{{ lookup("file", "no_access_with_id.json") }}'
+ skip_duplicates: yes
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ register: iam_policy_info
+ - name: Assert policy was not added for {{ iam_type }} (skip_duplicates)
+ assert:
+ that:
+ - result is not changed
+ - result.policies | length == 3
+ - iam_policy_name_d not in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_name_a in iam_policy_info.all_policy_names
+ - iam_policy_name_b in iam_policy_info.all_policy_names
+ - iam_policy_name_c in iam_policy_info.all_policy_names
+ - iam_policy_name_d not in iam_policy_info.all_policy_names
+ - iam_policy_info.all_policy_names | length == 3
+ - '"policies" not in iam_policy_info'
+
+ - name: Create policy using json for {{ iam_type }} (check mode) (skip_duplicates
+ = no)
+ check_mode: yes
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ policy_json: '{{ lookup("file", "no_access_with_id.json") }}'
+ skip_duplicates: no
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ register: iam_policy_info
+ - name: Assert policy would be added for {{ iam_type }}
+ assert:
+ that:
+ - result.changed == True
+
+ - name: Create policy using json for {{ iam_type }} (skip_duplicates = no)
+ iam_policy:
+ state: present
+ skip_duplicates: no
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ policy_json: '{{ lookup("file", "no_access_with_id.json") }}'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ register: iam_policy_info
+ - name: Assert policy was added for {{ iam_type }}
+ assert:
+ that:
+ - result is changed
+ - result.policies | length == 4
+ - iam_policy_name_d in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_name_a in iam_policy_info.all_policy_names
+ - iam_policy_name_b in iam_policy_info.all_policy_names
+ - iam_policy_name_c in iam_policy_info.all_policy_names
+ - iam_policy_name_d in iam_policy_info.all_policy_names
+ - iam_policy_name_a not in iam_policy_info.policy_names
+ - iam_policy_name_b not in iam_policy_info.policy_names
+ - iam_policy_name_c not in iam_policy_info.policy_names
+ - iam_policy_name_d in iam_policy_info.policy_names
+ - iam_policy_info.policy_names | length == 1
+ - iam_policy_info.all_policy_names | length == 4
+ - iam_policy_info.policies[0].policy_name == iam_policy_name_d
+ - iam_policy_info.policies[0].policy_document.Id == 'MyId'
+
+ - name: Create policy using json for {{ iam_type }} (idempotency - check mode) (skip_duplicates
+ = no)
+ iam_policy:
+ state: present
+ skip_duplicates: no
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ policy_json: '{{ lookup("file", "no_access_with_id.json") }}'
+ register: result
+ check_mode: yes
+ - name: Assert no change would occur
+ assert:
+ that:
+ - result is not changed
+
+  - name: Create policy using json for {{ iam_type }} (idempotency) (skip_duplicates = no)
+ iam_policy:
+ state: present
+ skip_duplicates: no
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ policy_json: '{{ lookup("file", "no_access_with_id.json") }}'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ register: iam_policy_info
+ - name: Assert no change
+ assert:
+ that:
+ - result is not changed
+ - result.policies | length == 4
+ - iam_policy_name_d in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_name_a in iam_policy_info.all_policy_names
+ - iam_policy_name_b in iam_policy_info.all_policy_names
+ - iam_policy_name_c in iam_policy_info.all_policy_names
+ - iam_policy_name_d in iam_policy_info.all_policy_names
+ - iam_policy_info.all_policy_names | length == 4
+ - iam_policy_info.policies[0].policy_name == iam_policy_name_d
+ - iam_policy_info.policies[0].policy_document.Id == 'MyId'
+
+ # ============================================================
+ - name: Test fetching multiple policies from {{ iam_type }}
+ iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ register: iam_policy_info
+ - name: Assert all policies returned
+ assert:
+ that:
+ - iam_policy_info is succeeded
+ - iam_policy_info.policies | length == 4
+ - iam_policy_info.all_policy_names | length == 4
+ - iam_policy_name_a in iam_policy_info.all_policy_names
+ - iam_policy_name_b in iam_policy_info.all_policy_names
+ - iam_policy_name_c in iam_policy_info.all_policy_names
+ - iam_policy_name_d in iam_policy_info.all_policy_names
+ # Quick test that the policies are the ones we expect
+      - iam_policy_info.policies | community.general.json_query('[*].policy_name') | length == 4
+      - iam_policy_info.policies | community.general.json_query('[?policy_document.Id == `MyId`].policy_name') | length == 2
+      - iam_policy_name_c in (iam_policy_info.policies | community.general.json_query('[?policy_document.Id == `MyId`].policy_name') | list)
+      - iam_policy_name_d in (iam_policy_info.policies | community.general.json_query('[?policy_document.Id == `MyId`].policy_name') | list)
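+      # The same check could be written with built-in Jinja2 filters, avoiding the
+      # community.general dependency; a sketch (assuming the same registered
+      # iam_policy_info result) would be:
+      #   iam_policy_info.policies | selectattr('policy_document.Id', 'defined')
+      #     | selectattr('policy_document.Id', 'eq', 'MyId')
+      #     | map(attribute='policy_name') | list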
+
+ # ============================================================
+ - name: Update policy using document for {{ iam_type }} (check mode) (skip_duplicates)
+ check_mode: yes
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_a }}'
+ policy_json: '{{ lookup("file", "no_access_with_id.json") }}'
+ skip_duplicates: yes
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_a }}'
+ register: iam_policy_info
+ - name: Assert policy would not be added for {{ iam_type }}
+ assert:
+ that:
+ - result is not changed
+ - iam_policy_info.policies[0].policy_name == iam_policy_name_a
+ - '"Id" not in iam_policy_info.policies[0].policy_document'
+
+ - name: Update policy using document for {{ iam_type }} (skip_duplicates)
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_a }}'
+ policy_json: '{{ lookup("file", "no_access_with_id.json") }}'
+ skip_duplicates: yes
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_a }}'
+ register: iam_policy_info
+ - name: Assert policy was not updated for {{ iam_type }} (skip_duplicates)
+ assert:
+ that:
+ - result is not changed
+ - result.policies | length == 4
+ - iam_policy_name_a in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_info.all_policy_names | length == 4
+ - iam_policy_info.policies[0].policy_name == iam_policy_name_a
+ - '"Id" not in iam_policy_info.policies[0].policy_document'
+
+  - name: Update policy using document for {{ iam_type }} (check mode) (skip_duplicates = no)
+ check_mode: yes
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_a }}'
+ policy_json: '{{ lookup("file", "no_access_with_id.json") }}'
+ skip_duplicates: no
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_a }}'
+ register: iam_policy_info
+ - name: Assert policy would be updated for {{ iam_type }}
+ assert:
+ that:
+      - result is changed
+ - iam_policy_info.all_policy_names | length == 4
+ - iam_policy_info.policies[0].policy_name == iam_policy_name_a
+ - '"Id" not in iam_policy_info.policies[0].policy_document'
+
+ - name: Update policy using document for {{ iam_type }} (skip_duplicates = no)
+ iam_policy:
+ state: present
+ skip_duplicates: no
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_a }}'
+ policy_json: '{{ lookup("file", "no_access_with_id.json") }}'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_a }}'
+ register: iam_policy_info
+ - name: Assert policy was updated for {{ iam_type }}
+ assert:
+ that:
+ - result is changed
+ - result.policies | length == 4
+ - iam_policy_name_a in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_info.policies[0].policy_document.Id == 'MyId'
+
+  - name: Update policy using document for {{ iam_type }} (idempotency - check mode) (skip_duplicates = no)
+ iam_policy:
+ state: present
+ skip_duplicates: no
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_a }}'
+ policy_json: '{{ lookup("file", "no_access_with_id.json") }}'
+ register: result
+ check_mode: yes
+ - name: Assert no change would occur
+ assert:
+ that:
+ - result is not changed
+
+  - name: Update policy using document for {{ iam_type }} (idempotency) (skip_duplicates = no)
+ iam_policy:
+ state: present
+ skip_duplicates: no
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_a }}'
+ policy_json: '{{ lookup("file", "no_access_with_id.json") }}'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_a }}'
+ register: iam_policy_info
+ - name: Assert no change
+ assert:
+ that:
+ - result is not changed
+ - result.policies | length == 4
+ - iam_policy_name_a in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_info.policies[0].policy_document.Id == 'MyId'
+
+ - name: Delete policy A
+ iam_policy:
+ state: absent
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_a }}'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_a }}'
+ register: iam_policy_info
+ - name: Assert deleted
+ assert:
+ that:
+ - result is changed
+ - result.policies | length == 3
+ - iam_policy_name_a not in result.policies
+ - result[iam_object_key] == iam_name
+ - '"policies" not in iam_policy_info'
+ - iam_policy_info.all_policy_names | length == 3
+ - iam_policy_name_a not in iam_policy_info.all_policy_names
+
+ # ============================================================
+ # Update C with no_access.json
+ # Delete C
+
+ - name: Update policy using json for {{ iam_type }} (check mode) (skip_duplicates)
+ check_mode: yes
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ policy_json: '{{ lookup("file", "no_access.json") }}'
+ skip_duplicates: yes
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ register: iam_policy_info
+ - name: Assert policy would not be added for {{ iam_type }}
+ assert:
+ that:
+ - result is not changed
+ - iam_policy_info.policies[0].policy_document.Id == 'MyId'
+
+ - name: Update policy using json for {{ iam_type }} (skip_duplicates)
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ policy_json: '{{ lookup("file", "no_access.json") }}'
+ skip_duplicates: yes
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ register: iam_policy_info
+ - name: Assert policy was not updated for {{ iam_type }} (skip_duplicates)
+ assert:
+ that:
+ - result is not changed
+ - result.policies | length == 3
+ - iam_policy_name_c in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_info.policies[0].policy_document.Id == 'MyId'
+
+  - name: Update policy using json for {{ iam_type }} (check mode) (skip_duplicates = no)
+ check_mode: yes
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ policy_json: '{{ lookup("file", "no_access.json") }}'
+ skip_duplicates: no
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ register: iam_policy_info
+ - name: Assert policy would be updated for {{ iam_type }}
+ assert:
+ that:
+      - result is changed
+ - iam_policy_info.policies[0].policy_document.Id == 'MyId'
+
+ - name: Update policy using json for {{ iam_type }} (skip_duplicates = no)
+ iam_policy:
+ state: present
+ skip_duplicates: no
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ policy_json: '{{ lookup("file", "no_access.json") }}'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ register: iam_policy_info
+ - name: Assert policy was updated for {{ iam_type }}
+ assert:
+ that:
+ - result is changed
+ - result.policies | length == 3
+ - iam_policy_name_c in result.policies
+ - result[iam_object_key] == iam_name
+ - '"Id" not in iam_policy_info.policies[0].policy_document'
+
+  - name: Update policy using json for {{ iam_type }} (idempotency - check mode) (skip_duplicates = no)
+ iam_policy:
+ state: present
+ skip_duplicates: no
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ policy_json: '{{ lookup("file", "no_access.json") }}'
+ register: result
+ check_mode: yes
+ - name: Assert no change would occur
+ assert:
+ that:
+ - result is not changed
+
+  - name: Update policy using json for {{ iam_type }} (idempotency) (skip_duplicates = no)
+ iam_policy:
+ state: present
+ skip_duplicates: no
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ policy_json: '{{ lookup("file", "no_access.json") }}'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ register: iam_policy_info
+ - name: Assert no change
+ assert:
+ that:
+ - result is not changed
+ - result.policies | length == 3
+ - iam_policy_name_c in result.policies
+ - result[iam_object_key] == iam_name
+ - '"Id" not in iam_policy_info.policies[0].policy_document'
+
+ - name: Delete policy C
+ iam_policy:
+ state: absent
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ register: iam_policy_info
+ - name: Assert deleted
+ assert:
+ that:
+ - result is changed
+ - result.policies | length == 2
+ - iam_policy_name_c not in result.policies
+ - result[iam_object_key] == iam_name
+ - '"policies" not in iam_policy_info'
+ - iam_policy_info.all_policy_names | length == 2
+ - iam_policy_name_c not in iam_policy_info.all_policy_names
+
+ # ============================================================
+ - name: Update policy using document for {{ iam_type }} (check mode)
+ check_mode: yes
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ policy_json: '{{ lookup("file", "no_access_with_second_id.json") }}'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ register: iam_policy_info
+ - name: Assert policy would be updated for {{ iam_type }}
+ assert:
+ that:
+      - result is changed
+ - '"Id" not in iam_policy_info.policies[0].policy_document'
+
+ - name: Update policy using document for {{ iam_type }}
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ policy_json: '{{ lookup("file", "no_access_with_second_id.json") }}'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ register: iam_policy_info
+ - name: Assert policy was updated for {{ iam_type }}
+ assert:
+ that:
+ - result is changed
+ - result.policies | length == 2
+ - iam_policy_name_b in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_info.policies[0].policy_document.Id == 'MyOtherId'
+
+ - name: Update policy using document for {{ iam_type }} (idempotency - check mode)
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ policy_json: '{{ lookup("file", "no_access_with_second_id.json") }}'
+ register: result
+ check_mode: yes
+ - name: Assert no change would occur
+ assert:
+ that:
+ - result is not changed
+
+ - name: Update policy using document for {{ iam_type }} (idempotency)
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ policy_json: '{{ lookup("file", "no_access_with_second_id.json") }}'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ register: iam_policy_info
+ - name: Assert no change
+ assert:
+ that:
+ - result is not changed
+ - result.policies | length == 2
+ - iam_policy_name_b in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_info.policies[0].policy_document.Id == 'MyOtherId'
+
+ - name: Delete policy B
+ iam_policy:
+ state: absent
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ register: iam_policy_info
+ - name: Assert deleted
+ assert:
+ that:
+ - result is changed
+ - result.policies | length == 1
+ - iam_policy_name_b not in result.policies
+ - result[iam_object_key] == iam_name
+ - '"policies" not in iam_policy_info'
+ - iam_policy_info.all_policy_names | length == 1
+ - iam_policy_name_b not in iam_policy_info.all_policy_names
+
+ # ============================================================
+ - name: Update policy using json for {{ iam_type }} (check mode)
+ check_mode: yes
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ policy_json: '{{ lookup("file", "no_access_with_second_id.json") }}'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ register: iam_policy_info
+ - name: Assert policy would be updated for {{ iam_type }}
+ assert:
+ that:
+      - result is changed
+ - iam_policy_info.policies[0].policy_document.Id == 'MyId'
+
+ - name: Update policy using json for {{ iam_type }}
+ iam_policy:
+ state: present
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ policy_json: '{{ lookup("file", "no_access_with_second_id.json") }}'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ register: iam_policy_info
+ - name: Assert policy was updated for {{ iam_type }}
+ assert:
+ that:
+ - result is changed
+ - result.policies | length == 1
+ - iam_policy_name_d in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_info.policies[0].policy_document.Id == 'MyOtherId'
+
+ - name: Update policy using json for {{ iam_type }} (idempotency - check mode)
+ iam_policy:
+ state: present
+ skip_duplicates: no
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ policy_json: '{{ lookup("file", "no_access_with_second_id.json") }}'
+ register: result
+ check_mode: yes
+ - name: Assert no change would occur
+ assert:
+ that:
+ - result is not changed
+
+ - name: Update policy using json for {{ iam_type }} (idempotency)
+ iam_policy:
+ state: present
+ skip_duplicates: no
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ policy_json: '{{ lookup("file", "no_access_with_second_id.json") }}'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ register: iam_policy_info
+ - name: Assert no change
+ assert:
+ that:
+ - result is not changed
+ - result.policies | length == 1
+ - iam_policy_name_d in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_info.policies[0].policy_document.Id == 'MyOtherId'
+
+ # ============================================================
+ - name: Delete policy D (check_mode)
+ check_mode: yes
+ iam_policy:
+ state: absent
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ register: iam_policy_info
+ - name: Assert not deleted
+ assert:
+ that:
+ - result is changed
+ - result.policies | length == 1
+ - iam_policy_name_d in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_info.all_policy_names | length == 1
+ - iam_policy_name_d in iam_policy_info.all_policy_names
+ - iam_policy_info.policies[0].policy_document.Id == 'MyOtherId'
+
+ - name: Delete policy D
+ iam_policy:
+ state: absent
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ register: iam_policy_info
+ - name: Assert deleted
+ assert:
+ that:
+ - result is changed
+ - '"policies" not in iam_policy_info'
+ - iam_policy_name_d not in result.policies
+ - result[iam_object_key] == iam_name
+ - iam_policy_info.all_policy_names | length == 0
+
+ - name: Delete policy D (test idempotency)
+ iam_policy:
+ state: absent
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ register: iam_policy_info
+ - name: Assert deleted
+ assert:
+ that:
+ - result is not changed
+ - '"policies" not in iam_policy_info'
+ - iam_policy_info.all_policy_names | length == 0
+
+ - name: Delete policy D (check_mode) (test idempotency)
+ check_mode: yes
+ iam_policy:
+ state: absent
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ register: result
+ - iam_policy_info:
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ register: iam_policy_info
+ - name: Assert deleted
+ assert:
+ that:
+ - result is not changed
+ - '"policies" not in iam_policy_info'
+ - iam_policy_info.all_policy_names | length == 0
+
+ always:
+ # ============================================================
+ - name: Delete policy A for {{ iam_type }}
+ iam_policy:
+ state: absent
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_a }}'
+ ignore_errors: yes
+ - name: Delete policy B for {{ iam_type }}
+ iam_policy:
+ state: absent
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_b }}'
+ ignore_errors: yes
+ - name: Delete policy C for {{ iam_type }}
+ iam_policy:
+ state: absent
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_c }}'
+ ignore_errors: yes
+ - name: Delete policy D for {{ iam_type }}
+ iam_policy:
+ state: absent
+ iam_type: '{{ iam_type }}'
+ iam_name: '{{ iam_name }}'
+ policy_name: '{{ iam_policy_name_d }}'
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_user/aliases b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/aliases
new file mode 100644
index 000000000..cf11724d7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/aliases
@@ -0,0 +1,9 @@
+# reason: missing-policy
+# It should be possible to test iam_user by limiting which policies can be
+# attached to the users.
+# Careful review is needed prior to adding this to the main CI.
+unsupported
+
+cloud/aws
+
+iam_user_info
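+#
+# A local run remains possible; for example (a sketch, assuming credentials with
+# sufficient IAM permissions are already configured for ansible-test):
+#   ansible-test integration iam_user --docker default -v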
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_user/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/defaults/main.yml
new file mode 100644
index 000000000..d5726a48b
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/defaults/main.yml
@@ -0,0 +1,10 @@
+test_group: '{{ resource_prefix }}-group'
+test_path: /
+test_user: '{{ test_users[0] }}'
+test_user3: '{{ test_users[2] }}'
+test_password: ATotallySecureUncrackablePassword1!
+test_new_password: ATotallyNewSecureUncrackablePassword1!
+test_users:
+- '{{ resource_prefix }}-user-a'
+- '{{ resource_prefix }}-user-b'
+- '{{ resource_prefix }}-user-c'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_user/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/main.yml
new file mode 100644
index 000000000..06279024f
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/main.yml
@@ -0,0 +1,798 @@
+- name: set up aws connection info
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+ - name: ensure improper usage of parameters fails gracefully
+ iam_user_info:
+ path: '{{ test_path }}'
+ group: '{{ test_group }}'
+ ignore_errors: yes
+ register: iam_user_info_path_group
+ - assert:
+ that:
+ - iam_user_info_path_group is failed
+ - 'iam_user_info_path_group.msg == "parameters are mutually exclusive: group|path"'
+
+ - name: create test user (check mode)
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ check_mode: yes
+ register: iam_user
+ - name: assert that the user would be created
+ assert:
+ that:
+ - iam_user is changed
+
+ - name: create test user
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ register: iam_user
+ - name: assert that the user is created
+ assert:
+ that:
+ - iam_user is changed
+
+ - name: ensure test user exists (no change - check mode)
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ register: iam_user
+ check_mode: yes
+ - name: assert that user would not change
+ assert:
+ that:
+ - iam_user is not changed
+
+ - name: ensure test user exists (no change)
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ register: iam_user
+ - name: assert that the user wasn't changed
+ assert:
+ that:
+ - iam_user is not changed
+
+ - name: ensure the info used to validate other tests is valid
+ set_fact:
+ test_iam_user: '{{ iam_user.iam_user.user }}'
+ - assert:
+ that:
+ - test_iam_user.arn.startswith("arn:aws:iam")
+ - test_iam_user.arn.endswith("user/" + test_user )
+ - test_iam_user.create_date is not none
+      - test_iam_user.path == test_path
+ - test_iam_user.user_id is not none
+      - test_iam_user.user_name == test_user
+ - test_iam_user.tags | length == 0
+
+ - name: get info on IAM user(s)
+ iam_user_info:
+ register: iam_user_info
+ - assert:
+ that:
+ - iam_user_info.iam_users | length != 0
+
+ - name: get info on IAM user(s) with name
+ iam_user_info:
+ name: '{{ test_user }}'
+ register: iam_user_info
+ - assert:
+ that:
+ - iam_user_info.iam_users | length == 1
+ - iam_user_info.iam_users[0].arn == test_iam_user.arn
+ - iam_user_info.iam_users[0].create_date == test_iam_user.create_date
+ - iam_user_info.iam_users[0].path == test_iam_user.path
+ - iam_user_info.iam_users[0].user_id == test_iam_user.user_id
+ - iam_user_info.iam_users[0].user_name == test_iam_user.user_name
+ - iam_user_info.iam_users[0].tags | length == 0
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: create test user with password (check mode)
+ iam_user:
+ name: '{{ test_user3 }}'
+ password: '{{ test_password }}'
+ state: present
+ check_mode: yes
+ register: iam_user
+ - name: assert that the second user would be created
+ assert:
+ that:
+ - iam_user is changed
+
+ - name: create second test user with password
+ iam_user:
+ name: '{{ test_user3 }}'
+ password: '{{ test_password }}'
+ password_reset_required: yes
+ state: present
+ wait: false
+ register: iam_user
+ - name: assert that the second user is created
+ assert:
+ that:
+ - iam_user is changed
+ - iam_user.iam_user.user.password_reset_required
+
+ - name: get info on IAM user(s) on path
+ iam_user_info:
+ path: '{{ test_path }}'
+ name: '{{ test_user }}'
+ register: iam_user_info
+ - assert:
+ that:
+ - iam_user_info.iam_users | length == 1
+ - iam_user_info.iam_users[0].arn == test_iam_user.arn
+ - iam_user_info.iam_users[0].create_date == test_iam_user.create_date
+ - iam_user_info.iam_users[0].path == test_iam_user.path
+ - iam_user_info.iam_users[0].user_id == test_iam_user.user_id
+ - iam_user_info.iam_users[0].user_name == test_iam_user.user_name
+ - iam_user_info.iam_users[0].tags | length == 0
+
+ # ------------------------------------------------------------------------------------------
+ ## Test tags creation / updates
+ - name: Add Tag (check mode)
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ tags:
+ TagA: ValueA
+ register: iam_user
+ check_mode: yes
+ - assert:
+ that:
+ - iam_user is changed
+
+ - name: Add Tag
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ tags:
+ TagA: ValueA
+ register: iam_user
+ - assert:
+ that:
+ - iam_user is changed
+ - iam_user.iam_user.user.user_name == test_user
+ - iam_user.iam_user.user.tags | length == 1
+ - '"TagA" in iam_user.iam_user.user.tags'
+ - iam_user.iam_user.user.tags.TagA == "ValueA"
+
+ - name: Add Tag (no change - check mode)
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ tags:
+ TagA: ValueA
+ register: iam_user
+ check_mode: yes
+ - assert:
+ that:
+ - iam_user is not changed
+
+ - name: Add Tag (no change)
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ tags:
+ TagA: ValueA
+ register: iam_user
+ - assert:
+ that:
+ - iam_user is not changed
+ - iam_user.iam_user.user.user_name == test_user
+ - iam_user.iam_user.user.tags | length == 1
+ - '"TagA" in iam_user.iam_user.user.tags'
+ - iam_user.iam_user.user.tags.TagA == "ValueA"
+
+ - name: Extend Tags
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ purge_tags: no
+ tags:
+ tag_b: value_b
+ Tag C: Value C
+ tag d: value d
+ register: iam_user
+ - assert:
+ that:
+ - iam_user is changed
+ - iam_user.iam_user.user.user_name == test_user
+ - iam_user.iam_user.user.tags | length == 4
+ - '"TagA" in iam_user.iam_user.user.tags'
+ - '"tag_b" in iam_user.iam_user.user.tags'
+ - '"Tag C" in iam_user.iam_user.user.tags'
+ - '"tag d" in iam_user.iam_user.user.tags'
+ - iam_user.iam_user.user.tags.TagA == "ValueA"
+ - iam_user.iam_user.user.tags.tag_b == "value_b"
+ - iam_user.iam_user.user.tags["Tag C"] == "Value C"
+ - iam_user.iam_user.user.tags["tag d"] == "value d"
+
+ - name: Create user without Tag (no change)
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ register: iam_user
+ - assert:
+ that:
+ - iam_user is not changed
+ - iam_user.iam_user.user.user_name == test_user
+ - iam_user.iam_user.user.tags | length == 4
+
+ - name: Remove all Tags (check mode)
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ tags: {}
+ check_mode: yes
+ register: iam_user
+ - assert:
+ that:
+ - iam_user is changed
+
+ - name: Remove 3 Tags
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ tags:
+ TagA: ValueA
+ register: iam_user
+ - assert:
+ that:
+ - iam_user is changed
+ - iam_user.iam_user.user.user_name == test_user
+ - iam_user.iam_user.user.tags | length == 1
+ - '"TagA" in iam_user.iam_user.user.tags'
+ - iam_user.iam_user.user.tags.TagA == "ValueA"
+
+ - name: Change Tag (check mode)
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ tags:
+ TagA: AnotherValueA
+ register: iam_user
+ check_mode: yes
+ - assert:
+ that:
+ - iam_user is changed
+
+ - name: Change Tag
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ tags:
+ TagA: AnotherValueA
+ register: iam_user
+ - assert:
+ that:
+ - iam_user is changed
+ - iam_user.iam_user.user.user_name == test_user
+ - iam_user.iam_user.user.tags | length == 1
+ - '"TagA" in iam_user.iam_user.user.tags'
+ - iam_user.iam_user.user.tags.TagA == "AnotherValueA"
+
+ - name: Remove All Tags
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ tags: {}
+ register: iam_user
+ - assert:
+ that:
+ - iam_user is changed
+ - iam_user.iam_user.user.user_name == test_user
+ - iam_user.iam_user.user.tags | length == 0
+
+ - name: Remove All Tags (no change)
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ tags: {}
+ register: iam_user
+ - assert:
+ that:
+ - iam_user is not changed
+ - iam_user.iam_user.user.user_name == test_user
+ - iam_user.iam_user.user.tags | length == 0
+
+ # ------------------------------------------------------------------------------------------
+ ## Test user password update
+ - name: test update IAM password with on_create only (check mode)
+ iam_user:
+ name: '{{ test_user3 }}'
+ password: '{{ test_new_password }}'
+ update_password: on_create
+ state: present
+ register: iam_user_update
+ check_mode: yes
+ - assert:
+ that:
+ - iam_user_update is not changed
+
+ - name: test update IAM password with on_create only
+ iam_user:
+ name: '{{ test_user3 }}'
+ password: '{{ test_new_password }}'
+ update_password: on_create
+ state: present
+ register: iam_user_update
+ - assert:
+ that:
+ - iam_user_update is not changed
+
+ - name: update IAM password (check mode)
+ iam_user:
+ name: '{{ test_user3 }}'
+ password: '{{ test_new_password }}'
+ state: present
+ register: iam_user_update
+ check_mode: yes
+ - assert:
+ that:
+ - iam_user_update is changed
+
+ # flakey, there is no waiter for login profiles
+ # Login Profile for User ansible-user-c cannot be modified while login profile is being created.
+ - name: update IAM password
+ iam_user:
+ name: '{{ test_user3 }}'
+ password: '{{ test_new_password }}'
+ state: present
+ register: iam_user_update
+    until: iam_user_update is not failed
+ delay: 3
+ retries: 5
+ - assert:
+ that:
+ - iam_user_update is changed
+ - iam_user_update.iam_user.user.user_name == test_user3
+
+ # ===========================================
+ # Test Managed Policy management
+ #
+ # Use a couple of benign policies for testing:
+ # - AWSDenyAll
+ # - ServiceQuotasReadOnlyAccess
+ #
+ - name: attach managed policy to user (check mode)
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ managed_policy:
+ - arn:aws:iam::aws:policy/AWSDenyAll
+ register: iam_user
+ check_mode: yes
+ - name: assert that the user is changed
+ assert:
+ that:
+ - iam_user is changed
+
+ - name: attach managed policy to user
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ managed_policy:
+ - arn:aws:iam::aws:policy/AWSDenyAll
+ register: iam_user
+ - name: assert that the user is changed
+ assert:
+ that:
+ - iam_user is changed
+
+ - name: ensure managed policy is attached to user (no change - check mode)
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ managed_policy:
+ - arn:aws:iam::aws:policy/AWSDenyAll
+ register: iam_user
+ check_mode: yes
+ - name: assert that the user hasn't changed
+ assert:
+ that:
+ - iam_user is not changed
+
+ - name: ensure managed policy is attached to user (no change)
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ managed_policy:
+ - arn:aws:iam::aws:policy/AWSDenyAll
+ register: iam_user
+ - name: assert that the user hasn't changed
+ assert:
+ that:
+ - iam_user is not changed
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: attach different managed policy to user (check mode)
+ check_mode: yes
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ managed_policy:
+ - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess
+ purge_policy: no
+ register: iam_user
+ - name: assert that the user changed
+ assert:
+ that:
+ - iam_user is changed
+
+ - name: attach different managed policy to user
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ managed_policy:
+ - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess
+ purge_policy: no
+ register: iam_user
+ - name: assert that the user changed
+ assert:
+ that:
+ - iam_user is changed
+
+ - name: attach different managed policy to user (no change - check mode)
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ managed_policy:
+ - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess
+ purge_policy: no
+ register: iam_user
+ check_mode: yes
+ - name: assert that the user hasn't changed
+ assert:
+ that:
+ - iam_user is not changed
+
+ - name: Check first policy wasn't purged
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ managed_policy:
+ - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess
+ - arn:aws:iam::aws:policy/AWSDenyAll
+ purge_policy: no
+ register: iam_user
+ - name: assert that the user hasn't changed
+ assert:
+ that:
+ - iam_user is not changed
+
+ - name: Check that managed policy order doesn't matter
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ managed_policy:
+ - arn:aws:iam::aws:policy/AWSDenyAll
+ - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess
+ purge_policy: no
+ register: iam_user
+ - name: assert that the user hasn't changed
+ assert:
+ that:
+ - iam_user is not changed
+
+ - name: Check that policy doesn't require full ARN path
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ managed_policy:
+ - AWSDenyAll
+ - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess
+ purge_policy: no
+ register: iam_user
+ - name: assert that the user hasn't changed
+ assert:
+ that:
+ - iam_user is not changed
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Remove one of the managed policies - with purge (check mode)
+ check_mode: yes
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ managed_policy:
+ - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess
+ purge_policy: yes
+ register: iam_user
+ - name: assert that the user changed
+ assert:
+ that:
+ - iam_user is changed
+
+ - name: Remove one of the managed policies - with purge
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ managed_policy:
+ - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess
+ purge_policy: yes
+ register: iam_user
+ - name: assert that the user changed
+ assert:
+ that:
+ - iam_user is changed
+
+ - name: Remove one of the managed policies - with purge (no change - check mode)
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ managed_policy:
+ - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess
+ purge_policy: yes
+ register: iam_user
+ check_mode: yes
+ - name: assert that the user hasn't changed
+ assert:
+ that:
+ - iam_user is not changed
+
+ - name: Remove one of the managed policies - with purge (no change)
+ iam_user:
+ name: '{{ test_user }}'
+ state: present
+ managed_policy:
+ - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess
+ purge_policy: yes
+ register: iam_user
+ - name: assert that the user hasn't changed
+ assert:
+ that:
+ - iam_user is not changed
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: ensure group exists
+ iam_group:
+ name: '{{ test_group }}'
+ users:
+ - '{{ test_user }}'
+ state: present
+ register: iam_group
+ - assert:
+ that:
+ - iam_group.changed
+ - iam_group.iam_group.users
+
+ - name: get info on IAM user(s) in group
+ iam_user_info:
+ group: '{{ test_group }}'
+ name: '{{ test_user }}'
+ register: iam_user_info
+ - assert:
+ that:
+ - iam_user_info.iam_users | length == 1
+ - iam_user_info.iam_users[0].arn == test_iam_user.arn
+ - iam_user_info.iam_users[0].create_date == test_iam_user.create_date
+ - iam_user_info.iam_users[0].path == test_iam_user.path
+ - iam_user_info.iam_users[0].user_id == test_iam_user.user_id
+ - iam_user_info.iam_users[0].user_name == test_iam_user.user_name
+ - iam_user_info.iam_users[0].tags | length == 0
+
+ - name: remove user from group
+ iam_group:
+ name: '{{ test_group }}'
+ purge_users: true
+ users: []
+ state: present
+ register: iam_group
+ - name: get info on IAM user(s) after removing from group
+ iam_user_info:
+ group: '{{ test_group }}'
+ name: '{{ test_user }}'
+ register: iam_user_info
+ - name: assert empty list of users for group are returned
+ assert:
+ that:
+ - iam_user_info.iam_users | length == 0
+
+ - name: ensure ansible users exist
+ iam_user:
+ name: '{{ item }}'
+ state: present
+ with_items: '{{ test_users }}'
+ - name: get info on multiple IAM user(s)
+ iam_user_info:
+ register: iam_user_info
+ - assert:
+ that:
+ - iam_user_info.iam_users | length != 0
+
+ - name: ensure multiple user group exists with single user
+ iam_group:
+ name: '{{ test_group }}'
+ users:
+ - '{{ test_user }}'
+ state: present
+ register: iam_group
+ - name: get info on IAM user(s) in group
+ iam_user_info:
+ group: '{{ test_group }}'
+ register: iam_user_info
+ - assert:
+ that:
+ - iam_user_info.iam_users | length == 1
+
+ - name: add all users to group
+ iam_group:
+ name: '{{ test_group }}'
+ users: '{{ test_users }}'
+ state: present
+ register: iam_group
+ - name: get info on multiple IAM user(s) in group
+ iam_user_info:
+ group: '{{ test_group }}'
+ register: iam_user_info
+ - assert:
+ that:
+ - iam_user_info.iam_users | length == test_users | length
+
+ - name: purge users from group
+ iam_group:
+ name: '{{ test_group }}'
+ purge_users: true
+ users: []
+ state: present
+ register: iam_group
+ - name: ensure info is empty for empty group
+ iam_user_info:
+ group: '{{ test_group }}'
+ register: iam_user_info
+ - assert:
+ that:
+ - iam_user_info.iam_users | length == 0
+
+ - name: get info on IAM user(s) after removing from group
+ iam_user_info:
+ group: '{{ test_group }}'
+ register: iam_user_info
+ - name: assert empty list of users for group are returned
+ assert:
+ that:
+ - iam_user_info.iam_users | length == 0
+
+ - name: remove group
+ iam_group:
+ name: '{{ test_group }}'
+ state: absent
+ register: iam_group
+ - name: assert that group was removed
+ assert:
+ that:
+ - iam_group.changed
+ - iam_group
+
+ - name: Test remove group again (idempotency)
+ iam_group:
+ name: '{{ test_group }}'
+ state: absent
+ register: iam_group
+ - name: assert that group remove is not changed
+ assert:
+ that:
+ - not iam_group.changed
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Remove user with attached policy (check mode)
+ iam_user:
+ name: '{{ test_user }}'
+ state: absent
+ register: iam_user
+ check_mode: yes
+ - name: get info on IAM user(s) after deleting in check mode
+ iam_user_info:
+ name: '{{ test_user }}'
+ register: iam_user_info
+ - name: Assert user was not removed in check mode
+ assert:
+ that:
+ - iam_user.changed
+ - iam_user_info.iam_users | length == 1
+
+ - name: Remove user with attached policy
+ iam_user:
+ name: '{{ test_user }}'
+ state: absent
+ register: iam_user
+ - name: get info on IAM user(s) after deleting
+ iam_user_info:
+ name: '{{ test_user }}'
+ register: iam_user_info
+ - name: Assert user was removed
+ assert:
+ that:
+ - iam_user.changed
+ - iam_user_info.iam_users | length == 0
+
+ - name: Remove user with attached policy (idempotent - check mode)
+ iam_user:
+ name: '{{ test_user }}'
+ state: absent
+ register: iam_user
+ check_mode: yes
+ - name: Assert no change
+ assert:
+ that:
+ - not iam_user.changed
+
+ - name: Remove user with attached policy (idempotent)
+ iam_user:
+ name: '{{ test_user }}'
+ state: absent
+ register: iam_user
+ - name: Assert no change
+ assert:
+ that:
+ - not iam_user.changed
+
+ # ------------------------------------------------------------------------------------------
+ ## Test user password removal
+ - name: Delete IAM password (check mode)
+ iam_user:
+ name: '{{ test_user3 }}'
+ remove_password: yes
+ state: present
+ register: iam_user_password_removal
+ check_mode: yes
+ - assert:
+ that:
+ - iam_user_password_removal is changed
+
+ - name: Delete IAM password
+ iam_user:
+ name: '{{ test_user3 }}'
+ remove_password: yes
+ state: present
+ register: iam_user_password_removal
+ - assert:
+ that:
+ - iam_user_password_removal is changed
+
+ - name: Delete IAM password again (check mode)
+ iam_user:
+ name: '{{ test_user3 }}'
+ remove_password: yes
+ state: present
+ register: iam_user_password_removal
+ check_mode: yes
+ - assert:
+ that:
+ - iam_user_password_removal is not changed
+
+ - name: Delete IAM password again
+ iam_user:
+ name: '{{ test_user3 }}'
+ remove_password: yes
+ state: present
+ register: iam_user_password_removal
+ - assert:
+ that:
+ - iam_user_password_removal is not changed
+
+ always:
+ - name: remove group
+ iam_group:
+ name: '{{ test_group }}'
+ state: absent
+ ignore_errors: yes
+ - name: remove ansible users
+ iam_user:
+ name: '{{ item }}'
+ state: absent
+ with_items: '{{ test_users }}'
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/aliases b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/aliases
new file mode 100644
index 000000000..66c3ccc82
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/aliases
@@ -0,0 +1,3 @@
+time=45m
+
+cloud/aws
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/create_environment_script.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/create_environment_script.yml
new file mode 100644
index 000000000..bfa5f4bb4
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/create_environment_script.yml
@@ -0,0 +1,9 @@
+---
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: 'Write access key to file we can source'
+ copy:
+ dest: '../access_key.sh'
+ content: 'export MY_ACCESS_KEY="{{ aws_access_key }}"'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/create_inventory_config.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/create_inventory_config.yml
new file mode 100644
index 000000000..7e4c31068
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/create_inventory_config.yml
@@ -0,0 +1,11 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ vars:
+ template_name: "../templates/{{ template | default('inventory.yml.j2') }}"
+ tasks:
+ - name: write inventory config file
+ copy:
+ dest: ../test.aws_ec2.yml
+ content: "{{ lookup('template', template_name) }}"
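+      # For illustration only: the templates rendered above produce aws_ec2 inventory
+      # configs; a minimal example of such a config (an assumed sketch, the real
+      # files live under ../templates/) looks roughly like:
+      #   plugin: amazon.aws.aws_ec2
+      #   regions:
+      #     - '{{ aws_region }}'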
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/empty_inventory_config.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/empty_inventory_config.yml
new file mode 100644
index 000000000..f67fff1a9
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/empty_inventory_config.yml
@@ -0,0 +1,9 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: write inventory config file
+ copy:
+ dest: ../test.aws_ec2.yml
+ content: ""
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/populate_cache.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/populate_cache.yml
new file mode 100644
index 000000000..929608c72
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/populate_cache.yml
@@ -0,0 +1,55 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ environment: "{{ ansible_test.environment }}"
+ tasks:
+
+ - module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+
+ # Create VPC, subnet, security group, and find image_id to create instance
+
+ - include_tasks: setup.yml
+# - pause:
+# seconds: 240
+
+ - name: assert group was populated with inventory but is empty
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "not groups.aws_ec2"
+
+ # Create new host, add it to inventory and then terminate it without updating the cache
+
+ - name: create a new host
+ ec2_instance:
+ image_id: '{{ image_id }}'
+ name: '{{ resource_prefix }}'
+ instance_type: t2.micro
+ wait: no
+ security_groups: '{{ sg_id }}'
+ vpc_subnet_id: '{{ subnet_id }}'
+ register: setup_instance
+
+ - meta: refresh_inventory
+
+ always:
+
+ - name: remove setup ec2 instance
+ ec2_instance:
+ instance_type: t2.micro
+ instance_ids: '{{ setup_instance.instance_ids }}'
+ state: absent
+ name: '{{ resource_prefix }}'
+ security_groups: '{{ sg_id }}'
+ vpc_subnet_id: '{{ subnet_id }}'
+ ignore_errors: yes
+ when: setup_instance is defined
+
+ - include_tasks: tear_down.yml
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/setup.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/setup.yml
new file mode 100644
index 000000000..abbb61997
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/setup.yml
@@ -0,0 +1,52 @@
+- name: get image ID to create an instance
+ ec2_ami_info:
+ filters:
+ architecture: x86_64
+ # CentOS Community Platform Engineering (CPE)
+ owner-id: '125523088429'
+ virtualization-type: hvm
+ root-device-type: ebs
+ name: 'Fedora-Cloud-Base-34-1.2.x86_64*'
+ register: fedora_images
+
+- set_fact:
+ image_id: '{{ fedora_images.images.0.image_id }}'
+ vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/16'
+ subnet_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/24'
+
+- name: create a VPC to work in
+ ec2_vpc_net:
+ cidr_block: '{{ vpc_cidr }}'
+ state: present
+ name: '{{ resource_prefix }}_setup'
+ resource_tags:
+ Name: '{{ resource_prefix }}_setup'
+ register: setup_vpc
+
+- set_fact:
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+
+- name: create a subnet to use for creating an ec2 instance
+ ec2_vpc_subnet:
+ az: '{{ aws_region }}a'
+ tags: '{{ resource_prefix }}_setup'
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ cidr: '{{ subnet_cidr }}'
+ state: present
+ resource_tags:
+ Name: '{{ resource_prefix }}_setup'
+ register: setup_subnet
+
+- set_fact:
+ subnet_id: '{{ setup_subnet.subnet.id }}'
+
+- name: create a security group to use for creating an ec2 instance
+ ec2_group:
+ name: '{{ resource_prefix }}_setup'
+ description: 'created by Ansible integration tests'
+ state: present
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ register: setup_sg
+
+- set_fact:
+ sg_id: '{{ setup_sg.group_id }}'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/tear_down.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/tear_down.yml
new file mode 100644
index 000000000..c782421d4
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/tear_down.yml
@@ -0,0 +1,31 @@
+- set_fact:
+ vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/16'
+ subnet_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/24'
+
+- name: remove setup security group
+ ec2_group:
+ name: '{{ resource_prefix }}_setup'
+ description: 'created by Ansible integration tests'
+ state: absent
+ vpc_id: '{{ vpc_id }}'
+ ignore_errors: yes
+
+- name: remove setup subnet
+ ec2_vpc_subnet:
+ az: '{{ aws_region }}a'
+ tags: '{{ resource_prefix }}_setup'
+ vpc_id: '{{ vpc_id }}'
+ cidr: '{{ subnet_cidr }}'
+ state: absent
+ resource_tags:
+ Name: '{{ resource_prefix }}_setup'
+ ignore_errors: yes
+
+- name: remove setup VPC
+ ec2_vpc_net:
+ cidr_block: '{{ vpc_cidr }}'
+ state: absent
+ name: '{{ resource_prefix }}_setup'
+ resource_tags:
+ Name: '{{ resource_prefix }}_setup'
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml
new file mode 100644
index 000000000..cc1b9a5a5
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml
@@ -0,0 +1,9 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: assert inventory was not populated by aws_ec2 inventory plugin
+ assert:
+ that:
+ - "'aws_ec2' not in groups"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_inventory_cache.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_inventory_cache.yml
new file mode 100644
index 000000000..d83cb0bfe
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_inventory_cache.yml
@@ -0,0 +1,18 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: assert cache was used to populate inventory
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "groups.aws_ec2 | length == 1"
+
+ - meta: refresh_inventory
+
+ - name: assert refresh_inventory updated the cache
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "not groups.aws_ec2"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory.yml
new file mode 100644
index 000000000..01627659b
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory.yml
@@ -0,0 +1,78 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ environment: "{{ ansible_test.environment }}"
+ tasks:
+
+ - module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+
+ block:
+
+ # Create VPC, subnet, security group, and find image_id to create instance
+
+ - include_tasks: setup.yml
+
+ - name: assert group was populated with inventory but is empty
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "not groups.aws_ec2"
+
+ # Create new host, refresh inventory, remove host, refresh inventory
+
+ - name: create a new host
+ ec2_instance:
+ image_id: '{{ image_id }}'
+ name: '{{ resource_prefix }}'
+ instance_type: t2.micro
+ security_groups: '{{ sg_id }}'
+ vpc_subnet_id: '{{ subnet_id }}'
+ wait: no
+ register: setup_instance
+
+ - meta: refresh_inventory
+
+ - name: assert group was populated with inventory and is no longer empty
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "groups.aws_ec2 | length == 1"
+        - groups.aws_ec2.0 == resource_prefix
+
+ - name: remove setup ec2 instance
+ ec2_instance:
+ instance_type: t2.micro
+ instance_ids: '{{ setup_instance.instance_ids }}'
+ state: absent
+ name: '{{ resource_prefix }}'
+ security_groups: '{{ sg_id }}'
+ vpc_subnet_id: '{{ subnet_id }}'
+
+ - meta: refresh_inventory
+
+ - name: assert group was populated with inventory but is empty
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "not groups.aws_ec2"
+
+ always:
+
+ - name: remove setup ec2 instance
+ ec2_instance:
+ instance_type: t2.micro
+ instance_ids: '{{ setup_instance.instance_ids }}'
+ state: absent
+ name: '{{ resource_prefix }}'
+ security_groups: '{{ sg_id }}'
+ vpc_subnet_id: '{{ subnet_id }}'
+ ignore_errors: yes
+ when: setup_instance is defined
+
+ - include_tasks: tear_down.yml
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_concatenation.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_concatenation.yml
new file mode 100644
index 000000000..b155b7ab3
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_concatenation.yml
@@ -0,0 +1,56 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ environment: "{{ ansible_test.environment }}"
+ tasks:
+
+ - module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+
+ # Create VPC, subnet, security group, and find image_id to create instance
+ - include_tasks: setup.yml
+
+ # Create new host, refresh inventory
+ - name: create a new host
+ ec2_instance:
+ image_id: '{{ image_id }}'
+ name: '{{ resource_prefix }}'
+ tags:
+ OtherTag: value
+ instance_type: t2.micro
+ security_groups: '{{ sg_id }}'
+ vpc_subnet_id: '{{ subnet_id }}'
+ wait: no
+ register: setup_instance
+
+ - meta: refresh_inventory
+
+ - name: register the current hostname
+ set_fact:
+ expected_hostname: "value_{{ resource_prefix }}"
+
+ - name: "Ensure we've got a hostvars entry for the new host"
+ assert:
+ that:
+ - expected_hostname in hostvars
+
+ always:
+
+ - name: remove setup ec2 instance
+ ec2_instance:
+ instance_type: t2.micro
+ instance_ids: '{{ setup_instance.instance_ids }}'
+ state: absent
+ name: '{{ resource_prefix }}'
+ security_groups: "{{ sg_id }}"
+ vpc_subnet_id: "{{ subnet_id }}"
+ ignore_errors: yes
+ when: setup_instance is defined
+
+ - include_tasks: tear_down.yml
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml
new file mode 100644
index 000000000..f75dafac8
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml
@@ -0,0 +1,69 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ environment: "{{ ansible_test.environment }}"
+ tasks:
+
+ - module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+
+ # Create VPC, subnet, security group, and find image_id to create instance
+
+ - include_tasks: setup.yml
+
+ # Create new host, refresh inventory
+
+ - name: create a new host
+ ec2_instance:
+ image_id: '{{ image_id }}'
+ name: '{{ resource_prefix }}'
+ tags:
+ tag1: value1
+ tag2: value2
+ instance_type: t2.micro
+ security_groups: '{{ sg_id }}'
+ vpc_subnet_id: '{{ subnet_id }}'
+ wait: no
+ register: setup_instance
+
+ - meta: refresh_inventory
+
+ - name: register the keyed sg group name
+ set_fact:
+ sg_group_name: "security_groups_{{ sg_id | replace('-', '_') }}"
+
+ - name: register one of the keyed tag groups name
+ set_fact:
+ tag_group_name: "tag_Name_{{ resource_prefix | replace('-', '_') }}"
+
+ - name: assert the keyed groups and groups from constructed config were added to inventory and composite var added to hostvars
+ assert:
+ that:
+ # There are 9 groups: all, ungrouped, aws_ec2, sg keyed group, 3 tag keyed group (one per tag), arch keyed group, constructed group
+ - "groups | length == 9"
+ - "groups[tag_group_name] | length == 1"
+ - "groups[sg_group_name] | length == 1"
+ - "groups.arch_x86_64 | length == 1"
+ - "groups.tag_with_name_key | length == 1"
+ - vars.hostvars[groups.aws_ec2.0]['test_compose_var_sum'] == 'value1value2'
+
+ always:
+
+ - name: remove setup ec2 instance
+ ec2_instance:
+ instance_type: t2.micro
+ instance_ids: '{{ setup_instance.instance_ids }}'
+ state: absent
+ name: '{{ resource_prefix }}'
+ security_groups: "{{ sg_id }}"
+ vpc_subnet_id: "{{ subnet_id }}"
+ ignore_errors: yes
+ when: setup_instance is defined
+
+ - include_tasks: tear_down.yml
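
The nine groups counted above break down roughly as follows; the security group and Name group names below are placeholders built the same way the sg_group_name and tag_group_name facts are in the playbook:

all:                          # built-in
ungrouped:                    # built-in
aws_ec2:                      # base group added by the plugin
security_groups_sg_0123abcd:  # keyed group from security_groups (placeholder id)
tag_Name_<resource_prefix>:   # keyed group from the Name tag set by ec2_instance
tag_tag1_value1:              # keyed group from tag1
tag_tag2_value2:              # keyed group from tag2
arch_x86_64:                  # keyed group from architecture
tag_with_name_key:            # constructed group ('Name' in (tags | list))
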
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostnames_using_tags.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostnames_using_tags.yml
new file mode 100644
index 000000000..dfae16f05
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostnames_using_tags.yml
@@ -0,0 +1,62 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: false
+ environment: "{{ ansible_test.environment }}"
+ tasks:
+
+ - module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+
+ # Create VPC, subnet, security group, and find image_id to create instance
+ - include_tasks: setup.yml
+
+ # Create new host
+ - name: create a new host
+ ec2_instance:
+ image_id: '{{ image_id }}'
+ name: '{{ resource_prefix }}'
+ tags:
+ Tag1: Test1
+ Tag2: Test2
+ instance_type: t2.micro
+ security_groups: '{{ sg_id }}'
+ vpc_subnet_id: '{{ subnet_id }}'
+ wait: false
+ register: setup_instance
+
+ # refresh inventory
+ - meta: refresh_inventory
+
+ - debug:
+ var: groups
+
+ - name: assert groups and hostvars were populated with inventory
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - groups['aws_ec2'] | length == 1
+ - "'Tag1_Test1' in groups['aws_ec2']"
+ - "'Tag2_Test2' not in groups['aws_ec2']"
+ - "'Tag1_Test1' in hostvars"
+ - "'Tag2_Test2' not in hostvars"
+
+ always:
+
+ - name: remove ec2 instance
+ ec2_instance:
+ instance_type: t2.micro
+ instance_ids: '{{ setup_instance.instance_ids }}'
+ state: absent
+ name: '{{ resource_prefix }}'
+ security_groups: "{{ sg_id }}"
+ vpc_subnet_id: "{{ subnet_id }}"
+ ignore_errors: true
+ when: setup_instance is defined
+
+ - include_tasks: tear_down.yml
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostnames_using_tags_classic.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostnames_using_tags_classic.yml
new file mode 100644
index 000000000..576b53ab5
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostnames_using_tags_classic.yml
@@ -0,0 +1,62 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: false
+ environment: "{{ ansible_test.environment }}"
+ tasks:
+
+ - module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+
+ # Create VPC, subnet, security group, and find image_id to create instance
+ - include_tasks: setup.yml
+
+ # Create new host
+ - name: create a new host
+ ec2_instance:
+ image_id: '{{ image_id }}'
+ name: '{{ resource_prefix }}'
+ tags:
+ Tag1: Test1
+ Tag2: Test2
+ instance_type: t2.micro
+ security_groups: '{{ sg_id }}'
+ vpc_subnet_id: '{{ subnet_id }}'
+ wait: false
+ register: setup_instance
+
+ # refresh inventory
+ - meta: refresh_inventory
+
+ - debug:
+ var: groups
+
+ - name: assert groups and hostvars were populated with inventory
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - groups['aws_ec2'] | length == 1
+ - "'Test1' in groups['aws_ec2']"
+ - "'Test2' not in groups['aws_ec2']"
+ - "'Test1' in hostvars"
+ - "'Test2' not in hostvars"
+
+ always:
+
+ - name: remove ec2 instance
+ ec2_instance:
+ instance_type: t2.micro
+ instance_ids: '{{ setup_instance.instance_ids }}'
+ state: absent
+ name: '{{ resource_prefix }}'
+ security_groups: "{{ sg_id }}"
+ vpc_subnet_id: "{{ subnet_id }}"
+ ignore_errors: true
+ when: setup_instance is defined
+
+ - include_tasks: tear_down.yml
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostvars_prefix_suffix.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostvars_prefix_suffix.yml
new file mode 100644
index 000000000..7d6e8c5d9
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostvars_prefix_suffix.yml
@@ -0,0 +1,65 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ environment: "{{ ansible_test.environment }}"
+ tasks:
+
+ - module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+
+ # Create VPC, subnet, security group, and find image_id to create instance
+
+ - include_tasks: setup.yml
+
+ # Create new host, refresh inventory
+ - name: create a new host
+ ec2_instance:
+ image_id: '{{ image_id }}'
+ name: '{{ resource_prefix }}_1'
+ tags:
+ tag_instance1: foo
+ instance_type: t2.micro
+ security_groups: '{{ sg_id }}'
+ vpc_subnet_id: '{{ subnet_id }}'
+ wait: no
+ register: setup_instance_1
+
+ - meta: refresh_inventory
+
+ - name: assert the hostvars are defined with prefix and/or suffix
+ assert:
+ that:
+ - "hostvars['{{ resource_prefix }}_1'].{{ vars_prefix }}instance_type{{ vars_suffix }} == 't2.micro'"
+ - "'{{ vars_prefix }}instance_type{{ vars_suffix }}' in hostvars['{{ resource_prefix }}_1']"
+ - "'{{ vars_prefix }}image_id{{ vars_suffix }}' in hostvars['{{ resource_prefix }}_1']"
+ - "'{{ vars_prefix }}instance_id{{ vars_suffix }}' in hostvars['{{ resource_prefix }}_1']"
+ - "'instance_type' not in hostvars['{{ resource_prefix }}_1']"
+ - "'image_id' not in hostvars['{{ resource_prefix }}_1']"
+ - "'instance_id' not in hostvars['{{ resource_prefix }}_1']"
+ - "'ansible_diff_mode' in hostvars['{{ resource_prefix }}_1']"
+ - "'ansible_forks' in hostvars['{{ resource_prefix }}_1']"
+ - "'ansible_version' in hostvars['{{ resource_prefix }}_1']"
+ vars:
+ vars_prefix: "{{ hostvars_prefix | default('') }}"
+ vars_suffix: "{{ hostvars_suffix | default('') }}"
+
+ always:
+
+ - name: remove setup ec2 instance
+ ec2_instance:
+ instance_type: t2.micro
+ instance_ids: '{{ setup_instance_1.instance_ids }}'
+ state: absent
+ name: '{{ resource_prefix }}_1'
+ security_groups: "{{ sg_id }}"
+ vpc_subnet_id: "{{ subnet_id }}"
+ ignore_errors: yes
+ when: setup_instance_1 is defined
+
+ - include_tasks: tear_down.yml
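
For reference, the three prefix/suffix combinations that runme.sh passes to this playbook rename a hostvar such as instance_type as sketched below; Ansible's own ansible_* variables keep their names, which is what the last three assertions check:

# hostvars of <resource_prefix>_1 under each invocation
aws_ec2_instance_type: t2.micro   # -e hostvars_prefix='aws_ec2_'
instance_type_aws_ec2: t2.micro   # -e hostvars_suffix='_aws_ec2'
aws_instance_type_ec2: t2.micro   # -e hostvars_prefix='aws_' -e hostvars_suffix='_ec2'
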
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_include_or_exclude_filters.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_include_or_exclude_filters.yml
new file mode 100644
index 000000000..b456565ae
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_include_or_exclude_filters.yml
@@ -0,0 +1,103 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ environment: "{{ ansible_test.environment }}"
+ tasks:
+
+ - module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+
+ # Create VPC, subnet, security group, and find image_id to create instance
+
+ - include_tasks: setup.yml
+
+ # Create new host, refresh inventory
+ - name: create a new host (1/3)
+ ec2_instance:
+ image_id: '{{ image_id }}'
+ name: '{{ resource_prefix }}_1'
+ tags:
+ tag_instance1: foo
+ instance_type: t2.micro
+ security_groups: '{{ sg_id }}'
+ vpc_subnet_id: '{{ subnet_id }}'
+ wait: no
+ register: setup_instance_1
+
+ - name: create a new host (2/3)
+ ec2_instance:
+ image_id: '{{ image_id }}'
+ name: '{{ resource_prefix }}_2'
+ tags:
+ tag_instance2: bar
+ instance_type: t2.micro
+ security_groups: '{{ sg_id }}'
+ vpc_subnet_id: '{{ subnet_id }}'
+ wait: no
+ register: setup_instance_2
+
+ - name: create a new host (3/3)
+ ec2_instance:
+ image_id: '{{ image_id }}'
+ name: '{{ resource_prefix }}_3'
+ tags:
+ tag_instance2: bar
+ instance_type: t2.micro
+ security_groups: '{{ sg_id }}'
+ vpc_subnet_id: '{{ subnet_id }}'
+ wait: no
+ register: setup_instance_3
+
+ - meta: refresh_inventory
+
+    - name: assert the include and exclude filters control which hosts end up in inventory
+ assert:
+ that:
+          # Only two hosts remain: _1 matches the base filters, _2 is added by include_filters, and _3 is dropped by exclude_filters
+ - "groups['all'] | length == 2"
+ - "'{{ resource_prefix }}_1' in groups['all']"
+ - "'{{ resource_prefix }}_2' in groups['all']"
+ - "not ('{{ resource_prefix }}_3' in groups['all'])"
+
+ always:
+
+ - name: remove setup ec2 instance (1/3)
+ ec2_instance:
+ instance_type: t2.micro
+ instance_ids: '{{ setup_instance_1.instance_ids }}'
+ state: absent
+ name: '{{ resource_prefix }}_1'
+ security_groups: "{{ sg_id }}"
+ vpc_subnet_id: "{{ subnet_id }}"
+ ignore_errors: yes
+ when: setup_instance_1 is defined
+
+ - name: remove setup ec2 instance (2/3)
+ ec2_instance:
+ instance_type: t2.micro
+ instance_ids: '{{ setup_instance_2.instance_ids }}'
+ state: absent
+ name: '{{ resource_prefix }}_2'
+ security_groups: "{{ sg_id }}"
+ vpc_subnet_id: "{{ subnet_id }}"
+ ignore_errors: yes
+ when: setup_instance_2 is defined
+
+ - name: remove setup ec2 instance (3/3)
+ ec2_instance:
+ instance_type: t2.micro
+ instance_ids: '{{ setup_instance_3.instance_ids }}'
+ state: absent
+ name: '{{ resource_prefix }}_3'
+ security_groups: "{{ sg_id }}"
+ vpc_subnet_id: "{{ subnet_id }}"
+ ignore_errors: yes
+ when: setup_instance_3 is defined
+
+ - include_tasks: tear_down.yml
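
The three hosts created above fall through the filter chain of inventory_with_include_or_exclude_filters.yml.j2 (later in this patch) as sketched below, which is what the groups['all'] assertion verifies:

filters:           # base match: <resource_prefix>_1 and <resource_prefix>_3
include_filters:   # additionally pulls in <resource_prefix>_2 (and a non-existent _4)
exclude_filters:   # finally drops <resource_prefix>_3 (and _4)
# net result: only <resource_prefix>_1 and <resource_prefix>_2 end up in inventory
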
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_literal_string.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_literal_string.yml
new file mode 100644
index 000000000..8ba065eaf
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_literal_string.yml
@@ -0,0 +1,56 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ environment: "{{ ansible_test.environment }}"
+ tasks:
+
+ - module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+
+ # Create VPC, subnet, security group, and find image_id to create instance
+ - include_tasks: setup.yml
+
+ # Create new host, refresh inventory
+ - name: create a new host
+ ec2_instance:
+ image_id: '{{ image_id }}'
+ name: '{{ resource_prefix }}'
+ tags:
+ OtherTag: value
+ instance_type: t2.micro
+ security_groups: '{{ sg_id }}'
+ vpc_subnet_id: '{{ subnet_id }}'
+ wait: no
+ register: setup_instance
+
+ - meta: refresh_inventory
+
+ - name: register the current hostname
+ set_fact:
+ expected_hostname: "aws-{{ resource_prefix }}"
+
+ - name: "Ensure we've got a hostvars entry for the new host"
+ assert:
+ that:
+ - expected_hostname in hostvars
+
+ always:
+
+ - name: remove setup ec2 instance
+ ec2_instance:
+ instance_type: t2.micro
+ instance_ids: '{{ setup_instance.instance_ids }}'
+ state: absent
+ name: '{{ resource_prefix }}'
+ security_groups: "{{ sg_id }}"
+ vpc_subnet_id: "{{ subnet_id }}"
+ ignore_errors: yes
+ when: setup_instance is defined
+
+ - include_tasks: tear_down.yml
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_use_contrib_script_keys.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_use_contrib_script_keys.yml
new file mode 100644
index 000000000..6a4ef5b2a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_use_contrib_script_keys.yml
@@ -0,0 +1,57 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ environment: "{{ ansible_test.environment }}"
+ tasks:
+
+ - module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+
+ # Create VPC, subnet, security group, and find image_id to create instance
+ - include_tasks: setup.yml
+
+ # Create new host, refresh inventory
+ - name: create a new host
+ ec2_instance:
+ image_id: '{{ image_id }}'
+ name: '{{ resource_prefix }}:/aa'
+ tags:
+ OtherTag: value
+ instance_type: t2.micro
+ security_groups: '{{ sg_id }}'
+ vpc_subnet_id: '{{ subnet_id }}'
+ wait: no
+ register: setup_instance
+
+ - meta: refresh_inventory
+
+    - name: "register the current hostname, the : and / are replaced with _"
+ set_fact:
+ expected_hostname: "{{ resource_prefix }}__aa"
+
+ - name: "Ensure we've got a hostvars entry for the new host"
+ assert:
+ that:
+ - expected_hostname in hostvars
+ - hostvars[expected_hostname].ec2_tag_OtherTag == "value"
+
+ always:
+
+ - name: remove setup ec2 instance
+ ec2_instance:
+ instance_type: t2.micro
+ instance_ids: '{{ setup_instance.instance_ids }}'
+ state: absent
+ name: '{{ resource_prefix }}'
+ security_groups: "{{ sg_id }}"
+ vpc_subnet_id: "{{ subnet_id }}"
+ ignore_errors: yes
+ when: setup_instance is defined
+
+ - include_tasks: tear_down.yml
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_refresh_inventory.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_refresh_inventory.yml
new file mode 100644
index 000000000..46a0c3e3b
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_refresh_inventory.yml
@@ -0,0 +1,61 @@
+- name: Test updating inventory
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+ - name: assert group was populated with inventory but is empty
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "not groups.aws_ec2"
+
+ - name: create a new host
+ ec2_instance:
+ image_id: "{{ images[aws_region] }}"
+ exact_count: 1
+ name: '{{ resource_prefix }}'
+ instance_type: t2.micro
+ security_groups: '{{ setup_sg.security_groups }}'
+ vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
+ wait: no
+ register: setup_instance
+
+ - meta: refresh_inventory
+
+ - name: assert group was populated with inventory and is no longer empty
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "groups.aws_ec2 | length == 1"
+ - "groups.aws_ec2.0 == '{{ resource_prefix }}'"
+
+ - name: remove setup ec2 instance
+ ec2_instance:
+ instance_type: t2.micro
+ instance_ids: '{{ setup_instance.instance_ids }}'
+ state: absent
+ name: '{{ resource_prefix }}'
+ security_groups: '{{ setup_sg.security_groups }}'
+ vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
+
+ - meta: refresh_inventory
+
+ - name: assert group was populated with inventory but is empty
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "not groups.aws_ec2"
+
+ always:
+ - name: remove setup ec2 instance
+ ec2_instance:
+ instance_type: t2.micro
+ instance_ids: '{{ setup_instance.instance_ids }}'
+ state: absent
+ name: '{{ resource_prefix }}'
+ security_groups: '{{ setup_sg.security_groups }}'
+ vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/runme.sh b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/runme.sh
new file mode 100755
index 000000000..d2940cd2a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/runme.sh
@@ -0,0 +1,67 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# ensure test config is empty
+ansible-playbook playbooks/empty_inventory_config.yml "$@"
+
+export ANSIBLE_INVENTORY_ENABLED="amazon.aws.aws_ec2"
+
+# test with default inventory file
+ansible-playbook playbooks/test_invalid_aws_ec2_inventory_config.yml "$@"
+
+export ANSIBLE_INVENTORY=test.aws_ec2.yml
+
+# test empty inventory config
+ansible-playbook playbooks/test_invalid_aws_ec2_inventory_config.yml "$@"
+
+# generate inventory config and test using it
+ansible-playbook playbooks/create_inventory_config.yml "$@"
+ansible-playbook playbooks/test_populating_inventory.yml "$@"
+
+# generate inventory with access_key provided through a templated variable
+ansible-playbook playbooks/create_environment_script.yml "$@"
+source access_key.sh
+ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_template.yml.j2'" "$@"
+ansible-playbook playbooks/test_populating_inventory.yml "$@"
+
+# generate inventory config with caching and test using it
+ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_cache.yml.j2'" "$@"
+ansible-playbook playbooks/populate_cache.yml "$@"
+ansible-playbook playbooks/test_inventory_cache.yml "$@"
+
+# remove inventory cache
+rm -r aws_ec2_cache_dir/
+
+# generate inventory config with constructed features and test using it
+ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_constructed.yml.j2'" "$@"
+ansible-playbook playbooks/test_populating_inventory_with_constructed.yml "$@"
+ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_concatenation.yml.j2'" "$@"
+ansible-playbook playbooks/test_populating_inventory_with_concatenation.yml "$@"
+ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_literal_string.yml.j2'" "$@"
+ansible-playbook playbooks/test_populating_inventory_with_literal_string.yml "$@"
+ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_hostnames_using_tags_classic.yml.j2'" "$@"
+ansible-playbook playbooks/test_populating_inventory_with_hostnames_using_tags_classic.yml "$@"
+ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_hostnames_using_tags.yml.j2'" "$@"
+ansible-playbook playbooks/test_populating_inventory_with_hostnames_using_tags.yml "$@"
+
+# generate inventory config with include_filters and exclude_filters and test using it
+ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_include_or_exclude_filters.yml.j2'" "$@"
+ansible-playbook playbooks/test_populating_inventory_with_include_or_exclude_filters.yml "$@"
+
+# generate inventory config with hostvars_prefix
+ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_hostvars_prefix_suffix.yml.j2'" -e "hostvars_prefix='aws_ec2_'" "$@"
+ansible-playbook playbooks/test_populating_inventory_with_hostvars_prefix_suffix.yml -e "hostvars_prefix='aws_ec2_'" "$@"
+# generate inventory config with hostvars_suffix
+ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_hostvars_prefix_suffix.yml.j2'" -e "hostvars_suffix='_aws_ec2'" "$@"
+ansible-playbook playbooks/test_populating_inventory_with_hostvars_prefix_suffix.yml -e "hostvars_suffix='_aws_ec2'" "$@"
+# generate inventory config with hostvars_prefix and hostvars_suffix
+ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_hostvars_prefix_suffix.yml.j2'" -e "hostvars_prefix='aws_'" -e "hostvars_suffix='_ec2'" "$@"
+ansible-playbook playbooks/test_populating_inventory_with_hostvars_prefix_suffix.yml -e "hostvars_prefix='aws_'" -e "hostvars_suffix='_ec2'" "$@"
+
+# generate inventory config with use_contrib_script_compatible_sanitization and test using it
+ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_use_contrib_script_keys.yml.j2'" "$@"
+ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS=never ansible-playbook playbooks/test_populating_inventory_with_use_contrib_script_keys.yml "$@"
+
+# cleanup inventory config
+ansible-playbook playbooks/empty_inventory_config.yml "$@"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory.yml.j2
new file mode 100644
index 000000000..baac15be0
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory.yml.j2
@@ -0,0 +1,14 @@
+plugin: amazon.aws.aws_ec2
+aws_access_key_id: '{{ aws_access_key }}'
+aws_secret_access_key: '{{ aws_secret_key }}'
+{% if security_token | default(false) %}
+aws_security_token: '{{ security_token }}'
+{% endif %}
+regions:
+- '{{ aws_region }}'
+filters:
+ tag:Name:
+ - '{{ resource_prefix }}'
+hostnames:
+- tag:Name
+- dns-name
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_cache.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_cache.yml.j2
new file mode 100644
index 000000000..8fe4e33f4
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_cache.yml.j2
@@ -0,0 +1,14 @@
+plugin: amazon.aws.aws_ec2
+cache: True
+cache_plugin: jsonfile
+cache_connection: aws_ec2_cache_dir
+aws_access_key_id: '{{ aws_access_key }}'
+aws_secret_access_key: '{{ aws_secret_key }}'
+{% if security_token | default(false) %}
+aws_security_token: '{{ security_token }}'
+{% endif %}
+regions:
+- '{{ aws_region }}'
+filters:
+ tag:Name:
+ - '{{ resource_prefix }}'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_concatenation.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_concatenation.yml.j2
new file mode 100644
index 000000000..035b1d7ca
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_concatenation.yml.j2
@@ -0,0 +1,15 @@
+plugin: amazon.aws.aws_ec2
+aws_access_key_id: '{{ aws_access_key }}'
+aws_secret_access_key: '{{ aws_secret_key }}'
+{% if security_token | default(false) %}
+aws_security_token: '{{ security_token }}'
+{% endif %}
+regions:
+- '{{ aws_region }}'
+filters:
+ tag:Name:
+ - '{{ resource_prefix }}'
+hostnames:
+ - name: 'tag:Name'
+ separator: '_'
+ prefix: 'tag:OtherTag'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_constructed.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_constructed.yml.j2
new file mode 100644
index 000000000..a33f03e21
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_constructed.yml.j2
@@ -0,0 +1,22 @@
+plugin: amazon.aws.aws_ec2
+aws_access_key_id: '{{ aws_access_key }}'
+aws_secret_access_key: '{{ aws_secret_key }}'
+{% if security_token | default(false) %}
+aws_security_token: '{{ security_token }}'
+{% endif %}
+regions:
+- '{{ aws_region }}'
+filters:
+ tag:Name:
+ - '{{ resource_prefix }}'
+keyed_groups:
+- key: 'security_groups|map(attribute="group_id")'
+ prefix: security_groups
+- key: tags
+ prefix: tag
+- prefix: arch
+ key: architecture
+compose:
+ test_compose_var_sum: tags.tag1 + tags.tag2
+groups:
+ tag_with_name_key: '''Name'' in (tags | list)'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostnames_using_tags.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostnames_using_tags.yml.j2
new file mode 100644
index 000000000..2f7882a22
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostnames_using_tags.yml.j2
@@ -0,0 +1,21 @@
+plugin: amazon.aws.aws_ec2
+aws_access_key_id: '{{ aws_access_key }}'
+aws_secret_access_key: '{{ aws_secret_key }}'
+{% if security_token | default(false) %}
+aws_security_token: '{{ security_token }}'
+{% endif %}
+regions:
+- '{{ aws_region }}'
+keyed_groups:
+- prefix: tag
+ key: tags
+hostnames:
+# can also be specified using
+# - tag:Tag1,Tag2
+# or
+# - tag:Tag1
+# - tag:Tag2
+# or
+- tag:Tag1=Test1,Tag2=Test2
+compose:
+ ansible_host: private_ip_address
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostnames_using_tags_classic.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostnames_using_tags_classic.yml.j2
new file mode 100644
index 000000000..3138a4a2a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostnames_using_tags_classic.yml.j2
@@ -0,0 +1,21 @@
+plugin: amazon.aws.aws_ec2
+aws_access_key_id: '{{ aws_access_key }}'
+aws_secret_access_key: '{{ aws_secret_key }}'
+{% if security_token | default(false) %}
+aws_security_token: '{{ security_token }}'
+{% endif %}
+regions:
+- '{{ aws_region }}'
+keyed_groups:
+- prefix: tag
+ key: tags
+hostnames:
+# can also be specified using
+# - tag:Tag1,Tag2
+# or
+# - tag:Tag1=Test1,Tag2=Test2
+# or
+- tag:Tag1
+- tag:Tag2
+compose:
+ ansible_host: private_ip_address
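
Taken together with the previous template, the two tag-based hostname styles produce different names for the same instance (tagged Tag1: Test1 and Tag2: Test2); only the first hostname entry that resolves is used, which is why the companion playbooks assert that no Tag2-derived name appears. A sketch of the resulting hosts:

"Tag1_Test1":  # from tag:Tag1=Test1,Tag2=Test2 (previous template): key_value of the first matching tag
"Test1":       # from tag:Tag1 / tag:Tag2 (this template): bare value of the first matching tag
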
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostvars_prefix_suffix.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostvars_prefix_suffix.yml.j2
new file mode 100644
index 000000000..f4f12c632
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostvars_prefix_suffix.yml.j2
@@ -0,0 +1,19 @@
+plugin: amazon.aws.aws_ec2
+aws_access_key_id: '{{ aws_access_key }}'
+aws_secret_access_key: '{{ aws_secret_key }}'
+{% if security_token | default(false) %}
+aws_security_token: '{{ security_token }}'
+{% endif %}
+regions:
+- '{{ aws_region }}'
+filters:
+ tag:Name:
+ - '{{ resource_prefix }}_*'
+{% if hostvars_prefix | default(false) %}
+hostvars_prefix: '{{ hostvars_prefix }}'
+{% endif %}
+{% if hostvars_suffix | default(false) %}
+hostvars_suffix: '{{ hostvars_suffix }}'
+{% endif %}
+hostnames:
+- tag:Name
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_include_or_exclude_filters.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_include_or_exclude_filters.yml.j2
new file mode 100644
index 000000000..a6d48ce8c
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_include_or_exclude_filters.yml.j2
@@ -0,0 +1,23 @@
+plugin: amazon.aws.aws_ec2
+aws_access_key_id: '{{ aws_access_key }}'
+aws_secret_access_key: '{{ aws_secret_key }}'
+{% if security_token | default(false) %}
+aws_security_token: '{{ security_token }}'
+{% endif %}
+regions:
+- '{{ aws_region }}'
+filters:
+ tag:Name:
+ - '{{ resource_prefix }}_1'
+ - '{{ resource_prefix }}_3'
+include_filters:
+- tag:Name:
+ - '{{ resource_prefix }}_2'
+ - '{{ resource_prefix }}_4'
+exclude_filters:
+- tag:Name:
+ - '{{ resource_prefix }}_3'
+ - '{{ resource_prefix }}_4'
+hostnames:
+- tag:Name
+- dns-name
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_literal_string.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_literal_string.yml.j2
new file mode 100644
index 000000000..0dbddcb82
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_literal_string.yml.j2
@@ -0,0 +1,15 @@
+plugin: amazon.aws.aws_ec2
+aws_access_key_id: '{{ aws_access_key }}'
+aws_secret_access_key: '{{ aws_secret_key }}'
+{% if security_token | default(false) %}
+aws_security_token: '{{ security_token }}'
+{% endif %}
+regions:
+- '{{ aws_region }}'
+filters:
+ tag:Name:
+ - '{{ resource_prefix }}'
+hostnames:
+ - name: 'tag:Name'
+ separator: '-'
+ prefix: 'aws'
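
Because 'aws' is neither a tag: reference nor an instance attribute, the plugin uses it as a literal prefix string here. With the test's values written out, the rendered hostnames block and the name it yields look like:

hostnames:
  - name: 'tag:Name'   # the instance's Name tag (the resource prefix)
    separator: '-'
    prefix: 'aws'      # literal string, prepended as-is
# resulting inventory hostname: aws-<resource_prefix>, matching expected_hostname
# in test_populating_inventory_with_literal_string.yml
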
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_template.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_template.yml.j2
new file mode 100644
index 000000000..6b27544f9
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_template.yml.j2
@@ -0,0 +1,14 @@
+plugin: amazon.aws.aws_ec2
+aws_access_key_id: '{{ '{{ lookup("env", "MY_ACCESS_KEY") }}' }}'
+aws_secret_access_key: '{{ aws_secret_key }}'
+{% if security_token | default(false) %}
+aws_security_token: '{{ security_token }}'
+{% endif %}
+regions:
+- '{{ aws_region }}'
+filters:
+ tag:Name:
+ - '{{ resource_prefix }}'
+hostnames:
+- tag:Name
+- dns-name
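
The doubled Jinja quoting in aws_access_key_id means the outer '{{ '...' }}' is evaluated when create_inventory_config.yml writes the file, leaving a literal lookup that the inventory plugin only resolves at parse time; MY_ACCESS_KEY is expected to be exported by the access_key.sh script sourced in runme.sh. A sketch of the rendered test.aws_ec2.yml, with a placeholder for the substituted secret:

plugin: amazon.aws.aws_ec2
aws_access_key_id: '{{ lookup("env", "MY_ACCESS_KEY") }}'  # still a template; resolved by the plugin
aws_secret_access_key: '<rendered aws_secret_key>'          # substituted when the file was written
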
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_use_contrib_script_keys.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_use_contrib_script_keys.yml.j2
new file mode 100644
index 000000000..e6b4068fa
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_use_contrib_script_keys.yml.j2
@@ -0,0 +1,15 @@
+plugin: amazon.aws.aws_ec2
+aws_access_key_id: '{{ aws_access_key }}'
+aws_secret_access_key: '{{ aws_secret_key }}'
+{% if security_token | default(false) %}
+aws_security_token: '{{ security_token }}'
+{% endif %}
+regions:
+- '{{ aws_region }}'
+filters:
+ tag:Name:
+ - '{{ resource_prefix }}:/aa'
+hostnames:
+- tag:Name
+use_contrib_script_compatible_sanitization: True
+use_contrib_script_compatible_ec2_tag_keys: True
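
With both contrib-script compatibility switches enabled, the instance named '<resource_prefix>:/aa' shows up roughly as sketched below, which is what test_populating_inventory_with_use_contrib_script_keys.yml asserts:

"<resource_prefix>__aa":     # ":" and "/" sanitized to "_" in the inventory hostname
  ec2_tag_OtherTag: value    # tags exposed in the legacy ec2_tag_<key> hostvar style
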
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/test.aws_ec2.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/test.aws_ec2.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/test.aws_ec2.yml
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/aliases b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/aliases
new file mode 100644
index 000000000..569271951
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+unsupported
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/create_inventory_config.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/create_inventory_config.yml
new file mode 100644
index 000000000..f0a9030a0
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/create_inventory_config.yml
@@ -0,0 +1,11 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ vars:
+ template_name: "../templates/{{ template | default('inventory.j2') }}"
+ tasks:
+ - name: write inventory config file
+ copy:
+ dest: ../test.aws_rds.yml
+ content: "{{ lookup('template', template_name) }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/empty_inventory_config.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/empty_inventory_config.yml
new file mode 100644
index 000000000..d7e2cda3a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/empty_inventory_config.yml
@@ -0,0 +1,9 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: write inventory config file
+ copy:
+ dest: ../test.aws_rds.yml
+ content: ""
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/populate_cache.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/populate_cache.yml
new file mode 100644
index 000000000..3c75a7cf5
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/populate_cache.yml
@@ -0,0 +1,57 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ environment: "{{ ansible_test.environment }}"
+ collections:
+ - amazon.aws
+ - community.aws
+ tasks:
+
+ - module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+ - set_fact:
+ instance_id: '{{ resource_prefix }}-mariadb'
+
+ - name: assert group was populated with inventory but is empty
+ assert:
+ that:
+ - "'aws_rds' in groups"
+ - "not groups.aws_rds"
+
+ # Create new host, add it to inventory and then terminate it without updating the cache
+
+ - name: create minimal mariadb instance in default VPC and default subnet group
+ rds_instance:
+ state: present
+ engine: mariadb
+ db_instance_class: db.t2.micro
+ allocated_storage: 20
+ instance_id: '{{ instance_id }}'
+ master_username: 'ansibletestuser'
+ master_user_password: 'password-{{ resource_prefix | regex_findall(".{8}$") | first }}'
+ tags:
+ workload_type: other
+ register: setup_instance
+
+ - meta: refresh_inventory
+
+ - assert:
+ that:
+ - groups.aws_rds
+
+ always:
+
+ - name: remove mariadb instance
+ rds_instance:
+ state: absent
+ engine: mariadb
+ skip_final_snapshot: yes
+ instance_id: '{{ instance_id }}'
+ ignore_errors: yes
+ when: setup_instance is defined
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_invalid_aws_rds_inventory_config.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_invalid_aws_rds_inventory_config.yml
new file mode 100644
index 000000000..499513570
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_invalid_aws_rds_inventory_config.yml
@@ -0,0 +1,9 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: assert inventory was not populated by aws_rds inventory plugin
+ assert:
+ that:
+ - "'aws_rds' not in groups"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_cache.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_cache.yml
new file mode 100644
index 000000000..7eadbad85
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_cache.yml
@@ -0,0 +1,18 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: assert cache was used to populate inventory
+ assert:
+ that:
+ - "'aws_rds' in groups"
+ - "groups.aws_rds | length == 1"
+
+ - meta: refresh_inventory
+
+ - name: assert refresh_inventory updated the cache
+ assert:
+ that:
+ - "'aws_rds' in groups"
+ - "not groups.aws_rds"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_with_hostvars_prefix_suffix.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_with_hostvars_prefix_suffix.yml
new file mode 100644
index 000000000..2bdcea0eb
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_with_hostvars_prefix_suffix.yml
@@ -0,0 +1,63 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ environment: "{{ ansible_test.environment }}"
+ collections:
+ - amazon.aws
+ - community.aws
+ tasks:
+
+ - module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+
+ - set_fact:
+ instance_id: "{{ resource_prefix }}-mariadb"
+
+ - name: create minimal mariadb instance in default VPC and default subnet group
+ rds_instance:
+ state: present
+ engine: mariadb
+ db_instance_class: db.t2.micro
+ allocated_storage: 20
+ instance_id: '{{ resource_prefix }}-mariadb'
+ master_username: 'ansibletestuser'
+ master_user_password: 'password-{{ resource_prefix | regex_findall(".{8}$") | first }}'
+ tags:
+ workload_type: other
+ register: setup_instance
+
+ - meta: refresh_inventory
+
+ - name: assert the hostvars are defined with prefix and/or suffix
+ assert:
+ that:
+ - "hostvars[host_instance_name].{{ vars_prefix }}db_instance_class{{ vars_suffix }} == 'db.t2.micro'"
+ - "hostvars[host_instance_name].{{ vars_prefix }}engine{{ vars_suffix }} == 'mariadb'"
+ - "hostvars[host_instance_name].{{ vars_prefix }}db_instance_status{{ vars_suffix }} == 'available'"
+ - "'db_instance_class' not in hostvars[host_instance_name]"
+ - "'engine' not in hostvars[host_instance_name]"
+ - "'db_instance_status' not in hostvars[host_instance_name]"
+ - "'ansible_diff_mode' in hostvars[host_instance_name]"
+ - "'ansible_forks' in hostvars[host_instance_name]"
+ - "'ansible_version' in hostvars[host_instance_name]"
+ vars:
+ host_instance_name: "{{ resource_prefix }}-mariadb"
+ vars_prefix: "{{ inventory_prefix | default('') }}"
+ vars_suffix: "{{ inventory_suffix | default('') }}"
+
+ always:
+
+ - name: remove mariadb instance
+ rds_instance:
+ state: absent
+ engine: mariadb
+ skip_final_snapshot: yes
+ instance_id: '{{ instance_id }}'
+ ignore_errors: yes
+ when: setup_instance is defined
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory.yml
new file mode 100644
index 000000000..678f65b7a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory.yml
@@ -0,0 +1,77 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ environment: "{{ ansible_test.environment }}"
+ collections:
+ - amazon.aws
+ - community.aws
+ tasks:
+
+ - module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+
+ - set_fact:
+ instance_id: "{{ resource_prefix }}-mariadb"
+
+ - debug: var=groups
+ - name: assert group was populated with inventory but is empty
+ assert:
+ that:
+ - "'aws_rds' in groups"
+ - "not groups.aws_rds"
+
+ # Create new host, refresh inventory, remove host, refresh inventory
+
+ - name: create minimal mariadb instance in default VPC and default subnet group
+ rds_instance:
+ state: present
+ engine: mariadb
+ db_instance_class: db.t2.micro
+ allocated_storage: 20
+ instance_id: '{{ instance_id }}'
+ master_username: 'ansibletestuser'
+ master_user_password: 'password-{{ resource_prefix | regex_findall(".{8}$") | first }}'
+ tags:
+ workload_type: other
+ register: setup_instance
+
+ - meta: refresh_inventory
+
+ - name: assert group was populated with inventory and is no longer empty
+ assert:
+ that:
+ - "'aws_rds' in groups"
+ - "groups.aws_rds | length == 1"
+ - "groups.aws_rds.0 == '{{ instance_id }}'"
+
+ - name: remove mariadb instance
+ rds_instance:
+ state: absent
+ engine: mariadb
+ skip_final_snapshot: yes
+ instance_id: '{{ instance_id }}'
+
+ - meta: refresh_inventory
+
+ - name: assert group was populated with inventory but is empty
+ assert:
+ that:
+ - "'aws_rds' in groups"
+ - "not groups.aws_rds"
+
+ always:
+
+ - name: remove mariadb instance
+ rds_instance:
+ state: absent
+ engine: mariadb
+ skip_final_snapshot: yes
+ instance_id: '{{ instance_id }}'
+ ignore_errors: yes
+ when: setup_instance is defined
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory_with_constructed.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory_with_constructed.yml
new file mode 100644
index 000000000..1f59e683b
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory_with_constructed.yml
@@ -0,0 +1,65 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ environment: "{{ ansible_test.environment }}"
+ collections:
+ - amazon.aws
+ - community.aws
+ tasks:
+
+ - module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+
+ - set_fact:
+ instance_id: "{{ resource_prefix }}-mariadb"
+
+ - name: create minimal mariadb instance in default VPC and default subnet group
+ rds_instance:
+ state: present
+ engine: mariadb
+ db_instance_class: db.t2.micro
+ allocated_storage: 20
+ instance_id: '{{ resource_prefix }}-mariadb'
+ master_username: 'ansibletestuser'
+ master_user_password: 'password-{{ resource_prefix | regex_findall(".{8}$") | first }}'
+ tags:
+ workload_type: other
+ register: setup_instance
+
+ - meta: refresh_inventory
+ - debug: var=groups
+
+ - name: 'generate expected group name based off the db parameter groups'
+ vars:
+ parameter_group_name: '{{ setup_instance.db_parameter_groups[0].db_parameter_group_name }}'
+ set_fact:
+ parameter_group_key: 'rds_parameter_group_{{ parameter_group_name | replace(".", "_") }}'
+
+ - name: assert the keyed groups from constructed config were added to inventory
+ assert:
+ that:
+ # There are 6 groups: all, ungrouped, aws_rds, tag keyed group, engine keyed group, parameter group keyed group
+ - "groups | length == 6"
+ - '"all" in groups'
+ - '"ungrouped" in groups'
+ - '"aws_rds" in groups'
+ - '"tag_workload_type_other" in groups'
+ - '"rds_mariadb" in groups'
+ - 'parameter_group_key in groups'
+
+ always:
+
+ - name: remove mariadb instance
+ rds_instance:
+ state: absent
+ engine: mariadb
+ skip_final_snapshot: yes
+ instance_id: '{{ instance_id }}'
+ ignore_errors: yes
+ when: setup_instance is defined
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_refresh_inventory.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_refresh_inventory.yml
new file mode 100644
index 000000000..519aa5b28
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_refresh_inventory.yml
@@ -0,0 +1,67 @@
+- name: test updating inventory
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ collections:
+ - amazon.aws
+ - community.aws
+ block:
+ - set_fact:
+      instance_id: "{{ resource_prefix }}-mariadb"
+
+ - name: assert group was populated with inventory but is empty
+ assert:
+ that:
+ - "'aws_rds' in groups"
+ - "not groups.aws_rds"
+
+ - name: create minimal mariadb instance in default VPC and default subnet group
+ rds_instance:
+ state: present
+ engine: mariadb
+ db_instance_class: db.t2.micro
+ allocated_storage: 20
+      instance_id: '{{ instance_id }}'
+ master_username: 'ansibletestuser'
+ master_user_password: 'password-{{ resource_prefix | regex_findall(".{8}$") | first }}'
+ tags:
+ workload_type: other
+ register: setup_instance
+
+ - meta: refresh_inventory
+
+ - name: assert group was populated with inventory and is no longer empty
+ assert:
+ that:
+ - "'aws_rds' in groups"
+ - "groups.aws_rds | length == 1"
+      - "groups.aws_rds.0 == '{{ instance_id }}'"
+
+ - name: remove mariadb instance
+ rds_instance:
+ state: absent
+ engine: mariadb
+ skip_final_snapshot: yes
+      instance_id: '{{ instance_id }}'
+
+ - meta: refresh_inventory
+
+ - name: assert group was populated with inventory but is empty
+ assert:
+ that:
+ - "'aws_rds' in groups"
+ - "not groups.aws_rds"
+
+ always:
+
+ - name: remove mariadb instance
+ rds_instance:
+ state: absent
+ engine: mariadb
+ skip_final_snapshot: yes
+      instance_id: '{{ instance_id }}'
+ ignore_errors: yes
+ when: setup_instance is defined
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/runme.sh b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/runme.sh
new file mode 100755
index 000000000..c16c083ee
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/runme.sh
@@ -0,0 +1,47 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# ensure test config is empty
+ansible-playbook playbooks/empty_inventory_config.yml "$@"
+
+export ANSIBLE_INVENTORY_ENABLED="amazon.aws.aws_rds"
+
+# test with default inventory file
+ansible-playbook playbooks/test_invalid_aws_rds_inventory_config.yml "$@"
+
+export ANSIBLE_INVENTORY=test.aws_rds.yml
+
+# test empty inventory config
+ansible-playbook playbooks/test_invalid_aws_rds_inventory_config.yml "$@"
+
+# generate inventory config and test using it
+ansible-playbook playbooks/create_inventory_config.yml "$@"
+ansible-playbook playbooks/test_populating_inventory.yml "$@"
+
+# generate inventory config with caching and test using it
+ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_cache.j2'" "$@"
+ansible-playbook playbooks/populate_cache.yml "$@"
+ansible-playbook playbooks/test_inventory_cache.yml "$@"
+
+# remove inventory cache
+rm -r aws_rds_cache_dir/
+
+# generate inventory config with constructed features and test using it
+ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_constructed.j2'" "$@"
+ansible-playbook playbooks/test_populating_inventory_with_constructed.yml "$@"
+
+# generate inventory config with hostvars_prefix features and test using it
+ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_hostvars_prefix_suffix.j2'" -e "inventory_prefix='aws_rds_'" "$@"
+ansible-playbook playbooks/test_inventory_with_hostvars_prefix_suffix.yml -e "inventory_prefix='aws_rds_'" "$@"
+
+# generate inventory config with hostvars_suffix features and test using it
+ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_hostvars_prefix_suffix.j2'" -e "inventory_suffix='_aws_rds'" "$@"
+ansible-playbook playbooks/test_inventory_with_hostvars_prefix_suffix.yml -e "inventory_suffix='_aws_rds'" "$@"
+
+# generate inventory config with hostvars_prefix and hostvars_suffix features and test using it
+ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_hostvars_prefix_suffix.j2'" -e "inventory_prefix='aws_'" -e "inventory_suffix='_rds'" "$@"
+ansible-playbook playbooks/test_inventory_with_hostvars_prefix_suffix.yml -e "inventory_prefix='aws_'" -e "inventory_suffix='_rds'" "$@"
+
+# cleanup inventory config
+ansible-playbook playbooks/empty_inventory_config.yml "$@"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory.j2
new file mode 100644
index 000000000..61a659eaa
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory.j2
@@ -0,0 +1,10 @@
+plugin: amazon.aws.aws_rds
+aws_access_key_id: '{{ aws_access_key }}'
+aws_secret_access_key: '{{ aws_secret_key }}'
+{% if security_token | default(false) %}
+aws_security_token: '{{ security_token }}'
+{% endif %}
+regions:
+ - '{{ aws_region }}'
+filters:
+ db-instance-id: "{{ resource_prefix }}-mariadb"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_cache.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_cache.j2
new file mode 100644
index 000000000..6e9c40e90
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_cache.j2
@@ -0,0 +1,13 @@
+plugin: amazon.aws.aws_rds
+cache: True
+cache_plugin: jsonfile
+cache_connection: aws_rds_cache_dir
+aws_access_key_id: '{{ aws_access_key }}'
+aws_secret_access_key: '{{ aws_secret_key }}'
+{% if security_token | default(false) %}
+aws_security_token: '{{ security_token }}'
+{% endif %}
+regions:
+ - '{{ aws_region }}'
+filters:
+ db-instance-id: "{{ resource_prefix }}-mariadb"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_constructed.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_constructed.j2
new file mode 100644
index 000000000..c5603ef87
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_constructed.j2
@@ -0,0 +1,17 @@
+plugin: amazon.aws.aws_rds
+aws_access_key_id: '{{ aws_access_key }}'
+aws_secret_access_key: '{{ aws_secret_key }}'
+{% if security_token | default(false) %}
+aws_security_token: '{{ security_token }}'
+{% endif %}
+regions:
+ - '{{ aws_region }}'
+keyed_groups:
+ - key: 'db_parameter_groups|map(attribute="db_parameter_group_name")'
+ prefix: rds_parameter_group
+ - key: tags
+ prefix: tag
+ - key: engine
+ prefix: rds
+filters:
+ db-instance-id: "{{ resource_prefix }}-mariadb"
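
The keyed groups this template produces for the test instance are sketched below; the exact parameter-group name depends on the engine default, which is why the companion playbook derives it from setup_instance and replaces '.' with '_':

all:
ungrouped:
aws_rds:                       # base group added by the plugin
tag_workload_type_other:       # keyed group from the workload_type tag
rds_mariadb:                   # keyed group from engine
rds_parameter_group_default_mariadb<version>:  # keyed group from the default DB parameter group
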
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_hostvars_prefix_suffix.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_hostvars_prefix_suffix.j2
new file mode 100644
index 000000000..1e2ac7af6
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_hostvars_prefix_suffix.j2
@@ -0,0 +1,16 @@
+plugin: amazon.aws.aws_rds
+aws_access_key_id: '{{ aws_access_key }}'
+aws_secret_access_key: '{{ aws_secret_key }}'
+{% if security_token | default(false) %}
+aws_security_token: '{{ security_token }}'
+{% endif %}
+regions:
+ - '{{ aws_region }}'
+{% if inventory_prefix | default(false) %}
+hostvars_prefix: '{{ inventory_prefix }}'
+{% endif %}
+{% if inventory_suffix | default(false) %}
+hostvars_suffix: '{{ inventory_suffix }}'
+{% endif %}
+filters:
+ db-instance-id: "{{ resource_prefix }}-mariadb"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/test.aws_rds.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/test.aws_rds.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/test.aws_rds.yml
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/aliases b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/aliases
new file mode 100644
index 000000000..36c332ab4
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/aliases
@@ -0,0 +1,11 @@
+# Various race conditions - likely needs waiters
+# https://github.com/ansible-collections/community.aws/issues/433
+# There are no supported KMS waiters, and manually waiting for updates didn't fix the issue either.
+# The issue is likely on the AWS side - waits were added on updates in the integration tests to work around this.
+
+# Some KMS operations are just slow
+time=10m
+
+cloud/aws
+
+kms_key_info
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/inventory b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/inventory
new file mode 100644
index 000000000..a9081eae9
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/inventory
@@ -0,0 +1,12 @@
+# inventory names shortened to fit resource name length limits
+[tests]
+states
+grants
+modify
+tagging
+# CI's AWS account does not support multi-region
+# multi_region
+
+[all:vars]
+ansible_connection=local
+ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/main.yml
new file mode 100644
index 000000000..0f248fc01
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/main.yml
@@ -0,0 +1,9 @@
+# Beware: most of our tests here are run in parallel.
+# To add new tests you'll need to add a new host to the inventory and a matching
+# 'test_{{ inventory_hostname }}.yml' file in roles/aws_kms/tasks/
+
+- hosts: all
+ gather_facts: no
+ strategy: free
+ roles:
+ - aws_kms
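
A sketch of how a new, hypothetical 'encrypt' test would be wired in, following the pattern above: add the host under [tests] in the inventory file and drop a matching tasks file into the role, which roles/aws_kms/tasks/main.yml then includes as test_{{ inventory_hostname }}.yml.

# roles/aws_kms/tasks/test_encrypt.yml (hypothetical)
- name: placeholder for a new parallel aws_kms test
  debug:
    msg: runs on its own inventory host, in parallel with states/grants/modify/tagging
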
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/defaults/main.yml
new file mode 100644
index 000000000..af2b9609a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/defaults/main.yml
@@ -0,0 +1,2 @@
+kms_key_alias: ansible-test-{{ inventory_hostname | replace('_','-') }}{{ tiny_prefix }}
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/main.yml
new file mode 100644
index 000000000..2dcdcc757
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/main.yml
@@ -0,0 +1,11 @@
+- name: aws_kms integration tests
+ collections:
+ - community.aws
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+ - include: ./test_{{ inventory_hostname }}.yml
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_grants.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_grants.yml
new file mode 100644
index 000000000..071b36417
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_grants.yml
@@ -0,0 +1,350 @@
+- block:
+ # ============================================================
+ # PREPARATION
+ #
+ # Get some information about who we are before starting our tests
+ # we'll need this as soon as we start working on the policies
+ - name: get ARN of calling user
+ aws_caller_info:
+ register: aws_caller_info
+ - name: create an IAM role that can do nothing
+ iam_role:
+ name: '{{ kms_key_alias }}'
+ state: present
+ assume_role_policy_document: '{"Version": "2012-10-17", "Statement": {"Action":
+ "sts:AssumeRole", "Principal": {"Service": "ec2.amazonaws.com"}, "Effect":
+ "Deny"} }'
+ register: iam_role_result
+ - name: create a key
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ tags:
+ Hello: World
+ state: present
+ enabled: yes
+ enable_key_rotation: no
+ register: key
+ - name: assert that state is enabled
+ assert:
+ that:
+ - key is changed
+ - '"key_id" in key'
+ - key.key_id | length >= 36
+ - not key.key_id.startswith("arn:aws")
+ - '"key_arn" in key'
+ - key.key_arn.endswith(key.key_id)
+ - key.key_arn.startswith("arn:aws")
+ - key.key_state == "Enabled"
+ - key.enabled == True
+ - key.tags | length == 1
+ - key.tags['Hello'] == 'World'
+ - key.enable_key_rotation == false
+ - key.key_usage == 'ENCRYPT_DECRYPT'
+ - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT'
+ - key.grants | length == 0
+ - key.key_policies | length == 1
+ - key.key_policies[0].Id == 'key-default-1'
+ - key.description == ''
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Add grant - check mode
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ state: present
+ purge_grants: yes
+ grants:
+ - name: test_grant
+ grantee_principal: '{{ iam_role_result.iam_role.arn }}'
+ retiring_principal: '{{ aws_caller_info.arn }}'
+ constraints:
+ encryption_context_equals:
+ environment: test
+ application: testapp
+ operations:
+ - Decrypt
+ - RetireGrant
+ register: key
+ check_mode: yes
+ - name: assert grant would have been added
+ assert:
+ that:
+ - key.changed
+
+  # Roles can take a little while to get ready, pause briefly to give it a chance
+ - wait_for:
+ timeout: 20
+ - name: Add grant
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ state: present
+ purge_grants: yes
+ grants:
+ - name: test_grant
+ grantee_principal: '{{ iam_role_result.iam_role.arn }}'
+ retiring_principal: '{{ aws_caller_info.arn }}'
+ constraints:
+ encryption_context_equals:
+ environment: test
+ application: testapp
+ operations:
+ - Decrypt
+ - RetireGrant
+ register: key
+ - name: assert grant added
+ assert:
+ that:
+ - key.changed
+ - '"key_id" in key'
+ - key.key_id | length >= 36
+ - not key.key_id.startswith("arn:aws")
+ - '"key_arn" in key'
+ - key.key_arn.endswith(key.key_id)
+ - key.key_arn.startswith("arn:aws")
+ - key.key_state == "Enabled"
+ - key.enabled == True
+ - key.tags | length == 1
+ - key.tags['Hello'] == 'World'
+ - key.enable_key_rotation == false
+ - key.key_usage == 'ENCRYPT_DECRYPT'
+ - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT'
+ - key.grants | length == 1
+ - key.key_policies | length == 1
+ - key.key_policies[0].Id == 'key-default-1'
+ - key.description == ''
+
+ - name: Sleep to wait for updates to propagate
+ wait_for:
+ timeout: 45
+ - name: Add grant (idempotence) - check mode
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ state: present
+ purge_grants: yes
+ grants:
+ - name: test_grant
+ grantee_principal: '{{ iam_role_result.iam_role.arn }}'
+ retiring_principal: '{{ aws_caller_info.arn }}'
+ constraints:
+ encryption_context_equals:
+ environment: test
+ application: testapp
+ operations:
+ - Decrypt
+ - RetireGrant
+ register: key
+ check_mode: yes
+ - assert:
+ that:
+ - not key.changed
+
+ - name: Add grant (idempotence)
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ state: present
+ purge_grants: yes
+ grants:
+ - name: test_grant
+ grantee_principal: '{{ iam_role_result.iam_role.arn }}'
+ retiring_principal: '{{ aws_caller_info.arn }}'
+ constraints:
+ encryption_context_equals:
+ environment: test
+ application: testapp
+ operations:
+ - Decrypt
+ - RetireGrant
+ register: key
+ - assert:
+ that:
+ - not key.changed
+ - '"key_id" in key'
+ - key.key_id | length >= 36
+ - not key.key_id.startswith("arn:aws")
+ - '"key_arn" in key'
+ - key.key_arn.endswith(key.key_id)
+ - key.key_arn.startswith("arn:aws")
+ - key.key_state == "Enabled"
+ - key.enabled == True
+ - key.tags | length == 1
+ - key.tags['Hello'] == 'World'
+ - key.enable_key_rotation == false
+ - key.key_usage == 'ENCRYPT_DECRYPT'
+ - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT'
+ - key.grants | length == 1
+ - key.key_policies | length == 1
+ - key.key_policies[0].Id == 'key-default-1'
+ - key.description == ''
+
+ - name: Add a second grant
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ state: present
+ grants:
+ - name: another_grant
+ grantee_principal: '{{ iam_role_result.iam_role.arn }}'
+ retiring_principal: '{{ aws_caller_info.arn }}'
+ constraints:
+ encryption_context_equals:
+ Environment: second
+ Application: anotherapp
+ operations:
+ - Decrypt
+ - RetireGrant
+ register: key
+ - name: Assert grant added
+ assert:
+ that:
+ - key.changed
+ - '"key_id" in key'
+ - key.key_id | length >= 36
+ - not key.key_id.startswith("arn:aws")
+ - '"key_arn" in key'
+ - key.key_arn.endswith(key.key_id)
+ - key.key_arn.startswith("arn:aws")
+ - key.key_state == "Enabled"
+ - key.enabled == True
+ - key.tags | length == 1
+ - key.tags['Hello'] == 'World'
+ - key.enable_key_rotation == false
+ - key.key_usage == 'ENCRYPT_DECRYPT'
+ - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT'
+ - key.grants | length == 2
+ - key.key_policies | length == 1
+ - key.key_policies[0].Id == 'key-default-1'
+ - key.description == ''
+
+ - name: Sleep to wait for updates to propagate
+ wait_for:
+ timeout: 45
+ - name: Add a second grant again
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ state: present
+ grants:
+ - name: another_grant
+ grantee_principal: '{{ iam_role_result.iam_role.arn }}'
+ retiring_principal: '{{ aws_caller_info.arn }}'
+ constraints:
+ encryption_context_equals:
+ Environment: second
+ Application: anotherapp
+ operations:
+ - Decrypt
+ - RetireGrant
+ register: key
+ - name: Assert grant added
+ assert:
+ that:
+ - not key.changed
+ - '"key_id" in key'
+ - key.key_id | length >= 36
+ - not key.key_id.startswith("arn:aws")
+ - '"key_arn" in key'
+ - key.key_arn.endswith(key.key_id)
+ - key.key_arn.startswith("arn:aws")
+ - key.key_state == "Enabled"
+ - key.enabled == True
+ - key.tags | length == 1
+ - key.tags['Hello'] == 'World'
+ - key.enable_key_rotation == false
+ - key.key_usage == 'ENCRYPT_DECRYPT'
+ - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT'
+ - key.grants | length == 2
+ - key.key_policies | length == 1
+ - key.key_policies[0].Id == 'key-default-1'
+ - key.description == ''
+
+ - name: Update the grants with purge_grants set
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ state: present
+ purge_grants: yes
+ grants:
+ - name: third_grant
+ grantee_principal: '{{ iam_role_result.iam_role.arn }}'
+ retiring_principal: '{{ aws_caller_info.arn }}'
+ constraints:
+ encryption_context_equals:
+ environment: third
+ application: onemoreapp
+ operations:
+ - Decrypt
+ - RetireGrant
+ register: key
+ - name: Assert grants replaced
+ assert:
+ that:
+ - key.changed
+ - '"key_id" in key'
+ - key.key_id | length >= 36
+ - not key.key_id.startswith("arn:aws")
+ - '"key_arn" in key'
+ - key.key_arn.endswith(key.key_id)
+ - key.key_arn.startswith("arn:aws")
+ - key.key_state == "Enabled"
+ - key.enabled == True
+ - key.tags | length == 1
+ - key.tags['Hello'] == 'World'
+ - key.enable_key_rotation == false
+ - key.key_usage == 'ENCRYPT_DECRYPT'
+ - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT'
+ - key.grants | length == 1
+ - key.key_policies | length == 1
+ - key.key_policies[0].Id == 'key-default-1'
+ - key.description == ''
+
+ - name: Update third grant to change encryption context equals to subset
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ state: present
+ grants:
+ - name: third_grant
+ grantee_principal: '{{ iam_role_result.iam_role.arn }}'
+ retiring_principal: '{{ aws_caller_info.arn }}'
+ constraints:
+ encryption_context_subset:
+ environment: third
+ application: onemoreapp
+ operations:
+ - Decrypt
+ - RetireGrant
+ register: key
+ - name: Assert grants replaced
+ assert:
+ that:
+ - key.changed
+ - '"key_id" in key'
+ - key.key_id | length >= 36
+ - not key.key_id.startswith("arn:aws")
+ - '"key_arn" in key'
+ - key.key_arn.endswith(key.key_id)
+ - key.key_arn.startswith("arn:aws")
+ - key.key_state == "Enabled"
+ - key.enabled == True
+ - key.tags | length == 1
+ - key.tags['Hello'] == 'World'
+ - key.enable_key_rotation == false
+ - key.key_usage == 'ENCRYPT_DECRYPT'
+ - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT'
+ - key.grants | length == 1
+ - key.key_policies | length == 1
+ - key.key_policies[0].Id == 'key-default-1'
+ - key.description == ''
+ - "'encryption_context_equals' not in key.grants[0].constraints"
+ - "'encryption_context_subset' in key.grants[0].constraints"
+
+ always:
+ # ============================================================
+ # CLEAN-UP
+ - name: finish off by deleting keys
+ aws_kms:
+ state: absent
+ alias: '{{ kms_key_alias }}'
+ pending_window: 7
+ ignore_errors: true
+ - name: remove the IAM role
+ iam_role:
+ name: '{{ kms_key_alias }}'
+ state: absent
+ ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_modify.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_modify.yml
new file mode 100644
index 000000000..223074a3e
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_modify.yml
@@ -0,0 +1,279 @@
+- block:
+ # ============================================================
+ # PREPARATION
+ #
+ # Get some information about who we are before starting our tests
+ # we'll need this as soon as we start working on the policies
+ - name: get ARN of calling user
+ aws_caller_info:
+ register: aws_caller_info
+ - name: create an IAM role that can do nothing
+ iam_role:
+ name: '{{ kms_key_alias }}'
+ state: present
+ assume_role_policy_document: '{"Version": "2012-10-17", "Statement": {"Action":
+ "sts:AssumeRole", "Principal": {"Service": "ec2.amazonaws.com"}, "Effect":
+ "Deny"} }'
+ register: iam_role_result
+ - name: create a key
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ tags:
+ Hello: World
+ state: present
+ enabled: yes
+ enable_key_rotation: no
+ register: key
+ - name: assert that state is enabled
+ assert:
+ that:
+ - key is changed
+ - '"key_id" in key'
+ - key.key_id | length >= 36
+ - not key.key_id.startswith("arn:aws")
+ - '"key_arn" in key'
+ - key.key_arn.endswith(key.key_id)
+ - key.key_arn.startswith("arn:aws")
+ - key.key_state == "Enabled"
+ - key.enabled == True
+ - key.tags | length == 1
+ - key.tags['Hello'] == 'World'
+ - key.enable_key_rotation == false
+ - key.key_usage == 'ENCRYPT_DECRYPT'
+ - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT'
+ - key.grants | length == 0
+ - key.key_policies | length == 1
+ - key.key_policies[0].Id == 'key-default-1'
+ - key.description == ''
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Save IDs for later
+ set_fact:
+ kms_key_id: '{{ key.key_id }}'
+ kms_key_arn: '{{ key.key_arn }}'
+ - name: find facts about the key (by ID)
+ aws_kms_info:
+ key_id: '{{ kms_key_id }}'
+ register: new_key
+ - name: check that a key was found
+ assert:
+ that:
+ - '"key_id" in new_key.kms_keys[0]'
+ - new_key.kms_keys[0].key_id | length >= 36
+ - not new_key.kms_keys[0].key_id.startswith("arn:aws")
+ - '"key_arn" in new_key.kms_keys[0]'
+ - new_key.kms_keys[0].key_arn.endswith(new_key.kms_keys[0].key_id)
+ - new_key.kms_keys[0].key_arn.startswith("arn:aws")
+ - new_key.kms_keys[0].key_state == "Enabled"
+ - new_key.kms_keys[0].enabled == True
+ - new_key.kms_keys[0].tags | length == 1
+ - new_key.kms_keys[0].tags['Hello'] == 'World'
+ - new_key.kms_keys[0].enable_key_rotation == False
+ - new_key.kms_keys[0].key_usage == 'ENCRYPT_DECRYPT'
+ - new_key.kms_keys[0].customer_master_key_spec == 'SYMMETRIC_DEFAULT'
+ - new_key.kms_keys[0].grants | length == 0
+ - new_key.kms_keys[0].key_policies | length == 1
+ - new_key.kms_keys[0].key_policies[0].Id == 'key-default-1'
+ - new_key.kms_keys[0].description == ''
+
+ - name: Update policy - check mode
+ aws_kms:
+ key_id: '{{ kms_key_id }}'
+ policy: "{{ lookup('template', 'console-policy.j2') }}"
+ register: key
+ check_mode: yes
+ - assert:
+ that:
+ - key is changed
+
+ - name: Update policy
+ aws_kms:
+ key_id: '{{ kms_key_id }}'
+ policy: "{{ lookup('template', 'console-policy.j2') }}"
+ register: key
+ - name: Policy should have been changed
+ assert:
+ that:
+ - key is changed
+ - '"key_id" in key'
+ - key.key_id | length >= 36
+ - not key.key_id.startswith("arn:aws")
+ - '"key_arn" in key'
+ - key.key_arn.endswith(key.key_id)
+ - key.key_arn.startswith("arn:aws")
+ - key.key_state == "Enabled"
+ - key.enabled == True
+ - key.tags | length == 1
+ - key.tags['Hello'] == 'World'
+ - key.enable_key_rotation == false
+ - key.key_usage == 'ENCRYPT_DECRYPT'
+ - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT'
+ - key.grants | length == 0
+ - key.key_policies | length == 1
+ - key.key_policies[0].Id == 'key-consolepolicy-3'
+ - key.description == ''
+
+ - name: Sleep to wait for updates to propagate
+ wait_for:
+ timeout: 45
+ - name: Update policy (idempotence) - check mode
+ aws_kms:
+ alias: alias/{{ kms_key_alias }}
+ policy: "{{ lookup('template', 'console-policy.j2') }}"
+ register: key
+ check_mode: yes
+ - assert:
+ that:
+ - not key.changed
+
+ - name: Update policy (idempotence)
+ aws_kms:
+ alias: alias/{{ kms_key_alias }}
+ policy: "{{ lookup('template', 'console-policy.j2') }}"
+ register: key
+ - assert:
+ that:
+ - not key.changed
+ - '"key_id" in key'
+ - key.key_id | length >= 36
+ - not key.key_id.startswith("arn:aws")
+ - '"key_arn" in key'
+ - key.key_arn.endswith(key.key_id)
+ - key.key_arn.startswith("arn:aws")
+ - key.key_state == "Enabled"
+ - key.enabled == True
+ - key.tags | length == 1
+ - key.tags['Hello'] == 'World'
+ - key.enable_key_rotation == false
+ - key.key_usage == 'ENCRYPT_DECRYPT'
+ - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT'
+ - key.grants | length == 0
+ - key.key_policies | length == 1
+ - key.key_policies[0].Id == 'key-consolepolicy-3'
+ - key.description == ''
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Update description - check mode
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ state: present
+ description: test key for testing
+ register: key
+ check_mode: yes
+ - assert:
+ that:
+ - key.changed
+
+ - name: Update description
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ state: present
+ description: test key for testing
+ register: key
+ - assert:
+ that:
+ - key.changed
+ - '"key_id" in key'
+ - key.key_id | length >= 36
+ - not key.key_id.startswith("arn:aws")
+ - '"key_arn" in key'
+ - key.key_arn.endswith(key.key_id)
+ - key.key_arn.startswith("arn:aws")
+ - key.key_state == "Enabled"
+ - key.enabled == True
+ - key.tags | length == 1
+ - key.tags['Hello'] == 'World'
+ - key.enable_key_rotation == false
+ - key.key_usage == 'ENCRYPT_DECRYPT'
+ - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT'
+ - key.grants | length == 0
+ - key.key_policies | length == 1
+ - key.key_policies[0].Id == 'key-consolepolicy-3'
+ - key.description == 'test key for testing'
+
+ - name: Sleep to wait for updates to propagate
+ wait_for:
+ timeout: 45
+ - name: Update description (idempotence) - check mode
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ state: present
+ description: test key for testing
+ register: key
+ check_mode: yes
+ - assert:
+ that:
+ - not key.changed
+
+ - name: Update description (idempotence)
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ state: present
+ description: test key for testing
+ register: key
+ - assert:
+ that:
+ - not key.changed
+ - '"key_id" in key'
+ - key.key_id | length >= 36
+ - not key.key_id.startswith("arn:aws")
+ - '"key_arn" in key'
+ - key.key_arn.endswith(key.key_id)
+ - key.key_arn.startswith("arn:aws")
+ - key.key_state == "Enabled"
+ - key.enabled == True
+ - key.tags | length == 1
+ - key.tags['Hello'] == 'World'
+ - key.enable_key_rotation == false
+ - key.key_usage == 'ENCRYPT_DECRYPT'
+ - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT'
+ - key.grants | length == 0
+ - key.key_policies | length == 1
+ - key.key_policies[0].Id == 'key-consolepolicy-3'
+ - key.description == 'test key for testing'
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: update policy to remove access to key rotation status
+ aws_kms:
+ alias: alias/{{ kms_key_alias }}
+ policy: "{{ lookup('template', 'console-policy-no-key-rotation.j2') }}"
+ register: key
+ - assert:
+ that:
+ - '"key_id" in key'
+ - key.key_id | length >= 36
+ - not key.key_id.startswith("arn:aws")
+ - '"key_arn" in key'
+ - key.key_arn.endswith(key.key_id)
+ - key.key_arn.startswith("arn:aws")
+ - key.key_state == "Enabled"
+ - key.enabled == True
+ - key.tags | length == 1
+ - key.tags['Hello'] == 'World'
+ - key.enable_key_rotation is none
+ - key.key_usage == 'ENCRYPT_DECRYPT'
+ - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT'
+ - key.grants | length == 0
+ - key.key_policies | length == 1
+ - key.key_policies[0].Id == 'key-consolepolicy-3'
+ - key.description == 'test key for testing'
+ - "'Disable access to key rotation status' in {{ key.key_policies[0].Statement\
+ \ | map(attribute='Sid') }}"
+
+ always:
+ # ============================================================
+ # CLEAN-UP
+ - name: finish off by deleting keys
+ aws_kms:
+ state: absent
+ alias: '{{ kms_key_alias }}'
+ pending_window: 7
+ ignore_errors: true
+ - name: remove the IAM role
+ iam_role:
+ name: '{{ kms_key_alias }}'
+ state: absent
+ ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_multi_region.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_multi_region.yml
new file mode 100644
index 000000000..c112b4571
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_multi_region.yml
@@ -0,0 +1,100 @@
+- block:
+ # ============================================================
+ # PREPARATION
+ #
+ # Get some information about who we are before starting our tests
+ # we'll need this as soon as we start working on the policies
+ - name: get ARN of calling user
+ aws_caller_info:
+ register: aws_caller_info
+ - name: See whether key exists and its current state
+ kms_key_info:
+ alias: '{{ kms_key_alias }}'
+ - name: create a multi region key - check mode
+ kms_key:
+ alias: '{{ kms_key_alias }}-check'
+ tags:
+ Hello: World
+ state: present
+ multi_region: True
+ enabled: yes
+ register: key_check
+ check_mode: yes
+ - name: find facts about the check mode key
+ kms_key_info:
+ alias: '{{ kms_key_alias }}-check'
+ register: check_key
+ - name: ensure that check mode worked as expected
+ assert:
+ that:
+ - check_key.kms_keys | length == 0
+ - key_check is changed
+
+ - name: create a multi region key
+ kms_key:
+ alias: '{{ kms_key_alias }}'
+ tags:
+ Hello: World
+ state: present
+ enabled: yes
+ multi_region: True
+ enable_key_rotation: no
+ register: key
+ - name: assert that state is enabled
+ assert:
+ that:
+ - key is changed
+ - '"key_id" in key'
+ - key.key_id | length >= 36
+ - not key.key_id.startswith("arn:aws")
+ - '"key_arn" in key'
+ - key.key_arn.endswith(key.key_id)
+ - key.key_arn.startswith("arn:aws")
+ - key.key_state == "Enabled"
+ - key.enabled == True
+ - key.tags | length == 1
+ - key.tags['Hello'] == 'World'
+ - key.enable_key_rotation == false
+ - key.key_usage == 'ENCRYPT_DECRYPT'
+ - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT'
+ - key.grants | length == 0
+ - key.key_policies | length == 1
+ - key.key_policies[0].Id == 'key-default-1'
+ - key.description == ''
+ - key.multi_region == True
+
+ - name: Sleep to wait for updates to propagate
+ wait_for:
+ timeout: 45
+
+ - name: create a key (expect failure)
+ kms_key:
+ alias: '{{ kms_key_alias }}'
+ tags:
+ Hello: World
+ state: present
+ enabled: yes
+ multi_region: True
+ register: result
+ ignore_errors: True
+
+ - assert:
+ that:
+ - result is failed
+ - result.msg != "MODULE FAILURE"
+ - result.changed == False
+ - '"You cannot change the multi-region property on an existing key." in result.msg'
+
+ always:
+ # ============================================================
+ # CLEAN-UP
+ - name: finish off by deleting keys
+ kms_key:
+ state: absent
+ alias: '{{ item }}'
+ pending_window: 7
+ ignore_errors: true
+ loop:
+ - '{{ kms_key_alias }}'
+ - '{{ kms_key_alias }}-diff-spec-usage'
+ - '{{ kms_key_alias }}-check'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_states.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_states.yml
new file mode 100644
index 000000000..917410c50
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_states.yml
@@ -0,0 +1,522 @@
+- block:
+ # ============================================================
+ # PREPARATION
+ #
+ # Get some information about who we are before starting our tests
+ # we'll need this as soon as we start working on the policies
+ - name: get ARN of calling user
+ aws_caller_info:
+ register: aws_caller_info
+ - name: See whether key exists and its current state
+ aws_kms_info:
+ alias: '{{ kms_key_alias }}'
+ - name: create a key - check mode
+ aws_kms:
+ alias: '{{ kms_key_alias }}-check'
+ tags:
+ Hello: World
+ state: present
+ enabled: yes
+ register: key_check
+ check_mode: yes
+ - name: find facts about the check mode key
+ aws_kms_info:
+ alias: '{{ kms_key_alias }}-check'
+ register: check_key
+ - name: ensure that check mode worked as expected
+ assert:
+ that:
+ - check_key.kms_keys | length == 0
+ - key_check is changed
+
+ - name: create a key
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ tags:
+ Hello: World
+ state: present
+ enabled: yes
+ enable_key_rotation: no
+ register: key
+ - name: assert that state is enabled
+ assert:
+ that:
+ - key is changed
+ - '"key_id" in key'
+ - key.key_id | length >= 36
+ - not key.key_id.startswith("arn:aws")
+ - '"key_arn" in key'
+ - key.key_arn.endswith(key.key_id)
+ - key.key_arn.startswith("arn:aws")
+ - key.key_state == "Enabled"
+ - key.enabled == True
+ - key.tags | length == 1
+ - key.tags['Hello'] == 'World'
+ - key.enable_key_rotation == false
+ - key.key_usage == 'ENCRYPT_DECRYPT'
+ - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT'
+ - key.grants | length == 0
+ - key.key_policies | length == 1
+ - key.key_policies[0].Id == 'key-default-1'
+ - key.description == ''
+ - key.multi_region == False
+
+ - name: Sleep to wait for updates to propagate
+ wait_for:
+ timeout: 45
+ - name: create a key (idempotence) - check mode
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ tags:
+ Hello: World
+ state: present
+ enabled: yes
+ register: key
+ check_mode: yes
+ - assert:
+ that:
+ - key is not changed
+
+ - name: create a key (idempotence)
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ tags:
+ Hello: World
+ state: present
+ enabled: yes
+ register: key
+ - assert:
+ that:
+ - key is not changed
+ - '"key_id" in key'
+ - key.key_id | length >= 36
+ - not key.key_id.startswith("arn:aws")
+ - '"key_arn" in key'
+ - key.key_arn.endswith(key.key_id)
+ - key.key_arn.startswith("arn:aws")
+ - key.key_state == "Enabled"
+ - key.enabled == True
+ - key.tags | length == 1
+ - key.tags['Hello'] == 'World'
+ - key.enable_key_rotation == false
+ - key.key_usage == 'ENCRYPT_DECRYPT'
+ - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT'
+ - key.grants | length == 0
+ - key.key_policies | length == 1
+ - key.key_policies[0].Id == 'key-default-1'
+ - key.description == ''
+ - key.multi_region == False
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Save IDs for later
+ set_fact:
+ kms_key_id: '{{ key.key_id }}'
+ kms_key_arn: '{{ key.key_arn }}'
+ - name: Enable key rotation - check mode
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ tags:
+ Hello: World
+ state: present
+ enabled: yes
+ enable_key_rotation: yes
+ register: key
+ check_mode: yes
+ - assert:
+ that:
+ - key.changed
+
+ - name: Enable key rotation
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ tags:
+ Hello: World
+ state: present
+ enabled: yes
+ enable_key_rotation: yes
+ register: key
+ - name: assert that key rotation is enabled
+ assert:
+ that:
+ - key is changed
+ - '"key_id" in key'
+ - key.key_id | length >= 36
+ - not key.key_id.startswith("arn:aws")
+ - '"key_arn" in key'
+ - key.key_arn.endswith(key.key_id)
+ - key.key_arn.startswith("arn:aws")
+ - key.key_state == "Enabled"
+ - key.enabled == True
+ - key.tags | length == 1
+ - key.tags['Hello'] == 'World'
+ - key.enable_key_rotation == True
+ - key.key_usage == 'ENCRYPT_DECRYPT'
+ - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT'
+ - key.grants | length == 0
+ - key.key_policies | length == 1
+ - key.key_policies[0].Id == 'key-default-1'
+ - key.description == ''
+
+ - name: Sleep to wait for updates to propagate
+ wait_for:
+ timeout: 45
+ - name: Enable key rotation (idempotence) - check mode
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ tags:
+ Hello: World
+ state: present
+ enabled: yes
+ enable_key_rotation: yes
+ register: key
+ check_mode: yes
+ - assert:
+ that:
+ - not key.changed
+
+ - name: Enable key rotation (idempotence)
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ tags:
+ Hello: World
+ state: present
+ enabled: yes
+ enable_key_rotation: yes
+ register: key
+ - assert:
+ that:
+ - not key is changed
+ - '"key_id" in key'
+ - key.key_id | length >= 36
+ - not key.key_id.startswith("arn:aws")
+ - '"key_arn" in key'
+ - key.key_arn.endswith(key.key_id)
+ - key.key_arn.startswith("arn:aws")
+ - key.key_state == "Enabled"
+ - key.enabled == True
+ - key.tags | length == 1
+ - key.tags['Hello'] == 'World'
+ - key.enable_key_rotation == True
+ - key.key_usage == 'ENCRYPT_DECRYPT'
+ - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT'
+ - key.grants | length == 0
+ - key.key_policies | length == 1
+ - key.key_policies[0].Id == 'key-default-1'
+ - key.description == ''
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Disable key - check mode
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ state: present
+ enabled: no
+ register: key
+ check_mode: yes
+ - assert:
+ that:
+ - key.changed
+
+ - name: Disable key
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ state: present
+ enabled: no
+ register: key
+ - name: assert that state is disabled
+ assert:
+ that:
+ - key is changed
+ - '"key_id" in key'
+ - key.key_id | length >= 36
+ - not key.key_id.startswith("arn:aws")
+ - '"key_arn" in key'
+ - key.key_arn.endswith(key.key_id)
+ - key.key_arn.startswith("arn:aws")
+ - key.key_state == "Disabled"
+ - key.enabled == False
+ - key.tags | length == 1
+ - key.tags['Hello'] == 'World'
+ - key.enable_key_rotation == True
+ - key.key_usage == 'ENCRYPT_DECRYPT'
+ - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT'
+ - key.grants | length == 0
+ - key.key_policies | length == 1
+ - key.key_policies[0].Id == 'key-default-1'
+ - key.description == ''
+
+ - name: Sleep to wait for updates to propagate
+ wait_for:
+ timeout: 45
+ - name: Disable key (idempotence) - check mode
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ state: present
+ enabled: no
+ register: key
+ check_mode: yes
+ - assert:
+ that:
+ - not key.changed
+
+ - name: Disable key (idempotence)
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ state: present
+ enabled: no
+ register: key
+ - assert:
+ that:
+ - not key.changed
+ - '"key_id" in key'
+ - key.key_id | length >= 36
+ - not key.key_id.startswith("arn:aws")
+ - '"key_arn" in key'
+ - key.key_arn.endswith(key.key_id)
+ - key.key_arn.startswith("arn:aws")
+ - key.key_state == "Disabled"
+ - key.enabled == False
+ - key.tags | length == 1
+ - key.tags['Hello'] == 'World'
+ - key.enable_key_rotation == True
+ - key.key_usage == 'ENCRYPT_DECRYPT'
+ - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT'
+ - key.grants | length == 0
+ - key.key_policies | length == 1
+ - key.key_policies[0].Id == 'key-default-1'
+ - key.description == ''
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Delete key - check mode
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ state: absent
+ register: key
+ check_mode: yes
+ - assert:
+ that:
+ - key is changed
+
+ - name: Delete key
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ state: absent
+ register: key
+ - name: Sleep to wait for updates to propagate
+ wait_for:
+ timeout: 45
+ - name: Assert that state is pending deletion
+ vars:
+ now_time: '{{ lookup("pipe", "date -u +%Y-%m-%d\ %H:%M:%S") }}'
+ deletion_time: '{{ key.deletion_date[:19] | to_datetime("%Y-%m-%dT%H:%M:%S")
+ }}'
+ assert:
+ that:
+ - key.changed
+ - '"key_id" in key'
+ - key.key_id | length >= 36
+ - not key.key_id.startswith("arn:aws")
+ - '"key_arn" in key'
+ - key.key_arn.endswith(key.key_id)
+ - key.key_arn.startswith("arn:aws")
+ - key.key_state == "PendingDeletion"
+ - key.enabled == False
+ - key.tags | length == 1
+ - key.tags['Hello'] == 'World'
+ - key.enable_key_rotation == False
+ - key.key_usage == 'ENCRYPT_DECRYPT'
+ - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT'
+ - key.grants | length == 0
+ - key.key_policies | length == 1
+ - key.key_policies[0].Id == 'key-default-1'
+ - key.description == ''
+      # Times won't be perfect, allow a 24-hour window
+ - (( deletion_time | to_datetime ) - ( now_time | to_datetime )).days <= 30
+ - (( deletion_time | to_datetime ) - ( now_time | to_datetime )).days >= 29
+
+ - name: Delete key (idempotence) - check mode
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ state: absent
+ register: key
+ check_mode: yes
+ - assert:
+ that:
+ - not key.changed
+
+ - name: Delete key (idempotence)
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ state: absent
+ register: key
+ - vars:
+ now_time: '{{ lookup("pipe", "date -u +%Y-%m-%d\ %H:%M:%S") }}'
+ deletion_time: '{{ key.deletion_date[:19] | to_datetime("%Y-%m-%dT%H:%M:%S")
+ }}'
+ assert:
+ that:
+ - not key.changed
+ - '"key_id" in key'
+ - key.key_id | length >= 36
+ - not key.key_id.startswith("arn:aws")
+ - '"key_arn" in key'
+ - key.key_arn.endswith(key.key_id)
+ - key.key_arn.startswith("arn:aws")
+ - key.key_state == "PendingDeletion"
+ - key.enabled == False
+ - key.tags | length == 1
+ - key.tags['Hello'] == 'World'
+ - key.enable_key_rotation == False
+ - key.key_usage == 'ENCRYPT_DECRYPT'
+ - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT'
+ - key.grants | length == 0
+ - key.key_policies | length == 1
+ - key.key_policies[0].Id == 'key-default-1'
+ - key.description == ''
+      # Times won't be perfect, allow a 24-hour window
+ - (( deletion_time | to_datetime ) - ( now_time | to_datetime )).days <= 30
+ - (( deletion_time | to_datetime ) - ( now_time | to_datetime )).days >= 29
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Cancel key deletion - check mode
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ state: present
+ register: key
+ check_mode: yes
+ - assert:
+ that:
+ - key.changed
+
+ - name: Cancel key deletion
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ state: present
+ register: key
+ - assert:
+ that:
+ - key.changed
+ - '"key_id" in key'
+ - key.key_id | length >= 36
+ - not key.key_id.startswith("arn:aws")
+ - '"key_arn" in key'
+ - key.key_arn.endswith(key.key_id)
+ - key.key_arn.startswith("arn:aws")
+ - key.key_state == "Enabled"
+ - key.enabled == True
+ - key.tags | length == 1
+ - key.tags['Hello'] == 'World'
+ - key.enable_key_rotation == True
+ - key.key_usage == 'ENCRYPT_DECRYPT'
+ - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT'
+ - key.grants | length == 0
+ - key.key_policies | length == 1
+ - key.key_policies[0].Id == 'key-default-1'
+ - key.description == ''
+ - "'deletion_date' not in key"
+
+ - name: Sleep to wait for updates to propagate
+ wait_for:
+ timeout: 45
+ - name: Cancel key deletion (idempotence) - check mode
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ state: present
+ register: key
+ check_mode: yes
+ - assert:
+ that:
+ - not key.changed
+
+ - name: Cancel key deletion (idempotence)
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ state: present
+ register: key
+ - assert:
+ that:
+ - not key.changed
+ - '"key_id" in key'
+ - key.key_id | length >= 36
+ - not key.key_id.startswith("arn:aws")
+ - '"key_arn" in key'
+ - key.key_arn.endswith(key.key_id)
+ - key.key_arn.startswith("arn:aws")
+ - key.key_state == "Enabled"
+ - key.enabled == True
+ - key.tags | length == 1
+ - key.tags['Hello'] == 'World'
+ - key.enable_key_rotation == True
+ - key.key_usage == 'ENCRYPT_DECRYPT'
+ - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT'
+ - key.grants | length == 0
+ - key.key_policies | length == 1
+ - key.key_policies[0].Id == 'key-default-1'
+ - key.description == ''
+ - "'deletion_date' not in key"
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: delete the key with a specific deletion window
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ state: absent
+ pending_window: 7
+ register: delete_kms
+ - name: Sleep to wait for updates to propagate
+ wait_for:
+ timeout: 45
+ - name: assert that state is pending deletion
+ vars:
+ now_time: '{{ lookup("pipe", "date -u +%Y-%m-%d\ %H:%M:%S") }}'
+ deletion_time: '{{ delete_kms.deletion_date[:19] | to_datetime("%Y-%m-%dT%H:%M:%S")
+ }}'
+ assert:
+ that:
+ - delete_kms.key_state == "PendingDeletion"
+ - delete_kms.changed
+      # Times won't be perfect, allow a 24-hour window
+ - (( deletion_time | to_datetime ) - ( now_time | to_datetime )).days <= 7
+ - (( deletion_time | to_datetime ) - ( now_time | to_datetime )).days >= 6
+
+ # ============================================================
+ # test different key usage and specs
+ - name: create kms key with different specs
+ aws_kms:
+ alias: '{{ kms_key_alias }}-diff-spec-usage'
+ purge_grants: yes
+ key_spec: ECC_NIST_P256
+ key_usage: SIGN_VERIFY
+ register: create_diff_kms
+ - name: Sleep to wait for updates to propagate
+ wait_for:
+ timeout: 45
+ - name: verify different specs on kms key
+ assert:
+ that:
+ - '"key_id" in create_diff_kms'
+ - create_diff_kms.key_id | length >= 36
+ - not create_diff_kms.key_id.startswith("arn:aws")
+ - '"key_arn" in create_diff_kms'
+ - create_diff_kms.key_arn.endswith(create_diff_kms.key_id)
+ - create_diff_kms.key_arn.startswith("arn:aws")
+ - create_diff_kms.key_usage == 'SIGN_VERIFY'
+ - create_diff_kms.customer_master_key_spec == 'ECC_NIST_P256'
+
+ always:
+ # ============================================================
+ # CLEAN-UP
+ - name: finish off by deleting keys
+ aws_kms:
+ state: absent
+ alias: '{{ item }}'
+ pending_window: 7
+ ignore_errors: true
+ loop:
+ - '{{ kms_key_alias }}'
+ - '{{ kms_key_alias }}-diff-spec-usage'
+ - '{{ kms_key_alias }}-check'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_tagging.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_tagging.yml
new file mode 100644
index 000000000..7d53b1dad
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_tagging.yml
@@ -0,0 +1,187 @@
+- block:
+ # ============================================================
+ # PREPARATION
+ #
+ # Get some information about who we are before starting our tests
+ # we'll need this as soon as we start working on the policies
+ - name: get ARN of calling user
+ aws_caller_info:
+ register: aws_caller_info
+ - name: create a key
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ tags:
+ Hello: World
+ state: present
+ enabled: yes
+ enable_key_rotation: no
+ register: key
+ - name: assert that state is enabled
+ assert:
+ that:
+ - key is changed
+ - '"key_id" in key'
+ - key.key_id | length >= 36
+ - not key.key_id.startswith("arn:aws")
+ - '"key_arn" in key'
+ - key.key_arn.endswith(key.key_id)
+ - key.key_arn.startswith("arn:aws")
+ - key.key_state == "Enabled"
+ - key.enabled == True
+ - key.tags | length == 1
+ - key.tags['Hello'] == 'World'
+ - key.enable_key_rotation == false
+ - key.key_usage == 'ENCRYPT_DECRYPT'
+ - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT'
+ - key.grants | length == 0
+ - key.key_policies | length == 1
+ - key.key_policies[0].Id == 'key-default-1'
+ - key.description == ''
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Tag encryption key
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ state: present
+ tags:
+ tag_one: tag_one
+ tag_two: tag_two
+ purge_tags: no
+ register: key
+ - name: Assert tags added
+ assert:
+ that:
+ - key.changed
+ - '"key_id" in key'
+ - key.key_id | length >= 36
+ - not key.key_id.startswith("arn:aws")
+ - '"key_arn" in key'
+ - key.key_arn.endswith(key.key_id)
+ - key.key_arn.startswith("arn:aws")
+ - key.key_state == "Enabled"
+ - key.enabled == True
+ - key.tags | length == 3
+ - key.tags['Hello'] == 'World'
+ - key.enable_key_rotation == false
+ - key.key_usage == 'ENCRYPT_DECRYPT'
+ - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT'
+ - key.grants | length == 0
+ - key.key_policies | length == 1
+ - key.key_policies[0].Id == 'key-default-1'
+ - key.description == ''
+ - "'tag_one' in key.tags"
+ - "'tag_two' in key.tags"
+
+ - name: Sleep to wait for updates to propagate
+ wait_for:
+ timeout: 45
+ - name: Modify tags - check mode
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ state: present
+ purge_tags: yes
+ tags:
+ tag_two: tag_two_updated
+ Tag Three: '{{ resource_prefix }}'
+ register: key
+ check_mode: yes
+ - assert:
+ that:
+ - key.changed
+
+ - name: Modify tags
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ state: present
+ purge_tags: yes
+ tags:
+ tag_two: tag_two_updated
+ Tag Three: '{{ resource_prefix }}'
+ register: key
+ - name: Assert tags correctly changed
+ assert:
+ that:
+ - key.changed
+ - '"key_id" in key'
+ - key.key_id | length >= 36
+ - not key.key_id.startswith("arn:aws")
+ - '"key_arn" in key'
+ - key.key_arn.endswith(key.key_id)
+ - key.key_arn.startswith("arn:aws")
+ - key.key_state == "Enabled"
+ - key.enabled == True
+ - key.tags | length == 2
+ - key.enable_key_rotation == false
+ - key.key_usage == 'ENCRYPT_DECRYPT'
+ - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT'
+ - key.grants | length == 0
+ - key.key_policies | length == 1
+ - key.key_policies[0].Id == 'key-default-1'
+ - key.description == ''
+ - "'tag_one' not in key.tags"
+ - "'tag_two' in key.tags"
+ - key.tags.tag_two == 'tag_two_updated'
+ - "'Tag Three' in key.tags"
+ - key.tags['Tag Three'] == resource_prefix
+
+ - name: Sleep to wait for updates to propagate
+ wait_for:
+ timeout: 45
+ - name: Modify tags (idempotence) - check mode
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ state: present
+ purge_tags: yes
+ tags:
+ tag_two: tag_two_updated
+ Tag Three: '{{ resource_prefix }}'
+ register: key
+ check_mode: yes
+ - assert:
+ that:
+ - not key.changed
+
+ - name: Modify tags (idempotence)
+ aws_kms:
+ alias: '{{ kms_key_alias }}'
+ state: present
+ purge_tags: yes
+ tags:
+ tag_two: tag_two_updated
+ Tag Three: '{{ resource_prefix }}'
+ register: key
+ - assert:
+ that:
+ - not key.changed
+ - '"key_id" in key'
+ - key.key_id | length >= 36
+ - not key.key_id.startswith("arn:aws")
+ - '"key_arn" in key'
+ - key.key_arn.endswith(key.key_id)
+ - key.key_arn.startswith("arn:aws")
+ - key.key_state == "Enabled"
+ - key.enabled == True
+ - key.tags | length == 2
+ - key.enable_key_rotation == false
+ - key.key_usage == 'ENCRYPT_DECRYPT'
+ - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT'
+ - key.grants | length == 0
+ - key.key_policies | length == 1
+ - key.key_policies[0].Id == 'key-default-1'
+ - key.description == ''
+ - "'tag_one' not in key.tags"
+ - "'tag_two' in key.tags"
+ - key.tags.tag_two == 'tag_two_updated'
+ - "'Tag Three' in key.tags"
+ - key.tags['Tag Three'] == resource_prefix
+
+ always:
+ # ============================================================
+ # CLEAN-UP
+ - name: finish off by deleting keys
+ aws_kms:
+ state: absent
+ alias: '{{ kms_key_alias }}'
+ pending_window: 7
+ ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/templates/console-policy-no-key-rotation.j2 b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/templates/console-policy-no-key-rotation.j2
new file mode 100644
index 000000000..0e019d202
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/templates/console-policy-no-key-rotation.j2
@@ -0,0 +1,81 @@
+{
+ "Id": "key-consolepolicy-3",
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "Enable IAM User Permissions",
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": "arn:aws:iam::{{ aws_caller_info.account }}:root"
+ },
+ "Action": "kms:*",
+ "Resource": "*"
+ },
+ {
+ "Sid": "Allow access for Key Administrators",
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": "{{ aws_caller_info.arn }}"
+ },
+ "Action": [
+ "kms:Create*",
+ "kms:Describe*",
+ "kms:Enable*",
+ "kms:List*",
+ "kms:Put*",
+ "kms:Update*",
+ "kms:Revoke*",
+ "kms:Disable*",
+ "kms:Get*",
+ "kms:Delete*",
+ "kms:TagResource",
+ "kms:UntagResource",
+ "kms:ScheduleKeyDeletion",
+ "kms:CancelKeyDeletion"
+ ],
+ "Resource": "*"
+ },
+ {
+ "Sid": "Allow use of the key",
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": "{{ aws_caller_info.arn }}"
+ },
+ "Action": [
+ "kms:Encrypt",
+ "kms:Decrypt",
+ "kms:ReEncrypt*",
+ "kms:GenerateDataKey*",
+ "kms:DescribeKey"
+ ],
+ "Resource": "*"
+ },
+ {
+ "Sid": "Allow attachment of persistent resources",
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": "{{ aws_caller_info.arn }}"
+ },
+ "Action": [
+ "kms:CreateGrant",
+ "kms:ListGrants",
+ "kms:RevokeGrant"
+ ],
+ "Resource": "*",
+ "Condition": {
+ "Bool": {
+ "kms:GrantIsForAWSResource": "true"
+ }
+ }
+ },
+ {
+ "Sid": "Disable access to key rotation status",
+ "Effect": "Deny",
+ "Principal": {
+ "AWS": "{{ aws_caller_info.arn }}"
+ },
+ "Action": "kms:GetKeyRotationStatus",
+ "Resource": "*"
+ }
+ ]
+}
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/templates/console-policy.j2 b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/templates/console-policy.j2
new file mode 100644
index 000000000..4b60ba588
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/templates/console-policy.j2
@@ -0,0 +1,72 @@
+{
+ "Id": "key-consolepolicy-3",
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "Enable IAM User Permissions",
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": "arn:aws:iam::{{ aws_caller_info.account }}:root"
+ },
+ "Action": "kms:*",
+ "Resource": "*"
+ },
+ {
+ "Sid": "Allow access for Key Administrators",
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": "{{ aws_caller_info.arn }}"
+ },
+ "Action": [
+ "kms:Create*",
+ "kms:Describe*",
+ "kms:Enable*",
+ "kms:List*",
+ "kms:Put*",
+ "kms:Update*",
+ "kms:Revoke*",
+ "kms:Disable*",
+ "kms:Get*",
+ "kms:Delete*",
+ "kms:TagResource",
+ "kms:UntagResource",
+ "kms:ScheduleKeyDeletion",
+ "kms:CancelKeyDeletion"
+ ],
+ "Resource": "*"
+ },
+ {
+ "Sid": "Allow use of the key",
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": "{{ aws_caller_info.arn }}"
+ },
+ "Action": [
+ "kms:Encrypt",
+ "kms:Decrypt",
+ "kms:ReEncrypt*",
+ "kms:GenerateDataKey*",
+ "kms:DescribeKey"
+ ],
+ "Resource": "*"
+ },
+ {
+ "Sid": "Allow attachment of persistent resources",
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": "{{ aws_caller_info.arn }}"
+ },
+ "Action": [
+ "kms:CreateGrant",
+ "kms:ListGrants",
+ "kms:RevokeGrant"
+ ],
+ "Resource": "*",
+ "Condition": {
+ "Bool": {
+ "kms:GrantIsForAWSResource": "true"
+ }
+ }
+ }
+ ]
+}
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/runme.sh b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/runme.sh
new file mode 100755
index 000000000..5b5b69fbd
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/runme.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+#
+# Beware: most of our tests here are run in parallel.
+# To add new tests you'll need to add a new host to the inventory and a matching
+# 'test_{{ inventory_hostname }}.yml' file in roles/aws_kms/tasks/
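+#
+# Illustrative local usage (an assumption, not part of CI): limit the run to a single
+# test host from the inventory, for example:
+#   ansible-playbook main.yml -i inventory --limit grants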
+
+
+set -eux
+
+export ANSIBLE_ROLES_PATH=../
+
+ansible-playbook main.yml -i inventory "$@"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda/aliases b/ansible_collections/amazon/aws/tests/integration/targets/lambda/aliases
new file mode 100644
index 000000000..f6bf003fe
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda/aliases
@@ -0,0 +1,4 @@
+cloud/aws
+
+lambda_execute
+lambda_info
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda/defaults/main.yml
new file mode 100644
index 000000000..63414fbfd
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda/defaults/main.yml
@@ -0,0 +1,13 @@
+# defaults file for lambda integration test
+# IAM role names have to be less than 64 characters
+# we hash the resource_prefix to get a shorter, unique string
+lambda_function_name: '{{ tiny_prefix }}'
+lambda_role_name: ansible-test-{{ tiny_prefix }}-lambda
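+# e.g. with a hypothetical tiny_prefix of 'a1b2c3d4' the role name above becomes
+# 'ansible-test-a1b2c3d4-lambda' (28 characters), comfortably under the 64-character limit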
+
+lambda_python_runtime: python3.9
+lambda_python_handler: mini_lambda.handler
+lambda_python_layers_names:
+ - "{{ tiny_prefix }}-layer-01"
+ - "{{ tiny_prefix }}-layer-02"
+lambda_function_name_with_layer: '{{ tiny_prefix }}-func-with-layer'
+lambda_function_name_with_multiple_layer: '{{ tiny_prefix }}-func-with-multiplelayer'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda/files/mini_lambda.py b/ansible_collections/amazon/aws/tests/integration/targets/lambda/files/mini_lambda.py
new file mode 100644
index 000000000..901f6b55a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda/files/mini_lambda.py
@@ -0,0 +1,48 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+
+
+def handler(event, context):
+ """
+    The handler function is invoked each time the Lambda function
+    is run.
+ """
+    # printing goes to the CloudWatch log, which lets us debug the lambda simply by
+    # finding the log entry.
+ print("got event:\n" + json.dumps(event))
+
+    # if the name parameter isn't present this can throw an exception,
+    # which results in a failure response chosen by AWS for the lambda -
+    # that is perfectly acceptable here.
+
+ name = event["name"]
+
+ # we can use environment variables as part of the configuration of the lambda
+ # which can change the behaviour of the lambda without needing a new upload
+
+ extra = os.environ.get("EXTRA_MESSAGE")
+ if extra is not None and len(extra) > 0:
+ greeting = "hello {0}. {1}".format(name, extra)
+ else:
+ greeting = "hello " + name
+
+ return {"message": greeting}
+
+
+def main():
+ """
+    This main function is not called during normal Lambda use.
+    It is here only for testing the handler locally.
+ """
+ event = {"name": "james"}
+ context = None
+ print(handler(event, context))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda/files/minimal_trust_policy.json b/ansible_collections/amazon/aws/tests/integration/targets/lambda/files/minimal_trust_policy.json
new file mode 100644
index 000000000..fb84ae9de
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda/files/minimal_trust_policy.json
@@ -0,0 +1,12 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "lambda.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda/meta/main.yml
new file mode 100644
index 000000000..409583a2c
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda/meta/main.yml
@@ -0,0 +1,5 @@
+dependencies:
+- role: setup_botocore_pip
+ vars:
+ botocore_version: 1.21.51
+- role: setup_remote_tmp_dir
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda/tasks/main.yml
new file mode 100644
index 000000000..443a8327f
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda/tasks/main.yml
@@ -0,0 +1,788 @@
+- name: set connection information for AWS modules and run tests
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ collections:
+ - community.general
+ block:
+ # Preparation
+ - name: create minimal lambda role
+ iam_role:
+ name: '{{ lambda_role_name }}'
+ assume_role_policy_document: '{{ lookup("file", "minimal_trust_policy.json")
+ }}'
+ create_instance_profile: false
+ managed_policies:
+ - arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess
+ register: iam_role
+ - name: wait 10 seconds for role to become available
+ pause:
+ seconds: 10
+ when: iam_role.changed
+ - name: move lambda into place for archive module
+ copy:
+ src: mini_lambda.py
+ dest: '{{ output_dir }}/mini_lambda.py'
+ mode: preserve
+ - name: bundle lambda into a zip
+ register: zip_res
+ archive:
+ format: zip
+ path: '{{ output_dir }}/mini_lambda.py'
+ dest: '{{ output_dir }}/mini_lambda.zip'
+
+ # Parameter tests
+ - name: test with no parameters
+ lambda:
+ register: result
+ ignore_errors: true
+ - name: assert failure when called with no parameters
+ assert:
+ that:
+ - result.failed
+ - 'result.msg.startswith("missing required arguments: ")'
+ - '"name" in result.msg'
+
+ - name: test with no parameters except state absent
+ lambda:
+ state: absent
+ register: result
+ ignore_errors: true
+ - name: assert failure when called with no parameters
+ assert:
+ that:
+ - result.failed
+ - 'result.msg.startswith("missing required arguments: name")'
+
+ - name: test with no role or handler
+ lambda:
+ name: ansible-testing-fake-should-not-be-created
+ runtime: '{{ lambda_python_runtime }}'
+ register: result
+ ignore_errors: true
+ - name: assert failure when called with no parameters
+ assert:
+ that:
+ - result.failed
+ - 'result.msg.startswith("state is present but all of the following are missing:
+ ")'
+ - '"handler" in result.msg'
+ - '"role" in result.msg'
+
+ - name: test execute lambda with no function arn or name
+ execute_lambda:
+ register: result
+ ignore_errors: true
+ - name: assert failure when called with no parameters
+ assert:
+ that:
+ - result.failed
+ - "result.msg == 'one of the following is required: name, function_arn'"
+
+ - name: test state=present with security group but no vpc
+ lambda:
+ name: '{{ lambda_function_name }}'
+ runtime: '{{ lambda_python_runtime }}'
+ role: '{{ lambda_role_name }}'
+ zip_file: '{{ zip_res.dest }}'
+ handler: '{{ omit }}'
+ description: '{{ omit }}'
+ vpc_subnet_ids: '{{ omit }}'
+ vpc_security_group_ids: sg-FA6E
+ environment_variables: '{{ omit }}'
+ dead_letter_arn: '{{ omit }}'
+ register: result
+ ignore_errors: true
+ - name: assert lambda fails with proper message
+ assert:
+ that:
+ - result is failed
+ - result.msg != "MODULE FAILURE"
+ - result.changed == False
+ - '"parameters are required together" in result.msg'
+
+ - name: test state=present with incomplete layers
+ lambda:
+ name: '{{ lambda_function_name }}'
+ runtime: '{{ lambda_python_runtime }}'
+ role: '{{ lambda_role_name }}'
+ handler: mini_lambda.handler
+ zip_file: '{{ zip_res.dest }}'
+ layers:
+ - layer_name: test-layer
+ check_mode: true
+ register: result
+ ignore_errors: true
+ - name: assert lambda fails with proper message
+ assert:
+ that:
+ - result is failed
+ - result is not changed
+ - '"parameters are required together: layer_name, version found in layers" in result.msg'
+
+ - name: test state=present with incomplete layers
+ lambda:
+ name: '{{ lambda_function_name }}'
+ runtime: '{{ lambda_python_runtime }}'
+ role: '{{ lambda_role_name }}'
+ handler: mini_lambda.handler
+ zip_file: '{{ zip_res.dest }}'
+ layers:
+ - layer_version_arn: 'arn:aws:lambda:us-east-2:123456789012:layer:blank-java-lib:7'
+ version: 9
+ check_mode: true
+ register: result
+ ignore_errors: true
+ - name: assert lambda fails with proper message
+ assert:
+ that:
+ - result is failed
+ - result is not changed
+ - '"parameters are mutually exclusive: version|layer_version_arn found in layers" in result.msg'
+
+ # Prepare minimal Lambda
+ - name: test state=present - upload the lambda (check mode)
+ lambda:
+ name: '{{ lambda_function_name }}'
+ runtime: '{{ lambda_python_runtime }}'
+ handler: '{{ lambda_python_handler }}'
+ role: '{{ lambda_role_name }}'
+ zip_file: '{{ zip_res.dest }}'
+ architecture: arm64
+ vars:
+ ansible_python_interpreter: '{{ botocore_virtualenv_interpreter }}'
+ register: result
+ check_mode: yes
+ - name: assert lambda upload succeeded
+ assert:
+ that:
+ - result.changed
+
+ - name: test state=present - upload the lambda
+ lambda:
+ name: '{{ lambda_function_name }}'
+ runtime: '{{ lambda_python_runtime }}'
+ handler: '{{ lambda_python_handler }}'
+ role: '{{ lambda_role_name }}'
+ zip_file: '{{ zip_res.dest }}'
+ architecture: arm64
+ vars:
+ ansible_python_interpreter: '{{ botocore_virtualenv_interpreter }}'
+ register: result
+ - name: assert lambda upload succeeded
+ assert:
+ that:
+ - result.changed
+ - result.configuration.tracing_config.mode == "PassThrough"
+ - result.configuration.architectures == ['arm64']
+
+ - include_tasks: tagging.yml
+
+ # Test basic operation of Uploaded lambda
+ - name: test lambda works (check mode)
+ execute_lambda:
+ name: '{{lambda_function_name}}'
+ payload:
+ name: Mr Ansible Tests
+ register: result
+ check_mode: yes
+ - name: assert check mode works correctly
+ assert:
+ that:
+ - result.changed
+ - "'result' not in result"
+
+ - name: test lambda works
+ execute_lambda:
+ name: '{{lambda_function_name}}'
+ payload:
+ name: Mr Ansible Tests
+ register: result
+ - name: assert lambda manages to respond as expected
+ assert:
+ that:
+ - result is not failed
+ - result.result.output.message == "hello Mr Ansible Tests"
+
+ # Test updating Lambda
+ - name: test lambda config updates (check mode)
+ lambda:
+ name: '{{lambda_function_name}}'
+ runtime: nodejs14.x
+ tracing_mode: Active
+ handler: '{{ lambda_python_handler }}'
+ role: '{{ lambda_role_name }}'
+ tags:
+ CamelCase: ACamelCaseValue
+ snake_case: a_snake_case_value
+ Spaced key: A value with spaces
+ register: update_result
+ check_mode: yes
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is not failed
+ - update_result.changed == True
+
+ - name: test lambda config updates
+ lambda:
+ name: '{{lambda_function_name}}'
+ runtime: nodejs14.x
+ tracing_mode: Active
+ handler: '{{ lambda_python_handler }}'
+ role: '{{ lambda_role_name }}'
+ tags:
+ CamelCase: ACamelCaseValue
+ snake_case: a_snake_case_value
+ Spaced key: A value with spaces
+ register: update_result
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is not failed
+ - update_result.changed == True
+ - update_result.configuration.runtime == 'nodejs14.x'
+ - update_result.configuration.tracing_config.mode == 'Active'
+
+ - name: test no changes are made with the same parameters repeated (check mode)
+ lambda:
+ name: '{{lambda_function_name}}'
+ runtime: nodejs14.x
+ tracing_mode: Active
+ handler: '{{ lambda_python_handler }}'
+ role: '{{ lambda_role_name }}'
+ tags:
+ CamelCase: ACamelCaseValue
+ snake_case: a_snake_case_value
+ Spaced key: A value with spaces
+ register: update_result
+ check_mode: yes
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is not failed
+ - update_result.changed == False
+
+ - name: test no changes are made with the same parameters repeated
+ lambda:
+ name: '{{lambda_function_name}}'
+ runtime: nodejs14.x
+ tracing_mode: Active
+ handler: '{{ lambda_python_handler }}'
+ role: '{{ lambda_role_name }}'
+ tags:
+ CamelCase: ACamelCaseValue
+ snake_case: a_snake_case_value
+ Spaced key: A value with spaces
+ register: update_result
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is not failed
+ - update_result.changed == False
+ - update_result.configuration.runtime == 'nodejs14.x'
+ - update_result.configuration.tracing_config.mode == 'Active'
+
+ - name: reset config updates for the following tests
+ lambda:
+ name: '{{lambda_function_name}}'
+ runtime: '{{ lambda_python_runtime }}'
+ tracing_mode: PassThrough
+ handler: '{{ lambda_python_handler }}'
+ role: '{{ lambda_role_name }}'
+ register: result
+ - name: assert that reset succeeded
+ assert:
+ that:
+ - result is not failed
+ - result.changed == True
+ - result.configuration.runtime == lambda_python_runtime
+ - result.configuration.tracing_config.mode == 'PassThrough'
+
+ # Test lambda_info
+ - name: lambda_info | Gather all infos for all lambda functions
+ lambda_info:
+ query: all
+ register: lambda_infos_all
+ check_mode: yes
+ vars:
+ ansible_python_interpreter: '{{ botocore_virtualenv_interpreter }}'
+  - name: lambda_info | Assert successful retrieval of all information 1
+ vars:
+ lambda_info: "{{ lambda_infos_all.functions | selectattr('function_name', 'eq', lambda_function_name) | first }}"
+ assert:
+ that:
+ - lambda_infos_all is not failed
+ - lambda_infos_all.functions | length > 0
+ - lambda_infos_all.functions | selectattr('function_name', 'eq', lambda_function_name) | length == 1
+ - lambda_info.runtime == lambda_python_runtime
+ - lambda_info.description == ""
+ - lambda_info.function_arn is defined
+ - lambda_info.handler == lambda_python_handler
+ - lambda_info.versions is defined
+ - lambda_info.aliases is defined
+ - lambda_info.policy is defined
+ - lambda_info.mappings is defined
+ - lambda_info.tags is defined
+ - lambda_info.architectures == ['arm64']
+
+  - name: lambda_info | Ensure default query value is 'config' when function name is omitted
+ lambda_info:
+ register: lambda_infos_query_config
+ check_mode: yes
+  - name: lambda_info | Assert successful retrieval of all information 2
+ vars:
+ lambda_info: "{{ lambda_infos_query_config.functions | selectattr('function_name', 'eq', lambda_function_name) | first }}"
+ assert:
+ that:
+ - lambda_infos_query_config is not failed
+ - lambda_infos_query_config.functions | length > 0
+ - lambda_infos_query_config.functions | selectattr('function_name', 'eq', lambda_function_name) | length == 1
+ - lambda_info.runtime == lambda_python_runtime
+ - lambda_info.description == ""
+ - lambda_info.function_arn is defined
+ - lambda_info.handler == lambda_python_handler
+ - lambda_info.versions is not defined
+ - lambda_info.aliases is not defined
+ - lambda_info.policy is not defined
+ - lambda_info.mappings is not defined
+ - lambda_info.tags is not defined
+
+  - name: lambda_info | Ensure default query value is 'all' when function name is specified
+ lambda_info:
+ name: '{{ lambda_function_name }}'
+ register: lambda_infos_query_all
+  - name: lambda_info | Assert successful retrieval of all information 3
+ assert:
+ that:
+ - lambda_infos_query_all is not failed
+ - lambda_infos_query_all.functions | length == 1
+ - lambda_infos_query_all.functions[0].versions|length > 0
+ - lambda_infos_query_all.functions[0].function_name is defined
+ - lambda_infos_query_all.functions[0].policy is defined
+ - lambda_infos_query_all.functions[0].aliases is defined
+ - lambda_infos_query_all.functions[0].mappings is defined
+ - lambda_infos_query_all.functions[0].tags is defined
+
+ - name: lambda_info | Gather version infos for given lambda function
+ lambda_info:
+ name: '{{ lambda_function_name }}'
+ query: versions
+ register: lambda_infos_versions
+  - name: lambda_info | Assert successful retrieval of versions information
+ assert:
+ that:
+ - lambda_infos_versions is not failed
+ - lambda_infos_versions.functions | length == 1
+ - lambda_infos_versions.functions[0].versions|length > 0
+ - lambda_infos_versions.functions[0].function_name == lambda_function_name
+ - lambda_infos_versions.functions[0].policy is undefined
+ - lambda_infos_versions.functions[0].aliases is undefined
+ - lambda_infos_versions.functions[0].mappings is undefined
+ - lambda_infos_versions.functions[0].tags is undefined
+
+ - name: lambda_info | Gather config infos for given lambda function
+ lambda_info:
+ name: '{{ lambda_function_name }}'
+ query: config
+ register: lambda_infos_config
+  - name: lambda_info | Assert successful retrieval of config information
+ assert:
+ that:
+ - lambda_infos_config is not failed
+ - lambda_infos_config.functions | length == 1
+ - lambda_infos_config.functions[0].function_name == lambda_function_name
+ - lambda_infos_config.functions[0].description is defined
+ - lambda_infos_config.functions[0].versions is undefined
+ - lambda_infos_config.functions[0].policy is undefined
+ - lambda_infos_config.functions[0].aliases is undefined
+ - lambda_infos_config.functions[0].mappings is undefined
+ - lambda_infos_config.functions[0].tags is undefined
+
+ - name: lambda_info | Gather policy infos for given lambda function
+ lambda_info:
+ name: '{{ lambda_function_name }}'
+ query: policy
+ register: lambda_infos_policy
+  - name: lambda_info | Assert successful retrieval of policy information
+ assert:
+ that:
+ - lambda_infos_policy is not failed
+ - lambda_infos_policy.functions | length == 1
+ - lambda_infos_policy.functions[0].policy is defined
+ - lambda_infos_policy.functions[0].versions is undefined
+ - lambda_infos_policy.functions[0].function_name == lambda_function_name
+ - lambda_infos_policy.functions[0].aliases is undefined
+ - lambda_infos_policy.functions[0].mappings is undefined
+ - lambda_infos_policy.functions[0].tags is undefined
+
+ - name: lambda_info | Gather aliases infos for given lambda function
+ lambda_info:
+ name: '{{ lambda_function_name }}'
+ query: aliases
+ register: lambda_infos_aliases
+  - name: lambda_info | Assert successful retrieval of aliases information
+ assert:
+ that:
+ - lambda_infos_aliases is not failed
+ - lambda_infos_aliases.functions | length == 1
+ - lambda_infos_aliases.functions[0].aliases is defined
+ - lambda_infos_aliases.functions[0].versions is undefined
+ - lambda_infos_aliases.functions[0].function_name == lambda_function_name
+ - lambda_infos_aliases.functions[0].policy is undefined
+ - lambda_infos_aliases.functions[0].mappings is undefined
+ - lambda_infos_aliases.functions[0].tags is undefined
+
+ - name: lambda_info | Gather mappings infos for given lambda function
+ lambda_info:
+ name: '{{ lambda_function_name }}'
+ query: mappings
+ register: lambda_infos_mappings
+  - name: lambda_info | Assert successful retrieval of mappings information
+ assert:
+ that:
+ - lambda_infos_mappings is not failed
+ - lambda_infos_mappings.functions | length == 1
+ - lambda_infos_mappings.functions[0].mappings is defined
+ - lambda_infos_mappings.functions[0].versions is undefined
+ - lambda_infos_mappings.functions[0].function_name == lambda_function_name
+ - lambda_infos_mappings.functions[0].aliases is undefined
+ - lambda_infos_mappings.functions[0].policy is undefined
+ - lambda_infos_mappings.functions[0].tags is undefined
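+
+  # The blocks above exercise each lambda_info query value in turn (all,
+  # config, versions, policy, aliases, mappings). A minimal sketch of consuming
+  # one of the registered results, using a field asserted above:
+  #
+  #   - debug:
+  #       msg: "{{ lambda_infos_config.functions[0].function_name }}"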
+
+ # More Lambda update tests
+ - name: test state=present with all nullable variables explicitly set to null
+ lambda:
+ name: '{{lambda_function_name}}'
+ runtime: '{{ lambda_python_runtime }}'
+ role: '{{ lambda_role_name }}'
+ zip_file: '{{zip_res.dest}}'
+ handler: '{{ lambda_python_handler }}'
+ description:
+ vpc_subnet_ids:
+ vpc_security_group_ids:
+ environment_variables:
+ dead_letter_arn:
+ register: result
+ - name: assert lambda remains as before
+ assert:
+ that:
+ - result is not failed
+ - result.changed == False
+
+ - name: test putting an environment variable changes lambda (check mode)
+ lambda:
+ name: '{{lambda_function_name}}'
+ runtime: '{{ lambda_python_runtime }}'
+ handler: '{{ lambda_python_handler }}'
+ role: '{{ lambda_role_name }}'
+ zip_file: '{{zip_res.dest}}'
+ environment_variables:
+ EXTRA_MESSAGE: I think you are great!!
+ register: result
+ check_mode: yes
+ - name: assert lambda upload succeeded
+ assert:
+ that:
+ - result is not failed
+ - result.changed == True
+
+ - name: test putting an environment variable changes lambda
+ lambda:
+ name: '{{lambda_function_name}}'
+ runtime: '{{ lambda_python_runtime }}'
+ handler: '{{ lambda_python_handler }}'
+ role: '{{ lambda_role_name }}'
+ zip_file: '{{zip_res.dest}}'
+ environment_variables:
+ EXTRA_MESSAGE: I think you are great!!
+ register: result
+ - name: assert lambda upload succeeded
+ assert:
+ that:
+ - result is not failed
+ - result.changed == True
+      - result.configuration.environment.variables.extra_message == "I think you are great!!"
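+
+  # Note: return values are snake_cased by the module, so the EXTRA_MESSAGE
+  # variable set above comes back as environment.variables.extra_message in
+  # result.configuration, while the function itself still reads the variable
+  # as EXTRA_MESSAGE (see mini_lambda.py).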
+
+ - name: test lambda works
+ execute_lambda:
+ name: '{{lambda_function_name}}'
+ payload:
+ name: Mr Ansible Tests
+ register: result
+ - name: assert lambda manages to respond as expected
+ assert:
+ that:
+ - result is not failed
+ - result.result.output.message == "hello Mr Ansible Tests. I think you are great!!"
+
+  # Deletion behaviour
+ - name: test state=absent (expect changed=True) (check mode)
+ lambda:
+ name: '{{lambda_function_name}}'
+ state: absent
+ register: result
+ check_mode: yes
+
+ - name: assert state=absent
+ assert:
+ that:
+ - result is not failed
+ - result is changed
+
+ - name: test state=absent (expect changed=True)
+ lambda:
+ name: '{{lambda_function_name}}'
+ state: absent
+ register: result
+
+ - name: assert state=absent
+ assert:
+ that:
+ - result is not failed
+ - result is changed
+
+ - name: test state=absent (expect changed=False) when already deleted (check mode)
+ lambda:
+ name: '{{lambda_function_name}}'
+ state: absent
+ register: result
+ check_mode: yes
+
+ - name: assert state=absent
+ assert:
+ that:
+ - result is not failed
+ - result is not changed
+
+ - name: test state=absent (expect changed=False) when already deleted
+ lambda:
+ name: '{{lambda_function_name}}'
+ state: absent
+ register: result
+
+ - name: assert state=absent
+ assert:
+ that:
+ - result is not failed
+ - result is not changed
+
+ # Parallel creations and deletions
+ - name: parallel lambda creation 1/4
+ lambda:
+ name: '{{lambda_function_name}}_1'
+ runtime: '{{ lambda_python_runtime }}'
+ handler: '{{ lambda_python_handler }}'
+ role: '{{ lambda_role_name }}'
+ zip_file: '{{zip_res.dest}}'
+ async: 1000
+ register: async_1
+ - name: parallel lambda creation 2/4
+ lambda:
+ name: '{{lambda_function_name}}_2'
+ runtime: '{{ lambda_python_runtime }}'
+ handler: '{{ lambda_python_handler }}'
+ role: '{{ lambda_role_name }}'
+ zip_file: '{{zip_res.dest}}'
+ async: 1000
+ register: async_2
+ - name: parallel lambda creation 3/4
+ lambda:
+ name: '{{lambda_function_name}}_3'
+ runtime: '{{ lambda_python_runtime }}'
+ handler: '{{ lambda_python_handler }}'
+ role: '{{ lambda_role_name }}'
+ zip_file: '{{zip_res.dest}}'
+ async: 1000
+ register: async_3
+ - name: parallel lambda creation 4/4
+ lambda:
+ name: '{{lambda_function_name}}_4'
+ runtime: '{{ lambda_python_runtime }}'
+ handler: '{{ lambda_python_handler }}'
+ role: '{{ lambda_role_name }}'
+ zip_file: '{{zip_res.dest}}'
+ register: result
+  - name: assert lambda creation succeeded
+ assert:
+ that:
+ - result is not failed
+ - name: parallel lambda deletion 1/4
+ lambda:
+ name: '{{lambda_function_name}}_1'
+ state: absent
+ zip_file: '{{zip_res.dest}}'
+ async: 1000
+ register: async_1
+ - name: parallel lambda deletion 2/4
+ lambda:
+ name: '{{lambda_function_name}}_2'
+ state: absent
+ zip_file: '{{zip_res.dest}}'
+ async: 1000
+ register: async_2
+ - name: parallel lambda deletion 3/4
+ lambda:
+ name: '{{lambda_function_name}}_3'
+ state: absent
+ zip_file: '{{zip_res.dest}}'
+ async: 1000
+ register: async_3
+ - name: parallel lambda deletion 4/4
+ lambda:
+ name: '{{lambda_function_name}}_4'
+ state: absent
+ zip_file: '{{zip_res.dest}}'
+ register: result
+  - name: assert lambda deletion succeeded
+ assert:
+ that:
+ - result is not failed
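+
+  # A sketch of a fully non-blocking variant of the pattern above: adding
+  # poll: 0 fires the module calls without waiting, and async_status is then
+  # used to wait for each registered job (names below mirror the tasks above).
+  #
+  #   - lambda:
+  #       name: '{{ lambda_function_name }}_1'
+  #       runtime: '{{ lambda_python_runtime }}'
+  #       handler: '{{ lambda_python_handler }}'
+  #       role: '{{ lambda_role_name }}'
+  #       zip_file: '{{ zip_res.dest }}'
+  #     async: 1000
+  #     poll: 0
+  #     register: async_1
+  #   - async_status:
+  #       jid: '{{ async_1.ansible_job_id }}'
+  #     register: job_1
+  #     until: job_1.finished
+  #     retries: 30
+  #     delay: 10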
+
+ # Test creation with layers
+ - name: Create temporary directory for testing
+ tempfile:
+ suffix: lambda
+ state: directory
+ register: test_dir
+
+ - name: Create python directory for lambda layer
+ file:
+ path: "{{ remote_tmp_dir }}/python"
+ state: directory
+
+ - name: Create lambda layer library
+ copy:
+ content: |
+ def hello():
+ print("Hello from the ansible amazon.aws lambda layer")
+ return 1
+ dest: "{{ remote_tmp_dir }}/python/lambda_layer.py"
+
+ - name: Create lambda layer archive
+ archive:
+ format: zip
+ path: "{{ remote_tmp_dir }}"
+ dest: "{{ remote_tmp_dir }}/lambda_layer.zip"
+
+ - name: Create lambda layer
+ lambda_layer:
+ name: "{{ lambda_python_layers_names[0] }}"
+ description: '{{ lambda_python_layers_names[0] }} lambda layer'
+ content:
+ zip_file: "{{ remote_tmp_dir }}/lambda_layer.zip"
+ register: first_layer
+
+ - name: Create another lambda layer
+ lambda_layer:
+ name: "{{ lambda_python_layers_names[1] }}"
+ description: '{{ lambda_python_layers_names[1] }} lambda layer'
+ content:
+ zip_file: "{{ remote_tmp_dir }}/lambda_layer.zip"
+ register: second_layer
+
+ - name: Create lambda function with layers
+ lambda:
+ name: '{{ lambda_function_name_with_layer }}'
+ runtime: '{{ lambda_python_runtime }}'
+ handler: '{{ lambda_python_handler }}'
+ role: '{{ lambda_role_name }}'
+ zip_file: '{{ zip_res.dest }}'
+ layers:
+ - layer_version_arn: "{{ first_layer.layer_versions.0.layer_version_arn }}"
+ register: result
+  - name: Validate that lambda function was created with expected properties
+ assert:
+ that:
+ - result is changed
+ - '"layers" in result.configuration'
+ - result.configuration.layers | length == 1
+ - result.configuration.layers.0.arn == first_layer.layer_versions.0.layer_version_arn
+
+ - name: Create lambda function with layers once again (validate idempotency)
+ lambda:
+ name: '{{ lambda_function_name_with_layer }}'
+ runtime: '{{ lambda_python_runtime }}'
+ handler: '{{ lambda_python_handler }}'
+ role: '{{ lambda_role_name }}'
+ zip_file: '{{ zip_res.dest }}'
+ layers:
+ - layer_version_arn: "{{ first_layer.layer_versions.0.layer_version_arn }}"
+ register: result
+  - name: Validate that no changes were made
+ assert:
+ that:
+ - result is not changed
+
+  - name: Create lambda function with multiple layers
+ lambda:
+ name: '{{ lambda_function_name_with_multiple_layer }}'
+ runtime: '{{ lambda_python_runtime }}'
+ handler: '{{ lambda_python_handler }}'
+ role: '{{ lambda_role_name }}'
+ zip_file: '{{ zip_res.dest }}'
+ layers:
+ - layer_version_arn: "{{ first_layer.layer_versions.0.layer_version_arn }}"
+ - layer_name: "{{ second_layer.layer_versions.0.layer_arn }}"
+ version: "{{ second_layer.layer_versions.0.version }}"
+ register: result
+  - name: Validate that lambda function was created with expected properties
+ assert:
+ that:
+ - result is changed
+ - '"layers" in result.configuration'
+ - result.configuration.layers | length == 2
+ - first_layer.layer_versions.0.layer_version_arn in lambda_layer_versions
+ - second_layer.layer_versions.0.layer_version_arn in lambda_layer_versions
+ vars:
+ lambda_layer_versions: "{{ result.configuration.layers | map(attribute='arn') | list }}"
+
+  - name: Create lambda function with multiple layers and changed layer order (idempotency)
+ lambda:
+ name: '{{ lambda_function_name_with_multiple_layer }}'
+ runtime: '{{ lambda_python_runtime }}'
+ handler: '{{ lambda_python_handler }}'
+ role: '{{ lambda_role_name }}'
+ zip_file: '{{ zip_res.dest }}'
+ layers:
+ - layer_version_arn: "{{ second_layer.layer_versions.0.layer_version_arn }}"
+ - layer_name: "{{ first_layer.layer_versions.0.layer_arn }}"
+ version: "{{ first_layer.layer_versions.0.version }}"
+ register: result
+  - name: Validate that no changes were made
+ assert:
+ that:
+ - result is not changed
+
+ always:
+
+ - name: Delete lambda layers
+ lambda_layer:
+ name: "{{ item }}"
+ version: -1
+ state: absent
+ ignore_errors: true
+ with_items: "{{ lambda_python_layers_names }}"
+
+ - name: ensure functions are absent at end of test
+ lambda:
+ name: '{{ item }}'
+ state: absent
+ ignore_errors: true
+ with_items:
+ - '{{ lambda_function_name }}'
+ - '{{ lambda_function_name }}_1'
+ - '{{ lambda_function_name }}_2'
+ - '{{ lambda_function_name }}_3'
+    - '{{ lambda_function_name }}_4'
+    - '{{ lambda_function_name_with_layer }}'
+    - '{{ lambda_function_name_with_multiple_layer }}'
+
+ - name: ensure role has been removed at end of test
+ iam_role:
+ name: '{{ lambda_role_name }}'
+ state: absent
+ ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda/tasks/tagging.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda/tasks/tagging.yml
new file mode 100644
index 000000000..135e83ff9
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda/tasks/tagging.yml
@@ -0,0 +1,246 @@
+- name: Tests relating to tagging lambda
+ vars:
+ first_tags:
+ Key with Spaces: Value with spaces
+ CamelCaseKey: CamelCaseValue
+ pascalCaseKey: pascalCaseValue
+ snake_case_key: snake_case_value
+ second_tags:
+ New Key with Spaces: Value with spaces
+ NewCamelCaseKey: CamelCaseValue
+ newPascalCaseKey: pascalCaseValue
+ new_snake_case_key: snake_case_value
+ third_tags:
+ Key with Spaces: Value with spaces
+ CamelCaseKey: CamelCaseValue
+ pascalCaseKey: pascalCaseValue
+ snake_case_key: snake_case_value
+ New Key with Spaces: Updated Value with spaces
+ final_tags:
+ Key with Spaces: Value with spaces
+ CamelCaseKey: CamelCaseValue
+ pascalCaseKey: pascalCaseValue
+ snake_case_key: snake_case_value
+ New Key with Spaces: Updated Value with spaces
+ NewCamelCaseKey: CamelCaseValue
+ newPascalCaseKey: pascalCaseValue
+ new_snake_case_key: snake_case_value
+ # Mandatory settings
+ module_defaults:
+ amazon.aws.lambda:
+ runtime: '{{ lambda_python_runtime }}'
+ handler: '{{ lambda_python_handler }}'
+ role: '{{ lambda_role_name }}'
+ block:
+
+ ###
+
+ - name: test adding tags to lambda (check mode)
+ lambda:
+ name: '{{ lambda_function_name }}'
+ tags: '{{ first_tags }}'
+ register: update_result
+ check_mode: yes
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is changed
+
+ - name: test adding tags to lambda
+ lambda:
+ name: '{{ lambda_function_name }}'
+ tags: '{{ first_tags }}'
+ register: update_result
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is changed
+ - update_result.tags == first_tags
+
+ - name: test adding tags to lambda - idempotency (check mode)
+ lambda:
+ name: '{{ lambda_function_name }}'
+ tags: '{{ first_tags }}'
+ register: update_result
+ check_mode: yes
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is not changed
+
+ - name: test adding tags to lambda - idempotency
+ lambda:
+ name: '{{ lambda_function_name }}'
+ tags: '{{ first_tags }}'
+ register: update_result
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is not changed
+ - update_result.tags == first_tags
+
+ ###
+
+ - name: test updating tags with purge on lambda (check mode)
+ lambda:
+ name: '{{ lambda_function_name }}'
+ tags: '{{ second_tags }}'
+ register: update_result
+ check_mode: yes
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is changed
+
+ - name: test updating tags with purge on lambda
+ lambda:
+ name: '{{ lambda_function_name }}'
+ tags: '{{ second_tags }}'
+ register: update_result
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is changed
+ - update_result.tags == second_tags
+
+ - name: test updating tags with purge on lambda - idempotency (check mode)
+ lambda:
+ name: '{{ lambda_function_name }}'
+ tags: '{{ second_tags }}'
+ register: update_result
+ check_mode: yes
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is not changed
+
+ - name: test updating tags with purge on lambda - idempotency
+ lambda:
+ name: '{{ lambda_function_name }}'
+ tags: '{{ second_tags }}'
+ register: update_result
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is not changed
+ - update_result.tags == second_tags
+
+ ###
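+  # purge_tags defaults to true, which is why second_tags completely replaced
+  # first_tags above; the tasks below pass purge_tags: false so third_tags are
+  # merged on top of the existing keys, ending up as final_tags.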
+
+ - name: test updating tags without purge on lambda (check mode)
+ lambda:
+ name: '{{ lambda_function_name }}'
+ tags: '{{ third_tags }}'
+ purge_tags: false
+ register: update_result
+ check_mode: yes
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is changed
+
+ - name: test updating tags without purge on lambda
+ lambda:
+ name: '{{ lambda_function_name }}'
+ tags: '{{ third_tags }}'
+ purge_tags: false
+ register: update_result
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is changed
+ - update_result.tags == final_tags
+
+ - name: test updating tags without purge on lambda - idempotency (check mode)
+ lambda:
+ name: '{{ lambda_function_name }}'
+ tags: '{{ third_tags }}'
+ purge_tags: false
+ register: update_result
+ check_mode: yes
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is not changed
+
+ - name: test updating tags without purge on lambda - idempotency
+ lambda:
+ name: '{{ lambda_function_name }}'
+ tags: '{{ third_tags }}'
+ purge_tags: false
+ register: update_result
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is not changed
+ - update_result.tags == final_tags
+
+ ###
+
+  - name: test lambda with no tags param (check mode)
+ lambda:
+ name: '{{ lambda_function_name }}'
+ register: update_result
+ check_mode: yes
+ - name: assert no change
+ assert:
+ that:
+ - update_result is not changed
+ - update_result.tags == final_tags
+
+
+  - name: test lambda with no tags param
+ lambda:
+ name: '{{ lambda_function_name }}'
+ register: update_result
+ - name: assert no change
+ assert:
+ that:
+ - update_result is not changed
+ - update_result.tags == final_tags
+
+ ###
+
+ - name: test removing tags from lambda (check mode)
+ lambda:
+ name: '{{ lambda_function_name }}'
+ tags: {}
+ register: update_result
+ check_mode: yes
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is changed
+
+ - name: test removing tags from lambda
+ lambda:
+ name: '{{ lambda_function_name }}'
+ tags: {}
+ register: update_result
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is changed
+ - update_result.tags == {}
+
+ - name: test removing tags from lambda - idempotency (check mode)
+ lambda:
+ name: '{{ lambda_function_name }}'
+ tags: {}
+ register: update_result
+ check_mode: yes
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is not changed
+
+ - name: test removing tags from lambda - idempotency
+ lambda:
+ name: '{{ lambda_function_name }}'
+ tags: {}
+ register: update_result
+ - name: assert that update succeeded
+ assert:
+ that:
+ - update_result is not changed
+ - update_result.tags == {}
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/aliases b/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/aliases
new file mode 100644
index 000000000..4ef4b2067
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/aliases
@@ -0,0 +1 @@
+cloud/aws
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/defaults/main.yml
new file mode 100644
index 000000000..692a4f015
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+# defaults file for lambda integration test
+# IAM role names have to be less than 64 characters
+# we hash the resource_prefix to get a shorter, unique string
+lambda_function_name: 'ansible-test-{{ tiny_prefix }}'
+lambda_role_name: 'ansible-test-{{ tiny_prefix }}-lambda'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/files/mini_lambda.py b/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/files/mini_lambda.py
new file mode 100644
index 000000000..901f6b55a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/files/mini_lambda.py
@@ -0,0 +1,48 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+
+
+def handler(event, context):
+ """
+ The handler function is the function which gets called each time
+ the lambda is run.
+ """
+ # printing goes to the cloudwatch log allowing us to simply debug the lambda if we can find
+ # the log entry.
+ print("got event:\n" + json.dumps(event))
+
+ # if the name parameter isn't present this can throw an exception
+ # which will result in an amazon chosen failure from the lambda
+ # which can be completely fine.
+
+ name = event["name"]
+
+ # we can use environment variables as part of the configuration of the lambda
+ # which can change the behaviour of the lambda without needing a new upload
+
+ extra = os.environ.get("EXTRA_MESSAGE")
+ if extra is not None and len(extra) > 0:
+ greeting = "hello {0}. {1}".format(name, extra)
+ else:
+ greeting = "hello " + name
+
+ return {"message": greeting}
+
+
+def main():
+ """
+ This main function will normally never be called during normal
+ lambda use. It is here for testing the lambda program only.
+ """
+ event = {"name": "james"}
+ context = None
+ print(handler(event, context))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/files/minimal_trust_policy.json b/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/files/minimal_trust_policy.json
new file mode 100644
index 000000000..fb84ae9de
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/files/minimal_trust_policy.json
@@ -0,0 +1,12 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "lambda.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/tasks/main.yml
new file mode 100644
index 000000000..9b264f50c
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/tasks/main.yml
@@ -0,0 +1,622 @@
+- name: set connection information for AWS modules and run tests
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ collections:
+ - community.general
+ block:
+ # ==============================================================
+ # Preparation
+ - name: create minimal lambda role
+ iam_role:
+ name: '{{ lambda_role_name }}'
+ assume_role_policy_document: '{{ lookup("file", "minimal_trust_policy.json") }}'
+ create_instance_profile: false
+ managed_policies:
+ - 'arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess'
+ register: iam_role
+ - name: wait 10 seconds for role to become available
+ pause:
+ seconds: 10
+ when: iam_role.changed
+ - name: move lambda into place for archive module
+ copy:
+ src: mini_lambda.py
+ dest: '{{ output_dir }}/mini_lambda.py'
+ mode: preserve
+ - name: bundle lambda into a zip
+ register: zip_res
+ archive:
+ format: zip
+ path: '{{ output_dir }}/mini_lambda.py'
+ dest: '{{ output_dir }}/mini_lambda.zip'
+
+ - name: Upload test lambda (version 1)
+ lambda:
+ name: '{{ lambda_function_name }}'
+ runtime: 'python3.7'
+ handler: 'mini_lambda.handler'
+ role: '{{ lambda_role_name }}'
+ zip_file: '{{ zip_res.dest }}'
+ register: lambda_a
+ - name: assert lambda upload succeeded
+ assert:
+ that:
+ - lambda_a is changed
+
+ - name: Update lambda (version 2)
+ lambda:
+ name: '{{ lambda_function_name }}'
+ runtime: 'python3.8'
+ handler: 'mini_lambda.handler'
+ role: '{{ lambda_role_name }}'
+ register: lambda_b
+ - name: assert that update succeeded
+ assert:
+ that:
+ - lambda_b is changed
+
+ - name: Update lambda (version 3 / LATEST)
+ lambda:
+ name: '{{ lambda_function_name }}'
+ runtime: 'python3.9'
+ handler: 'mini_lambda.handler'
+ role: '{{ lambda_role_name }}'
+ register: lambda_c
+ - name: assert that update succeeded
+ assert:
+ that:
+ - lambda_c is changed
+
+ - name: Store Lambda info
+ vars:
+ _full_arn: '{{ lambda_a.configuration.function_arn }}'
+ set_fact:
+ lambda_arn: '{{ ":".join(_full_arn.split(":")[:-1]) }}'
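+  # The split/join above just drops everything after the last ':' in the
+  # returned function ARN, giving a prefix that the alias_arn assertions below
+  # can match with startswith(), alongside endswith() on the alias name.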
+
+ # ==============================================================
+ # Creation of an alias
+ - name: Create an alias (check mode)
+ lambda_alias:
+ state: present
+ function_name: '{{ lambda_function_name }}'
+ name: Testing
+ check_mode: True
+ register: create_alias
+ - name: Check changed
+ assert:
+ that:
+ - create_alias is changed
+
+ - name: Create an alias
+ lambda_alias:
+ state: present
+ function_name: '{{ lambda_function_name }}'
+ name: Testing
+ register: create_alias
+ - name: Check changed and returned values
+ assert:
+ that:
+ - create_alias is changed
+ - '"alias_arn" in create_alias'
+ - create_alias.alias_arn.startswith(lambda_arn)
+ - create_alias.alias_arn.endswith("Testing")
+ - '"description" in create_alias'
+ - create_alias.description == ""
+ - '"function_version" in create_alias'
+ - create_alias.function_version == "$LATEST"
+ - '"name" in create_alias'
+ - create_alias.name == "Testing"
+ - '"revision_id" in create_alias'
+ # The revision_id doesn't line up with the revision IDs of the versions
+ # It will change any time the alias is updated
+
+ - name: Create an alias - idempotency (check mode)
+ lambda_alias:
+ state: present
+ function_name: '{{ lambda_function_name }}'
+ name: Testing
+ check_mode: True
+ register: create_alias
+ - name: Check not changed
+ assert:
+ that:
+ - create_alias is not changed
+
+  - name: Create an alias - idempotency
+ lambda_alias:
+ state: present
+ function_name: '{{ lambda_function_name }}'
+ name: Testing
+ register: create_alias
+ - name: Check not changed
+ assert:
+ that:
+ - create_alias is not changed
+ - '"alias_arn" in create_alias'
+ - create_alias.alias_arn.startswith(lambda_arn)
+ - create_alias.alias_arn.endswith("Testing")
+ - '"description" in create_alias'
+ - create_alias.description == ""
+ - '"function_version" in create_alias'
+ - create_alias.function_version == "$LATEST"
+ - '"name" in create_alias'
+ - create_alias.name == "Testing"
+ - '"revision_id" in create_alias'
+ # The revision_id doesn't line up with the revision IDs of the versions
+ # It will change any time the alias is updated
+
+ # ==============================================================
+ # Update description of an alias when none set to start
+ - name: Update an alias description (check mode)
+ lambda_alias:
+ state: present
+ function_name: '{{ lambda_function_name }}'
+ name: Testing
+ description: 'Description 1'
+ check_mode: True
+ register: update_alias_description
+ - name: Check changed
+ assert:
+ that:
+ - update_alias_description is changed
+
+ - name: Update an alias description
+ lambda_alias:
+ state: present
+ function_name: '{{ lambda_function_name }}'
+ name: Testing
+ description: 'Description 1'
+ register: update_alias_description
+ - name: Check changed and returned values
+ assert:
+ that:
+ - update_alias_description is changed
+ - '"alias_arn" in update_alias_description'
+ - update_alias_description.alias_arn.startswith(lambda_arn)
+ - update_alias_description.alias_arn.endswith("Testing")
+ - '"description" in update_alias_description'
+ - update_alias_description.description == "Description 1"
+ - '"function_version" in update_alias_description'
+ - update_alias_description.function_version == "$LATEST"
+ - '"name" in update_alias_description'
+ - update_alias_description.name == "Testing"
+ - '"revision_id" in update_alias_description'
+ # The revision_id doesn't line up with the revision IDs of the versions
+ # It will change any time the alias is updated
+
+ - name: Update an alias description - idempotency (check mode)
+ lambda_alias:
+ state: present
+ function_name: '{{ lambda_function_name }}'
+ name: Testing
+ description: 'Description 1'
+ check_mode: True
+ register: update_alias_description
+ - name: Check not changed
+ assert:
+ that:
+ - update_alias_description is not changed
+
+  - name: Update an alias description - idempotency
+ lambda_alias:
+ state: present
+ function_name: '{{ lambda_function_name }}'
+ name: Testing
+ description: 'Description 1'
+ register: update_alias_description
+ - name: Check not changed
+ assert:
+ that:
+ - update_alias_description is not changed
+ - '"alias_arn" in update_alias_description'
+ - update_alias_description.alias_arn.startswith(lambda_arn)
+ - update_alias_description.alias_arn.endswith("Testing")
+ - '"description" in update_alias_description'
+ - update_alias_description.description == "Description 1"
+ - '"function_version" in update_alias_description'
+ - update_alias_description.function_version == "$LATEST"
+ - '"name" in update_alias_description'
+ - update_alias_description.name == "Testing"
+ - '"revision_id" in update_alias_description'
+ # The revision_id doesn't line up with the revision IDs of the versions
+ # It will change any time the alias is updated
+
+ # ==============================================================
+ # Update description of an alias when one set to start
+ - name: Update an alias description again (check mode)
+ lambda_alias:
+ state: present
+ function_name: '{{ lambda_function_name }}'
+ name: Testing
+ description: 'description 2'
+ check_mode: True
+ register: update_alias_description
+ - name: Check changed
+ assert:
+ that:
+ - update_alias_description is changed
+
+ - name: Update an alias description again
+ lambda_alias:
+ state: present
+ function_name: '{{ lambda_function_name }}'
+ name: Testing
+ description: 'description 2'
+ register: update_alias_description
+ - name: Check changed and returned values
+ assert:
+ that:
+ - update_alias_description is changed
+ - '"alias_arn" in update_alias_description'
+ - update_alias_description.alias_arn.startswith(lambda_arn)
+ - update_alias_description.alias_arn.endswith("Testing")
+ - '"description" in update_alias_description'
+ - update_alias_description.description == "description 2"
+ - '"function_version" in update_alias_description'
+ - update_alias_description.function_version == "$LATEST"
+ - '"name" in update_alias_description'
+ - update_alias_description.name == "Testing"
+ - '"revision_id" in update_alias_description'
+ # The revision_id doesn't line up with the revision IDs of the versions
+ # It will change any time the alias is updated
+
+ # ==============================================================
+ # Update version of an alias
+ - name: Update an alias version (check mode)
+ lambda_alias:
+ state: present
+ function_name: '{{ lambda_function_name }}'
+ name: Testing
+ function_version: 1
+ check_mode: True
+ register: update_alias_version
+ - name: Check changed
+ assert:
+ that:
+ - update_alias_version is changed
+
+ - name: Update an alias version
+ lambda_alias:
+ state: present
+ function_name: '{{ lambda_function_name }}'
+ name: Testing
+ function_version: 1
+ register: update_alias_version
+ - name: Check changed and returned values
+ assert:
+ that:
+ - update_alias_version is changed
+ - '"alias_arn" in update_alias_version'
+ - update_alias_version.alias_arn.startswith(lambda_arn)
+ - update_alias_version.alias_arn.endswith("Testing")
+ - '"description" in update_alias_version'
+ - update_alias_version.description == "description 2"
+ - '"function_version" in update_alias_version'
+ - update_alias_version.function_version == "1"
+ - '"name" in update_alias_version'
+ - update_alias_version.name == "Testing"
+ - '"revision_id" in update_alias_version'
+ # The revision_id doesn't line up with the revision IDs of the versions
+ # It will change any time the alias is updated
+
+ - name: Update an alias version - idempotency (check mode)
+ lambda_alias:
+ state: present
+ function_name: '{{ lambda_function_name }}'
+ name: Testing
+ function_version: 1
+ check_mode: True
+ register: update_alias_version
+ - name: Check not changed
+ assert:
+ that:
+ - update_alias_version is not changed
+
+  - name: Update an alias version - idempotency
+ lambda_alias:
+ state: present
+ function_name: '{{ lambda_function_name }}'
+ name: Testing
+ function_version: 1
+ register: update_alias_version
+ - name: Check not changed
+ assert:
+ that:
+ - update_alias_version is not changed
+ - '"alias_arn" in update_alias_version'
+ - update_alias_version.alias_arn.startswith(lambda_arn)
+ - update_alias_version.alias_arn.endswith("Testing")
+ - '"description" in update_alias_version'
+ - update_alias_version.description == "description 2"
+ - '"function_version" in update_alias_version'
+ - update_alias_version.function_version == "1"
+ - '"name" in update_alias_version'
+ - update_alias_version.name == "Testing"
+ - '"revision_id" in update_alias_version'
+ # The revision_id doesn't line up with the revision IDs of the versions
+ # It will change any time the alias is updated
+
+ - name: Update an alias version to implied LATEST (check mode)
+ lambda_alias:
+ state: present
+ function_name: '{{ lambda_function_name }}'
+ name: Testing
+ # docs state that when not defined defaults to LATEST
+ #function_version: 1
+ check_mode: True
+ register: update_alias_version
+ - name: Check changed
+ assert:
+ that:
+ - update_alias_version is changed
+
+ - name: Update an alias version to implied LATEST
+ lambda_alias:
+ state: present
+ function_name: '{{ lambda_function_name }}'
+ name: Testing
+ # docs state that when not defined defaults to LATEST
+ #function_version: 1
+ register: update_alias_version
+ - name: Check changed and returned values
+ assert:
+ that:
+ - update_alias_version is changed
+ - '"alias_arn" in update_alias_version'
+ - update_alias_version.alias_arn.startswith(lambda_arn)
+ - update_alias_version.alias_arn.endswith("Testing")
+ - '"description" in update_alias_version'
+ - update_alias_version.description == "description 2"
+ - '"function_version" in update_alias_version'
+ - update_alias_version.function_version == "$LATEST"
+ - '"name" in update_alias_version'
+ - update_alias_version.name == "Testing"
+ - '"revision_id" in update_alias_version'
+ # The revision_id doesn't line up with the revision IDs of the versions
+ # It will change any time the alias is updated
+
+ # Make sure that 0 also causes a change
+ - name: Update an alias version
+ lambda_alias:
+ state: present
+ function_name: '{{ lambda_function_name }}'
+ name: Testing
+ function_version: 1
+ register: update_alias_version
+  - name: Check changed and returned values
+ assert:
+ that:
+ - update_alias_version is changed
+ - '"alias_arn" in update_alias_version'
+ - update_alias_version.alias_arn.startswith(lambda_arn)
+ - update_alias_version.alias_arn.endswith("Testing")
+ - '"description" in update_alias_version'
+ - update_alias_version.description == "description 2"
+ - '"function_version" in update_alias_version'
+ - update_alias_version.function_version == "1"
+ - '"name" in update_alias_version'
+ - update_alias_version.name == "Testing"
+ - '"revision_id" in update_alias_version'
+ # The revision_id doesn't line up with the revision IDs of the versions
+ # It will change any time the alias is updated
+
+ - name: Update an alias version to explicit LATEST with 0 (check mode)
+ lambda_alias:
+ state: present
+ function_name: '{{ lambda_function_name }}'
+ name: Testing
+ function_version: 0
+ check_mode: True
+ register: update_alias_version
+ - name: Check changed
+ assert:
+ that:
+ - update_alias_version is changed
+
+ - name: Update an alias version to explicit LATEST with 0
+ lambda_alias:
+ state: present
+ function_name: '{{ lambda_function_name }}'
+ name: Testing
+ function_version: 0
+ register: update_alias_version
+ - name: Check changed and returned values
+ assert:
+ that:
+ - update_alias_version is changed
+ - '"alias_arn" in update_alias_version'
+ - update_alias_version.alias_arn.startswith(lambda_arn)
+ - update_alias_version.alias_arn.endswith("Testing")
+ - '"description" in update_alias_version'
+ - update_alias_version.description == "description 2"
+ - '"function_version" in update_alias_version'
+ - update_alias_version.function_version == "$LATEST"
+ - '"name" in update_alias_version'
+ - update_alias_version.name == "Testing"
+ - '"revision_id" in update_alias_version'
+ # The revision_id doesn't line up with the revision IDs of the versions
+ # It will change any time the alias is updated
+
+ - name: Update an alias version to explicit LATEST with 0 - idempotency (check mode)
+ lambda_alias:
+ state: present
+ function_name: '{{ lambda_function_name }}'
+ name: Testing
+ function_version: 0
+ check_mode: True
+ register: update_alias_version
+  - name: Check not changed
+ assert:
+ that:
+ - update_alias_version is not changed
+
+  - name: Update an alias version to explicit LATEST with 0 - idempotency
+ lambda_alias:
+ state: present
+ function_name: '{{ lambda_function_name }}'
+ name: Testing
+ function_version: 0
+ register: update_alias_version
+  - name: Check not changed and returned values
+ assert:
+ that:
+ - update_alias_version is not changed
+ - '"alias_arn" in update_alias_version'
+ - update_alias_version.alias_arn.startswith(lambda_arn)
+ - update_alias_version.alias_arn.endswith("Testing")
+ - '"description" in update_alias_version'
+ - update_alias_version.description == "description 2"
+ - '"function_version" in update_alias_version'
+ - update_alias_version.function_version == "$LATEST"
+ - '"name" in update_alias_version'
+ - update_alias_version.name == "Testing"
+ - '"revision_id" in update_alias_version'
+ # The revision_id doesn't line up with the revision IDs of the versions
+ # It will change any time the alias is updated
+
+ # ==============================================================
+ # Creation of an alias with all options
+ - name: Create an alias with all options (check mode)
+ lambda_alias:
+ state: present
+ function_name: '{{ lambda_function_name }}'
+ description: 'Hello world'
+ name: stable
+ function_version: 1
+ check_mode: True
+ register: create_alias
+ - name: Check changed
+ assert:
+ that:
+ - create_alias is changed
+
+ - name: Create an alias with all options
+ lambda_alias:
+ state: present
+ function_name: '{{ lambda_function_name }}'
+ description: 'Hello world'
+ name: stable
+ function_version: 1
+ register: create_alias
+ - name: Check changed and returned values
+ assert:
+ that:
+ - create_alias is changed
+ - '"alias_arn" in create_alias'
+ - create_alias.alias_arn.startswith(lambda_arn)
+ - create_alias.alias_arn.endswith("stable")
+ - '"description" in create_alias'
+ - create_alias.description == "Hello world"
+ - '"function_version" in create_alias'
+ - create_alias.function_version == "1"
+ - '"name" in create_alias'
+ - create_alias.name == "stable"
+ - '"revision_id" in create_alias'
+ # The revision_id doesn't line up with the revision IDs of the versions
+ # It will change any time the alias is updated
+
+ - name: Create an alias with all options - idempotency (check mode)
+ lambda_alias:
+ state: present
+ function_name: '{{ lambda_function_name }}'
+ description: 'Hello world'
+ name: stable
+ function_version: 1
+ check_mode: True
+ register: create_alias
+ - name: Check not changed
+ assert:
+ that:
+ - create_alias is not changed
+
+  - name: Create an alias with all options - idempotency
+ lambda_alias:
+ state: present
+ function_name: '{{ lambda_function_name }}'
+ description: 'Hello world'
+ name: stable
+ function_version: 1
+ register: create_alias
+ - name: Check not changed
+ assert:
+ that:
+ - create_alias is not changed
+ - '"alias_arn" in create_alias'
+ - create_alias.alias_arn.startswith(lambda_arn)
+ - create_alias.alias_arn.endswith("stable")
+ - '"description" in create_alias'
+ - create_alias.description == "Hello world"
+ - '"function_version" in create_alias'
+ - create_alias.function_version == "1"
+ - '"name" in create_alias'
+ - create_alias.name == "stable"
+ - '"revision_id" in create_alias'
+ # The revision_id doesn't line up with the revision IDs of the versions
+ # It will change any time the alias is updated
+
+ # ==============================================================
+ # Deletion of an alias
+ - name: Delete an alias (check mode)
+ lambda_alias:
+ state: absent
+ function_name: '{{ lambda_function_name }}'
+ name: Testing
+ check_mode: True
+ register: delete_alias
+ - name: Check changed
+ assert:
+ that:
+ - delete_alias is changed
+
+ - name: Delete an alias
+ lambda_alias:
+ state: absent
+ function_name: '{{ lambda_function_name }}'
+ name: Testing
+ register: delete_alias
+ - name: Check changed
+ assert:
+ that:
+ - delete_alias is changed
+
+ - name: Delete an alias - idempotency (check mode)
+ lambda_alias:
+ state: absent
+ function_name: '{{ lambda_function_name }}'
+ name: Testing
+ check_mode: True
+ register: delete_alias
+ - name: Check not changed
+ assert:
+ that:
+ - delete_alias is not changed
+
+  - name: Delete an alias - idempotency
+ lambda_alias:
+ state: absent
+ function_name: '{{ lambda_function_name }}'
+ name: Testing
+ register: delete_alias
+ - name: Check not changed
+ assert:
+ that:
+ - delete_alias is not changed
+
+ # ==============================================================
+ # Cleanup
+ always:
+ - name: ensure function is absent at end of test
+ lambda:
+ name: '{{lambda_function_name}}'
+ state: absent
+ ignore_errors: true
+ - name: ensure role has been removed at end of test
+ iam_role:
+ name: '{{ lambda_role_name }}'
+ state: absent
+ delete_instance_profile: True
+ ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/aliases b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/aliases
new file mode 100644
index 000000000..4ef4b2067
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/aliases
@@ -0,0 +1 @@
+cloud/aws
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/defaults/main.yml
new file mode 100644
index 000000000..200b6b4ba
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/defaults/main.yml
@@ -0,0 +1,10 @@
+# defaults file for lambda integration test
+# IAM role names have to be less than 64 characters
+# we hash the resource_prefix to get a shorter, unique string
+lambda_function_name: 'test-lambda-{{ tiny_prefix }}'
+lambda_role_name: ansible-test-{{ tiny_prefix }}-lambda
+
+dynamodb_table_name: ansible-test-{{ tiny_prefix }}
+
+lambda_python_runtime: python3.9
+lambda_python_handler: mini_lambda.handler
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/files/mini_lambda.py b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/files/mini_lambda.py
new file mode 100644
index 000000000..901f6b55a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/files/mini_lambda.py
@@ -0,0 +1,48 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+
+
+def handler(event, context):
+ """
+ The handler function is the function which gets called each time
+ the lambda is run.
+ """
+ # printing goes to the cloudwatch log allowing us to simply debug the lambda if we can find
+ # the log entry.
+ print("got event:\n" + json.dumps(event))
+
+ # if the name parameter isn't present this can throw an exception
+ # which will result in an amazon chosen failure from the lambda
+ # which can be completely fine.
+
+ name = event["name"]
+
+ # we can use environment variables as part of the configuration of the lambda
+ # which can change the behaviour of the lambda without needing a new upload
+
+ extra = os.environ.get("EXTRA_MESSAGE")
+ if extra is not None and len(extra) > 0:
+ greeting = "hello {0}. {1}".format(name, extra)
+ else:
+ greeting = "hello " + name
+
+ return {"message": greeting}
+
+
+def main():
+ """
+ This main function will normally never be called during normal
+ lambda use. It is here for testing the lambda program only.
+ """
+ event = {"name": "james"}
+ context = None
+ print(handler(event, context))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/files/minimal_trust_policy.json b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/files/minimal_trust_policy.json
new file mode 100644
index 000000000..fb84ae9de
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/files/minimal_trust_policy.json
@@ -0,0 +1,12 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "lambda.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/meta/main.yml
new file mode 100644
index 000000000..463f90ed0
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/meta/main.yml
@@ -0,0 +1,5 @@
+dependencies:
+- role: setup_remote_tmp_dir
+- role: setup_botocore_pip
+ vars:
+ botocore_version: 1.21.51 \ No newline at end of file
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/main.yml
new file mode 100644
index 000000000..349ee41ac
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/main.yml
@@ -0,0 +1,117 @@
+- name: set connection information for AWS modules and run tests
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ collections:
+ - community.general
+ block:
+
+  - name: Set up test resources
+ import_tasks: setup.yml
+
+# TEST CREATE LAMBDA EVENT ========================================================================================
+ - name: Create DynamoDB stream event mapping (trigger) - check_mode
+ amazon.aws.lambda_event:
+ state: present
+ event_source: stream
+ function_arn: '{{ lambda_function_arn }}'
+ source_params:
+ source_arn: '{{ dynamo_stream_arn }}'
+ enabled: True
+ batch_size: 500
+ starting_position: LATEST
+ function_response_types:
+ - ReportBatchItemFailures
+ check_mode: true
+ register: create_lambda_event_result
+
+ - assert:
+ that:
+ - create_lambda_event_result is changed
+ - create_lambda_event_result is not failed
+ - '"lambda:CreateEventSourceMapping" not in create_lambda_event_result.resource_actions'
+
+ - name: Create DynamoDB stream event mapping (trigger)
+ amazon.aws.lambda_event:
+ state: present
+ event_source: stream
+ function_arn: '{{ lambda_function_arn }}'
+ source_params:
+ source_arn: '{{ dynamo_stream_arn }}'
+ enabled: True
+ batch_size: 500
+ starting_position: LATEST
+ function_response_types:
+ - ReportBatchItemFailures
+ register: create_lambda_event_result
+
+ - name: Get info on above trigger
+ command: 'aws lambda get-event-source-mapping --uuid {{ create_lambda_event_result.events.uuid }}'
+ environment:
+ AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
+ AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
+ AWS_SESSION_TOKEN: "{{ security_token | default('') }}"
+ AWS_DEFAULT_REGION: "{{ aws_region }}"
+ register: lambda_function_details
+
+ - name: convert it to an object
+ set_fact:
+ lambda_function_details_obj: "{{ lambda_function_details.stdout | from_json }}"
+
+ - assert:
+ that:
+ - lambda_function_details_obj.FunctionResponseTypes is defined
+ - lambda_function_details_obj.FunctionResponseTypes | length > 0
+ - lambda_function_details_obj.FunctionResponseTypes[0] == "ReportBatchItemFailures"
+ - '"lambda:CreateEventSourceMapping" in create_lambda_event_result.resource_actions'
+
+ - name: Create DynamoDB stream event mapping (trigger) - check_mode - idempotency
+ amazon.aws.lambda_event:
+ state: present
+ event_source: stream
+ function_arn: '{{ lambda_function_arn }}'
+ source_params:
+ source_arn: '{{ dynamo_stream_arn }}'
+ enabled: True
+ batch_size: 500
+ starting_position: LATEST
+ function_response_types:
+ - ReportBatchItemFailures
+ check_mode: true
+ register: create_lambda_event_result
+
+ - assert:
+ that:
+ - create_lambda_event_result is not changed
+ - create_lambda_event_result is not failed
+ - '"lambda:CreateEventSourceMapping" not in create_lambda_event_result.resource_actions'
+
+ - name: Create DynamoDB stream event mapping (trigger) - idempotency
+ amazon.aws.lambda_event:
+ state: present
+ event_source: stream
+ function_arn: '{{ lambda_function_arn }}'
+ source_params:
+ source_arn: '{{ dynamo_stream_arn }}'
+ enabled: True
+ batch_size: 500
+ starting_position: LATEST
+ function_response_types:
+ - ReportBatchItemFailures
+ register: create_lambda_event_result
+
+ - assert:
+ that:
+ - create_lambda_event_result is not changed
+ - create_lambda_event_result is not failed
+ - '"lambda:CreateEventSourceMapping" not in create_lambda_event_result.resource_actions'
+
+
+# ========================================================================================
+
+ always:
+  - name: Clean up test resources
+ import_tasks: teardown.yml
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/setup.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/setup.yml
new file mode 100644
index 000000000..df9b4ce1d
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/setup.yml
@@ -0,0 +1,83 @@
+---
+- debug: msg="Starting test setup......"
+
+# CREATE DYNAMO DB TABLE
+- name: Create minimal dynamo table
+ community.aws.dynamodb_table:
+ name: "{{ dynamodb_table_name }}"
+ hash_key_name: id
+ hash_key_type: STRING
+ tags:
+ Usage: Created By Integration Test
+ register: create_table_result
+
+# ENABLE DYNAMODB STREAM AND GET STREAM ARN
+- name: Enable DynamoDB stream (currently not supported by community.aws.dynamodb_table)
+ command: aws dynamodb update-table --table-name "{{ dynamodb_table_name }}" --stream-specification StreamEnabled=True,StreamViewType=KEYS_ONLY
+ environment:
+ AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
+ AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
+ AWS_SESSION_TOKEN: "{{ security_token | default('') }}"
+ AWS_DEFAULT_REGION: "{{ aws_region }}"
+ register: enable_stream_result
+- name: convert it to an object
+ set_fact:
+ enable_stream_result: "{{ enable_stream_result.stdout | from_json }}"
+- name: Get DynamoDB stream ARN
+ set_fact:
+ dynamo_stream_arn: "{{ enable_stream_result.TableDescription.LatestStreamArn }}"
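+
+# The update-table output is JSON roughly of this shape (trimmed sketch), which
+# is where TableDescription.LatestStreamArn comes from:
+#
+#   {
+#     "TableDescription": {
+#       "TableName": "...",
+#       "LatestStreamArn": "arn:aws:dynamodb:<region>:<account>:table/<name>/stream/<label>"
+#     }
+#   }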
+
+# CREATE MINIMAL LAMBDA FUNCTION
+# https://github.com/ansible/ansible/issues/77257
+- name: Set async_dir for HOME env
+ ansible.builtin.set_fact:
+ ansible_async_dir: "{{ lookup('env', 'HOME') }}/.ansible_async_{{ tiny_prefix }}/"
+ when: (lookup('env', 'HOME'))
+
+- name: create minimal lambda role
+ iam_role:
+ name: '{{ lambda_role_name }}'
+ assume_role_policy_document: '{{ lookup("file", "minimal_trust_policy.json")}}'
+ create_instance_profile: false
+ managed_policies:
+ - arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess
+ - arn:aws:iam::aws:policy/AWSLambdaInvocation-DynamoDB
+ - arn:aws:iam::aws:policy/service-role/AWSLambdaDynamoDBExecutionRole
+ register: iam_role
+- name: wait 10 seconds for role to become available
+ pause:
+ seconds: 10
+ when: iam_role.changed
+
+- name: move lambda into place for archive module
+ copy:
+ src: mini_lambda.py
+ dest: '{{ output_dir }}/mini_lambda.py'
+ mode: preserve
+- name: bundle lambda into a zip
+ register: zip_res
+ archive:
+ format: zip
+ path: '{{ output_dir }}/mini_lambda.py'
+ dest: '{{ output_dir }}/mini_lambda.zip'
+
+- name: test state=present - upload the lambda
+ lambda:
+ name: '{{ lambda_function_name }}'
+ runtime: '{{ lambda_python_runtime }}'
+ handler: '{{ lambda_python_handler }}'
+ role: '{{ lambda_role_name }}'
+ zip_file: '{{ zip_res.dest }}'
+ architecture: x86_64
+ register: result
+ vars:
+ ansible_python_interpreter: '{{ botocore_virtualenv_interpreter }}'
+
+- name: assert lambda upload succeeded
+ assert:
+ that:
+ - result.changed
+
+- name: Get lambda function ARN
+ ansible.builtin.set_fact:
+ lambda_function_arn: "{{ result.configuration.function_arn }}"
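For reference, the AWS CLI workaround at the top of this setup (enable the stream, then read LatestStreamArn out of the JSON output) corresponds roughly to the small boto3 sketch below; it assumes ambient AWS credentials and uses a hypothetical table name.

import boto3

# Illustrative equivalent of the "aws dynamodb update-table" command task above.
dynamodb = boto3.client("dynamodb")
response = dynamodb.update_table(
    TableName="example-table",  # placeholder; the playbook uses dynamodb_table_name
    StreamSpecification={"StreamEnabled": True, "StreamViewType": "KEYS_ONLY"},
)
# The same field the playbook extracts via from_json + set_fact.
stream_arn = response["TableDescription"]["LatestStreamArn"]
print(stream_arn)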
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/teardown.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/teardown.yml
new file mode 100644
index 000000000..8b566aa7f
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/teardown.yml
@@ -0,0 +1,33 @@
+---
+- debug: msg="Starting test Teardown......"
+
+- name: Delete DynamoDB stream event mapping (trigger)
+ amazon.aws.lambda_event:
+ state: absent
+ event_source: stream
+ function_arn: '{{ lambda_function_arn }}'
+ source_params:
+ source_arn: "{{ dynamo_stream_arn }}"
+ enabled: True
+ batch_size: 500
+ starting_position: LATEST
+ function_response_types:
+ - ReportBatchItemFailures
+ register: create_lambda_event_result
+ ignore_errors: true
+
+- name: Delete lambda function
+ lambda:
+ name: '{{ lambda_function_name }}'
+ state: absent
+
+- name: Delete dynamo table
+ community.aws.dynamodb_table:
+ name: "{{ dynamodb_table_name }}"
+ state: absent
+
+- name: Delete the role
+ community.aws.iam_role:
+ name: '{{ lambda_role_name }}'
+ assume_role_policy_document: '{{ lookup("file", "minimal_trust_policy.json")}}'
+ state: absent
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_layer/aliases b/ansible_collections/amazon/aws/tests/integration/targets/lambda_layer/aliases
new file mode 100644
index 000000000..d026dde5e
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_layer/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+lambda_layer_info
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_layer/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_layer/defaults/main.yml
new file mode 100644
index 000000000..4f6cbf245
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_layer/defaults/main.yml
@@ -0,0 +1,10 @@
+---
+lambda_hander_content: |
+ # Copyright: Ansible Project
+ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+ import logging
+ from datetime import datetime
+ logger = logging.getLogger()
+ logger.setLevel(logging.INFO)
+ def lambda_handler(event, context):
+ logger.info('Ansible amazon.aws collection lambda handler executed at {0}'.format(datetime.now().strftime("%y%m%d-%H%M%S")))
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_layer/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_layer/tasks/main.yml
new file mode 100644
index 000000000..8d511f00a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_layer/tasks/main.yml
@@ -0,0 +1,248 @@
+---
+- module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key | default(omit) }}'
+ aws_secret_key: '{{ aws_secret_key | default(omit) }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region | default(omit) }}'
+
+ collections:
+ - amazon.aws
+
+ vars:
+ s3_bucket_name: "{{ resource_prefix }}-bucket"
+ s3_bucket_object: "{{ resource_prefix }}-obj-1"
+ layer_name: "{{ resource_prefix }}-layer"
+
+ block:
+ - name: Create temporary directory
+ tempfile:
+ state: directory
+ suffix: .lambda_handler
+ register: _dir
+
+ - copy:
+ content: "{{ lambda_hander_content }}"
+ dest: "{{ _dir.path }}/lambda_handler.py"
+ remote_src: true
+
+ - set_fact:
+ zip_file_path: "{{ _dir.path }}/lambda_handler.zip"
+
+ - name: Create lambda handler archive
+ archive:
+ path: "{{ _dir.path }}/lambda_handler.py"
+ dest: "{{ zip_file_path }}"
+ format: zip
+
+ - name: Create S3 bucket for testing
+ s3_bucket:
+ name: "{{ s3_bucket_name }}"
+ state: present
+
+ - name: add object into bucket
+ s3_object:
+ bucket: "{{ s3_bucket_name }}"
+ mode: put
+ object: "{{ s3_bucket_object }}"
+ # permission: public-read # Commented out because botocore.exceptions.ClientError: An error occurred (AccessControlListNotSupported) when calling the PutObject operation: The bucket does not allow ACLs
+ src: "{{ zip_file_path }}"
+
+ - name: Create lambda layer (check_mode=true)
+ lambda_layer:
+ name: "{{ layer_name }}"
+ description: '{{ resource_prefix }} lambda layer first version'
+ content:
+ zip_file: "{{ zip_file_path }}"
+ compatible_runtimes:
+ - python3.7
+ license_info: GPL-3.0-only
+ register: create_check_mode
+ check_mode: true
+
+ - name: Retrieve all layer versions
+ lambda_layer_info:
+ name: "{{ layer_name }}"
+ register: layers
+
+ - name: Ensure lambda layer was not created
+ assert:
+ that:
+ - create_check_mode is changed
+ - create_check_mode.msg == "Create operation skipped - running in check mode"
+ - layers.layers_versions | length == 0
+
+ - name: Create lambda layer (first version)
+ lambda_layer:
+ name: "{{ layer_name }}"
+ description: '{{ resource_prefix }} lambda layer first version'
+ content:
+ zip_file: "{{ zip_file_path }}"
+ compatible_runtimes:
+ - python3.7
+ license_info: GPL-3.0-only
+ register: first_version
+
+ - name: Create another lambda layer version
+ lambda_layer:
+ name: "{{ layer_name }}"
+ description: '{{ resource_prefix }} lambda layer second version'
+ content:
+ s3_bucket: "{{ s3_bucket_name }}"
+ s3_key: "{{ s3_bucket_object }}"
+ compatible_runtimes:
+ - python3.7
+ license_info: GPL-3.0-only
+ register: last_version
+
+ - name: Retrieve all layers with latest version
+ lambda_layer_info:
+ register: layers
+
+ - name: Ensure layer created above was found
+ assert:
+ that:
+ - '"layers_versions" in layers'
+ - first_version.layer_versions | length == 1
+ - last_version.layer_versions | length == 1
+ - last_version.layer_versions.0.layer_arn in layers_arns
+ - last_version.layer_versions.0.layer_version_arn in layers_version_arns
+ - first_version.layer_versions.0.layer_version_arn not in layers_version_arns
+ vars:
+ layers_arns: '{{ layers.layers_versions | map(attribute="layer_arn") | list }}'
+ layers_version_arns: '{{ layers.layers_versions | map(attribute="layer_version_arn") | list }}'
+
+ - name: Retrieve all layer versions
+ lambda_layer_info:
+ name: "{{ layer_name }}"
+ register: layers
+
+ - name: Ensure layer created above was found
+ assert:
+ that:
+ - '"layers_versions" in layers'
+ - layers.layers_versions | length == 2
+ - first_version.layer_versions | length == 1
+ - last_version.layer_versions | length == 1
+ - last_version.layer_versions.0.layer_version_arn in layers_version_arns
+ - first_version.layer_versions.0.layer_version_arn in layers_version_arns
+ vars:
+ layers_version_arns: '{{ layers.layers_versions | map(attribute="layer_version_arn") | list }}'
+
+ - name: Delete latest layer version
+ lambda_layer:
+ name: "{{ layer_name }}"
+ version: "{{ last_version.layer_versions.0.version }}"
+ state: absent
+ check_mode: true
+ register: delete_check_mode
+
+ - name: Retrieve all layer versions
+ lambda_layer_info:
+ name: "{{ layer_name }}"
+ register: layers
+
+ - name: Ensure no layer version was deleted
+ assert:
+ that:
+ - delete_check_mode is changed
+ - delete_check_mode.layer_versions | length == 1
+ - layers.layers_versions | length == 2
+ - last_version.layer_versions.0.layer_version_arn in layers_version_arns
+ - first_version.layer_versions.0.layer_version_arn in layers_version_arns
+ vars:
+ layers_version_arns: '{{ layers.layers_versions | map(attribute="layer_version_arn") | list }}'
+
+ - name: Delete latest layer version
+ lambda_layer:
+ name: "{{ layer_name }}"
+ version: "{{ last_version.layer_versions.0.version }}"
+ state: absent
+ register: delete_layer
+
+ - name: Retrieve all layer versions
+ lambda_layer_info:
+ name: "{{ layer_name }}"
+ register: layers
+
+ - name: Ensure latest layer version was deleted
+ assert:
+ that:
+ - delete_layer is changed
+ - delete_layer.layer_versions | length == 1
+ - layers.layers_versions | length == 1
+ - last_version.layer_versions.0.layer_version_arn not in layers_version_arns
+ - first_version.layer_versions.0.layer_version_arn in layers_version_arns
+ vars:
+ layers_version_arns: '{{ layers.layers_versions | map(attribute="layer_version_arn") | list }}'
+
+ - name: Delete the latest layer version again (idempotency)
+ lambda_layer:
+ name: "{{ layer_name }}"
+ version: "{{ last_version.layer_versions.0.version }}"
+ state: absent
+ register: delete_idempotent
+
+ - name: Ensure nothing changed
+ assert:
+ that:
+ - delete_idempotent is not changed
+
+ - name: Create multiple lambda layer versions
+ lambda_layer:
+ name: "{{ layer_name }}"
+ description: '{{ resource_prefix }} lambda layer version compatible with python3.{{ item }}'
+ content:
+ s3_bucket: "{{ s3_bucket_name }}"
+ s3_key: "{{ s3_bucket_object }}"
+ compatible_runtimes:
+ - "python3.{{ item }}"
+ license_info: GPL-3.0-only
+ with_items: ["9", "10"]
+
+ - name: Delete all layer versions
+ lambda_layer:
+ name: "{{ layer_name }}"
+ version: -1
+ state: absent
+ register: delete_layer
+
+ - name: Retrieve all layer versions
+ lambda_layer_info:
+ name: "{{ layer_name }}"
+ register: layers
+
+ - name: Ensure layer does not exist anymore
+ assert:
+ that:
+ - delete_layer is changed
+ - delete_layer.layer_versions | length > 1
+ - layers.layers_versions | length == 0
+
+ always:
+ - name: Delete lambda layer if not deleted during testing
+ lambda_layer:
+ name: "{{ layer_name }}"
+ version: -1
+ state: absent
+ ignore_errors: true
+
+ - name: Delete temporary directory
+ file:
+ state: absent
+ path: "{{ _dir.path }}"
+ ignore_errors: true
+
+ - name: Remove object from bucket
+ s3_object:
+ bucket: "{{ s3_bucket_name }}"
+ mode: delobj
+ object: "{{ s3_bucket_object }}"
+ ignore_errors: true
+
+ - name: Delete S3 bucket
+ s3_bucket:
+ name: "{{ s3_bucket_name }}"
+ force: true
+ state: absent
+ ignore_errors: true
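The tasks above exercise publishing, listing and deleting layer versions. A rough boto3 sketch of those underlying Lambda API calls (layer, bucket and key names are placeholders, and this is not the module's actual implementation):

import boto3

lam = boto3.client("lambda")

# Publish a layer version from an object previously uploaded to S3.
published = lam.publish_layer_version(
    LayerName="example-layer",
    Description="example layer version",
    Content={"S3Bucket": "example-bucket", "S3Key": "example-obj-1"},
    CompatibleRuntimes=["python3.9"],
    LicenseInfo="GPL-3.0-only",
)

# List the versions that currently exist for the layer.
versions = lam.list_layer_versions(LayerName="example-layer")["LayerVersions"]

# Delete a single version; the module's "version: -1" removes every version.
lam.delete_layer_version(LayerName="example-layer", VersionNumber=published["Version"])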
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/aliases b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/aliases
new file mode 100644
index 000000000..4ef4b2067
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/aliases
@@ -0,0 +1 @@
+cloud/aws
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/defaults/main.yml
new file mode 100644
index 000000000..4f4252fa0
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+# defaults file for lambda_policy integration test
+# IAM role names have to be less than 64 characters
+# we hash the resource_prefix to get a shorter, unique string
+lambda_function_name: '{{ tiny_prefix }}-api-endpoint'
+lambda_role_name: 'ansible-test-{{ tiny_prefix }}-lambda-policy'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/files/mini_http_lambda.py b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/files/mini_http_lambda.py
new file mode 100644
index 000000000..caccac908
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/files/mini_http_lambda.py
@@ -0,0 +1,40 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+
+def handler(event, context):
+ """
+ The handler function is the function which gets called each time
+ the lambda is run.
+ """
+ # printing goes to the cloudwatch log allowing us to simply debug the lambda if we can find
+ # the log entry.
+ print("got event:\n" + json.dumps(event))
+
+ # If the name parameter isn't present this can throw an exception,
+ # which results in an AWS-generated error response from the Lambda;
+ # for this test that is completely fine.
+
+ name = event["pathParameters"]["greet_name"]
+
+ return {"statusCode": 200,
+ "body": 'hello: "' + name + '"',
+ "headers": {}}
+
+
+def main():
+ """
+ This main function will normally never be called during normal
+ lambda use. It is here for testing the lambda program only.
+ """
+ event = {"pathParameters": {"greet_name": "james"}}
+ context = None
+ print(handler(event, context))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/files/minimal_trust_policy.json b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/files/minimal_trust_policy.json
new file mode 100644
index 000000000..fb84ae9de
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/files/minimal_trust_policy.json
@@ -0,0 +1,12 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "lambda.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/tasks/main.yml
new file mode 100644
index 000000000..e0b514bde
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/tasks/main.yml
@@ -0,0 +1,144 @@
+- name: Integration testing for lambda_policy
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ collections:
+ - community.general
+ - amazon.aws
+
+ block:
+ - name: create minimal lambda role
+ iam_role:
+ name: '{{ lambda_role_name }}'
+ assume_role_policy_document: '{{ lookup("file", "minimal_trust_policy.json") }}'
+ create_instance_profile: false
+ managed_policies:
+ - 'arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess'
+ register: iam_role
+ - name: wait 10 seconds for role to become available
+ pause:
+ seconds: 10
+ when: iam_role.changed
+
+ - name: test with no parameters
+ lambda_policy: null
+ register: result
+ ignore_errors: true
+ - name: assert failure when called with no parameters
+ assert:
+ that:
+ - result.failed
+ - 'result.msg.startswith("missing required arguments: ")'
+ - '"action" in result.msg'
+ - '"function_name" in result.msg'
+ - '"principal" in result.msg'
+ - '"statement_id" in result.msg'
+
+ - name: move lambda into place for archive module
+ copy:
+ src: mini_http_lambda.py
+ dest: '{{ output_dir }}/mini_http_lambda.py'
+ mode: preserve
+ - name: bundle lambda into a zip
+ register: zip_res
+ archive:
+ format: zip
+ path: '{{ output_dir }}/mini_http_lambda.py'
+ dest: '{{ output_dir }}/mini_http_lambda.zip'
+ - name: create minimal lambda role
+ iam_role:
+ name: ansible_lambda_role
+ assume_role_policy_document: '{{ lookup(''file'', ''minimal_trust_policy.json'', convert_data=False) }}'
+ create_instance_profile: false
+ register: iam_role
+ - name: wait 10 seconds for role to become available
+ pause:
+ seconds: 10
+ when: iam_role.changed
+ - name: test state=present - upload the lambda
+ lambda:
+ name: '{{lambda_function_name}}'
+ runtime: python3.9
+ handler: mini_http_lambda.handler
+ role: '{{ lambda_role_name }}'
+ zip_file: '{{zip_res.dest}}'
+ register: lambda_result
+ - name: get the aws account ID for use in future commands
+ aws_caller_info: {}
+ register: aws_caller_info
+ - name: register lambda uri for use in template
+ set_fact:
+ mini_lambda_uri: arn:aws:apigateway:{{ aws_region }}:lambda:path/2015-03-31/functions/arn:aws:lambda:{{ aws_region }}:{{ aws_caller_info.account }}:function:{{ lambda_result.configuration.function_name }}/invocations
+ - name: build API file
+ template:
+ src: endpoint-test-swagger-api.yml.j2
+ dest: '{{output_dir}}/endpoint-test-swagger-api.yml.j2'
+ - name: deploy new API
+ aws_api_gateway:
+ api_file: '{{output_dir}}/endpoint-test-swagger-api.yml.j2'
+ stage: lambdabased
+ register: create_result
+ - name: register api id for later
+ set_fact:
+ api_id: '{{ create_result.api_id }}'
+ - name: check API fails with permissions failure
+ uri:
+ url: https://{{create_result.api_id}}.execute-api.{{aws_region}}.amazonaws.com/lambdabased/mini/Mr_Ansible_Tester
+ register: unauth_uri_result
+ ignore_errors: true
+ - name: assert internal server error due to permissions
+ assert:
+ that:
+ - unauth_uri_result is failed
+ - unauth_uri_result.status == 500
+ - name: give api gateway execute permissions on lambda
+ lambda_policy:
+ function_name: '{{ lambda_function_name }}'
+ state: present
+ statement_id: api-gateway-invoke-lambdas
+ action: lambda:InvokeFunction
+ principal: apigateway.amazonaws.com
+ source_arn: arn:aws:execute-api:{{ aws_region }}:{{ aws_caller_info.account }}:*/*
+ - name: try again but with ARN
+ lambda_policy:
+ function_name: '{{ lambda_result.configuration.function_arn }}'
+ state: present
+ statement_id: api-gateway-invoke-lambdas
+ action: lambda:InvokeFunction
+ principal: apigateway.amazonaws.com
+ source_arn: arn:aws:execute-api:{{ aws_region }}:{{ aws_caller_info.account }}:*/*
+ - name: check API works with execute permissions
+ uri:
+ url: https://{{create_result.api_id}}.execute-api.{{aws_region}}.amazonaws.com/lambdabased/mini/Mr_Ansible_Tester
+ register: uri_result
+ - name: assert API call succeeded
+ assert:
+ that:
+ - uri_result
+ - name: deploy new API
+ aws_api_gateway:
+ api_file: '{{output_dir}}/endpoint-test-swagger-api.yml.j2'
+ stage: lambdabased
+ register: create_result
+ ignore_errors: true
+ always:
+ - name: destroy lambda for test cleanup if created
+ lambda:
+ name: '{{lambda_function_name}}'
+ state: absent
+ register: result
+ ignore_errors: true
+ - name: destroy API for test cleanup if created
+ aws_api_gateway:
+ state: absent
+ api_id: '{{api_id}}'
+ register: destroy_result
+ ignore_errors: true
+ - name: Clean up test role
+ iam_role:
+ name: '{{ lambda_role_name }}'
+ state: absent
+ ignore_errors: true
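The permission granted by lambda_policy above maps onto the Lambda AddPermission API; a minimal boto3 sketch, with placeholder function name, region and account values:

import boto3

lam = boto3.client("lambda")
lam.add_permission(
    FunctionName="example-api-endpoint",  # placeholder function name
    StatementId="api-gateway-invoke-lambdas",
    Action="lambda:InvokeFunction",
    Principal="apigateway.amazonaws.com",
    SourceArn="arn:aws:execute-api:us-east-1:123456789012:*/*",  # placeholder ARN
)

# The resulting resource policy can be inspected afterwards.
policy_json = lam.get_policy(FunctionName="example-api-endpoint")["Policy"]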
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/templates/endpoint-test-swagger-api.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/templates/endpoint-test-swagger-api.yml.j2
new file mode 100644
index 000000000..d62188477
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/templates/endpoint-test-swagger-api.yml.j2
@@ -0,0 +1,39 @@
+---
+swagger: "2.0"
+info:
+ version: "2017-05-11T12:14:59Z"
+ title: "{{resource_prefix}}LambdaBased_API"
+host: "fakeexample.execute-api.us-east-1.amazonaws.com"
+basePath: "/lambdabased"
+schemes:
+- "https"
+paths:
+ /mini/{greet_name}:
+ get:
+ produces:
+ - "application/json"
+ parameters:
+ - name: "greet_name"
+ in: "path"
+ required: true
+ type: "string"
+ responses:
+ 200:
+ description: "200 response"
+ schema:
+ $ref: "#/definitions/Empty"
+ x-amazon-apigateway-integration:
+ responses:
+ default:
+ statusCode: "200"
+ uri: "{{mini_lambda_uri}}"
+ requestTemplates:
+ application/json: "{\"statusCode\": 200}"
+ passthroughBehavior: "when_no_match"
+ httpMethod: "POST"
+ contentHandling: "CONVERT_TO_TEXT"
+ type: "aws_proxy"
+definitions:
+ Empty:
+ type: "object"
+ title: "Empty Schema"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/legacy_missing_tests/README.md b/ansible_collections/amazon/aws/tests/integration/targets/legacy_missing_tests/README.md
new file mode 100644
index 000000000..03b5bdc0d
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/legacy_missing_tests/README.md
@@ -0,0 +1,5 @@
+## Fake integration suite
+
+This is a fake integration suite: its aliases file lists every module name that is missing integration tests (some of them are covered by unit tests).
+
+This fake suite is required by the new CI ansible-test-splitter behaviour. Without it, modifying one of the modules listed in the aliases file would trigger the CI for the entire collection, because the ansible-test-splitter would not find a matching target. This fake suite provides that match and so avoids running the CI for the whole collection. Furthermore, since the modules listed in the aliases file are marked as disabled, their tests are automatically skipped. \ No newline at end of file
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/legacy_missing_tests/aliases b/ansible_collections/amazon/aws/tests/integration/targets/legacy_missing_tests/aliases
new file mode 100644
index 000000000..3d3a12fd6
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/legacy_missing_tests/aliases
@@ -0,0 +1,5 @@
+disabled
+
+lambda_event
+rds_instance_snapshot
+rds_snapshot_info
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/legacy_missing_tests/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/legacy_missing_tests/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/legacy_missing_tests/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_account_attribute/aliases b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_account_attribute/aliases
new file mode 100644
index 000000000..4ef4b2067
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_account_attribute/aliases
@@ -0,0 +1 @@
+cloud/aws
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_account_attribute/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_account_attribute/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_account_attribute/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_account_attribute/tasks/main.yaml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_account_attribute/tasks/main.yaml
new file mode 100644
index 000000000..0dcc162b8
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_account_attribute/tasks/main.yaml
@@ -0,0 +1,130 @@
+- set_fact:
+ # As a lookup plugin we don't have access to module_defaults
+ connection_args:
+ region: "{{ aws_region }}"
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ aws_security_token: "{{ security_token | default(omit) }}"
+ no_log: True
+
+- module_defaults:
+ group/aws:
+ region: "{{ aws_region }}"
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ block:
+ - name: 'Check for EC2 Classic support (has-ec2-classic)'
+ set_fact:
+ has_ec2_classic: "{{ lookup('amazon.aws.aws_account_attribute',
+ attribute='has-ec2-classic',
+ wantlist=True,
+ **connection_args) }}"
+ - assert:
+ that:
+ - ( has_ec2_classic is sameas true ) or ( has_ec2_classic is sameas false )
+
+ - name: 'Fetch all account attributes (wantlist=True)'
+ set_fact:
+ account_attrs: "{{ lookup('amazon.aws.aws_account_attribute',
+ wantlist=True,
+ **connection_args) }}"
+ - assert:
+ that:
+ # Not guaranteed that there will be a default-vpc
+ - '"default-vpc" in account_attrs'
+ - '"max-elastic-ips" in account_attrs'
+ - account_attrs['max-elastic-ips'][0] | int
+ - '"max-instances" in account_attrs'
+ - account_attrs['max-instances'][0] | int
+ # EC2 and VPC are both valid values, but we can't guarantee which are available
+ - '"supported-platforms" in account_attrs'
+ - account_attrs['supported-platforms'] | difference(['VPC', 'EC2']) | length == 0
+ - '"vpc-max-elastic-ips" in account_attrs'
+ - account_attrs['vpc-max-elastic-ips'][0] | int
+ - '"vpc-max-security-groups-per-interface" in account_attrs'
+ - account_attrs['vpc-max-security-groups-per-interface'][0] | int
+
+ # Not especially useful, but let's be thorough and leave hints about what folks
+ # can expect
+ - name: 'Fetch all account attributes (wantlist=False)'
+ set_fact:
+ account_attrs: "{{ lookup('amazon.aws.aws_account_attribute',
+ wantlist=False,
+ **connection_args) }}"
+ - assert:
+ that:
+ - '"default-vpc" in split_attrs'
+ - '"max-elastic-ips" in split_attrs'
+ - '"max-instances" in split_attrs'
+ - '"supported-platforms" in split_attrs'
+ - '"vpc-max-elastic-ips" in split_attrs'
+ - '"vpc-max-security-groups-per-interface" in split_attrs'
+ vars:
+ split_attrs: '{{ account_attrs.split(",") }}'
+
+ - name: 'Check for Default VPC (default-vpc)'
+ set_fact:
+ default_vpc: "{{ lookup('amazon.aws.aws_account_attribute',
+ attribute='default-vpc',
+ **connection_args) }}"
+ - assert:
+ that:
+ - (default_vpc == "none")
+ or
+ default_vpc.startswith("vpc-")
+
+ - name: 'Check for maximum number of EIPs (max-elastic-ips)'
+ set_fact:
+ max_eips: "{{ lookup('amazon.aws.aws_account_attribute',
+ attribute='max-elastic-ips',
+ **connection_args) }}"
+ - assert:
+ that:
+ - max_eips | int
+
+ - name: 'Check for maximum number of Instances (max-instances)'
+ set_fact:
+ max_instances: "{{ lookup('amazon.aws.aws_account_attribute',
+ attribute='max-instances',
+ **connection_args) }}"
+ - assert:
+ that:
+ - max_instances | int
+
+ - name: 'Check for maximum number of EIPs in a VPC (vpc-max-elastic-ips)'
+ set_fact:
+ vpc_max_eips: "{{ lookup('amazon.aws.aws_account_attribute',
+ attribute='vpc-max-elastic-ips',
+ **connection_args) }}"
+ - assert:
+ that:
+ - vpc_max_eips | int
+
+ - name: 'Check for maximum number of Security Groups per Interface (vpc-max-security-groups-per-interface)'
+ set_fact:
+ max_sg_per_int: "{{ lookup('amazon.aws.aws_account_attribute',
+ attribute='vpc-max-security-groups-per-interface',
+ **connection_args) }}"
+ - assert:
+ that:
+ - max_sg_per_int | int
+
+ - name: 'Check for support of Classic EC2 vs VPC (supported-platforms)'
+ set_fact:
+ supported_plat: "{{ lookup('amazon.aws.aws_account_attribute',
+ attribute='supported-platforms',
+ **connection_args) }}"
+ - assert:
+ that:
+ - supported_plat.split(',') | difference(['VPC', 'EC2']) | length == 0
+
+ - name: 'Check for support of Classic EC2 vs VPC (supported-platforms) (wantlist)'
+ set_fact:
+ supported_plat: "{{ lookup('amazon.aws.aws_account_attribute',
+ attribute='supported-platforms',
+ wantlist=True,
+ **connection_args) }}"
+ - assert:
+ that:
+ - supported_plat | difference(['VPC', 'EC2']) | length == 0
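The attributes asserted above come from the EC2 DescribeAccountAttributes API; a minimal sketch of querying it directly, assuming credentials and region are taken from the environment:

import boto3

ec2 = boto3.client("ec2")
response = ec2.describe_account_attributes(
    AttributeNames=["supported-platforms", "max-elastic-ips", "default-vpc"]
)
# Flatten the response into {attribute-name: [values, ...]}.
attributes = {
    attr["AttributeName"]: [v["AttributeValue"] for v in attr["AttributeValues"]]
    for attr in response["AccountAttributes"]
}
print(attributes)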
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/aliases b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/aliases
new file mode 100644
index 000000000..4ef4b2067
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/aliases
@@ -0,0 +1 @@
+cloud/aws
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/tasks/main.yaml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/tasks/main.yaml
new file mode 100644
index 000000000..a22580e3b
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/tasks/main.yaml
@@ -0,0 +1,120 @@
+- set_fact:
+ # As a lookup plugin we don't have access to module_defaults
+ connection_args:
+ region: "{{ aws_region }}"
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ aws_security_token: "{{ security_token | default(omit) }}"
+ no_log: True
+
+- module_defaults:
+ group/aws:
+ region: "{{ aws_region }}"
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ collections:
+ - amazon.aws
+ - community.aws
+ block:
+ - name: define secret name
+ set_fact:
+ secret_name: "ansible-test-{{ tiny_prefix }}-secret"
+ secret_value: "{{ lookup('password', '/dev/null chars=ascii_lowercase,digits,punctuation length=16') }}"
+ skip: "skip"
+ warn: "warn"
+
+ - name: lookup missing secret (skip)
+ set_fact:
+ missing_secret: "{{ lookup('amazon.aws.aws_secret', secret_name, on_missing=skip, **connection_args) }}"
+
+ - name: assert that missing_secret is defined
+ assert:
+ that:
+ - missing_secret is defined
+ - missing_secret | list | length == 0
+
+ - name: lookup missing secret (warn)
+ set_fact:
+ missing_secret: "{{ lookup('amazon.aws.aws_secret', secret_name, on_missing=warn, **connection_args) }}"
+
+ - name: assert that missing_secret is defined
+ assert:
+ that:
+ - missing_secret is defined
+ - missing_secret | list | length == 0
+
+ - name: lookup missing secret (error)
+ set_fact:
+ missing_secret: "{{ lookup('amazon.aws.aws_secret', secret_name, **connection_args) }}"
+ ignore_errors: True
+ register: get_missing_secret
+
+ - name: assert that setting the missing_secret failed
+ assert:
+ that:
+ - get_missing_secret is failed
+
+ - name: create secret "{{ secret_name }}"
+ aws_secret:
+ name: "{{ secret_name }}"
+ secret: "{{ secret_value }}"
+ tags:
+ ansible-test: "aws-tests-integration"
+ state: present
+
+ - name: read secret value
+ set_fact:
+ look_secret: "{{ lookup('amazon.aws.aws_secret', secret_name, **connection_args) }}"
+
+ - name: assert that secret was successfully retrieved
+ assert:
+ that:
+ - look_secret == secret_value
+
+ - name: delete secret
+ aws_secret:
+ name: "{{ secret_name }}"
+ state: absent
+ recovery_window: 7
+
+ - name: lookup deleted secret (skip)
+ set_fact:
+ deleted_secret: "{{ lookup('amazon.aws.aws_secret', secret_name, on_deleted=skip, **connection_args) }}"
+
+ - name: assert that deleted_secret is defined
+ assert:
+ that:
+ - deleted_secret is defined
+ - deleted_secret | list | length == 0
+
+ - name: lookup deleted secret (warn)
+ set_fact:
+ deleted_secret: "{{ lookup('amazon.aws.aws_secret', secret_name, on_deleted=warn, **connection_args) }}"
+
+ - name: assert that deleted_secret is defined
+ assert:
+ that:
+ - deleted_secret is defined
+ - deleted_secret | list | length == 0
+
+ - name: lookup deleted secret (error)
+ set_fact:
+ missing_secret: "{{ lookup('amazon.aws.aws_secret', secret_name, **connection_args) }}"
+ ignore_errors: True
+ register: get_deleted_secret
+
+ - name: assert that setting the deleted_secret failed
+ assert:
+ that:
+ - get_deleted_secret is failed
+
+ always:
+
+ # delete secret created
+ - name: delete secret
+ aws_secret:
+ name: "{{ secret_name }}"
+ state: absent
+ recovery_window: 0
+ ignore_errors: yes
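The on_missing/on_deleted behaviour tested above wraps the error handling around Secrets Manager GetSecretValue; a minimal sketch with a placeholder secret name:

import boto3

sm = boto3.client("secretsmanager")
try:
    value = sm.get_secret_value(SecretId="ansible-test-example-secret")["SecretString"]
except sm.exceptions.ResourceNotFoundException:
    value = None  # secret does not exist: roughly what on_missing=skip returns
except sm.exceptions.InvalidRequestException:
    value = None  # secret scheduled for deletion: roughly what on_deleted=skip returns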
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_service_ip_ranges/aliases b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_service_ip_ranges/aliases
new file mode 100644
index 000000000..4ef4b2067
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_service_ip_ranges/aliases
@@ -0,0 +1 @@
+cloud/aws
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_service_ip_ranges/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_service_ip_ranges/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_service_ip_ranges/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_service_ip_ranges/tasks/main.yaml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_service_ip_ranges/tasks/main.yaml
new file mode 100644
index 000000000..4599ba19a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_service_ip_ranges/tasks/main.yaml
@@ -0,0 +1,148 @@
+- name: lookup range with no arguments
+ set_fact:
+ no_params: "{{ lookup('amazon.aws.aws_service_ip_ranges') }}"
+
+- name: assert that we're returned a single string
+ assert:
+ that:
+ - no_params is defined
+ - no_params is string
+
+- name: lookup range with wantlist
+ set_fact:
+ want_list: "{{ lookup('amazon.aws.aws_service_ip_ranges', wantlist=True) }}"
+ want_ipv6_list: "{{ lookup('amazon.aws.aws_service_ip_ranges', wantlist=True, ipv6_prefixes=True) }}"
+
+- name: assert that we're returned a list
+ assert:
+ that:
+ - want_list is defined
+ - want_list is iterable
+ - want_list is not string
+ - want_list | length > 1
+ - want_list[0] | ansible.utils.ipv4
+ - want_ipv6_list is defined
+ - want_ipv6_list is iterable
+ - want_ipv6_list is not string
+ - want_ipv6_list | length > 1
+ - want_ipv6_list[0] | ansible.utils.ipv6
+
+
+- name: lookup range with service
+ set_fact:
+ s3_ips: "{{ lookup('amazon.aws.aws_service_ip_ranges', service='S3', wantlist=True) }}"
+ s3_ipv6s: "{{ lookup('amazon.aws.aws_service_ip_ranges', service='S3', wantlist=True, ipv6_prefixes=True) }}"
+
+- name: assert that we're returned a list
+ assert:
+ that:
+ - s3_ips is defined
+ - s3_ips is iterable
+ - s3_ips is not string
+ - s3_ips | length > 1
+ - s3_ips[0] | ansible.utils.ipv4
+ - s3_ipv6s is defined
+ - s3_ipv6s is iterable
+ - s3_ipv6s is not string
+ - s3_ipv6s | length > 1
+ - s3_ipv6s[0] | ansible.utils.ipv6
+
+- name: lookup range with a different service
+ set_fact:
+ route53_ips: "{{ lookup('amazon.aws.aws_service_ip_ranges', service='ROUTE53_HEALTHCHECKS', wantlist=True) }}"
+ route53_ipv6s: "{{ lookup('amazon.aws.aws_service_ip_ranges', service='ROUTE53_HEALTHCHECKS', wantlist=True, ipv6_prefixes=True) }}"
+
+- name: assert that we're returned a list
+ assert:
+ that:
+ - route53_ips is defined
+ - route53_ips is iterable
+ - route53_ips is not string
+ - route53_ips | length > 1
+ - route53_ips[0] | ansible.utils.ipv4
+ - route53_ipv6s is defined
+ - route53_ipv6s is iterable
+ - route53_ipv6s is not string
+ - route53_ipv6s | length > 1
+ - route53_ipv6s[0] | ansible.utils.ipv6
+
+
+- name: assert that service IPV4s and IPV6s do not overlap
+ assert:
+ that:
+ - route53_ips | intersect(s3_ips) | length == 0
+ - route53_ipv6s | intersect(s3_ipv6s) | length == 0
+
+- name: lookup range with region
+ set_fact:
+ us_east_1_ips: "{{ lookup('amazon.aws.aws_service_ip_ranges', region='us-east-1', wantlist=True) }}"
+
+- name: lookup IPV6 range with region
+ set_fact:
+ us_east_1_ipv6s: "{{ lookup('amazon.aws.aws_service_ip_ranges', region='us-east-1', wantlist=True, ipv6_prefixes=True) }}"
+
+- name: assert that we're returned a list
+ assert:
+ that:
+ - us_east_1_ips is defined
+ - us_east_1_ips is iterable
+ - us_east_1_ips is not string
+ - us_east_1_ips | length > 1
+ - us_east_1_ips[0] | ansible.utils.ipv4
+ - us_east_1_ipv6s is defined
+ - us_east_1_ipv6s is iterable
+ - us_east_1_ipv6s is not string
+ - us_east_1_ipv6s | length > 1
+ - us_east_1_ipv6s[0] | ansible.utils.ipv6
+
+- name: lookup range with a different region
+ set_fact:
+ eu_central_1_ips: "{{ lookup('amazon.aws.aws_service_ip_ranges', region='eu-central-1', wantlist=True) }}"
+ eu_central_1_ipv6s: "{{ lookup('amazon.aws.aws_service_ip_ranges', region='eu-central-1', wantlist=True, ipv6_prefixes=True) }}"
+
+- name: assert that we're returned a list
+ assert:
+ that:
+ - eu_central_1_ips is defined
+ - eu_central_1_ips is iterable
+ - eu_central_1_ips is not string
+ - eu_central_1_ips | length > 1
+ - eu_central_1_ips[0] | ansible.utils.ipv4
+ - eu_central_1_ipv6s is defined
+ - eu_central_1_ipv6s is iterable
+ - eu_central_1_ipv6s is not string
+ - eu_central_1_ipv6s | length > 1
+ - eu_central_1_ipv6s[0] | ansible.utils.ipv6
+
+- name: assert that regional IPs don't overlap
+ assert:
+ that:
+ - eu_central_1_ips | intersect(us_east_1_ips) | length == 0
+ - eu_central_1_ipv6s | intersect(us_east_1_ipv6s) | length == 0
+
+- name: lookup range with service and region
+ set_fact:
+ s3_us_ips: "{{ lookup('amazon.aws.aws_service_ip_ranges', region='us-east-1', service='S3', wantlist=True) }}"
+ s3_us_ipv6s: "{{ lookup('amazon.aws.aws_service_ip_ranges', region='us-east-1', service='S3', wantlist=True, ipv6_prefixes=True) }}"
+
+- name: assert that we're returned a list
+ assert:
+ that:
+ - s3_us_ips is defined
+ - s3_us_ips is iterable
+ - s3_us_ips is not string
+ - s3_us_ips | length > 1
+ - s3_us_ips[0] | ansible.utils.ipv4
+ - s3_us_ipv6s is defined
+ - s3_us_ipv6s is iterable
+ - s3_us_ipv6s is not string
+ - s3_us_ipv6s | length > 1
+ - s3_us_ipv6s[0] | ansible.utils.ipv6
+
+- name: assert that the regional service IPs are a subset of the regional IPs and service IPs.
+ assert:
+ that:
+ - ( s3_us_ips | intersect(us_east_1_ips) | length ) == ( s3_us_ips | length )
+ - ( s3_us_ips | intersect(s3_ips) | length ) == ( s3_us_ips | length )
+ - ( s3_us_ipv6s | intersect(us_east_1_ipv6s) | length ) == ( s3_us_ipv6s | length )
+ - ( s3_us_ipv6s | intersect(s3_ipv6s) | length ) == ( s3_us_ipv6s | length )
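The aws_service_ip_ranges lookup is backed by the public ip-ranges.json feed; the same service/region filtering can be reproduced by hand, as in this sketch:

import json
import urllib.request

with urllib.request.urlopen("https://ip-ranges.amazonaws.com/ip-ranges.json") as resp:
    data = json.load(resp)

# IPv4 and IPv6 prefixes for one service in one region
# (mirrors service='S3', region='us-east-1' above).
s3_us_east_1_v4 = [
    p["ip_prefix"]
    for p in data["prefixes"]
    if p["service"] == "S3" and p["region"] == "us-east-1"
]
s3_us_east_1_v6 = [
    p["ipv6_prefix"]
    for p in data["ipv6_prefixes"]
    if p["service"] == "S3" and p["region"] == "us-east-1"
]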
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/aliases b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/aliases
new file mode 100644
index 000000000..4ef4b2067
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/aliases
@@ -0,0 +1 @@
+cloud/aws
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/defaults/main.yml
new file mode 100644
index 000000000..218afac1c
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+ssm_key_prefix: '{{ resource_prefix }}'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/tasks/main.yml
new file mode 100644
index 000000000..d46c7b20b
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/tasks/main.yml
@@ -0,0 +1,276 @@
+---
+- set_fact:
+ # As a lookup plugin we don't have access to module_defaults
+ connection_args:
+ region: "{{ aws_region }}"
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ aws_security_token: "{{ security_token | default(omit) }}"
+ no_log: True
+
+- name: 'aws_ssm lookup plugin integration tests'
+ collections:
+ - amazon.aws
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ vars:
+ skip: 'skip'
+ warn: 'warn'
+ simple_name: '/{{ ssm_key_prefix }}/Simple'
+ simple_description: 'This is a simple example'
+ simple_value: 'A simple VALue'
+ updated_value: 'A simple (updated) VALue'
+ path_name: '/{{ ssm_key_prefix }}/path'
+ path_name_a: '{{ path_name }}/key_one'
+ path_shortname_a: 'key_one'
+ path_name_b: '{{ path_name }}/keyTwo'
+ path_shortname_b: 'keyTwo'
+ path_name_c: '{{ path_name }}/Nested/Key'
+ path_shortname_c: 'Key'
+ path_description: 'This is somewhere to store a set of keys'
+ path_value_a: 'value_one'
+ path_value_b: 'valueTwo'
+ path_value_c: 'Value Three'
+ missing_name: '{{ path_name }}/IDoNotExist'
+ block:
+
+ # ============================================================
+ # Simple key/value
+ - name: lookup a missing key (error)
+ set_fact:
+ lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, **connection_args) }}"
+ ignore_errors: true
+ register: lookup_missing
+ - assert:
+ that:
+ - lookup_missing is failed
+
+ - name: lookup a missing key (warn)
+ set_fact:
+ lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, on_missing=warn, **connection_args) }}"
+ register: lookup_missing
+ - assert:
+ that:
+ - lookup_value | list | length == 0
+
+ - name: lookup a single missing key (skip)
+ set_fact:
+ lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, on_missing=skip, **connection_args) }}"
+ register: lookup_missing
+ - assert:
+ that:
+ - lookup_value | list | length == 0
+
+ - name: Create key/value pair in aws parameter store
+ aws_ssm_parameter_store:
+ name: '{{ simple_name }}'
+ description: '{{ simple_description }}'
+ value: '{{ simple_value }}'
+
+ - name: Lookup a single key
+ set_fact:
+ lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, **connection_args) }}"
+ - assert:
+ that:
+ - lookup_value == simple_value
+
+ - name: Create key/value pair in aws parameter store
+ aws_ssm_parameter_store:
+ name: '{{ simple_name }}'
+ description: '{{ simple_description }}'
+ value: '{{ simple_value }}'
+
+ - name: Lookup a single key
+ set_fact:
+ lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, **connection_args) }}"
+ - assert:
+ that:
+ - lookup_value == simple_value
+
+ - name: Update key/value pair in aws parameter store
+ aws_ssm_parameter_store:
+ name: '{{ simple_name }}'
+ description: '{{ simple_description }}'
+ value: '{{ updated_value }}'
+
+ - name: Lookup updated single key
+ set_fact:
+ lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, **connection_args) }}"
+ - assert:
+ that:
+ - lookup_value == updated_value
+
+ - name: Lookup original value from single key
+ set_fact:
+ lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name + ':1', **connection_args) }}"
+ - assert:
+ that:
+ - lookup_value == simple_value
+
+ # ============================================================
+
+ - name: Create nested key/value pair in aws parameter store (1)
+ aws_ssm_parameter_store:
+ name: '{{ path_name_a }}'
+ description: '{{ path_description }}'
+ value: '{{ path_value_a }}'
+
+ - name: Create nested key/value pair in aws parameter store (2)
+ aws_ssm_parameter_store:
+ name: '{{ path_name_b }}'
+ description: '{{ path_description }}'
+ value: '{{ path_value_b }}'
+
+ - name: Create nested key/value pair in aws parameter store (3)
+ aws_ssm_parameter_store:
+ name: '{{ path_name_c }}'
+ description: '{{ path_description }}'
+ value: '{{ path_value_c }}'
+
+ # ============================================================
+ - name: Lookup keys using bypath
+ set_fact:
+ lookup_value: "{{ lookup('amazon.aws.aws_ssm', path_name, bypath=True, wantlist=True, **connection_args ) | first }}"
+ - assert:
+ that:
+ - path_name_a in lookup_value
+ - lookup_value[path_name_a] == path_value_a
+ - path_name_b in lookup_value
+ - lookup_value[path_name_b] == path_value_b
+ - lookup_value | length == 2
+
+ - name: Lookup keys using bypath and recursive
+ set_fact:
+ lookup_value: "{{ lookup('amazon.aws.aws_ssm', path_name, bypath=True, recursive=True, wantlist=True, **connection_args ) | first }}"
+ - assert:
+ that:
+ - path_name_a in lookup_value
+ - lookup_value[path_name_a] == path_value_a
+ - path_name_b in lookup_value
+ - lookup_value[path_name_b] == path_value_b
+ - path_name_c in lookup_value
+ - lookup_value[path_name_c] == path_value_c
+ - lookup_value | length == 3
+
+ - name: Lookup keys using bypath and shortnames
+ set_fact:
+ lookup_value: "{{ lookup('amazon.aws.aws_ssm', path_name, bypath=True, shortnames=True, wantlist=True, **connection_args ) | first }}"
+ - assert:
+ that:
+ - path_shortname_a in lookup_value
+ - lookup_value[path_shortname_a] == path_value_a
+ - path_shortname_b in lookup_value
+ - lookup_value[path_shortname_b] == path_value_b
+ - lookup_value | length == 2
+
+ - name: Lookup keys using bypath, recursive and shortnames
+ set_fact:
+ lookup_value: "{{ lookup('amazon.aws.aws_ssm', path_name, bypath=True, recursive=True, shortnames=True, wantlist=True, **connection_args ) | first }}"
+ - assert:
+ that:
+ - path_shortname_a in lookup_value
+ - lookup_value[path_shortname_a] == path_value_a
+ - path_shortname_b in lookup_value
+ - lookup_value[path_shortname_b] == path_value_b
+ - path_shortname_c in lookup_value
+ - lookup_value[path_shortname_c] == path_value_c
+ - lookup_value | length == 3
+
+ # ============================================================
+
+ - name: Explicitly lookup two keys
+ set_fact:
+ lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, path_name_a, wantlist=True, **connection_args) }}"
+ - assert:
+ that:
+ - lookup_value | list | length == 2
+ - lookup_value[0] == updated_value
+ - lookup_value[1] == path_value_a
+
+ ###
+
+ - name: Explicitly lookup two keys - one missing
+ set_fact:
+ lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, missing_name, wantlist=True, **connection_args) }}"
+ ignore_errors: True
+ register: lookup_missing
+ - assert:
+ that:
+ - lookup_missing is failed
+
+ - name: Explicitly lookup two keys - one missing (skip)
+ set_fact:
+ lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, missing_name, on_missing=skip, wantlist=True, **connection_args) }}"
+ - assert:
+ that:
+ - lookup_value | list | length == 2
+ - lookup_value[0] == updated_value
+ - lookup_value | bool == False
+
+ ###
+
+ - name: Explicitly lookup two paths - one missing
+ set_fact:
+ lookup_value: "{{ lookup('amazon.aws.aws_ssm', path_name, missing_name, bypath=True, wantlist=True, **connection_args) }}"
+ ignore_errors: True
+ register: lookup_missing
+ - assert:
+ that:
+ - lookup_missing is failed
+
+ - name: Explicitly lookup two paths - one missing (skip)
+ set_fact:
+ lookup_value: "{{ lookup('amazon.aws.aws_ssm', path_name, missing_name, on_missing=skip, bypath=True, wantlist=True, **connection_args) }}"
+ - assert:
+ that:
+ - lookup_value | list | length == 2
+ - lookup_value[1] | bool == False
+ - path_name_a in lookup_value[0]
+ - lookup_value[0][path_name_a] == path_value_a
+ - path_name_b in lookup_value[0]
+ - lookup_value[0][path_name_b] == path_value_b
+ - lookup_value[0] | length == 2
+
+ ###
+
+ - name: Explicitly lookup two paths with recurse - one missing
+ set_fact:
+ lookup_value: "{{ lookup('amazon.aws.aws_ssm', path_name, missing_name, bypath=True, recursive=True, wantlist=True, **connection_args) }}"
+ ignore_errors: True
+ register: lookup_missing
+ - assert:
+ that:
+ - lookup_missing is failed
+
+ - name: Explicitly lookup two paths with recurse - one missing (skip)
+ set_fact:
+ lookup_value: "{{ lookup('amazon.aws.aws_ssm', path_name, missing_name, on_missing=skip, bypath=True, recursive=True, wantlist=True, **connection_args) }}"
+ - assert:
+ that:
+ - lookup_value | list | length == 2
+ - lookup_value[1] | bool == False
+ - path_name_a in lookup_value[0]
+ - lookup_value[0][path_name_a] == path_value_a
+ - path_name_b in lookup_value[0]
+ - lookup_value[0][path_name_b] == path_value_b
+ - path_name_c in lookup_value[0]
+ - lookup_value[0][path_name_c] == path_value_c
+ - lookup_value[0] | length == 3
+
+ always:
+ # ============================================================
+ - name: Delete remaining key/value pairs in aws parameter store
+ aws_ssm_parameter_store:
+ name: "{{item}}"
+ state: absent
+ ignore_errors: True
+ with_items:
+ - '{{ path_name_c }}'
+ - '{{ path_name_b }}'
+ - '{{ path_name_a }}'
+ - '{{ path_name }}'
+ - '{{ simple_name }}'
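The single-key, versioned and bypath lookups exercised above correspond to the SSM GetParameter and GetParametersByPath APIs; a minimal sketch with placeholder parameter names:

import boto3

ssm = boto3.client("ssm")

# Single key; a "name:1" style suffix selects a specific parameter version.
value = ssm.get_parameter(Name="/example/Simple", WithDecryption=True)["Parameter"]["Value"]

# bypath + recursive: walk a parameter hierarchy.
found = {}
paginator = ssm.get_paginator("get_parameters_by_path")
for page in paginator.paginate(Path="/example/path", Recursive=True, WithDecryption=True):
    for param in page["Parameters"]:
        found[param["Name"]] = param["Value"]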
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/aliases b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/aliases
new file mode 100644
index 000000000..4fd9dff76
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/aliases
@@ -0,0 +1 @@
+module_utils_botocore
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/main.yml
new file mode 100644
index 000000000..a8dedcf47
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/main.yml
@@ -0,0 +1,12 @@
+---
+- hosts: localhost
+ tasks:
+ - name: Call aws_az_info
+ amazon.aws.aws_az_info:
+ register: result
+ - name: Get caller information
+ amazon.aws.aws_caller_info:
+ register: result
+ - assert:
+ that:
+ - lookup('ansible.builtin.env', '_ANSIBLE_PLACEBO_RECORD') or (lookup('ansible.builtin.env', '_ANSIBLE_PLACEBO_REPLAY') and result.user_id == "AWZBREIZHEOMABRONIFVGFS6GH")
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/record.sh b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/record.sh
new file mode 100755
index 000000000..180e58d05
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/record.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+#
+set -eux
+
+if [ -d recording ]; then
+ echo "Please check and remove the 'recording' directory."
+ exit 1
+fi
+if [ -v ANSIBLE_TEST_PYTHON_VERSION ]; then
+ echo "Please call ./runme.sh directly without ansible-test"
+ exit 1
+fi
+export _ANSIBLE_PLACEBO_RECORD=recording
+
+mkdir recording
+ansible-playbook main.yml -vvv
+account_id=$(aws sts get-caller-identity --query "Account" --output text)
+user_id=$(aws sts get-caller-identity --query "UserId" --output text)
+find recording -type f -exec sed -i "s,$account_id,123456789012,g" "{}" \;
+find recording -type f -exec sed -i "s,$user_id,AIDA12345EXAMPLE54321,g" "{}" \;
+find recording -type f -exec sed -i "s,$USER,george,g" "{}" \;
+tar cfzv recording.tar.gz recording
+rm -r recording
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/recording.tar.gz b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/recording.tar.gz
new file mode 100644
index 000000000..29c8dd90a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/recording.tar.gz
Binary files differ
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/runme.sh b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/runme.sh
new file mode 100755
index 000000000..2f0d591a4
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/runme.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+#
+
+set -eux
+
+export ANSIBLE_ROLES_PATH=../
+
+
+tar xfzv recording.tar.gz
+export _ANSIBLE_PLACEBO_REPLAY=${PWD}/recording
+export AWS_ACCESS_KEY_ID=disabled
+export AWS_SECRET_ACCESS_KEY=disabled
+export AWS_SESSION_TOKEN=disabled
+export AWS_DEFAULT_REGION=us-east-2
+ansible-playbook main.yml -vvv
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/aliases b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/aliases
new file mode 100644
index 000000000..d13ca0492
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/aliases
@@ -0,0 +1,4 @@
+cloud/aws
+
+module_utils_botocore
+module_utils_modules
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/inventory b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/inventory
new file mode 100644
index 000000000..5093e8582
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/inventory
@@ -0,0 +1,6 @@
+[tests]
+localhost
+
+[all:vars]
+ansible_connection=local
+ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/main.yml
new file mode 100644
index 000000000..29604c495
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/main.yml
@@ -0,0 +1,8 @@
+- hosts: all
+ gather_facts: no
+ collections:
+ - amazon.aws
+ - community.aws
+ roles:
+ # Test the behaviour of module_utils.core.AnsibleAWSModule.client (boto3)
+ - 'ansibleawsmodule.client'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/files/amazonroot.pem b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/files/amazonroot.pem
new file mode 100644
index 000000000..a6f3e92af
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/files/amazonroot.pem
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsF
+ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6
+b24gUm9vdCBDQSAxMB4XDTE1MDUyNjAwMDAwMFoXDTM4MDExNzAwMDAwMFowOTEL
+MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv
+b3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALJ4gHHKeNXj
+ca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgHFzZM
+9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qw
+IFAGbHrQgLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6
+VOujw5H5SNz/0egwLX0tdHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L
+93FcXmn/6pUCyziKrlA4b9v7LWIbxcceVOF34GfID5yHI9Y/QCB/IIDEgEw+OyQm
+jgSubJrIqg0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
+AYYwHQYDVR0OBBYEFIQYzIU07LwMlJQuCFmcx7IQTgoIMA0GCSqGSIb3DQEBCwUA
+A4IBAQCY8jdaQZChGsV2USggNiMOruYou6r4lK5IpDB/G/wkjUu0yKGX9rbxenDI
+U5PMCCjjmCXPI6T53iHTfIUJrU6adTrCC2qJeHZERxhlbI1Bjjt/msv0tadQ1wUs
+N+gDS63pYaACbvXy8MWy7Vu33PqUXHeeE6V/Uq2V8viTO96LXFvKWlJbYK8U90vv
+o/ufQJVtMVT8QtPHRh8jrdkPSHCa2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU
+5MsI+yMRQ+hDKXJioaldXgjUkK642M4UwtBV8ob2xJNDd2ZhwLnoQdeXeGADbkpy
+rqXRfboQnoZsG4q5WTP468SQvvG5
+-----END CERTIFICATE-----
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/files/isrg-x1.pem b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/files/isrg-x1.pem
new file mode 100644
index 000000000..b85c8037f
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/files/isrg-x1.pem
@@ -0,0 +1,31 @@
+-----BEGIN CERTIFICATE-----
+MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
+TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
+cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
+WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
+ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
+h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
+0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
+A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
+T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
+B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
+B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
+KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
+OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
+jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
+qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
+rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
+hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
+ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
+3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
+NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
+ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
+TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
+jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
+oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
+4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
+mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
+emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
+-----END CERTIFICATE-----
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/library/example_module.py b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/library/example_module.py
new file mode 100644
index 000000000..5e2c8e3e8
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/library/example_module.py
@@ -0,0 +1,46 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# A bare-minimum Ansible Module based on AnsibleAWSModule used for testing some
+# of the core behaviour around AWS/Boto3 connection details
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def main():
+ module = AnsibleAWSModule(
+ argument_spec={},
+ supports_check_mode=True,
+ )
+
+ decorator = AWSRetry.jittered_backoff()
+ client = module.client('ec2', retry_decorator=decorator)
+
+ filters = ansible_dict_to_boto3_filter_list({'name': 'amzn2-ami-hvm-2.0.202006*-x86_64-gp2'})
+
+ try:
+ images = client.describe_images(aws_retry=True, ImageIds=[], Filters=filters, Owners=['amazon'], ExecutableUsers=[])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Fail JSON AWS')
+
+ # Return something, just because we can.
+ module.exit_json(
+ changed=False,
+ **camel_dict_to_snake_dict(images))
+
+
+if __name__ == '__main__':
+ main()
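
Aside on the example module above: ansible_dict_to_boto3_filter_list just turns a plain dict into the Name/Values filter list that boto3 describe_* calls expect. A rough stand-alone sketch of that conversion (illustrative only, not the collection's actual helper):

    # Simplified sketch of the dict -> boto3 filter-list conversion used above.
    # The real helper lives in the collection's module_utils and may differ in detail.
    def dict_to_boto3_filter_list(filters_dict):
        filters = []
        for key, value in filters_dict.items():
            if not isinstance(value, (list, tuple)):
                value = [value]
            filters.append({'Name': key, 'Values': [str(v) for v in value]})
        return filters

    # dict_to_boto3_filter_list({'name': 'amzn2-ami-hvm-2.0.202006*-x86_64-gp2'})
    # -> [{'Name': 'name', 'Values': ['amzn2-ami-hvm-2.0.202006*-x86_64-gp2']}]
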
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/meta/main.yml
new file mode 100644
index 000000000..d8b08ab22
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies: []
+collections:
+ - amazon.aws
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/ca_bundle.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/ca_bundle.yml
new file mode 100644
index 000000000..7ad4e7a34
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/ca_bundle.yml
@@ -0,0 +1,202 @@
+---
+- name: 'Create temporary location for CA files'
+ tempfile:
+ state: directory
+ suffix: 'test-CAs'
+ register: ca_tmp
+
+- name: 'Ensure we have the Amazon root CA available to us'
+ copy:
+ src: 'amazonroot.pem'
+ dest: '{{ ca_tmp.path }}/amazonroot.pem'
+ mode: 0644
+
+- name: 'Ensure we have another CA (ISRG-X1) bundle available to us'
+ copy:
+ src: 'isrg-x1.pem'
+ dest: '{{ ca_tmp.path }}/isrg-x1.pem'
+ mode: 0644
+
+##################################################################################
+# Test disabling cert validation (make sure we don't error)
+
+- name: 'Test basic operation using default CA bundle (no validation) - parameter'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ validate_certs: False
+ register: default_bundle_result
+
+- assert:
+ that:
+ - default_bundle_result is successful
+
+##################################################################################
+# Tests using Amazon's CA (the one the endpoint certs should be signed with)
+
+- name: 'Test basic operation using the Amazon root CA - parameter'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ aws_ca_bundle: '{{ ca_tmp.path }}/amazonroot.pem'
+ register: amazon_ca_result
+
+- assert:
+ that:
+ - amazon_ca_result is successful
+
+- name: 'Test basic operation using the Amazon root CA - environment'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ environment:
+ AWS_CA_BUNDLE: '{{ ca_tmp.path }}/amazonroot.pem'
+ register: amazon_ca_result
+
+- assert:
+ that:
+ - amazon_ca_result is successful
+
+- name: 'Test basic operation using the Amazon root CA (no validation) - parameter'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ aws_ca_bundle: '{{ ca_tmp.path }}/amazonroot.pem'
+ validate_certs: False
+ register: amazon_ca_result
+
+- assert:
+ that:
+ - amazon_ca_result is successful
+
+- name: 'Test basic operation using the Amazon root CA (no validation) - environment'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ validate_certs: False
+ environment:
+ AWS_CA_BUNDLE: '{{ ca_tmp.path }}/amazonroot.pem'
+ register: amazon_ca_result
+
+- assert:
+ that:
+ - amazon_ca_result is successful
+
+##################################################################################
+# Tests using ISRG's CA (one that the endpoint certs *aren't* signed with)
+
+- name: 'Test basic operation using a different CA - parameter'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ aws_ca_bundle: '{{ ca_tmp.path }}/isrg-x1.pem'
+ register: isrg_ca_result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - isrg_ca_result is failed
+ # Caught when we try to do something, and passed to fail_json_aws
+ - '"CERTIFICATE_VERIFY_FAILED" in isrg_ca_result.msg'
+ - '"Fail JSON AWS" in isrg_ca_result.msg'
+
+- name: 'Test basic operation using a different CA - environment'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ environment:
+ AWS_CA_BUNDLE: '{{ ca_tmp.path }}/isrg-x1.pem'
+ register: isrg_ca_result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - isrg_ca_result is failed
+ # Caught when we try to do something, and passed to fail_json_aws
+ - '"CERTIFICATE_VERIFY_FAILED" in isrg_ca_result.msg'
+ - '"Fail JSON AWS" in isrg_ca_result.msg'
+
+- name: 'Test basic operation using a different CA (no validation) - parameter'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ aws_ca_bundle: '{{ ca_tmp.path }}/isrg-x1.pem'
+ validate_certs: False
+ register: isrg_ca_result
+
+- assert:
+ that:
+ - isrg_ca_result is successful
+
+- name: 'Test basic operation using a different CA (no validation) - environment'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ validate_certs: False
+ environment:
+ AWS_CA_BUNDLE: '{{ ca_tmp.path }}/isrg-x1.pem'
+ register: isrg_ca_result
+
+- assert:
+ that:
+ - isrg_ca_result is successful
+
+##################################################################################
+# https://github.com/ansible-collections/amazon.aws/issues/129
+- name: 'Test CA bundle is used when authenticating with a profile - implied validation'
+ example_module:
+ profile: 'test_profile'
+ aws_ca_bundle: '{{ ca_tmp.path }}/isrg-x1.pem'
+ register: isrg_ca_result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - isrg_ca_result is failed
+ # Caught when we try to do something, and passed to fail_json_aws
+ - '"CERTIFICATE_VERIFY_FAILED" in isrg_ca_result.msg'
+ - '"Fail JSON AWS" in isrg_ca_result.msg'
+
+- name: 'Test CA bundle is used when authenticating with a profile - explicit validation'
+ example_module:
+ profile: 'test_profile'
+ aws_ca_bundle: '{{ ca_tmp.path }}/isrg-x1.pem'
+ validate_certs: True
+ register: isrg_ca_result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - isrg_ca_result is failed
+ # Caught when we try to do something, and passed to fail_json_aws
+ - '"CERTIFICATE_VERIFY_FAILED" in isrg_ca_result.msg'
+ - '"Fail JSON AWS" in isrg_ca_result.msg'
+
+- name: 'Test CA bundle is used when authenticating with a profile - explicitly disable validation'
+ example_module:
+ profile: 'test_profile'
+ aws_ca_bundle: '{{ ca_tmp.path }}/isrg-x1.pem'
+ validate_certs: False
+ register: isrg_ca_result
+
+- assert:
+ that:
+ - isrg_ca_result is success
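
Mapping the options exercised above back to plain boto3 (an assumed equivalence, outside of AnsibleAWSModule): validate_certs and aws_ca_bundle correspond to the client's verify argument, and AWS_CA_BUNDLE is honoured by botocore directly. A minimal sketch with a hypothetical bundle path:

    import boto3

    # validate_certs: False      -> verify=False
    # aws_ca_bundle: <pem path>  -> verify='<pem path>' (or the AWS_CA_BUNDLE env var)
    ec2 = boto3.client(
        'ec2',
        region_name='us-east-1',
        verify='/tmp/test-CAs/amazonroot.pem',  # hypothetical path
    )
    ec2.describe_images(Owners=['amazon'],
                        Filters=[{'Name': 'name', 'Values': ['amzn2-ami-hvm-*']}])
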
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/credentials.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/credentials.yml
new file mode 100644
index 000000000..94925829b
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/credentials.yml
@@ -0,0 +1,281 @@
+---
+##################################################################################
+# Tests using standard credential parameters
+
+- name: 'Test basic operation using simple credentials (simple-parameters)'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: credential_result
+
+- assert:
+ that:
+ - credential_result is successful
+
+- name: 'Test basic operation using simple credentials (aws-parameters)'
+ example_module:
+ aws_region: '{{ aws_region }}'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ register: credential_result
+
+- assert:
+ that:
+ - credential_result is successful
+
+- name: 'Test basic operation using simple credentials (ec2-parameters)'
+ example_module:
+ ec2_region: '{{ aws_region }}'
+ ec2_access_key: '{{ aws_access_key }}'
+ ec2_secret_key: '{{ aws_secret_key }}'
+ access_token: '{{ security_token }}'
+ register: credential_result
+
+- assert:
+ that:
+ - credential_result is successful
+
+##################################################################################
+# Tests using standard credentials from environment variables
+
+- name: 'Test basic operation using simple credentials (aws-environment)'
+ example_module:
+ environment:
+ AWS_REGION: '{{ aws_region }}'
+ AWS_ACCESS_KEY_ID: '{{ aws_access_key }}'
+ AWS_SECRET_ACCESS_KEY: '{{ aws_secret_key }}'
+ AWS_SECURITY_TOKEN: '{{ security_token }}'
+ register: credential_result
+
+- assert:
+ that:
+ - credential_result is successful
+
+- name: 'Test basic operation using simple credentials (aws2-environment)'
+ example_module:
+ environment:
+ AWS_DEFAULT_REGION: '{{ aws_region }}'
+ AWS_ACCESS_KEY: '{{ aws_access_key }}'
+ AWS_SECRET_KEY: '{{ aws_secret_key }}'
+ AWS_SESSION_TOKEN: '{{ security_token }}'
+ register: credential_result
+
+- assert:
+ that:
+ - credential_result is successful
+
+- name: 'Test basic operation using simple credentials (ec2-environment)'
+ example_module:
+ environment:
+ EC2_REGION: '{{ aws_region }}'
+ EC2_ACCESS_KEY: '{{ aws_access_key }}'
+ EC2_SECRET_KEY: '{{ aws_secret_key }}'
+ EC2_SECURITY_TOKEN: '{{ security_token }}'
+ register: credential_result
+
+- assert:
+ that:
+ - credential_result is successful
+
+##################################################################################
+# Tests for missing parameters
+
+- name: 'Test with missing region'
+ example_module:
+ region: '{{ omit }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: missing_region
+ ignore_errors: True
+
+- assert:
+ that:
+ - missing_region is failed
+ - '"requires a region" in missing_region.msg'
+
+- name: 'Test with missing access key'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ omit }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: missing_access
+ ignore_errors: True
+
+- assert:
+ that:
+ - missing_access is failed
+ - '"Partial credentials found" in missing_access.msg'
+ - '"aws_access_key_id" in missing_access.msg'
+
+- name: 'Test with missing secret key'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ omit }}'
+ security_token: '{{ security_token }}'
+ register: missing_secret
+ ignore_errors: True
+
+- assert:
+ that:
+ - missing_secret is failed
+ - '"Partial credentials found" in missing_secret.msg'
+ - '"aws_secret_access_key" in missing_secret.msg'
+
+- name: 'Test with missing security token'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ omit }}'
+ register: missing_token
+ ignore_errors: True
+
+- assert:
+ that:
+ - missing_token is failed
+ # Caught when we try to do something, and passed to fail_json_aws
+ - '"AuthFailure" in missing_token.msg'
+ - '"Fail JSON AWS" in missing_token.msg'
+ - '"error" in missing_token'
+ - '"code" in missing_token.error'
+ - missing_token.error.code == 'AuthFailure'
+ - '"message" in missing_token.error'
+
+##################################################################################
+# Run an additional authentication request to ensure that we're out of any
+# deny-lists caused by bad requests
+- name: 'Perform valid authentication to avoid deny-listing'
+ example_module:
+ aws_region: '{{ aws_region }}'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ register: anti_denylist
+ until: anti_denylist is success
+ retries: 5
+ delay: 5
+
+##################################################################################
+# Tests for bad parameters
+
+- name: 'Test with bad region'
+ example_module:
+ region: 'junk-example'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: bad_region
+ ignore_errors: True
+
+- assert:
+ that:
+ - bad_region is failed
+ - '"msg" in bad_region'
+ - '"Could not connect to the endpoint URL" in bad_region.msg'
+ - '"Fail JSON AWS" in bad_region.msg'
+ - '"ec2.junk-example" in bad_region.msg'
+
+- name: 'Test with bad access key'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: 'junk-example'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: bad_access
+ ignore_errors: True
+
+- assert:
+ that:
+ - bad_access is failed
+ # Caught when we try to do something, and passed to fail_json_aws
+ - '"AuthFailure" in bad_access.msg'
+ - '"Fail JSON AWS" in bad_access.msg'
+ - '"error" in bad_access'
+ - '"code" in bad_access.error'
+ - bad_access.error.code == 'AuthFailure'
+ - '"message" in bad_access.error'
+
+# Run an additional authentication request to ensure that we're out of any
+# deny-lists caused by bad requests
+- name: 'Perform valid authentication to avoid deny-listing'
+ example_module:
+ aws_region: '{{ aws_region }}'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ register: anti_denylist
+ until: anti_denylist is success
+ retries: 5
+ delay: 5
+
+- name: 'Test with bad secret key'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: 'junk-example'
+ security_token: '{{ security_token }}'
+ register: bad_secret
+ ignore_errors: True
+
+- assert:
+ that:
+ - bad_secret is failed
+ # Caught when we try to do something, and passed to fail_json_aws
+ - '"AuthFailure" in bad_secret.msg'
+ - '"Fail JSON AWS" in bad_secret.msg'
+ - '"error" in bad_secret'
+ - '"code" in bad_secret.error'
+ - bad_secret.error.code == 'AuthFailure'
+ - '"message" in bad_secret.error'
+
+# Run an additional authentication request to ensure that we're out of any
+# deny-lists caused by bad requests
+- name: 'Perform valid authentication to avoid deny-listing'
+ example_module:
+ aws_region: '{{ aws_region }}'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ register: anti_denylist
+ until: anti_denylist is success
+ retries: 5
+ delay: 5
+
+- name: 'Test with bad security token'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: 'junk-example'
+ register: bad_token
+ ignore_errors: True
+
+- assert:
+ that:
+ - bad_token is failed
+ # Caught when we try to do something, and passed to fail_json_aws
+ - '"AuthFailure" in bad_token.msg'
+ - '"Fail JSON AWS" in bad_token.msg'
+ - '"error" in bad_token'
+ - '"code" in bad_token.error'
+ - bad_token.error.code == 'AuthFailure'
+ - '"message" in bad_token.error'
+
+# Run an additional authentication request to ensure that we're out of any
+# deny-lists caused by bad requests
+- name: 'Perform valid authentication to avoid deny-listing'
+ example_module:
+ aws_region: '{{ aws_region }}'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ register: anti_denylist
+ until: anti_denylist is success
+ retries: 5
+ delay: 5
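
The parameter and environment variants above all resolve to the same boto3 session arguments; a minimal sketch with placeholder values:

    import os
    import boto3

    # Explicit parameters (mirrors access_key / secret_key / security_token above)
    session = boto3.session.Session(
        aws_access_key_id='AKIAEXAMPLE',         # placeholder
        aws_secret_access_key='EXAMPLE-SECRET',  # placeholder
        aws_session_token='EXAMPLE-TOKEN',       # placeholder
        region_name='us-east-1',
    )

    # Environment variables (mirrors the AWS_* environment blocks above)
    os.environ['AWS_ACCESS_KEY_ID'] = 'AKIAEXAMPLE'
    os.environ['AWS_SECRET_ACCESS_KEY'] = 'EXAMPLE-SECRET'
    os.environ['AWS_SESSION_TOKEN'] = 'EXAMPLE-TOKEN'
    env_session = boto3.session.Session(region_name='us-east-1')
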
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/endpoints.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/endpoints.yml
new file mode 100644
index 000000000..590af9134
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/endpoints.yml
@@ -0,0 +1,123 @@
+---
+##################################################################################
+# Tests using Endpoints
+
+- name: 'Test basic operation using standard endpoint (aws-parameters)'
+ example_module:
+ region: '{{ aws_region }}'
+ aws_endpoint_url: 'https://ec2.{{ aws_region }}.amazonaws.com'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ register: standard_endpoint_result
+
+- name: 'Check that we connected to the standard endpoint'
+ assert:
+ that:
+ - standard_endpoint_result is successful
+ - '"ec2:DescribeImages" in standard_endpoint_result.resource_actions'
+
+# The FIPS endpoints aren't available in every region; these tests will trigger
+# errors outside of: [ us-east-1, us-east-2, us-west-1, us-west-2 ]
+
+- name: 'Test basic operation using FIPS endpoint (aws-parameters)'
+ example_module:
+ region: '{{ aws_region }}'
+ aws_endpoint_url: 'https://ec2-fips.us-east-1.amazonaws.com'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ register: fips_endpoint_result
+
+- name: 'Check that we connected to the FIPS endpoint'
+ assert:
+ that:
+ - fips_endpoint_result is successful
+ - '"ec2-fips:DescribeImages" in fips_endpoint_result.resource_actions'
+
+- name: 'Test basic operation using FIPS endpoint (endpoint_url parameter)'
+ example_module:
+ region: '{{ aws_region }}'
+ endpoint_url: 'https://ec2-fips.us-east-1.amazonaws.com'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ register: fips_endpoint_result
+
+- name: 'Check that we connected to the FIPS endpoint'
+ assert:
+ that:
+ - fips_endpoint_result is successful
+ - '"ec2-fips:DescribeImages" in fips_endpoint_result.resource_actions'
+
+- name: 'Test basic operation using FIPS endpoint (ec2_url parameter)'
+ example_module:
+ region: '{{ aws_region }}'
+ ec2_url: 'https://ec2-fips.us-east-1.amazonaws.com'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ register: fips_endpoint_result
+
+- name: 'Check that we connected to the FIPS endpoint'
+ assert:
+ that:
+ - fips_endpoint_result is successful
+ - '"ec2-fips:DescribeImages" in fips_endpoint_result.resource_actions'
+
+##################################################################################
+# Tests using environment variables
+
+- name: 'Test basic operation using FIPS endpoint (aws-environment)'
+ example_module:
+ region: '{{ aws_region }}'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ environment:
+ AWS_URL: 'https://ec2-fips.us-east-1.amazonaws.com'
+ register: fips_endpoint_result
+
+- name: 'Check that we connected to the FIPS endpoint'
+ assert:
+ that:
+ - fips_endpoint_result is successful
+ - '"ec2-fips:DescribeImages" in fips_endpoint_result.resource_actions'
+
+- name: 'Test basic operation using FIPS endpoint (ec2-environment)'
+ example_module:
+ region: '{{ aws_region }}'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ environment:
+ EC2_URL: 'https://ec2-fips.us-east-1.amazonaws.com'
+ register: fips_endpoint_result
+
+- name: 'Check that we connected to the FIPS endpoint'
+ assert:
+ that:
+ - fips_endpoint_result is successful
+ - '"ec2-fips:DescribeImages" in fips_endpoint_result.resource_actions'
+
+##################################################################################
+# Tests using a bad endpoint URL
+# - This demonstrates that endpoint_url overrides the region-derived default
+
+- name: 'Test with bad endpoint URL'
+ example_module:
+ region: '{{ aws_region }}'
+ endpoint_url: 'https://junk.{{ aws_region }}.amazonaws.com'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: bad_endpoint
+ ignore_errors: True
+
+- assert:
+ that:
+ - bad_endpoint is failed
+ - '"msg" in bad_endpoint'
+ - '"Could not connect to the endpoint URL" in bad_endpoint.msg'
+ - '"Fail JSON AWS" in bad_endpoint.msg'
+ - '"junk.{{ aws_region }}.amazonaws.com" in bad_endpoint.msg'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/main.yml
new file mode 100644
index 000000000..dc61fad68
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- name: 'Tests around standard credentials'
+ include_tasks: 'credentials.yml'
+
+- name: 'Tests around profiles'
+ include_tasks: 'profiles.yml'
+
+- name: 'Tests around endpoints'
+ include_tasks: 'endpoints.yml'
+
+- name: 'Tests around CA Bundles'
+ include_tasks: 'ca_bundle.yml'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/profiles.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/profiles.yml
new file mode 100644
index 000000000..1673a5e15
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/profiles.yml
@@ -0,0 +1,74 @@
+---
+##################################################################################
+# Tests using profiles instead of directly consuming credentials
+
+- name: 'Test basic operation using profile (simple-parameters)'
+ example_module:
+ profile: 'test_profile'
+ register: profile_result
+
+- assert:
+ that:
+ - profile_result is successful
+
+- name: 'Test basic operation using profile (aws-parameters)'
+ example_module:
+ aws_profile: 'test_profile'
+ register: profile_result
+
+- assert:
+ that:
+ - profile_result is successful
+
+- name: 'Test basic operation using profile (aws-environment)'
+ example_module:
+ environment:
+ AWS_PROFILE: 'test_profile'
+ register: profile_result
+
+- assert:
+ that:
+ - profile_result is successful
+
+- name: 'Test basic operation using profile (aws2-environment)'
+ example_module:
+ environment:
+ AWS_DEFAULT_PROFILE: 'test_profile'
+ register: profile_result
+
+- assert:
+ that:
+ - profile_result is successful
+
+##################################################################################
+# Tests with bad profile
+
+- name: 'Test with bad profile'
+ example_module:
+ profile: 'junk-profile'
+ register: bad_profile
+ ignore_errors: True
+
+- assert:
+ that:
+ - bad_profile is failed
+ - '"msg" in bad_profile'
+ - '"junk-profile" in bad_profile.msg'
+ - '"could not be found" in bad_profile.msg'
+
+- name: 'Test with profile and credentials (should error)'
+ example_module:
+ profile: 'test_profile'
+ aws_region: '{{ aws_region }}'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ register: bad_profile
+ ignore_errors: True
+
+- assert:
+ that:
+ - bad_profile is failed
+ - '"msg" in bad_profile'
+ - '"Passing both" in bad_profile.msg'
+ - '"not supported" in bad_profile.msg'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/runme.sh b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/runme.sh
new file mode 100755
index 000000000..9b0536d26
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/runme.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ANSIBLE_ROLES_PATH="../"
+# Boto3
+AWS_CONFIG_FILE="$( pwd )/boto3_config"
+# Boto2
+BOTO_CONFIG="$( pwd )/boto3_config"
+
+export ANSIBLE_ROLES_PATH
+export AWS_CONFIG_FILE
+export BOTO_CONFIG
+
+ansible-playbook setup.yml -i localhost "$@"
+ansible-playbook main.yml -i inventory "$@" -e "@session_credentials.yml"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/setup.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/setup.yml
new file mode 100644
index 000000000..9b219eb20
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/setup.yml
@@ -0,0 +1,40 @@
+---
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ # ===========================================================
+ # While CI uses a dedicated session, the easiest way to run
+ # tests outside of CI is with a simple access/secret key pair.
+ #
+ # For consistency, use sts_session_token to grab session
+ # credentials if we're not already using a session
+ # Note: this can't be done within a session, hence the slightly
+ # strange dance
+ - name: 'Get a session token if we are using a basic key'
+ when:
+ - security_token is not defined
+ block:
+ - name: 'Get a session token'
+ sts_session_token:
+ region: '{{ aws_region }}'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ register: session_token
+ no_log: true
+ - name: 'Override initial tokens'
+ set_fact:
+ session_access_key: '{{ session_token.sts_creds.access_key }}'
+ session_secret_key: '{{ session_token.sts_creds.secret_key }}'
+ session_security_token: '{{ session_token.sts_creds.session_token }}'
+ no_log: true
+
+ - name: 'Write out credentials'
+ template:
+ dest: './session_credentials.yml'
+ src: 'session_credentials.yml.j2'
+
+ - name: 'Write out boto config file'
+ template:
+ dest: './boto3_config'
+ src: 'boto_config.j2'
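
For context, the sts_session_token task above is roughly equivalent to the following boto3 call (a sketch with placeholder keys):

    import boto3

    sts = boto3.client(
        'sts',
        region_name='us-east-1',
        aws_access_key_id='AKIAEXAMPLE',         # placeholder
        aws_secret_access_key='EXAMPLE-SECRET',  # placeholder
    )
    creds = sts.get_session_token()['Credentials']
    # creds['AccessKeyId'], creds['SecretAccessKey'] and creds['SessionToken']
    # feed the session_credentials.yml template below.
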
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/templates/boto_config.j2 b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/templates/boto_config.j2
new file mode 100644
index 000000000..f8668f057
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/templates/boto_config.j2
@@ -0,0 +1,5 @@
+[profile test_profile]
+region = {{ aws_region }}
+aws_access_key_id = {{ session_access_key | default(aws_access_key) }}
+aws_secret_access_key = {{ session_secret_key | default(aws_secret_key) }}
+aws_security_token = {{ session_security_token | default(security_token) }}
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/templates/session_credentials.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/templates/session_credentials.yml.j2
new file mode 100644
index 000000000..bb0304393
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/templates/session_credentials.yml.j2
@@ -0,0 +1,3 @@
+aws_access_key: {{ session_access_key | default(aws_access_key) }}
+aws_secret_key: {{ session_secret_key | default(aws_secret_key) }}
+security_token: {{ session_security_token | default(security_token) }}
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/aliases b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/aliases
new file mode 100644
index 000000000..4ef4b2067
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/aliases
@@ -0,0 +1 @@
+cloud/aws
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/inventory b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/inventory
new file mode 100644
index 000000000..5093e8582
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/inventory
@@ -0,0 +1,6 @@
+[tests]
+localhost
+
+[all:vars]
+ansible_connection=local
+ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/main.yml
new file mode 100644
index 000000000..4edc36377
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/main.yml
@@ -0,0 +1,7 @@
+- hosts: all
+ gather_facts: no
+ collections:
+ - amazon.aws
+ roles:
+  # Test the behaviour of module_utils.waiters.get_waiter (boto3)
+ - 'get_waiter'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/library/example_module.py b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/library/example_module.py
new file mode 100644
index 000000000..4e16fb1bc
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/library/example_module.py
@@ -0,0 +1,39 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# A bare-minimum Ansible Module based on AnsibleAWSModule used for testing some
+# of the core behaviour around AWS/Boto3 connection details
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+
+
+def main():
+ argument_spec = dict(
+ client=dict(required=True, type='str'),
+ waiter_name=dict(required=True, type='str'),
+ with_decorator=dict(required=False, type='bool', default=False),
+ )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ decorator = None
+ if module.params.get('with_decorator'):
+ decorator = AWSRetry.jittered_backoff()
+
+ client = module.client(module.params.get('client'), retry_decorator=decorator)
+ waiter = get_waiter(client, module.params.get('waiter_name'))
+
+ module.exit_json(changed=False, waiter_attributes=dir(waiter))
+
+
+if __name__ == '__main__':
+ main()
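
For comparison, botocore's built-in waiters come straight from the client; the collection's get_waiter() used above exists so waiters defined in the collection can be served the same way. A small sketch with a standard botocore waiter and a hypothetical VPC ID:

    import boto3

    ec2 = boto3.client('ec2', region_name='us-east-1')
    waiter = ec2.get_waiter('vpc_available')         # standard botocore waiter
    # waiter.wait(VpcIds=['vpc-0123456789abcdef0'])  # hypothetical ID
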
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/meta/main.yml
new file mode 100644
index 000000000..d8b08ab22
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies: []
+collections:
+ - amazon.aws
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/tasks/main.yml
new file mode 100644
index 000000000..466d9584e
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/tasks/main.yml
@@ -0,0 +1,36 @@
+---
+- module_defaults:
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ block:
+ - name: 'Attempt to get a waiter (no retry decorator)'
+ example_module:
+ client: 'ec2'
+ waiter_name: 'internet_gateway_exists'
+ register: test_no_decorator
+
+ - assert:
+ that:
+ - test_no_decorator is succeeded
+ # Standard methods on a boto3 wrapper
+ - '"wait" in test_no_decorator.waiter_attributes'
+ - '"name" in test_no_decorator.waiter_attributes'
+ - '"config" in test_no_decorator.waiter_attributes'
+
+ - name: 'Attempt to get a waiter (with decorator)'
+ example_module:
+ client: 'ec2'
+ waiter_name: 'internet_gateway_exists'
+ with_decorator: True
+ register: test_with_decorator
+
+ - assert:
+ that:
+ - test_with_decorator is succeeded
+ # Standard methods on a boto3 wrapper
+ - '"wait" in test_with_decorator.waiter_attributes'
+ - '"name" in test_with_decorator.waiter_attributes'
+ - '"config" in test_with_decorator.waiter_attributes'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/runme.sh b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/runme.sh
new file mode 100755
index 000000000..78a6f6dbe
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/runme.sh
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ANSIBLE_ROLES_PATH="../"
+export ANSIBLE_ROLES_PATH
+
+ansible-playbook main.yml -i inventory "$@"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/aliases
new file mode 100644
index 000000000..6e9f239e0
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/aliases
@@ -0,0 +1,5 @@
+time=10m
+
+cloud/aws
+
+rds_cluster_info
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/inventory b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/inventory
new file mode 100644
index 000000000..1acd86420
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/inventory
@@ -0,0 +1,23 @@
+[tests]
+# basic rds_cluster creation tests
+create
+
+# restore cluster tests
+restore
+
+# TODO: Cannot be tested in the CI because:
+# An error occurred (InvalidParameterValue) when calling the CreateDBCluster operation: Replication from cluster in same region is not supported
+# promote
+
+# DB security group tests
+create_sgs
+
+# basic modify operations applied on the rds cluster
+modify
+
+# tag rds cluster test
+tag
+
+[all:vars]
+ansible_connection=local
+ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/main.yml
new file mode 100644
index 000000000..2674f4268
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/main.yml
@@ -0,0 +1,10 @@
+# Beware: most of our tests here are run in parallel.
+# To add new tests you'll need to add a new host to the inventory and a matching
+# test_{{ inventory_hostname }}.yml file in roles/rds_cluster/tasks/
+
+- hosts: all
+ gather_facts: no
+ strategy: free
+ serial: 6
+ roles:
+ - rds_cluster
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/defaults/main.yml
new file mode 100644
index 000000000..f1217a95e
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/defaults/main.yml
@@ -0,0 +1,36 @@
+# defaults file for rds_cluster
+
+# Create cluster
+cluster_id: ansible-test-{{ inventory_hostname | replace('_','-') }}{{ tiny_prefix }}
+username: testrdsusername
+password: test-rds_password
+engine: aurora
+port: 3306
+tags_create:
+ Name: ansible-test-cluster-{{ tiny_prefix }}
+ Created_By: Ansible_rds_cluster_integration_test
+
+# Modify cluster
+new_cluster_id: ansible-test-cluster-{{ tiny_prefix }}-new
+new_port: 1155
+new_password: test-rds_password-new
+new_db_parameter_group_name: ansible-test-db-parameter-group-{{ tiny_prefix }}-new
+
+# Tag cluster
+tags_patch:
+ Name: '{{ tiny_prefix }}-new'
+ Created_by: Ansible rds_cluster integration tests
+
+# Create cluster in a VPC
+vpc_name: ansible-test-vpc-{{ tiny_prefix }}
+vpc_cidr: 10.{{ 256 | random(seed=tiny_prefix) }}.0.0/16
+subnets:
+- {cidr: '10.{{ 256 | random(seed=tiny_prefix) }}.1.0/24', zone: '{{ aws_region }}a'}
+- {cidr: '10.{{ 256 | random(seed=tiny_prefix) }}.2.0/24', zone: '{{ aws_region }}b'}
+- {cidr: '10.{{ 256 | random(seed=tiny_prefix) }}.3.0/24', zone: '{{ aws_region }}c'}
+- {cidr: '10.{{ 256 | random(seed=tiny_prefix) }}.4.0/24', zone: '{{ aws_region }}d'}
+
+security_groups:
+- '{{ tiny_prefix }}-sg-1'
+- '{{ tiny_prefix }}-sg-2'
+- '{{ tiny_prefix }}-sg-3'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/meta/main.yml
new file mode 100644
index 000000000..73b314ff7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/meta/main.yml
@@ -0,0 +1 @@
+--- \ No newline at end of file
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/main.yml
new file mode 100644
index 000000000..55f8a551e
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/main.yml
@@ -0,0 +1,10 @@
+- name: rds_cluster integration tests
+ module_defaults:
+ group/aws:
+ region: '{{ aws_region }}'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+
+ block:
+ - include: ./test_{{ inventory_hostname }}.yml
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_create.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_create.yml
new file mode 100644
index 000000000..54b3143ff
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_create.yml
@@ -0,0 +1,123 @@
+- block:
+ - name: Ensure the resource doesn't exist
+ rds_cluster:
+ id: '{{ cluster_id }}'
+ state: absent
+      engine: '{{ engine }}'
+ username: '{{ username }}'
+ password: '{{ password }}'
+ skip_final_snapshot: true
+ register: _result_delete_db_cluster
+
+ - assert:
+ that:
+ - not _result_delete_db_cluster.changed
+ ignore_errors: yes
+
+ - name: Get info of all existing clusters
+ rds_cluster_info:
+ register: _result_cluster_info
+
+ - assert:
+ that:
+ - _result_cluster_info is successful
+
+ - name: Create minimal aurora cluster in default VPC and default subnet group (CHECK
+ MODE)
+ rds_cluster:
+ engine: '{{ engine }}'
+ username: '{{ username }}'
+ password: '{{ password }}'
+ cluster_id: '{{ cluster_id }}'
+ tags: '{{ tags_create }}'
+ register: _result_create_db_cluster
+ check_mode: true
+
+ - assert:
+ that:
+ - _result_create_db_cluster.changed
+
+ - name: Create minimal aurora cluster in default VPC and default subnet group
+ rds_cluster:
+ engine: '{{ engine }}'
+ username: '{{ username }}'
+ password: '{{ password }}'
+ cluster_id: '{{ cluster_id }}'
+ tags: '{{ tags_create }}'
+ register: _result_create_db_cluster
+
+ - assert:
+ that:
+ - _result_create_db_cluster.changed
+ - "'allocated_storage' in _result_create_db_cluster"
+ - _result_create_db_cluster.allocated_storage == 1
+ - "'cluster_create_time' in _result_create_db_cluster"
+ - _result_create_db_cluster.copy_tags_to_snapshot == false
+ - "'db_cluster_arn' in _result_create_db_cluster"
+ - "'db_cluster_identifier' in _result_create_db_cluster"
+ - _result_create_db_cluster.db_cluster_identifier == "{{ cluster_id }}"
+ - "'db_cluster_parameter_group' in _result_create_db_cluster"
+ - "'db_cluster_resource_id' in _result_create_db_cluster"
+ - "'endpoint' in _result_create_db_cluster"
+ - "'engine' in _result_create_db_cluster"
+ - _result_create_db_cluster.engine == "{{ engine }}"
+ - "'engine_mode' in _result_create_db_cluster"
+ - _result_create_db_cluster.engine_mode == "serverless"
+ - "'engine_version' in _result_create_db_cluster"
+ - "'master_username' in _result_create_db_cluster"
+ - _result_create_db_cluster.master_username == "{{ username }}"
+ - "'port' in _result_create_db_cluster"
+ - _result_create_db_cluster.port == {{ port }}
+ - "'status' in _result_create_db_cluster"
+ - _result_create_db_cluster.status == 'available'
+ - _result_create_db_cluster.storage_encrypted == true
+ - "'tags' in _result_create_db_cluster"
+ - _result_create_db_cluster.tags | length == 2
+ - _result_create_db_cluster.tags["Created_By"] == "{{ tags_create["Created_By"]}}"
+ - _result_create_db_cluster.tags["Name"] == "{{ tags_create["Name"]}}"
+ - "'vpc_security_groups' in _result_create_db_cluster"
+ - name: Get info of the existing cluster
+ rds_cluster_info:
+ cluster_id: '{{ cluster_id }}'
+ register: result_cluster_info
+
+ - assert:
+ that:
+ - result_cluster_info is successful
+
+ - name: Create minimal aurora cluster in default VPC and default subnet group -
+ idempotence (CHECK MODE)
+ rds_cluster:
+ engine: '{{ engine }}'
+ username: '{{ username }}'
+ password: '{{ password }}'
+ cluster_id: '{{ cluster_id }}'
+ tags: '{{ tags_create }}'
+ register: _result_create_db_cluster
+ check_mode: true
+
+ - assert:
+ that:
+ - not _result_create_db_cluster.changed
+
+ - name: Create minimal aurora cluster in default VPC and default subnet group -
+ idempotence
+ rds_cluster:
+ engine: '{{ engine }}'
+ username: '{{ username }}'
+ password: '{{ password }}'
+ cluster_id: '{{ cluster_id }}'
+ tags: '{{ tags_create }}'
+ register: _result_create_db_cluster
+
+ - assert:
+ that:
+ - not _result_create_db_cluster.changed
+
+ always:
+ - name: Delete DB cluster without creating a final snapshot
+ rds_cluster:
+ state: absent
+ cluster_id: '{{ cluster_id }}'
+ skip_final_snapshot: true
+ ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_create_sgs.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_create_sgs.yml
new file mode 100644
index 000000000..99362ee07
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_create_sgs.yml
@@ -0,0 +1,208 @@
+- block:
+ - name: Ensure the resource doesn't exist
+ rds_cluster:
+ id: '{{ cluster_id }}'
+ state: absent
+      engine: '{{ engine }}'
+ username: '{{ username }}'
+ password: '{{ password }}'
+ skip_final_snapshot: true
+ register: _result_delete_db_cluster
+
+ - assert:
+ that:
+ - not _result_delete_db_cluster.changed
+ ignore_errors: yes
+
+ - name: Create a VPC
+ ec2_vpc_net:
+ name: '{{ vpc_name }}'
+ state: present
+ cidr_block: '{{ vpc_cidr }}'
+ tags:
+ Name: '{{ vpc_name }}'
+ Description: Created by rds_cluster integration tests
+ register: _result_create_vpc
+
+ - name: Create subnets
+ ec2_vpc_subnet:
+ cidr: '{{ item.cidr }}'
+ az: '{{ item.zone }}'
+ vpc_id: '{{ _result_create_vpc.vpc.id }}'
+ tags:
+ Name: '{{ resource_prefix }}-subnet'
+ Description: created by rds_cluster integration tests
+ state: present
+ register: _result_create_subnet
+ loop: '{{ subnets }}'
+
+ - name: Create security groups
+ ec2_group:
+ name: '{{ item }}'
+ description: Created by rds_cluster integration tests
+ state: present
+ register: _result_create_sg
+ loop: '{{ security_groups }}'
+
+ - name: Create an RDS cluster in the VPC with two security groups
+ rds_cluster:
+ id: '{{ cluster_id }}'
+ engine: '{{ engine }}'
+ username: '{{ username }}'
+ password: '{{ password }}'
+ vpc_security_group_ids:
+ - '{{ _result_create_sg.results.0.group_id }}'
+ - '{{ _result_create_sg.results.1.group_id }}'
+ register: _result_create_db_cluster
+
+ - assert:
+ that:
+ - _result_create_db_cluster.changed
+ - "'allocated_storage' in _result_create_db_cluster"
+ - _result_create_db_cluster.allocated_storage == 1
+ - "'cluster_create_time' in _result_create_db_cluster"
+ - _result_create_db_cluster.copy_tags_to_snapshot == false
+ - "'db_cluster_arn' in _result_create_db_cluster"
+ - "'db_cluster_identifier' in _result_create_db_cluster"
+ - _result_create_db_cluster.db_cluster_identifier == "{{ cluster_id }}"
+ - "'db_cluster_parameter_group' in _result_create_db_cluster"
+ - "'db_cluster_resource_id' in _result_create_db_cluster"
+ - "'endpoint' in _result_create_db_cluster"
+ - "'engine' in _result_create_db_cluster"
+ - _result_create_db_cluster.engine == "{{ engine }}"
+ - "'engine_mode' in _result_create_db_cluster"
+ - _result_create_db_cluster.engine_mode == "serverless"
+ - "'engine_version' in _result_create_db_cluster"
+ - "'master_username' in _result_create_db_cluster"
+ - _result_create_db_cluster.master_username == "{{ username }}"
+ - "'port' in _result_create_db_cluster"
+ - _result_create_db_cluster.port == {{ port }}
+ - "'status' in _result_create_db_cluster"
+ - _result_create_db_cluster.status == 'available'
+ - _result_create_db_cluster.storage_encrypted == true
+ - "'tags' in _result_create_db_cluster"
+ - "'vpc_security_groups' in _result_create_db_cluster"
+ - _result_create_db_cluster.vpc_security_groups | selectattr('status', 'in',
+ ['active', 'adding']) | list | length == 2
+
+ - name: Add a new security group without purge (check_mode)
+ rds_cluster:
+ id: '{{ cluster_id }}'
+ state: present
+ vpc_security_group_ids:
+ - '{{ _result_create_sg.results.2.group_id }}'
+ apply_immediately: true
+ purge_security_groups: false
+ check_mode: true
+ register: _result_create_db_cluster
+
+ - assert:
+ that:
+ - _result_create_db_cluster.changed
+
+ - name: Add a new security group without purge
+ rds_cluster:
+ id: '{{ cluster_id }}'
+ state: present
+ vpc_security_group_ids:
+ - '{{ _result_create_sg.results.2.group_id }}'
+ apply_immediately: true
+ purge_security_groups: false
+ register: _result_create_db_cluster
+
+ - assert:
+ that:
+ - _result_create_db_cluster.changed
+ - "'allocated_storage' in _result_create_db_cluster"
+ - _result_create_db_cluster.allocated_storage == 1
+ - "'cluster_create_time' in _result_create_db_cluster"
+ - _result_create_db_cluster.copy_tags_to_snapshot == false
+ - "'db_cluster_arn' in _result_create_db_cluster"
+ - "'db_cluster_identifier' in _result_create_db_cluster"
+ - _result_create_db_cluster.db_cluster_identifier == "{{ cluster_id }}"
+ - "'db_cluster_parameter_group' in _result_create_db_cluster"
+ - "'db_cluster_resource_id' in _result_create_db_cluster"
+ - "'endpoint' in _result_create_db_cluster"
+ - "'engine' in _result_create_db_cluster"
+ - _result_create_db_cluster.engine == "{{ engine }}"
+ - "'engine_mode' in _result_create_db_cluster"
+ - _result_create_db_cluster.engine_mode == "serverless"
+ - "'engine_version' in _result_create_db_cluster"
+ - "'master_username' in _result_create_db_cluster"
+ - _result_create_db_cluster.master_username == "{{ username }}"
+ - "'port' in _result_create_db_cluster"
+ - _result_create_db_cluster.port == {{ port }}
+ - "'status' in _result_create_db_cluster"
+ - _result_create_db_cluster.status == 'available'
+ - _result_create_db_cluster.storage_encrypted == true
+ - "'tags' in _result_create_db_cluster"
+ - "'vpc_security_groups' in _result_create_db_cluster"
+ - _result_create_db_cluster.vpc_security_groups | selectattr('status', 'in',
+ ['active', 'adding']) | list | length == 3
+
+ - name: Add a new security group without purge (test idempotence)
+ rds_cluster:
+ id: '{{ cluster_id }}'
+ state: present
+ vpc_security_group_ids:
+ - '{{ _result_create_sg.results.2.group_id }}'
+ apply_immediately: true
+ purge_security_groups: false
+ register: _result_create_db_cluster
+
+ - assert:
+ that:
+ - not _result_create_db_cluster.changed
+
+ - name: Add a security group with purge
+ rds_cluster:
+ id: '{{ cluster_id }}'
+ state: present
+ vpc_security_group_ids:
+      - '{{ _result_create_sg.results.2.group_id }}'
+ apply_immediately: true
+ register: _result_create_db_cluster
+
+ - assert:
+ that:
+ - _result_create_db_cluster.changed
+ - _result_create_db_cluster.db_cluster_identifier == '{{ cluster_id }}'
+ - _result_create_db_cluster.vpc_security_groups | selectattr('status', 'in',
+ ['active', 'adding']) | list | length == 1
+
+ always:
+ - name: Delete DB cluster without creating a final snapshot
+ rds_cluster:
+ state: absent
+ cluster_id: '{{ cluster_id }}'
+ skip_final_snapshot: true
+ ignore_errors: true
+
+ - name: Remove security groups
+ ec2_group:
+ name: '{{ item }}'
+ description: created by rds_cluster integration tests
+ state: absent
+ loop: '{{ security_groups }}'
+
+ - name: Remove subnets
+ ec2_vpc_subnet:
+ cidr: '{{ item.cidr }}'
+ az: '{{ item.zone }}'
+ vpc_id: '{{ _result_create_vpc.vpc.id }}'
+ tags:
+ Name: '{{ resource_prefix }}-subnet'
+ Description: Created by rds_cluster integration tests
+ state: absent
+ ignore_errors: yes
+ loop: '{{ subnets }}'
+
+ - name: Delete VPC
+ ec2_vpc_net:
+ name: '{{ vpc_name }}'
+ state: absent
+ cidr_block: '{{ vpc_cidr }}'
+ tags:
+ Name: '{{ vpc_name }}'
+ Description: Created by rds_cluster integration tests
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_modify.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_modify.yml
new file mode 100644
index 000000000..f72357ddc
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_modify.yml
@@ -0,0 +1,270 @@
+- block:
+ - name: Ensure the resource doesn't exist
+ rds_cluster:
+ id: '{{ cluster_id }}'
+ state: absent
+ engine: '{{ engine }}'
+ username: '{{ username }}'
+ password: '{{ password }}'
+ skip_final_snapshot: true
+ register: _result_delete_db_cluster
+
+ - assert:
+ that:
+ - not _result_delete_db_cluster.changed
+ ignore_errors: yes
+
+  # Following the Aurora Serverless V2 release, we use an aurora-mysql cluster
+  # here to avoid the following error when we try to adjust the port:
+  # "You currently can't modify EndpointPort with Aurora Serverless."
+ - name: Create an Aurora-MySQL DB cluster
+ rds_cluster:
+ id: '{{ cluster_id }}'
+ state: present
+ engine: aurora-mysql
+ engine_mode: provisioned
+ username: '{{ username }}'
+ password: '{{ password }}'
+ register: _result_create_source_db_cluster
+
+ - assert:
+ that:
+ - _result_create_source_db_cluster.changed
+ - _result_create_source_db_cluster.changed
+ - "'allocated_storage' in _result_create_source_db_cluster"
+ - _result_create_source_db_cluster.allocated_storage == 1
+ - "'cluster_create_time' in _result_create_source_db_cluster"
+ - _result_create_source_db_cluster.copy_tags_to_snapshot == false
+ - "'db_cluster_arn' in _result_create_source_db_cluster"
+ - _result_create_source_db_cluster.db_cluster_identifier == '{{ cluster_id }}'
+ - "'db_cluster_parameter_group' in _result_create_source_db_cluster"
+ - "'db_cluster_resource_id' in _result_create_source_db_cluster"
+ - "'endpoint' in _result_create_source_db_cluster"
+ - "'engine' in _result_create_source_db_cluster"
+ - _result_create_source_db_cluster.engine == "aurora-mysql"
+ - "'engine_mode' in _result_create_source_db_cluster"
+ - _result_create_source_db_cluster.engine_mode == "provisioned"
+ - "'engine_version' in _result_create_source_db_cluster"
+ - "'master_username' in _result_create_source_db_cluster"
+ - _result_create_source_db_cluster.master_username == "{{ username }}"
+ - "'port' in _result_create_source_db_cluster"
+ - _result_create_source_db_cluster.port == {{ port }}
+ - "'status' in _result_create_source_db_cluster"
+ - _result_create_source_db_cluster.status == "available"
+ - "'tags' in _result_create_source_db_cluster"
+ - "'vpc_security_groups' in _result_create_source_db_cluster"
+
+ - name: Modify DB cluster password
+ rds_cluster:
+ id: '{{ cluster_id }}'
+ state: present
+ password: '{{ new_password }}'
+ force_update_password: true
+ apply_immediately: true
+ register: _result_modify_password
+
+ - assert:
+ that:
+ - _result_modify_password.changed
+ - "'allocated_storage' in _result_modify_password"
+ - _result_modify_password.allocated_storage == 1
+ - "'cluster_create_time' in _result_modify_password"
+ - _result_modify_password.copy_tags_to_snapshot == false
+ - "'db_cluster_arn' in _result_modify_password"
+ - _result_modify_password.db_cluster_identifier == '{{ cluster_id }}'
+ - "'db_cluster_parameter_group' in _result_modify_password"
+ - "'db_cluster_resource_id' in _result_modify_password"
+ - "'endpoint' in _result_modify_password"
+ - "'engine' in _result_modify_password"
+ - _result_modify_password.engine == "aurora-mysql"
+ - "'engine_mode' in _result_modify_password"
+ - _result_modify_password.engine_mode == "provisioned"
+ - "'engine_version' in _result_modify_password"
+ - "'master_username' in _result_modify_password"
+ - _result_modify_password.master_username == "{{ username }}"
+ - "'port' in _result_create_source_db_cluster"
+ - _result_modify_password.port == {{ port }}
+ - "'status' in _result_modify_password"
+ - _result_modify_password.status == "available"
+ - "'tags' in _result_modify_password"
+ - "'vpc_security_groups' in _result_modify_password"
+
+ - name: Modify DB cluster port
+ rds_cluster:
+ id: '{{ cluster_id }}'
+ state: present
+ port: '{{ new_port }}'
+ register: _result_modify_port
+
+ - assert:
+ that:
+ - _result_modify_port.changed
+ - "'allocated_storage' in _result_modify_port"
+ - _result_modify_port.allocated_storage == 1
+ - "'cluster_create_time' in _result_modify_port"
+ - _result_modify_port.copy_tags_to_snapshot == false
+ - "'db_cluster_arn' in _result_modify_port"
+ - _result_modify_port.db_cluster_identifier == '{{ cluster_id }}'
+ - "'db_cluster_parameter_group' in _result_modify_port"
+ - "'db_cluster_resource_id' in _result_modify_port"
+ - "'endpoint' in _result_modify_port"
+ - "'engine' in _result_modify_port"
+ - _result_modify_port.engine == "aurora-mysql"
+ - "'engine_mode' in _result_modify_port"
+ - _result_modify_port.engine_mode == "provisioned"
+ - "'engine_version' in _result_modify_port"
+ - "'master_username' in _result_modify_port"
+ - _result_modify_port.master_username == "{{ username }}"
+ - "'port' in _result_modify_port"
+ - _result_modify_port.port == {{ new_port }}
+ - "'status' in _result_modify_port"
+ - _result_modify_port.status == "available"
+ - "'tags' in _result_modify_port"
+ - "'vpc_security_groups' in _result_modify_port"
+
+ - name: Modify DB cluster identifier
+ rds_cluster:
+ id: '{{ cluster_id }}'
+ state: present
+ purge_tags: false
+ new_cluster_id: '{{ new_cluster_id }}'
+ apply_immediately: true
+ register: _result_modify_id
+
+ - assert:
+ that:
+ - _result_modify_id.changed
+ - "'allocated_storage' in _result_modify_id"
+ - _result_modify_id.allocated_storage == 1
+ - "'cluster_create_time' in _result_modify_id"
+ - _result_modify_id.copy_tags_to_snapshot == false
+ - "'db_cluster_arn' in _result_modify_id"
+ - _result_modify_id.db_cluster_identifier == '{{ new_cluster_id }}'
+ - "'db_cluster_parameter_group' in _result_modify_id"
+ - "'db_cluster_resource_id' in _result_modify_id"
+ - "'endpoint' in _result_modify_id"
+ - "'engine' in _result_modify_id"
+ - _result_modify_id.engine == "aurora-mysql"
+ - "'engine_mode' in _result_modify_id"
+ - _result_modify_id.engine_mode == "provisioned"
+ - "'engine_version' in _result_modify_id"
+ - "'master_username' in _result_modify_id"
+ - _result_modify_id.master_username == "{{ username }}"
+ - "'port' in _result_modify_id"
+ - _result_modify_id.port == {{ new_port }}
+ - "'status' in _result_modify_id"
+ - _result_modify_id.status == "available"
+ - "'tags' in _result_modify_id"
+ - "'vpc_security_groups' in _result_modify_id"
+
+ - name: Check if DB cluster parameter group exists
+ command: aws rds describe-db-cluster-parameter-groups --db-cluster-parameter-group-name
+ {{ new_db_parameter_group_name }}
+ environment:
+ AWS_ACCESS_KEY_ID: '{{ aws_access_key }}'
+ AWS_SECRET_ACCESS_KEY: '{{ aws_secret_key }}'
+ AWS_SESSION_TOKEN: "{{ security_token | default('') }}"
+ AWS_DEFAULT_REGION: '{{ aws_region }}'
+ register: _result_check_db_parameter_group
+ ignore_errors: true
+ changed_when: false
+
+ - name: Create DB cluster parameter group if not exists
+ command: aws rds create-db-cluster-parameter-group --db-cluster-parameter-group-name
+ {{ new_db_parameter_group_name }} --db-parameter-group-family aurora-mysql5.7 --description
+ "Test DB cluster parameter group"
+ environment:
+ AWS_ACCESS_KEY_ID: '{{ aws_access_key }}'
+ AWS_SECRET_ACCESS_KEY: '{{ aws_secret_key }}'
+ AWS_SESSION_TOKEN: "{{ security_token | default('') }}"
+ AWS_DEFAULT_REGION: '{{ aws_region }}'
+ register: _result_create_db_parameter_group
+ when: _result_check_db_parameter_group.rc != 0
+
+ - name: Modify DB cluster parameter group
+ rds_cluster:
+ id: '{{ new_cluster_id }}'
+ state: present
+ db_cluster_parameter_group_name: '{{ new_db_parameter_group_name }}'
+ apply_immediately: true
+ register: _result_modify_db_parameter_group_name
+
+ - assert:
+ that:
+ - _result_modify_db_parameter_group_name.changed
+ - "'allocated_storage' in _result_modify_db_parameter_group_name"
+ - _result_modify_db_parameter_group_name.allocated_storage == 1
+ - "'cluster_create_time' in _result_modify_db_parameter_group_name"
+ - _result_modify_db_parameter_group_name.copy_tags_to_snapshot == false
+ - "'db_cluster_arn' in _result_modify_db_parameter_group_name"
+ - _result_modify_db_parameter_group_name.db_cluster_identifier == '{{ new_cluster_id
+ }}'
+ - "'db_cluster_parameter_group' in _result_modify_db_parameter_group_name"
+ - "'db_cluster_resource_id' in _result_modify_db_parameter_group_name"
+ - "'endpoint' in _result_modify_db_parameter_group_name"
+ - "'engine' in _result_modify_db_parameter_group_name"
+ - _result_modify_db_parameter_group_name.engine == "aurora-mysql"
+ - "'engine_mode' in _result_modify_db_parameter_group_name"
+ - _result_modify_db_parameter_group_name.engine_mode == "provisioned"
+ - "'engine_version' in _result_modify_db_parameter_group_name"
+ - "'master_username' in _result_modify_db_parameter_group_name"
+ - _result_modify_db_parameter_group_name.master_username == "{{ username }}"
+ - "'port' in _result_modify_db_parameter_group_name"
+ - _result_modify_db_parameter_group_name.db_cluster_parameter_group == "{{ new_db_parameter_group_name
+ }}"
+ - "'status' in _result_modify_db_parameter_group_name"
+ - _result_modify_db_parameter_group_name.status == "available"
+ - "'tags' in _result_modify_db_parameter_group_name"
+ - "'vpc_security_groups' in _result_modify_db_parameter_group_name"
+
+ - name: Delete DB cluster without creating a final snapshot (CHECK MODE)
+ rds_cluster:
+ state: absent
+ cluster_id: '{{ new_cluster_id }}'
+ skip_final_snapshot: true
+ register: _result_delete_cluster
+ check_mode: true
+
+ - assert:
+ that:
+ - _result_delete_cluster.changed
+
+ - name: Delete DB cluster without creating a final snapshot
+ rds_cluster:
+ state: absent
+ cluster_id: '{{ new_cluster_id }}'
+ skip_final_snapshot: true
+ register: _result_delete_cluster
+
+ - assert:
+ that:
+ - _result_delete_cluster.changed
+
+ - name: Delete DB cluster without creating a final snapshot (idempotence)
+ rds_cluster:
+ state: absent
+ cluster_id: '{{ new_cluster_id }}'
+ skip_final_snapshot: true
+ register: _result_delete_cluster
+
+ - assert:
+ that:
+ - not _result_delete_cluster.changed
+
+ always:
+ - name: Delete DB cluster without creating a final snapshot
+ rds_cluster:
+ state: absent
+ cluster_id: '{{ cluster_id }}'
+ skip_final_snapshot: true
+ ignore_errors: true
+
+ - name: Delete cluster parameter group
+ command: aws rds delete-db-cluster-parameter-group --db-cluster-parameter-group-name
+ {{ new_db_parameter_group_name }}
+ environment:
+ AWS_ACCESS_KEY_ID: '{{ aws_access_key }}'
+ AWS_SECRET_ACCESS_KEY: '{{ aws_secret_key }}'
+ AWS_SESSION_TOKEN: "{{ security_token | default('') }}"
+ AWS_DEFAULT_REGION: '{{ aws_region }}'
+ ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_promote.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_promote.yml
new file mode 100644
index 000000000..8443063ad
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_promote.yml
@@ -0,0 +1,187 @@
+- block:
+ - name: Ensure the resource doesn't exist
+ rds_cluster:
+ id: '{{ cluster_id }}'
+ state: absent
+ engine: '{{ engine }}'
+ username: '{{ username }}'
+ password: '{{ password }}'
+ skip_final_snapshot: true
+ register: _result_delete_db_cluster
+
+ - assert:
+ that:
+ - not _result_delete_db_cluster.changed
+ ignore_errors: yes
+
+ - name: Set the two regions for the source DB and the read replica
+ set_fact:
+ region_src: '{{ aws_region }}'
+ region_dest: '{{ aws_region }}'
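+ # Note: both facts deliberately point at the same region; as with the
+ # snapshot-copy tests, CI cannot exercise a second region because the
+ # aws-terminator only cleans up resources in one region.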
+
+ - name: Create a source DB cluster
+ rds_cluster:
+ cluster_id: '{{ cluster_id }}'
+ state: present
+ engine: '{{ engine }}'
+ username: '{{ username }}'
+ password: '{{ password }}'
+ region: '{{ region_src }}'
+ tags:
+ Name: '{{ cluster_id }}'
+ Created_by: Ansible rds_cluster tests
+ register: _result_create_src_db_cluster
+
+ - assert:
+ that:
+ - _result_create_src_db_cluster.changed
+ - "'allocated_storage' in _result_create_src_db_cluster"
+ - _result_create_src_db_cluster.allocated_storage == 1
+ - "'cluster_create_time' in _result_create_src_db_cluster"
+ - _result_create_src_db_cluster.copy_tags_to_snapshot == false
+ - "'db_cluster_arn' in _result_create_src_db_cluster"
+ - _result_create_src_db_cluster.db_cluster_identifier == '{{ cluster_id }}'
+ - "'db_cluster_parameter_group' in _result_create_src_db_cluster"
+ - "'db_cluster_resource_id' in _result_create_src_db_cluster"
+ - "'endpoint' in _result_create_src_db_cluster"
+ - "'engine' in _result_create_src_db_cluster"
+ - _result_create_src_db_cluster.engine == "{{ engine }}"
+ - "'engine_mode' in _result_create_src_db_cluster"
+ - _result_create_src_db_cluster.engine_mode == "serverless"
+ - "'engine_version' in _result_create_src_db_cluster"
+ - "'master_username' in _result_create_src_db_cluster"
+ - _result_create_src_db_cluster.master_username == "{{ username }}"
+ - "'port' in _result_create_src_db_cluster"
+ - _result_create_src_db_cluster.port == {{ port }}
+ - "'status' in _result_create_src_db_cluster"
+ - _result_create_src_db_cluster.status == "available"
+ - "'tags' in _result_create_src_db_cluster"
+ - _result_create_src_db_cluster.tags | length == 2
+ - _result_create_src_db_cluster.tags.Name == '{{ cluster_id }}'
+ - _result_create_src_db_cluster.tags.Created_by == 'Ansible rds_cluster tests'
+ - "'vpc_security_groups' in _result_create_src_db_cluster"
+
+ - name: Get info on DB cluster
+ rds_cluster_info:
+ db_cluster_identifier: '{{ cluster_id }}'
+ register: _result_cluster_info
+
+ - assert:
+ that:
+ - _result_cluster_info is successful
+
+ - name: Set the ARN of the source DB cluster
+ set_fact:
+ src_db_cluster_arn: '{{ _result_cluster_info.clusters[0].db_cluster_arn }}'
+
+ - name: Create a DB cluster read replica in a different region
+ rds_cluster:
+ id: '{{ cluster_id }}-replica'
+ state: present
+ replication_source_identifier: '{{ src_db_cluster_arn }}'
+ engine: '{{ engine }}'
+ region: '{{ region_dest }}'
+ tags:
+ Name: '{{ cluster_id }}'
+ Created_by: Ansible rds_cluster tests
+ wait: yes
+ register: _result_create_replica_db_cluster
+
+ - assert:
+ that:
+ - _result_create_replica_db_cluster.changed
+ - "'allocated_storage' in _result_create_replica_db_cluster"
+ - _result_create_replica_db_cluster.allocated_storage == 1
+ - "'cluster_create_time' in _result_create_replica_db_cluster"
+ - _result_create_replica_db_cluster.copy_tags_to_snapshot == false
+ - "'db_cluster_arn' in _result_create_replica_db_cluster"
+ - _result_create_replica_db_cluster.db_cluster_identifier == '{{ cluster_id }}-replica'
+ - "'db_cluster_parameter_group' in _result_create_replica_db_cluster"
+ - "'db_cluster_resource_id' in _result_create_replica_db_cluster"
+ - "'endpoint' in _result_create_replica_db_cluster"
+ - "'engine' in _result_create_replica_db_cluster"
+ - _result_create_replica_db_cluster.engine == "{{ engine }}"
+ - "'engine_mode' in _result_create_replica_db_cluster"
+ - _result_create_replica_db_cluster.engine_mode == "serverless"
+ - "'engine_version' in _result_create_replica_db_cluster"
+ - "'master_username' in _result_create_replica_db_cluster"
+ - _result_create_replica_db_cluster.master_username == "{{ username }}"
+ - "'port' in _result_create_replica_db_cluster"
+ - _result_create_replica_db_cluster.port == {{ port }}
+ - "'status' in _result_create_replica_db_cluster"
+ - _result_create_replica_db_cluster.status == "available"
+ - "'tags' in _result_create_replica_db_cluster"
+ - _result_create_replica_db_cluster.tags | length == 2
+ - _result_create_replica_db_cluster.tags.Name == '{{ cluster_id }}'
+ - _result_create_replica_db_cluster.tags.Created_by == 'Ansible rds_cluster
+ tests'
+ - "'vpc_security_groups' in _result_create_replica_db_cluster"
+
+ - name: Test idempotence with a DB cluster read replica
+ rds_cluster:
+ id: '{{ cluster_id }}-replica'
+ state: present
+ replication_source_identifier: '{{ src_db_cluster_arn }}'
+ engine: '{{ engine }}'
+ region: '{{ region_dest }}'
+ tags:
+ Name: '{{ cluster_id }}'
+ Created_by: Ansible rds_cluster tests
+ register: _result_create_replica_db_cluster
+
+ - assert:
+ that:
+ - not _result_create_replica_db_cluster.changed
+
+ - name: Get info of existing DB cluster
+ rds_cluster_info:
+ db_cluster_identifier: '{{ cluster_id }}-replica'
+ region: '{{ region_dest }}'
+ register: _result_cluster_info
+
+ - assert:
+ that:
+ - _result_cluster_info is successful
+ # - _result_cluster_info.clusters | length == 0
+
+ - name: Promote the DB cluster read replica
+ rds_cluster:
+ cluster_id: '{{ cluster_id }}-replica'
+ state: present
+ promote: true
+ region: '{{ region_dest }}'
+ register: _result_promote_replica_db_cluster
+
+ - assert:
+ that:
+ - _result_promote_replica_db_cluster.changed
+
+ - name: Promote the DB cluster read replica (idempotence)
+ rds_cluster:
+ cluster_id: '{{ cluster_id }}-replica'
+ state: present
+ promote: true
+ region: '{{ region_dest }}'
+ register: _result_promote_replica_db_cluster
+
+ - assert:
+ that:
+ - not _result_promote_replica_db_cluster.changed
+
+ always:
+ - name: Remove the DB cluster
+ rds_cluster:
+ id: '{{ cluster_id }}'
+ state: absent
+ skip_final_snapshot: true
+ region: '{{ region_src }}'
+ ignore_errors: yes
+
+ - name: Remove the DB cluster read replica
+ rds_cluster:
+ id: '{{ cluster_id }}-replica'
+ state: absent
+ skip_final_snapshot: true
+ region: '{{ region_dest }}'
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_restore.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_restore.yml
new file mode 100644
index 000000000..b991a457b
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_restore.yml
@@ -0,0 +1,185 @@
+- block:
+ - name: Ensure the resource doesn't exist
+ rds_cluster:
+ id: '{{ cluster_id }}'
+ state: absent
+ engine: '{{ engine }}'
+ username: '{{ username }}'
+ password: '{{ password }}'
+ skip_final_snapshot: true
+ register: _result_delete_db_cluster
+
+ - assert:
+ that:
+ - not _result_delete_db_cluster.changed
+ ignore_errors: true
+
+ - name: Create a source DB cluster
+ rds_cluster:
+ id: '{{ cluster_id }}'
+ state: present
+ engine: '{{ engine }}'
+ backup_retention_period: 1
+ username: '{{ username }}'
+ password: '{{ password }}'
+ wait: true
+ register: _result_create_source_db_cluster
+
+ - assert:
+ that:
+ - _result_create_source_db_cluster.changed
+ - "'allocated_storage' in _result_create_source_db_cluster"
+ - _result_create_source_db_cluster.allocated_storage == 1
+ - "'cluster_create_time' in _result_create_source_db_cluster"
+ - _result_create_source_db_cluster.copy_tags_to_snapshot == false
+ - "'db_cluster_arn' in _result_create_source_db_cluster"
+ - "'db_cluster_identifier' in _result_create_source_db_cluster"
+ - _result_create_source_db_cluster.db_cluster_identifier == "{{ cluster_id }}"
+ - "'db_cluster_parameter_group' in _result_create_source_db_cluster"
+ - "'db_cluster_resource_id' in _result_create_source_db_cluster"
+ - "'endpoint' in _result_create_source_db_cluster"
+ - "'engine' in _result_create_source_db_cluster"
+ - _result_create_source_db_cluster.engine == "{{ engine }}"
+ - "'engine_mode' in _result_create_source_db_cluster"
+ - _result_create_source_db_cluster.engine_mode == "serverless"
+ - "'engine_version' in _result_create_source_db_cluster"
+ - "'master_username' in _result_create_source_db_cluster"
+ - _result_create_source_db_cluster.master_username == "{{ username }}"
+ - "'port' in _result_create_source_db_cluster"
+ - _result_create_source_db_cluster.port == {{ port }}
+ - "'status' in _result_create_source_db_cluster"
+ - _result_create_source_db_cluster.status == 'available'
+ - _result_create_source_db_cluster.storage_encrypted == true
+ - "'tags' in _result_create_source_db_cluster"
+ - "'vpc_security_groups' in _result_create_source_db_cluster"
+
+ - name: Create a point in time DB cluster
+ rds_cluster:
+ state: present
+ id: '{{ cluster_id }}-point-in-time'
+ source_db_cluster_identifier: '{{ cluster_id }}'
+ creation_source: cluster
+ engine: '{{ engine }}'
+ username: '{{ username }}'
+ password: '{{ password }}'
+ use_latest_restorable_time: true
+ tags:
+ Name: '{{ cluster_id }}'
+ Created_by: Ansible rds_cluster tests
+ register: _result_restored_db_cluster
+
+ - assert:
+ that:
+ - _result_restored_db_cluster.changed
+ - "'allocated_storage' in _result_restored_db_cluster"
+ - _result_restored_db_cluster.allocated_storage == 1
+ - "'cluster_create_time' in _result_restored_db_cluster"
+ - _result_restored_db_cluster.copy_tags_to_snapshot == false
+ - "'db_cluster_arn' in _result_restored_db_cluster"
+ - _result_restored_db_cluster.db_cluster_identifier == '{{ cluster_id }}-point-in-time'
+ - "'db_cluster_parameter_group' in _result_restored_db_cluster"
+ - "'db_cluster_resource_id' in _result_restored_db_cluster"
+ - "'endpoint' in _result_restored_db_cluster"
+ - "'engine' in _result_restored_db_cluster"
+ - _result_restored_db_cluster.engine == "{{ engine }}"
+ - "'engine_mode' in _result_restored_db_cluster"
+ - _result_restored_db_cluster.engine_mode == "serverless"
+ - "'engine_version' in _result_restored_db_cluster"
+ - "'master_username' in _result_restored_db_cluster"
+ - _result_restored_db_cluster.master_username == "{{ username }}"
+ - "'port' in _result_restored_db_cluster"
+ - _result_restored_db_cluster.port == {{ port }}
+ - "'status' in _result_restored_db_cluster"
+ - _result_restored_db_cluster.status == "available"
+ - "'tags' in _result_restored_db_cluster"
+ - _result_restored_db_cluster.tags | length == 2
+ - _result_restored_db_cluster.tags.Name == '{{ cluster_id }}'
+ - _result_restored_db_cluster.tags.Created_by == 'Ansible rds_cluster tests'
+ - "'vpc_security_groups' in _result_restored_db_cluster"
+
+ - name: Create a point in time DB cluster (idempotence)
+ rds_cluster:
+ state: present
+ id: '{{ cluster_id }}-point-in-time'
+ source_db_cluster_identifier: '{{ cluster_id }}'
+ creation_source: cluster
+ engine: '{{ engine }}'
+ username: '{{ username }}'
+ password: '{{ password }}'
+ restore_to_time: '{{ _result_restored_db_cluster.latest_restorable_time }}'
+ tags:
+ Name: '{{ cluster_id }}'
+ Created_by: Ansible rds_cluster tests
+ register: _result_restored_db_cluster
+
+ - assert:
+ that:
+ - not _result_restored_db_cluster.changed
+
+ - name: Take a snapshot of the DB cluster
+ rds_cluster_snapshot:
+ state: present
+ db_cluster_identifier: '{{ cluster_id }}'
+ db_cluster_snapshot_identifier: '{{ cluster_id }}-snapshot'
+ wait: true
+ register: _result_cluster_snapshot
+
+ - assert:
+ that:
+ - _result_cluster_snapshot.changed
+
+ - name: Restore DB cluster from source (snapshot)
+ rds_cluster:
+ creation_source: snapshot
+ engine: '{{ engine }}'
+ cluster_id: '{{ cluster_id }}-restored-snapshot'
+ snapshot_identifier: '{{ cluster_id }}-snapshot'
+ wait: true
+ register: _result_restored_db_cluster
+
+ - assert:
+ that:
+ - _result_restored_db_cluster.changed
+ - "'allocated_storage' in _result_restored_db_cluster"
+ - _result_restored_db_cluster.allocated_storage == 1
+ - "'cluster_create_time' in _result_restored_db_cluster"
+ - _result_restored_db_cluster.copy_tags_to_snapshot == false
+ - "'db_cluster_arn' in _result_restored_db_cluster"
+ - _result_restored_db_cluster.db_cluster_identifier == '{{ cluster_id }}-restored-snapshot'
+ - "'db_cluster_parameter_group' in _result_restored_db_cluster"
+ - "'db_cluster_resource_id' in _result_restored_db_cluster"
+ - "'endpoint' in _result_restored_db_cluster"
+ - "'engine' in _result_restored_db_cluster"
+ - _result_restored_db_cluster.engine == "{{ engine }}"
+ - "'engine_mode' in _result_restored_db_cluster"
+ - _result_restored_db_cluster.engine_mode == "serverless"
+ - "'engine_version' in _result_restored_db_cluster"
+ - "'master_username' in _result_restored_db_cluster"
+ - _result_restored_db_cluster.master_username == "{{ username }}"
+ - "'port' in _result_restored_db_cluster"
+ - _result_restored_db_cluster.port == {{ port }}
+ - "'status' in _result_restored_db_cluster"
+ - _result_restored_db_cluster.status == "available"
+ - "'tags' in _result_restored_db_cluster"
+ - "'vpc_security_groups' in _result_restored_db_cluster"
+
+ # TODO: export a snapshot to an S3 bucket and restore cluster from it
+ # Requires rds_export_task module
+ always:
+ - name: Delete the snapshot
+ rds_cluster_snapshot:
+ db_cluster_snapshot_identifier: '{{ cluster_id }}-snapshot'
+ state: absent
+ register: _result_delete_snapshot
+ ignore_errors: true
+
+ - name: Delete DB cluster without creating a final snapshot
+ rds_cluster:
+ state: absent
+ cluster_id: '{{ item }}'
+ skip_final_snapshot: true
+ ignore_errors: true
+ loop:
+ - '{{ cluster_id }}'
+ - '{{ cluster_id }}-point-in-time'
+ - '{{ cluster_id }}-restored-snapshot'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_tag.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_tag.yml
new file mode 100644
index 000000000..be0fa3ee3
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_tag.yml
@@ -0,0 +1,290 @@
+- block:
+ - name: Ensure the resource doesn't exist
+ rds_cluster:
+ id: '{{ cluster_id }}'
+ state: absent
+ engine: '{{ engine }}'
+ username: '{{ username }}'
+ password: '{{ password }}'
+ skip_final_snapshot: true
+ register: _result_delete_db_cluster
+
+ - assert:
+ that:
+ - not _result_delete_db_cluster.changed
+ ignore_errors: yes
+
+ - name: Create a DB cluster
+ rds_cluster:
+ engine: '{{ engine }}'
+ username: '{{ username }}'
+ password: '{{ password }}'
+ cluster_id: '{{ cluster_id }}'
+ tags: '{{ tags_create }}'
+ register: _result_create_db_cluster
+
+ - assert:
+ that:
+ - _result_create_db_cluster.changed
+ - "'allocated_storage' in _result_create_db_cluster"
+ - _result_create_db_cluster.allocated_storage == 1
+ - "'cluster_create_time' in _result_create_db_cluster"
+ - _result_create_db_cluster.copy_tags_to_snapshot == false
+ - "'db_cluster_arn' in _result_create_db_cluster"
+ - "'db_cluster_identifier' in _result_create_db_cluster"
+ - _result_create_db_cluster.db_cluster_identifier == "{{ cluster_id }}"
+ - "'db_cluster_parameter_group' in _result_create_db_cluster"
+ - "'db_cluster_resource_id' in _result_create_db_cluster"
+ - "'endpoint' in _result_create_db_cluster"
+ - "'engine' in _result_create_db_cluster"
+ - _result_create_db_cluster.engine == "{{ engine }}"
+ - "'engine_mode' in _result_create_db_cluster"
+ - _result_create_db_cluster.engine_mode == "serverless"
+ - "'engine_version' in _result_create_db_cluster"
+ - "'master_username' in _result_create_db_cluster"
+ - _result_create_db_cluster.master_username == "{{ username }}"
+ - "'port' in _result_create_db_cluster"
+ - _result_create_db_cluster.port == {{ port }}
+ - "'status' in _result_create_db_cluster"
+ - _result_create_db_cluster.status == 'available'
+ - _result_create_db_cluster.storage_encrypted == true
+ - "'tags' in _result_create_db_cluster"
+ - _result_create_db_cluster.tags | length == 2
+ - _result_create_db_cluster.tags["Created_By"] == "{{ tags_create["Created_By"]
+ }}"
+ - _result_create_db_cluster.tags["Name"] == "{{ tags_create["Name"]}}"
+ - "'vpc_security_groups' in _result_create_db_cluster"
+
+ - name: Test tags are not purged if purge_tags is False
+ rds_cluster:
+ engine: '{{ engine }}'
+ username: '{{ username }}'
+ password: '{{ new_password }}'
+ cluster_id: '{{ cluster_id }}'
+ tags: {}
+ purge_tags: false
+ register: _result_tag_db_cluster
+
+ - assert:
+ that:
+ - not _result_tag_db_cluster.changed
+ - "'allocated_storage' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.allocated_storage == 1
+ - "'cluster_create_time' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.copy_tags_to_snapshot == false
+ - "'db_cluster_arn' in _result_tag_db_cluster"
+ - "'db_cluster_identifier' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.db_cluster_identifier == "{{ cluster_id }}"
+ - "'db_cluster_parameter_group' in _result_tag_db_cluster"
+ - "'db_cluster_resource_id' in _result_tag_db_cluster"
+ - "'endpoint' in _result_tag_db_cluster"
+ - "'engine' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.engine == "{{ engine }}"
+ - "'engine_mode' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.engine_mode == "serverless"
+ - "'engine_version' in _result_tag_db_cluster"
+ - "'master_username' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.master_username == "{{ username }}"
+ - "'port' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.port == {{ port }}
+ - "'status' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.status == 'available'
+ - _result_tag_db_cluster.storage_encrypted == true
+ - "'tags' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.tags | length == 2
+ - _result_tag_db_cluster.tags["Created_By"] == "{{ tags_create["Created_By"]
+ }}"
+ - _result_tag_db_cluster.tags["Name"] == "{{ tags_create["Name"] }}"
+ - "'vpc_security_groups' in _result_tag_db_cluster"
+
+ - name: Add a tag and remove a tag (purge_tags is True)
+ rds_cluster:
+ cluster_id: '{{ cluster_id }}'
+ state: present
+ tags: '{{ tags_patch }}'
+ register: _result_tag_db_cluster
+
+ - assert:
+ that:
+ - _result_tag_db_cluster.changed
+ - "'allocated_storage' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.allocated_storage == 1
+ - "'cluster_create_time' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.copy_tags_to_snapshot == false
+ - "'db_cluster_arn' in _result_tag_db_cluster"
+ - "'db_cluster_identifier' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.db_cluster_identifier == "{{ cluster_id }}"
+ - "'db_cluster_parameter_group' in _result_tag_db_cluster"
+ - "'db_cluster_resource_id' in _result_tag_db_cluster"
+ - "'endpoint' in _result_tag_db_cluster"
+ - "'engine' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.engine == "{{ engine }}"
+ - "'engine_mode' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.engine_mode == "serverless"
+ - "'engine_version' in _result_tag_db_cluster"
+ - "'master_username' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.master_username == "{{ username }}"
+ - "'port' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.port == {{ port }}
+ - "'status' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.status == 'available'
+ - _result_tag_db_cluster.storage_encrypted == true
+ - "'tags' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.tags | length == 2
+ - _result_tag_db_cluster.tags["Name"] == "{{ tags_patch['Name'] }}"
+ - "'vpc_security_groups' in _result_tag_db_cluster"
+
+ - name: Purge a tag from the cluster (CHECK MODE)
+ rds_cluster:
+ engine: '{{ engine }}'
+ username: '{{ username }}'
+ password: '{{ password }}'
+ cluster_id: '{{ cluster_id }}'
+ tags:
+ Created_By: Ansible_rds_cluster_integration_test
+ register: _result_tag_db_cluster
+ check_mode: true
+
+ - assert:
+ that:
+ - _result_tag_db_cluster.changed
+
+ - name: Purge a tag from the cluster
+ rds_cluster:
+ engine: '{{ engine }}'
+ username: '{{ username }}'
+ password: '{{ password }}'
+ cluster_id: '{{ cluster_id }}'
+ tags:
+ Created_By: Ansible_rds_cluster_integration_test
+ register: _result_tag_db_cluster
+
+ - assert:
+ that:
+ - _result_tag_db_cluster.changed
+ - "'allocated_storage' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.allocated_storage == 1
+ - "'cluster_create_time' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.copy_tags_to_snapshot == false
+ - "'db_cluster_arn' in _result_tag_db_cluster"
+ - "'db_cluster_identifier' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.db_cluster_identifier == "{{ cluster_id }}"
+ - "'db_cluster_parameter_group' in _result_tag_db_cluster"
+ - "'db_cluster_resource_id' in _result_tag_db_cluster"
+ - "'endpoint' in _result_tag_db_cluster"
+ - "'engine' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.engine == "{{ engine }}"
+ - "'engine_mode' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.engine_mode == "serverless"
+ - "'engine_version' in _result_tag_db_cluster"
+ - "'master_username' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.master_username == "{{ username }}"
+ - "'port' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.port == {{ port }}
+ - "'status' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.status == 'available'
+ - _result_tag_db_cluster.storage_encrypted == true
+ - "'tags' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.tags | length == 1
+ - _result_tag_db_cluster.tags["Created_By"] == "Ansible_rds_cluster_integration_test"
+ - "'vpc_security_groups' in _result_tag_db_cluster"
+
+ - name: Add a tag to the cluster (CHECK MODE)
+ rds_cluster:
+ engine: '{{ engine }}'
+ username: '{{ username }}'
+ password: '{{ password }}'
+ cluster_id: '{{ cluster_id }}'
+ tags:
+ Name: cluster-{{ resource_prefix }}
+ Created_By: Ansible_rds_cluster_integration_test
+ register: _result_tag_db_cluster
+ check_mode: true
+
+ - assert:
+ that:
+ - _result_tag_db_cluster.changed
+
+ - name: Add a tag to the cluster
+ rds_cluster:
+ engine: '{{ engine }}'
+ username: '{{ username }}'
+ password: '{{ password }}'
+ cluster_id: '{{ cluster_id }}'
+ tags: '{{ tags_create }}'
+ register: _result_tag_db_cluster
+
+ - assert:
+ that:
+ - _result_tag_db_cluster.changed
+ - "'allocated_storage' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.allocated_storage == 1
+ - "'cluster_create_time' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.copy_tags_to_snapshot == false
+ - "'db_cluster_arn' in _result_tag_db_cluster"
+ - "'db_cluster_identifier' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.db_cluster_identifier == "{{ cluster_id }}"
+ - "'db_cluster_parameter_group' in _result_tag_db_cluster"
+ - "'db_cluster_resource_id' in _result_tag_db_cluster"
+ - "'endpoint' in _result_tag_db_cluster"
+ - "'engine' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.engine == "{{ engine }}"
+ - "'engine_mode' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.engine_mode == "serverless"
+ - "'engine_version' in _result_tag_db_cluster"
+ - "'master_username' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.master_username == "{{ username }}"
+ - "'port' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.port == {{ port }}
+ - "'status' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.status == 'available'
+ - _result_tag_db_cluster.storage_encrypted == true
+ - "'tags' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.tags | length == 2
+ - _result_tag_db_cluster.tags["Created_By"] == "{{ tags_create["Created_By"]}}"
+ - _result_tag_db_cluster.tags["Name"] == "{{ tags_create["Name"]}}"
+ - "'vpc_security_groups' in _result_tag_db_cluster"
+
+ - name: Remove all tags
+ rds_cluster:
+ engine: '{{ engine }}'
+ username: '{{ username }}'
+ password: '{{ password }}'
+ cluster_id: '{{ cluster_id }}'
+ tags: {}
+ register: _result_tag_db_cluster
+
+ - assert:
+ that:
+ - _result_tag_db_cluster.changed
+ - "'allocated_storage' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.allocated_storage == 1
+ - "'cluster_create_time' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.copy_tags_to_snapshot == false
+ - "'db_cluster_arn' in _result_tag_db_cluster"
+ - "'db_cluster_identifier' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.db_cluster_identifier == "{{ cluster_id }}"
+ - "'db_cluster_parameter_group' in _result_tag_db_cluster"
+ - "'db_cluster_resource_id' in _result_tag_db_cluster"
+ - "'endpoint' in _result_tag_db_cluster"
+ - "'engine' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.engine == "{{ engine }}"
+ - "'engine_mode' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.engine_mode == "serverless"
+ - "'engine_version' in _result_tag_db_cluster"
+ - "'master_username' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.master_username == "{{ username }}"
+ - "'port' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.port == {{ port }}
+ - "'status' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.status == 'available'
+ - _result_tag_db_cluster.storage_encrypted == true
+ - "'tags' in _result_tag_db_cluster"
+ - _result_tag_db_cluster.tags | length == 0
+ - "'vpc_security_groups' in _result_tag_db_cluster"
+
+ always:
+ - name: Delete DB cluster without creating a final snapshot
+ rds_cluster:
+ state: absent
+ cluster_id: '{{ cluster_id }}'
+ skip_final_snapshot: true
+ ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/vars/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/vars/main.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/vars/main.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/runme.sh b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/runme.sh
new file mode 100755
index 000000000..21720b263
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/runme.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+#
+# Beware: most of our tests here are run in parallel.
+# To add new tests you'll need to add a new host to the inventory and a matching
+# '{{ inventory_hostname }}'.yml file in roles/rds_cluster/tasks/
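+#
+# For example (hypothetical), a new scenario "test_failover" would need a
+# "test_failover" host added to the inventory and a matching
+# roles/rds_cluster/tasks/test_failover.yml task file.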
+
+
+set -eux
+
+export ANSIBLE_ROLES_PATH=../
+
+ansible-playbook main.yml -i inventory "$@"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/aliases
new file mode 100644
index 000000000..dc15e8ab0
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/aliases
@@ -0,0 +1,7 @@
+cloud/aws
+
+# It takes >20min to spawn the multi-az cluster
+disabled
+
+rds_cluster
+rds_cluster_info
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/defaults/main.yml
new file mode 100644
index 000000000..f666a2d77
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/defaults/main.yml
@@ -0,0 +1,7 @@
+# Create cluster
+cluster_id: ansible-test-{{ tiny_prefix }}
+username: testrdsusername
+password: "{{ lookup('password', 'dev/null length=12 chars=ascii_letters,digits') }}"
+tags_create:
+ Name: ansible-test-cluster-{{ tiny_prefix }}
+ Created_By: Ansible_rds_cluster_integration_test
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/meta/main.yml
new file mode 100644
index 000000000..39e88928a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/meta/main.yml
@@ -0,0 +1,5 @@
+---
+dependencies:
+ - role: setup_botocore_pip
+ vars:
+ botocore_version: "1.23.44"
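+ # 1.23.44 is presumably the first botocore release exposing the Multi-AZ
+ # DB cluster parameters used by this target (db_cluster_instance_class,
+ # iops, allocated_storage).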
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/tasks/main.yml
new file mode 100644
index 000000000..911eb60de
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/tasks/main.yml
@@ -0,0 +1,79 @@
+---
+- module_defaults:
+ group/aws:
+ region: "{{ aws_region }}"
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ collections:
+ - amazon.aws
+
+ block:
+ - name: Ensure the resource doesn't exist
+ rds_cluster:
+ id: '{{ cluster_id }}'
+ state: absent
+ engine: 'mysql'
+ username: '{{ username }}'
+ password: '{{ password }}'
+ skip_final_snapshot: true
+ register: _result_delete_db_cluster
+
+ - assert:
+ that:
+ - not _result_delete_db_cluster.changed
+ ignore_errors: true
+
+ - name: Create a source DB cluster (CHECK_MODE)
+ rds_cluster:
+ id: '{{ cluster_id }}'
+ state: present
+ engine: 'mysql'
+ engine_version: 8.0.28
+ allocated_storage: 100
+ iops: 5000
+ db_cluster_instance_class: db.r6gd.xlarge
+ username: '{{ username }}'
+ password: '{{ password }}'
+ wait: true
+ tags: '{{ tags_create }}'
+ register: _result_create_source_db_cluster
+ check_mode: True
+ vars:
+ ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
+
+ - assert:
+ that:
+ - _result_create_source_db_cluster.changed
+
+ - name: Create a source DB cluster
+ rds_cluster:
+ id: '{{ cluster_id }}'
+ state: present
+ engine: 'mysql'
+ engine_version: 8.0.28
+ allocated_storage: 100
+ iops: 5000
+ db_cluster_instance_class: db.r6gd.xlarge
+ username: '{{ username }}'
+ password: '{{ password }}'
+ wait: true
+ tags: '{{ tags_create }}'
+ register: _result_create_source_db_cluster
+ vars:
+ ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
+
+ - assert:
+ that:
+ - _result_create_source_db_cluster.changed
+
+ always:
+
+ - name: Delete DB cluster without creating a final snapshot
+ rds_cluster:
+ state: absent
+ cluster_id: '{{ item }}'
+ skip_final_snapshot: true
+ ignore_errors: true
+ loop:
+ - '{{ cluster_id }}'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/aliases
new file mode 100644
index 000000000..7f2c75f26
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/aliases
@@ -0,0 +1,5 @@
+time=10m
+
+cloud/aws
+
+rds_snapshot_info
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/defaults/main.yml
new file mode 100644
index 000000000..268ab154f
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/defaults/main.yml
@@ -0,0 +1,13 @@
+---
+# defaults file for rds_cluster_snapshot
+_resource_prefix: 'ansible-test-{{ tiny_prefix }}'
+
+# Create RDS cluster
+cluster_id: '{{ _resource_prefix }}-rds-cluster'
+username: 'testrdsusername'
+password: "{{ lookup('password', 'dev/null length=12 chars=ascii_letters,digits') }}"
+engine: 'aurora'
+port: 3306
+
+# Create snapshot
+snapshot_id: '{{ _resource_prefix }}-rds-cluster-snapshot'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/tasks/main.yml
new file mode 100644
index 000000000..a105044d9
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/tasks/main.yml
@@ -0,0 +1,480 @@
+---
+- module_defaults:
+ group/aws:
+ region: "{{ aws_region }}"
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ collections:
+ - amazon.aws
+
+ block:
+ - name: Create a source DB cluster
+ rds_cluster:
+ id: "{{ cluster_id }}"
+ state: present
+ engine: "{{ engine}}"
+ backup_retention_period: 1
+ username: "{{ username }}"
+ password: "{{ password }}"
+ preferred_backup_window: "01:15-01:45"
+ register: _result_create_source_db_cluster
+
+ - assert:
+ that:
+ - _result_create_source_db_cluster.changed
+ - "'allocated_storage' in _result_create_source_db_cluster"
+ - _result_create_source_db_cluster.allocated_storage == 1
+ - "'cluster_create_time' in _result_create_source_db_cluster"
+ - _result_create_source_db_cluster.copy_tags_to_snapshot == false
+ - "'db_cluster_arn' in _result_create_source_db_cluster"
+ - _result_create_source_db_cluster.db_cluster_identifier == "{{ cluster_id }}"
+ - "'db_cluster_parameter_group' in _result_create_source_db_cluster"
+ - "'db_cluster_resource_id' in _result_create_source_db_cluster"
+ - "'endpoint' in _result_create_source_db_cluster"
+ - "'engine' in _result_create_source_db_cluster"
+ - _result_create_source_db_cluster.engine == "{{ engine }}"
+ - "'engine_mode' in _result_create_source_db_cluster"
+ - _result_create_source_db_cluster.engine_mode == "serverless"
+ - "'engine_version' in _result_create_source_db_cluster"
+ - "'master_username' in _result_create_source_db_cluster"
+ - _result_create_source_db_cluster.master_username == "{{ username }}"
+ - "'port' in _result_create_source_db_cluster"
+ - "_result_create_source_db_cluster.port == {{ port }}"
+ - "'status' in _result_create_source_db_cluster"
+ - _result_create_source_db_cluster.status == "available"
+ - "'tags' in _result_create_source_db_cluster"
+ - "'vpc_security_groups' in _result_create_source_db_cluster"
+
+ - name: Get all RDS snapshots for the existing DB cluster
+ rds_snapshot_info:
+ db_cluster_identifier: "{{ cluster_id }}"
+ register: _result_cluster_snapshot_info
+
+ - assert:
+ that:
+ - _result_cluster_snapshot_info is successful
+ - _result_cluster_snapshot_info.cluster_snapshots | length == 0
+
+ - name: Take a snapshot of the existing DB cluster (CHECK_MODE)
+ rds_cluster_snapshot:
+ state: present
+ db_cluster_identifier: "{{ cluster_id }}"
+ db_cluster_snapshot_identifier: "{{ snapshot_id }}"
+ check_mode: true
+ register: _result_cluster_snapshot
+
+ - assert:
+ that:
+ - _result_cluster_snapshot.changed
+
+ - name: Take a snapshot of the existing DB cluster
+ rds_cluster_snapshot:
+ state: present
+ db_cluster_identifier: "{{ cluster_id }}"
+ db_cluster_snapshot_identifier: "{{ snapshot_id }}"
+ wait: true
+ register: _result_cluster_snapshot
+
+ - assert:
+ that:
+ - _result_cluster_snapshot.changed
+ - "'allocated_storage' in _result_cluster_snapshot"
+ - "'cluster_create_time' in _result_cluster_snapshot"
+ - "'db_cluster_identifier' in _result_cluster_snapshot"
+ - _result_cluster_snapshot.db_cluster_identifier == "{{ cluster_id }}"
+ - "'db_cluster_snapshot_identifier' in _result_cluster_snapshot"
+ - _result_cluster_snapshot.db_cluster_snapshot_identifier == "{{ snapshot_id }}"
+ - "'db_cluster_snapshot_arn' in _result_cluster_snapshot"
+ - "'engine' in _result_cluster_snapshot"
+ - _result_cluster_snapshot.engine == "{{ engine }}"
+ # - "'engine_mode' in _result_cluster_snapshot"
+ # - _result_cluster_snapshot.engine_mode == "provisioned"
+ - "'engine_version' in _result_cluster_snapshot"
+ - "'iam_database_authentication_enabled' in _result_cluster_snapshot"
+ - "'license_model' in _result_cluster_snapshot"
+ - "'master_username' in _result_cluster_snapshot"
+ - _result_cluster_snapshot.master_username == "{{ username }}"
+ - "'snapshot_create_time' in _result_cluster_snapshot"
+ - "'snapshot_type' in _result_cluster_snapshot"
+ - "'status' in _result_cluster_snapshot"
+ - _result_cluster_snapshot.status == "available"
+ - "'storage_encrypted' in _result_cluster_snapshot"
+ - "'tags' in _result_cluster_snapshot"
+ - "'vpc_id' in _result_cluster_snapshot"
+
+ - name: Get information about the existing DB snapshot
+ rds_snapshot_info:
+ db_cluster_snapshot_identifier: "{{ snapshot_id }}"
+ register: _result_cluster_snapshot_info
+
+ - assert:
+ that:
+ - _result_cluster_snapshot_info is successful
+ - _result_cluster_snapshot_info.cluster_snapshots[0].db_cluster_identifier == "{{ cluster_id }}"
+ - _result_cluster_snapshot_info.cluster_snapshots[0].db_cluster_snapshot_identifier == "{{ snapshot_id }}"
+
+ - name: Get info of the existing DB cluster
+ rds_cluster_info:
+ cluster_id: "{{ cluster_id }}"
+ register: result_cluster_info
+
+ - assert:
+ that:
+ - result_cluster_info is successful
+
+ - name: Create another source DB cluster
+ rds_cluster:
+ id: "{{ cluster_id }}-b"
+ state: present
+ engine: "{{ engine}}"
+ backup_retention_period: 1
+ username: "{{ username }}"
+ password: "{{ password }}"
+ preferred_backup_window: "01:15-01:45"
+ register: _result_create_source_db_cluster
+
+ - assert:
+ that:
+ - _result_create_source_db_cluster.changed
+ - "'allocated_storage' in _result_create_source_db_cluster"
+ - _result_create_source_db_cluster.allocated_storage == 1
+ - "'cluster_create_time' in _result_create_source_db_cluster"
+ - _result_create_source_db_cluster.copy_tags_to_snapshot == false
+ - "'db_cluster_arn' in _result_create_source_db_cluster"
+ - _result_create_source_db_cluster.db_cluster_identifier == "{{ cluster_id }}-b"
+ - "'db_cluster_parameter_group' in _result_create_source_db_cluster"
+ - "'db_cluster_resource_id' in _result_create_source_db_cluster"
+ - "'endpoint' in _result_create_source_db_cluster"
+ - "'engine' in _result_create_source_db_cluster"
+ - _result_create_source_db_cluster.engine == "{{ engine }}"
+ - "'engine_mode' in _result_create_source_db_cluster"
+ - _result_create_source_db_cluster.engine_mode == "serverless"
+ - "'engine_version' in _result_create_source_db_cluster"
+ - "'master_username' in _result_create_source_db_cluster"
+ - _result_create_source_db_cluster.master_username == "{{ username }}"
+ - "'port' in _result_create_source_db_cluster"
+ - "_result_create_source_db_cluster.port == {{ port }}"
+ - "'status' in _result_create_source_db_cluster"
+ - _result_create_source_db_cluster.status == "available"
+ - "'tags' in _result_create_source_db_cluster"
+ - "'vpc_security_groups' in _result_create_source_db_cluster"
+
+ - name: Take another snapshot of the existing DB cluster
+ rds_cluster_snapshot:
+ state: present
+ db_cluster_identifier: "{{ cluster_id }}-b"
+ db_cluster_snapshot_identifier: "{{ snapshot_id }}-b"
+ wait: true
+ register: _result_cluster_snapshot
+
+ - assert:
+ that:
+ - _result_cluster_snapshot.changed
+ - "'allocated_storage' in _result_cluster_snapshot"
+ - "'cluster_create_time' in _result_cluster_snapshot"
+ - "'db_cluster_identifier' in _result_cluster_snapshot"
+ - _result_cluster_snapshot.db_cluster_identifier == "{{ cluster_id }}-b"
+ - "'db_cluster_snapshot_identifier' in _result_cluster_snapshot"
+ - _result_cluster_snapshot.db_cluster_snapshot_identifier == "{{ snapshot_id }}-b"
+ - "'db_cluster_snapshot_arn' in _result_cluster_snapshot"
+ - "'engine' in _result_cluster_snapshot"
+ - _result_cluster_snapshot.engine == "{{ engine }}"
+ # - "'engine_mode' in _result_cluster_snapshot"
+ # - _result_cluster_snapshot.engine_mode == "provisioned"
+ - "'engine_version' in _result_cluster_snapshot"
+ - "'iam_database_authentication_enabled' in _result_cluster_snapshot"
+ - "'license_model' in _result_cluster_snapshot"
+ - "'master_username' in _result_cluster_snapshot"
+ - _result_cluster_snapshot.master_username == "{{ username }}"
+ - "'snapshot_create_time' in _result_cluster_snapshot"
+ - "'snapshot_type' in _result_cluster_snapshot"
+ - "'status' in _result_cluster_snapshot"
+ - _result_cluster_snapshot.status == "available"
+ - "'storage_encrypted' in _result_cluster_snapshot"
+ - "'tags' in _result_cluster_snapshot"
+ - "'vpc_id' in _result_cluster_snapshot"
+
+ - name: Get all RDS snapshots for the existing DB cluster
+ rds_snapshot_info:
+ db_cluster_identifier: "{{ cluster_id }}-b"
+ register: _result_cluster_snapshot_info
+
+ - assert:
+ that:
+ - _result_cluster_snapshot_info is successful
+ - _result_cluster_snapshot_info.cluster_snapshots | length == 1
+
+ - name: Delete existing DB cluster snapshot (CHECK_MODE)
+ rds_cluster_snapshot:
+ state: absent
+ db_cluster_snapshot_identifier: "{{ snapshot_id }}-b"
+ register: _result_delete_snapshot
+ check_mode: true
+
+ - assert:
+ that:
+ - _result_delete_snapshot.changed
+
+ - name: Delete the existing DB cluster snapshot
+ rds_cluster_snapshot:
+ state: absent
+ db_cluster_snapshot_identifier: "{{ snapshot_id }}-b"
+ register: _result_delete_snapshot
+
+ - assert:
+ that:
+ - _result_delete_snapshot.changed
+
+ - name: Get info of the existing DB cluster
+ rds_cluster_info:
+ cluster_id: "{{ cluster_id }}"
+ register: _result_cluster_info
+
+ - assert:
+ that:
+ - _result_cluster_info is successful
+
+ - name: Take another snapshot of the existing DB cluster and assign tags
+ rds_cluster_snapshot:
+ state: present
+ db_cluster_identifier: "{{ cluster_id }}"
+ db_cluster_snapshot_identifier: "{{ snapshot_id }}-b"
+ wait: true
+ tags:
+ tag_one: '{{ snapshot_id }}-b One'
+ "Tag Two": 'two {{ snapshot_id }}-b'
+ register: _result_cluster_snapshot
+
+ - assert:
+ that:
+ - _result_cluster_snapshot.changed
+ - "'allocated_storage' in _result_cluster_snapshot"
+ - "'cluster_create_time' in _result_cluster_snapshot"
+ - "'db_cluster_identifier' in _result_cluster_snapshot"
+ - _result_cluster_snapshot.db_cluster_identifier == "{{ cluster_id }}"
+ - "'db_cluster_snapshot_identifier' in _result_cluster_snapshot"
+ - _result_cluster_snapshot.db_cluster_snapshot_identifier == "{{ snapshot_id }}-b"
+ - "'db_cluster_snapshot_arn' in _result_cluster_snapshot"
+ - "'engine' in _result_cluster_snapshot"
+ - _result_cluster_snapshot.engine == "{{ engine }}"
+ # - "'engine_mode' in _result_cluster_snapshot"
+ # - _result_cluster_snapshot.engine_mode == "provisioned"
+ - "'engine_version' in _result_cluster_snapshot"
+ - "'iam_database_authentication_enabled' in _result_cluster_snapshot"
+ - "'license_model' in _result_cluster_snapshot"
+ - "'master_username' in _result_cluster_snapshot"
+ - _result_cluster_snapshot.master_username == "{{ username }}"
+ - "'snapshot_create_time' in _result_cluster_snapshot"
+ - "'snapshot_type' in _result_cluster_snapshot"
+ - "'status' in _result_cluster_snapshot"
+ - _result_cluster_snapshot.status == "available"
+ - "'storage_encrypted' in _result_cluster_snapshot"
+ - "'tags' in _result_cluster_snapshot"
+ - _result_cluster_snapshot.tags | length == 2
+ - _result_cluster_snapshot.tags["tag_one"] == "{{ snapshot_id }}-b One"
+ - _result_cluster_snapshot.tags["Tag Two"] == "two {{ snapshot_id }}-b"
+ - "'vpc_id' in _result_cluster_snapshot"
+
+ - name: Attempt to take another snapshot of the existing DB cluster and assign tags (idempotence)
+ rds_cluster_snapshot:
+ state: present
+ db_cluster_identifier: "{{ cluster_id }}"
+ db_cluster_snapshot_identifier: "{{ snapshot_id }}-b"
+ wait: true
+ tags:
+ tag_one: '{{ snapshot_id }}-b One'
+ "Tag Two": 'two {{ snapshot_id }}-b'
+ register: _result_cluster_snapshot
+
+ - assert:
+ that:
+ - not _result_cluster_snapshot.changed
+
+ - name: Take another snapshot of the existing DB cluster and update tags
+ rds_cluster_snapshot:
+ state: present
+ db_cluster_identifier: "{{ cluster_id }}"
+ db_cluster_snapshot_identifier: "{{ snapshot_id }}-b"
+ tags:
+ tag_three: '{{ snapshot_id }}-b Three'
+ "Tag Two": 'two {{ snapshot_id }}-b'
+ register: _result_cluster_snapshot
+
+ - assert:
+ that:
+ - _result_cluster_snapshot.changed
+ - "'allocated_storage' in _result_cluster_snapshot"
+ - "'cluster_create_time' in _result_cluster_snapshot"
+ - "'db_cluster_identifier' in _result_cluster_snapshot"
+ - _result_cluster_snapshot.db_cluster_identifier == "{{ cluster_id }}"
+ - "'db_cluster_snapshot_identifier' in _result_cluster_snapshot"
+ - _result_cluster_snapshot.db_cluster_snapshot_identifier == "{{ snapshot_id }}-b"
+ - "'db_cluster_snapshot_arn' in _result_cluster_snapshot"
+ - "'engine' in _result_cluster_snapshot"
+ - _result_cluster_snapshot.engine == "{{ engine }}"
+ # - "'engine_mode' in _result_cluster_snapshot"
+ # - _result_cluster_snapshot.engine_mode == "provisioned"
+ - "'engine_version' in _result_cluster_snapshot"
+ - "'iam_database_authentication_enabled' in _result_cluster_snapshot"
+ - "'license_model' in _result_cluster_snapshot"
+ - "'master_username' in _result_cluster_snapshot"
+ - _result_cluster_snapshot.master_username == "{{ username }}"
+ - "'snapshot_create_time' in _result_cluster_snapshot"
+ - "'snapshot_type' in _result_cluster_snapshot"
+ - "'status' in _result_cluster_snapshot"
+ - _result_cluster_snapshot.status == "available"
+ - "'storage_encrypted' in _result_cluster_snapshot"
+ - "'tags' in _result_cluster_snapshot"
+ - _result_cluster_snapshot.tags | length == 2
+ - _result_cluster_snapshot.tags["tag_three"] == "{{ snapshot_id }}-b Three"
+ - _result_cluster_snapshot.tags["Tag Two"] == "two {{ snapshot_id }}-b"
+ - "'vpc_id' in _result_cluster_snapshot"
+
+ - name: Take another snapshot of the existing DB cluster and update tags without purge
+ rds_cluster_snapshot:
+ state: present
+ db_cluster_identifier: "{{ cluster_id }}"
+ db_cluster_snapshot_identifier: "{{ snapshot_id }}-b"
+ purge_tags: false
+ tags:
+ tag_one: '{{ snapshot_id }}-b One'
+ register: _result_cluster_snapshot
+
+ - assert:
+ that:
+ - _result_cluster_snapshot.changed
+ - "'allocated_storage' in _result_cluster_snapshot"
+ - "'cluster_create_time' in _result_cluster_snapshot"
+ - "'db_cluster_identifier' in _result_cluster_snapshot"
+ - _result_cluster_snapshot.db_cluster_identifier == "{{ cluster_id }}"
+ - "'db_cluster_snapshot_identifier' in _result_cluster_snapshot"
+ - _result_cluster_snapshot.db_cluster_snapshot_identifier == "{{ snapshot_id }}-b"
+ - "'db_cluster_snapshot_arn' in _result_cluster_snapshot"
+ - "'engine' in _result_cluster_snapshot"
+ - _result_cluster_snapshot.engine == "{{ engine }}"
+ # - "'engine_mode' in _result_cluster_snapshot"
+ # - _result_cluster_snapshot.engine_mode == "provisioned"
+ - "'engine_version' in _result_cluster_snapshot"
+ - "'iam_database_authentication_enabled' in _result_cluster_snapshot"
+ - "'license_model' in _result_cluster_snapshot"
+ - "'master_username' in _result_cluster_snapshot"
+ - _result_cluster_snapshot.master_username == "{{ username }}"
+ - "'snapshot_create_time' in _result_cluster_snapshot"
+ - "'snapshot_type' in _result_cluster_snapshot"
+ - "'status' in _result_cluster_snapshot"
+ - _result_cluster_snapshot.status == "available"
+ - "'storage_encrypted' in _result_cluster_snapshot"
+ - "'tags' in _result_cluster_snapshot"
+ - _result_cluster_snapshot.tags | length == 3
+ - _result_cluster_snapshot.tags["tag_one"] == "{{ snapshot_id }}-b One"
+ - _result_cluster_snapshot.tags["Tag Two"] == "two {{ snapshot_id }}-b"
+ - _result_cluster_snapshot.tags["tag_three"] == "{{ snapshot_id }}-b Three"
+ - "'vpc_id' in _result_cluster_snapshot"
+
+ - name: Take another snapshot of the existing DB cluster and do not specify any tag to ensure previous tags are not removed
+ rds_cluster_snapshot:
+ state: present
+ db_cluster_identifier: "{{ cluster_id }}"
+ db_cluster_snapshot_identifier: "{{ snapshot_id }}-b"
+ register: _result_cluster_snapshot
+
+ - assert:
+ that:
+ - not _result_cluster_snapshot.changed
+
+ # ------------------------------------------------------------------------------------------
+ # Test copying a snapshot
+ ### Copying a DB cluster snapshot from a different region is supported, but not in CI,
+ ### because the aws-terminator only terminates resources in one region.
+ - set_fact:
+ _snapshot_arn: "{{ _result_cluster_snapshot.db_cluster_snapshot_arn }}"
+
+ - name: Copy a DB cluster snapshot (check mode)
+ rds_cluster_snapshot:
+ id: "{{ snapshot_id }}-copy"
+ source_id: "{{ snapshot_id }}-b"
+ copy_tags: yes
+ wait: true
+ register: _result_cluster_copy_snapshot
+ check_mode: yes
+
+ - assert:
+ that:
+ - _result_cluster_copy_snapshot.changed
+
+ - name: Copy a DB cluster snapshot
+ rds_cluster_snapshot:
+ id: "{{ snapshot_id }}-copy"
+ source_id: "{{ snapshot_id }}-b"
+ copy_tags: yes
+ wait: true
+ register: _result_cluster_copy_snapshot
+
+ - assert:
+ that:
+ - _result_cluster_copy_snapshot.changed
+ - _result_cluster_copy_snapshot.db_cluster_identifier == "{{ cluster_id }}"
+ - _result_cluster_copy_snapshot.source_db_cluster_snapshot_arn == "{{ _snapshot_arn }}"
+ - _result_cluster_copy_snapshot.db_cluster_snapshot_identifier == "{{ snapshot_id }}-copy"
+ - "'tags' in _result_cluster_copy_snapshot"
+ - _result_cluster_copy_snapshot.tags | length == 3
+ - _result_cluster_copy_snapshot.tags["tag_one"] == "{{ snapshot_id }}-b One"
+ - _result_cluster_copy_snapshot.tags["Tag Two"] == "two {{ snapshot_id }}-b"
+ - _result_cluster_copy_snapshot.tags["tag_three"] == "{{ snapshot_id }}-b Three"
+
+ - name: Copy a DB cluster snapshot (idempotence - check mode)
+ rds_cluster_snapshot:
+ id: "{{ snapshot_id }}-copy"
+ source_id: "{{ snapshot_id }}-b"
+ copy_tags: yes
+ wait: true
+ register: _result_cluster_copy_snapshot
+ check_mode: yes
+
+ - assert:
+ that:
+ - not _result_cluster_copy_snapshot.changed
+
+ - name: Copy a DB cluster snapshot (idempotence)
+ rds_cluster_snapshot:
+ id: "{{ snapshot_id }}-copy"
+ source_id: "{{ snapshot_id }}-b"
+ copy_tags: yes
+ wait: true
+ register: _result_cluster_copy_snapshot
+
+ - assert:
+ that:
+ - not _result_cluster_copy_snapshot.changed
+ - _result_cluster_copy_snapshot.db_cluster_identifier == "{{ cluster_id }}"
+ - _result_cluster_copy_snapshot.source_db_cluster_snapshot_arn == "{{ _snapshot_arn }}"
+ - _result_cluster_copy_snapshot.db_cluster_snapshot_identifier == "{{ snapshot_id }}-copy"
+ - "'tags' in _result_cluster_copy_snapshot"
+ - _result_cluster_copy_snapshot.tags | length == 3
+ - _result_cluster_copy_snapshot.tags["tag_one"] == "{{ snapshot_id }}-b One"
+ - _result_cluster_copy_snapshot.tags["Tag Two"] == "two {{ snapshot_id }}-b"
+ - _result_cluster_copy_snapshot.tags["tag_three"] == "{{ snapshot_id }}-b Three"
+
+ always:
+ - name: Delete the existing DB cluster snapshots
+ rds_cluster_snapshot:
+ state: absent
+ db_cluster_snapshot_identifier: "{{ item }}"
+ register: _result_delete_snapshot
+ ignore_errors: true
+ loop:
+ - "{{ snapshot_id }}"
+ - "{{ snapshot_id }}-b"
+ - "{{ snapshot_id }}-copy"
+
+ - name: Delete the existing DB cluster without creating a final snapshot
+ rds_cluster:
+ state: absent
+ cluster_id: "{{ item }}"
+ skip_final_snapshot: true
+ register: _result_delete_cluster
+ ignore_errors: true
+ loop:
+ - "{{ cluster_id }}"
+ - "{{ cluster_id }}-b"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/vars/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/vars/main.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/vars/main.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_aurora/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_aurora/aliases
new file mode 100644
index 000000000..bff4494c2
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_aurora/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+time=30m
+rds_instance
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_aurora/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_aurora/defaults/main.yml
new file mode 100644
index 000000000..3647e4126
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_aurora/defaults/main.yml
@@ -0,0 +1,9 @@
+instance_id: ansible-test-{{ tiny_prefix }}
+modified_instance_id: '{{ instance_id }}-updated'
+username: test
+password: test12345678
+db_instance_class: db.t3.micro
+
+# For aurora tests
+cluster_id: '{{ instance_id }}-cluster'
+aurora_db_instance_class: db.t3.medium
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_aurora/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_aurora/tasks/main.yml
new file mode 100644
index 000000000..522894afc
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_aurora/tasks/main.yml
@@ -0,0 +1,122 @@
+- name: rds_instance / aurora integration tests
+ collections:
+ - community.aws
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+
+ - name: Ensure the resource doesn't exist
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: absent
+ skip_final_snapshot: true
+ register: result
+
+ - name: Create minimal aurora cluster in default VPC and default subnet group
+ rds_cluster:
+ state: present
+ engine: aurora-postgresql
+ engine_mode: provisioned
+ cluster_id: '{{ cluster_id }}'
+ username: '{{ username }}'
+ password: '{{ password }}'
+ tags:
+ CreatedBy: rds_instance integration tests
+ register: my_cluster
+
+ - assert:
+ that:
+ - my_cluster.engine_mode == "provisioned"
+
+ - name: Create an Aurora instance
+ rds_instance:
+ id: '{{ instance_id }}'
+ cluster_id: '{{ cluster_id }}'
+ engine: aurora-postgresql
+ state: present
+ db_instance_class: '{{ aurora_db_instance_class }}'
+ tags:
+ CreatedBy: rds_instance integration tests
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - result.db_instance_identifier == '{{ instance_id }}'
+ - result.tags | length == 1
+
+ - name: Create an Aurora instance with both username/password and id - invalid
+ rds_instance:
+ id: '{{ instance_id }}-new'
+ cluster_id: '{{ cluster_id }}'
+ engine: aurora-postgresql
+ state: present
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ aurora_db_instance_class }}'
+ tags:
+ CreatedBy: rds_instance integration tests
+ register: result
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - result.failed
+ - "'Set master user password for the DB Cluster' in result.msg"
+
+ - name: Attempt to modify password (a cluster-managed attribute)
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ password: '{{ password }}'
+ force_update_password: true
+ apply_immediately: true
+ register: result
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - result.failed
+ - "'Modify master user password for the DB Cluster using the ModifyDbCluster\
+ \ API' in result.msg"
+ - "'Please see rds_cluster' in result.msg"
+
+ - name: Modify aurora instance port (a cluster-managed attribute)
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ port: 1150
+ register: result
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - not result.changed
+ - "'Modify database endpoint port number for the DB Cluster using the ModifyDbCluster\
+ \ API' in result.msg"
+ - "'Please see rds_cluster' in result.msg"
+
+ always:
+
+ - name: Delete the instance
+ rds_instance:
+ id: '{{ item }}'
+ state: absent
+ skip_final_snapshot: true
+ wait: false
+ loop:
+ - '{{ instance_id }}'
+ - '{{ modified_instance_id }}'
+ ignore_errors: yes
+
+ - name: Delete the cluster
+ rds_cluster:
+ cluster_id: '{{ cluster_id }}'
+ state: absent
+ skip_final_snapshot: true
+ wait: false
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/aliases
new file mode 100644
index 000000000..777fbe40a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+time=15m
+rds_instance
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/defaults/main.yml
new file mode 100644
index 000000000..fd3a29a79
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/defaults/main.yml
@@ -0,0 +1,16 @@
+instance_id: ansible-test-{{ tiny_prefix }}
+modified_instance_id: '{{ instance_id }}-updated'
+username: test
+password: test12345678
+db_instance_class: db.t3.micro
+modified_db_instance_class: db.t3.medium
+allocated_storage: 20
+io1_allocated_storage: 100
+io1_modified_allocated_storage: 110
+monitoring_interval: 60
+preferred_maintenance_window: mon:06:20-mon:06:50
+storage_type: io1
+iops: 1000
+
+# For mariadb tests
+mariadb_engine_version: 10.6.10
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/files/enhanced_monitoring_assume_policy.json b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/files/enhanced_monitoring_assume_policy.json
new file mode 100644
index 000000000..29acf369f
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/files/enhanced_monitoring_assume_policy.json
@@ -0,0 +1,13 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "",
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "monitoring.rds.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/files/s3_integration_policy.json b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/files/s3_integration_policy.json
new file mode 100644
index 000000000..71f07d07c
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/files/s3_integration_policy.json
@@ -0,0 +1,16 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "",
+ "Effect": "Allow",
+ "Action": [
+ "s3:PutObject",
+ "s3:GetObject",
+ "s3:ListBucket",
+ "rds:*"
+ ],
+ "Resource": "*"
+ }
+ ]
+}
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/files/s3_integration_trust_policy.json b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/files/s3_integration_trust_policy.json
new file mode 100644
index 000000000..9ea5ec3b4
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/files/s3_integration_trust_policy.json
@@ -0,0 +1,13 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "",
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "rds.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/tasks/main.yml
new file mode 100644
index 000000000..024e0978a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/tasks/main.yml
@@ -0,0 +1,205 @@
+- name: rds_instance / complex integration tests
+ collections:
+ - community.aws
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+ # TODO: test availability_zone and multi_az
+ - name: Ensure the resource doesn't exist
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: absent
+ skip_final_snapshot: true
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ ignore_errors: yes
+
+ - name: Create an enhanced monitoring role
+ iam_role:
+ assume_role_policy_document: "{{ lookup('file','files/enhanced_monitoring_assume_policy.json') }}"
+ name: '{{ instance_id }}-role'
+ state: present
+ managed_policy: arn:aws:iam::aws:policy/service-role/AmazonRDSEnhancedMonitoringRole
+ register: enhanced_monitoring_role
+
+ - name: Create a mariadb instance
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ engine: mariadb
+ engine_version: '{{ mariadb_engine_version }}'
+ allow_major_version_upgrade: true
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ io1_allocated_storage }}'
+ storage_type: '{{ storage_type }}'
+ iops: '{{ iops }}'
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - result.db_instance_identifier == '{{ instance_id }}'
+
+ - name: Add IAM roles to mariadb (should fail - iam roles not supported for mariadb)
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ engine: mariadb
+ engine_version: '{{ mariadb_engine_version }}'
+ allow_major_version_upgrade: true
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ io1_allocated_storage }}'
+ storage_type: '{{ storage_type }}'
+ iops: '{{ iops }}'
+ iam_roles:
+ - role_arn: my_role
+ feature_name: my_feature
+ register: result
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result.failed
+ - '"is not valid for adding IAM roles" in result.msg'
+
+ # TODO: test modifying db_subnet_group_name, db_security_groups, db_parameter_group_name, option_group_name,
+ # monitoring_role_arn, monitoring_interval, domain, domain_iam_role_name, cloudwatch_logs_export_configuration
+
+ # Test multiple modifications including enabling enhanced monitoring
+
+ - name: Modify several attributes - check_mode
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ allocated_storage: '{{ io1_modified_allocated_storage }}'
+ storage_type: '{{ storage_type }}'
+ db_instance_class: '{{ modified_db_instance_class }}'
+ backup_retention_period: 2
+ preferred_backup_window: 05:00-06:00
+ preferred_maintenance_window: '{{ preferred_maintenance_window }}'
+ auto_minor_version_upgrade: false
+ monitoring_interval: '{{ monitoring_interval }}'
+ monitoring_role_arn: '{{ enhanced_monitoring_role.arn }}'
+ iops: '{{ iops }}'
+ port: 1150
+ max_allocated_storage: 150
+ apply_immediately: true
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: Modify several attributes
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ allocated_storage: '{{ io1_modified_allocated_storage }}'
+ storage_type: '{{ storage_type }}'
+ db_instance_class: '{{ modified_db_instance_class }}'
+ backup_retention_period: 2
+ preferred_backup_window: 05:00-06:00
+ preferred_maintenance_window: '{{ preferred_maintenance_window }}'
+ auto_minor_version_upgrade: false
+ monitoring_interval: '{{ monitoring_interval }}'
+ monitoring_role_arn: '{{ enhanced_monitoring_role.arn }}'
+ iops: '{{ iops }}'
+ port: 1150
+ max_allocated_storage: 150
+ apply_immediately: true
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - '"allocated_storage" in result.pending_modified_values or result.allocated_storage
+ == io1_modified_allocated_storage'
+ - '"max_allocated_storage" in result.pending_modified_values or result.max_allocated_storage
+ == 150'
+ - '"port" in result.pending_modified_values or result.endpoint.port == 1150'
+ - '"db_instance_class" in result.pending_modified_values or result.db_instance_class
+ == modified_db_instance_class'
+ - '"monitoring_interval" in result.pending_modified_values or result.monitoring_interval
+ == monitoring_interval'
+
+ - name: Idempotence modifying several pending attributes - check_mode
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ allocated_storage: '{{ io1_modified_allocated_storage }}'
+ storage_type: '{{ storage_type }}'
+ db_instance_class: '{{ modified_db_instance_class }}'
+ backup_retention_period: 2
+ preferred_backup_window: 05:00-06:00
+ preferred_maintenance_window: '{{ preferred_maintenance_window }}'
+ auto_minor_version_upgrade: false
+ monitoring_interval: '{{ monitoring_interval }}'
+ monitoring_role_arn: '{{ enhanced_monitoring_role.arn }}'
+ iops: '{{ iops }}'
+ port: 1150
+ max_allocated_storage: 150
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - not result.changed
+
+ - name: Idempotence modifying several pending attributes
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ allocated_storage: '{{ io1_modified_allocated_storage }}'
+ storage_type: '{{ storage_type }}'
+ db_instance_class: '{{ modified_db_instance_class }}'
+ backup_retention_period: 2
+ preferred_backup_window: 05:00-06:00
+ preferred_maintenance_window: '{{ preferred_maintenance_window }}'
+ auto_minor_version_upgrade: false
+ monitoring_interval: '{{ monitoring_interval }}'
+ monitoring_role_arn: '{{ enhanced_monitoring_role.arn }}'
+ iops: '{{ iops }}'
+ port: 1150
+ max_allocated_storage: 150
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ - '"allocated_storage" in result.pending_modified_values or result.allocated_storage
+ == io1_modified_allocated_storage'
+ - '"max_allocated_storage" in result.pending_modified_values or result.max_allocated_storage
+ == 150'
+ - '"port" in result.pending_modified_values or result.endpoint.port == 1150'
+ - '"db_instance_class" in result.pending_modified_values or result.db_instance_class
+ == modified_db_instance_class'
+
+ always:
+ - name: Delete the instance
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: absent
+ skip_final_snapshot: true
+ wait: false
+ ignore_errors: yes
+
+ - name: Remove enhanced monitoring role
+ iam_role:
+ assume_role_policy_document: "{{ lookup('file','files/enhanced_monitoring_assume_policy.json') }}"
+ name: '{{ instance_id }}-role'
+ state: absent
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/aliases
new file mode 100644
index 000000000..777fbe40a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+time=15m
+rds_instance
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/defaults/main.yml
new file mode 100644
index 000000000..fafb0becc
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/defaults/main.yml
@@ -0,0 +1,9 @@
+instance_id: ansible-test-{{ tiny_prefix }}
+modified_instance_id: '{{ instance_id }}-updated'
+username: test
+password: test12345678
+db_instance_class: db.t3.micro
+allocated_storage: 20
+
+# For mariadb tests
+mariadb_engine_version: 10.6.10
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/tasks/main.yml
new file mode 100644
index 000000000..e13573416
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/tasks/main.yml
@@ -0,0 +1,206 @@
+- name: rds_instance / modify integration tests
+ collections:
+ - community.aws
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+ - name: Ensure the resource doesn't exist
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: absent
+ skip_final_snapshot: true
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ ignore_errors: yes
+
+ - name: Create a mariadb instance
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ engine: mariadb
+ engine_version: '{{ mariadb_engine_version }}'
+ allow_major_version_upgrade: true
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ tags:
+ Name: '{{ instance_id }}'
+ Created_by: Ansible rds_instance tests
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - result.db_instance_identifier == '{{ instance_id }}'
+
+ - name: Create a DB instance with an invalid engine
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ engine: thisisnotavalidengine
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ register: result
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result.failed
+ - '"value of engine must be one of" in result.msg'
+
+ - name: Add IAM roles to mariadb (should fail - iam roles not supported for mariadb)
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ engine: mariadb
+ engine_version: '{{ mariadb_engine_version }}'
+ allow_major_version_upgrade: true
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ iam_roles:
+ - role_arn: my_role
+ feature_name: my_feature
+ register: result
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result.failed
+ - '"is not valid for adding IAM roles" in result.msg'
+
+ # TODO: test modifying db_subnet_group_name, db_security_groups, db_parameter_group_name, option_group_name,
+ # monitoring_role_arn, monitoring_interval, domain, domain_iam_role_name, cloudwatch_logs_export_configuration
+
+ # ------------------------------------------------------------------------------------------
+ - name: Modify the storage type without immediate application - check_mode
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ storage_type: gp3
+ apply_immediately: false
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - result.changed
+ - 'result.storage_type == "gp2"'
+
+ - name: Modify the storage type without immediate application
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ storage_type: gp3
+ apply_immediately: false
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - 'result.pending_modified_values.storage_type == "gp3"'
+ - 'result.storage_type == "gp2"'
+
+ - name: Modify the storage type without immediate application - idempotent - check_mode
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ storage_type: gp3
+ apply_immediately: false
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - not result.changed
+ - 'result.pending_modified_values.storage_type == "gp3"'
+ - 'result.storage_type == "gp2"'
+
+ - name: Modify the storage type back to gp2 without immediate application
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ storage_type: gp2
+ apply_immediately: false
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - 'result.pending_modified_values == {}'
+ - 'result.storage_type == "gp2"'
+
+ - name: Modify the instance name without immediate application - check_mode
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ new_id: '{{ modified_instance_id }}'
+ apply_immediately: false
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: Modify the instance name without immediate application
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ new_id: '{{ modified_instance_id }}'
+ apply_immediately: false
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - result.db_instance_identifier == "{{ instance_id }}"
+
+ - name: Immediately apply the pending update - check_mode
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ new_id: '{{ modified_instance_id }}'
+ apply_immediately: true
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: Immediately apply the pending update
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ new_id: '{{ modified_instance_id }}'
+ apply_immediately: true
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - result.db_instance_identifier == "{{ modified_instance_id }}"
+
+ always:
+ - name: Delete the instance
+ rds_instance:
+ id: '{{ item }}'
+ state: absent
+ skip_final_snapshot: true
+ wait: false
+ ignore_errors: yes
+ loop:
+ - '{{ instance_id }}'
+ - '{{ modified_instance_id }}'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_processor/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_processor/aliases
new file mode 100644
index 000000000..777fbe40a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_processor/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+time=15m
+rds_instance
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_processor/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_processor/defaults/main.yml
new file mode 100644
index 000000000..41d99538a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_processor/defaults/main.yml
@@ -0,0 +1,12 @@
+instance_id: ansible-test-{{ tiny_prefix }}
+username: test
+password: test12345678
+db_instance_class: db.t3.micro
+allocated_storage: 20
+
+# For oracle tests
+# Smallest instance that permits modification of the coreCount
+oracle_ee_db_instance_class: db.r5.2xlarge
+modified_processor_features:
+ coreCount: 4
+ threadsPerCore: 2
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_processor/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_processor/tasks/main.yml
new file mode 100644
index 000000000..260a37951
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_processor/tasks/main.yml
@@ -0,0 +1,141 @@
+- name: rds_instance / processor integration tests
+ collections:
+ - community.aws
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+
+ - name: Ensure the resource doesn't exist
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: absent
+ skip_final_snapshot: true
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ ignore_errors: yes
+
+ - name: Create an oracle-ee DB instance
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ engine: oracle-ee
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ oracle_ee_db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ storage_encrypted: true
+ processor_features: {}
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: Modify the processor features - check_mode
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ engine: oracle-ee
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ oracle_ee_db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ storage_encrypted: true
+ processor_features: '{{ modified_processor_features }}'
+ apply_immediately: true
+ register: result
+ check_mode: true
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: Modify the processor features
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ engine: oracle-ee
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ oracle_ee_db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ storage_encrypted: true
+ processor_features: '{{ modified_processor_features }}'
+ apply_immediately: true
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - result.pending_modified_values.processor_features.coreCount == "{{ modified_processor_features.coreCount }}"
+ - result.pending_modified_values.processor_features.threadsPerCore == "{{ modified_processor_features.threadsPerCore }}"
+
+ - name: Modify the processor features (idempotence) - check_mode
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ engine: oracle-ee
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ oracle_ee_db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ storage_encrypted: true
+ processor_features: '{{ modified_processor_features }}'
+ apply_immediately: true
+ register: result
+ check_mode: true
+
+ - assert:
+ that:
+ - not result.changed
+
+ - name: Modify the processor features (idempotence)
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ engine: oracle-ee
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ oracle_ee_db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ storage_encrypted: true
+ processor_features: '{{ modified_processor_features }}'
+ apply_immediately: true
+ register: result
+
+ # Check if processor features either are pending or already changed
+ - assert:
+ that:
+ - not result.changed
+ - (result.pending_modified_values.processor_features.coreCount is defined and result.pending_modified_values.processor_features.coreCount == "{{ modified_processor_features.coreCount }}") or (result.processor_features.coreCount is defined and result.processor_features.coreCount == "{{ modified_processor_features.coreCount }}")
+ - (result.pending_modified_values.processor_features.threadsPerCore is defined and result.pending_modified_values.processor_features.threadsPerCore == "{{ modified_processor_features.threadsPerCore }}") or (result.processor_features.threadsPerCore is defined and result.processor_features.threadsPerCore == "{{ modified_processor_features.threadsPerCore }}")
+
+ always:
+
+ - name: Delete the DB instance
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: absent
+ skip_final_snapshot: true
+ wait: false
+ register: result
+
+ - assert:
+ that:
+ - result.changed
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_replica/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_replica/aliases
new file mode 100644
index 000000000..4bdc79e47
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_replica/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+time=25m
+rds_instance
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_replica/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_replica/defaults/main.yml
new file mode 100644
index 000000000..b559f8c3f
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_replica/defaults/main.yml
@@ -0,0 +1,6 @@
+instance_id: ansible-test-{{ tiny_prefix }}
+modified_instance_id: '{{ instance_id }}-updated'
+username: test
+password: test12345678
+db_instance_class: db.t3.micro
+allocated_storage: 20
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_replica/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_replica/tasks/main.yml
new file mode 100644
index 000000000..c282f1f23
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_replica/tasks/main.yml
@@ -0,0 +1,234 @@
+- name: rds_instance / replica integration tests
+ collections:
+ - community.aws
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+
+ - name: set the two regions for the source DB and the replica
+ set_fact:
+ region_src: '{{ aws_region }}'
+ region_dest: '{{ aws_region }}'
+
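+ # Both facts above intentionally point at the same region because CI only cleans up
+ # resources in one region. For a genuine cross-region replica the destination fact
+ # would simply name a second region, e.g. (illustrative assumption only, and the
+ # source may then need to be referenced by its ARN rather than its plain identifier):
+ # region_dest: us-east-2
+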
+ - name: Ensure the resource doesn't exist
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: absent
+ skip_final_snapshot: true
+ region: '{{ region_src }}'
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ ignore_errors: yes
+
+ - name: Create a source DB instance
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ engine: mysql
+ backup_retention_period: 1
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ region: '{{ region_src }}'
+ tags:
+ Name: '{{ instance_id }}'
+ Created_by: Ansible rds_instance tests
+ register: source_db
+
+ - assert:
+ that:
+ - source_db.changed
+ - source_db.db_instance_identifier == '{{ instance_id }}'
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Create a read replica in a different region - check_mode
+ rds_instance:
+ id: '{{ instance_id }}-replica'
+ state: present
+ source_db_instance_identifier: '{{ instance_id }}'
+ engine: mysql
+ username: '{{ username }}'
+ password: '{{ password }}'
+ read_replica: true
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ region: '{{ region_dest }}'
+ tags:
+ Name: '{{ instance_id }}'
+ Created_by: Ansible rds_instance tests
+ wait: yes
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: Create a read replica in a different region
+ rds_instance:
+ id: '{{ instance_id }}-replica'
+ state: present
+ source_db_instance_identifier: '{{ instance_id }}'
+ engine: mysql
+ username: '{{ username }}'
+ password: '{{ password }}'
+ read_replica: true
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ region: '{{ region_dest }}'
+ tags:
+ Name: '{{ instance_id }}'
+ Created_by: Ansible rds_instance tests
+ wait: yes
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - result.db_instance_identifier == '{{ instance_id }}-replica'
+ - result.tags | length == 2
+ - result.tags.Name == '{{ instance_id }}'
+ - result.tags.Created_by == 'Ansible rds_instance tests'
+
+ - name: Test idempotence with a read replica - check_mode
+ rds_instance:
+ id: '{{ instance_id }}-replica'
+ state: present
+ source_db_instance_identifier: '{{ instance_id }}'
+ engine: mysql
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ region: '{{ region_dest }}'
+ tags:
+ Name: '{{ instance_id }}'
+ Created_by: Ansible rds_instance tests
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - not result.changed
+
+ - name: Test idempotence with a read replica
+ rds_instance:
+ id: '{{ instance_id }}-replica'
+ state: present
+ source_db_instance_identifier: '{{ instance_id }}'
+ engine: mysql
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ region: '{{ region_dest }}'
+ tags:
+ Name: '{{ instance_id }}'
+ Created_by: Ansible rds_instance tests
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+
+ - name: Test idempotence with read_replica=True
+ rds_instance:
+ id: '{{ instance_id }}-replica'
+ state: present
+ read_replica: true
+ source_db_instance_identifier: '{{ instance_id }}'
+ engine: mysql
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ region: '{{ region_dest }}'
+ tags:
+ Name: '{{ instance_id }}'
+ Created_by: Ansible rds_instance tests
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Promote the read replica - check_mode
+ rds_instance:
+ id: '{{ instance_id }}-replica'
+ state: present
+ read_replica: false
+ region: '{{ region_dest }}'
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: Promote the read replica
+ rds_instance:
+ id: '{{ instance_id }}-replica'
+ state: present
+ read_replica: false
+ region: '{{ region_dest }}'
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: Test idempotence - check_mode
+ rds_instance:
+ id: '{{ instance_id }}-replica'
+ state: present
+ read_replica: false
+ region: '{{ region_dest }}'
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - not result.changed
+
+ - name: Test idempotence
+ rds_instance:
+ id: '{{ instance_id }}-replica'
+ state: present
+ read_replica: false
+ region: '{{ region_dest }}'
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+
+ always:
+
+ - name: Remove the DB instance
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: absent
+ skip_final_snapshot: true
+ region: '{{ region_src }}'
+ wait: false
+ ignore_errors: yes
+
+ - name: Remove the DB replica
+ rds_instance:
+ id: '{{ instance_id }}-replica'
+ state: absent
+ skip_final_snapshot: true
+ region: '{{ region_dest }}'
+ wait: false
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_restore/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_restore/aliases
new file mode 100644
index 000000000..b9702d285
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_restore/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+time=20m
+rds_instance
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_restore/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_restore/defaults/main.yml
new file mode 100644
index 000000000..5540ffb89
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_restore/defaults/main.yml
@@ -0,0 +1,5 @@
+instance_id: ansible-test-{{ tiny_prefix }}
+username: test
+password: test12345678
+db_instance_class: db.t3.micro
+allocated_storage: 20
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_restore/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_restore/tasks/main.yml
new file mode 100644
index 000000000..c872db880
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_restore/tasks/main.yml
@@ -0,0 +1,131 @@
+- name: rds_instance / restore integration tests
+ collections:
+ - community.aws
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+ # TODO: snapshot, s3
+
+ - name: Ensure the resource doesn't exist
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: absent
+ skip_final_snapshot: true
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ ignore_errors: yes
+
+ - name: Create a source DB instance
+ rds_instance:
+ id: '{{ instance_id }}-s'
+ state: present
+ engine: mysql
+ backup_retention_period: 1
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ register: source_db
+
+ - assert:
+ that:
+ - source_db.changed
+ - source_db.db_instance_identifier == '{{ instance_id }}-s'
+
+ - name: Create a point in time DB instance - check_mode
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ source_db_instance_identifier: '{{ instance_id }}-s'
+ creation_source: instance
+ engine: mysql
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ use_latest_restorable_time: true
+ register: result
+ check_mode: yes
+
+ - assert:
+ that: result.changed
+
+ - name: Create a point in time DB instance
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ source_db_instance_identifier: '{{ instance_id }}-s'
+ creation_source: instance
+ engine: mysql
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ use_latest_restorable_time: true
+ register: result
+
+ - assert:
+ that: result.changed
+
+ - name: Create a point in time DB instance (idempotence) - check_mode
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ source_db_instance_identifier: '{{ instance_id }}-s'
+ creation_source: instance
+ engine: mysql
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ restore_time: '{{ result.latest_restorable_time }}'
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - not result.changed
+
+ - name: Create a point in time DB instance (idempotence)
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ source_db_instance_identifier: '{{ instance_id }}-s'
+ creation_source: instance
+ engine: mysql
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ restore_time: '{{ result.latest_restorable_time }}'
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ - result.db_instance_identifier == '{{ instance_id }}'
+
+ always:
+
+ - name: Remove the DB instance
+ rds_instance:
+ id: '{{ instance_id }}-s'
+ state: absent
+ skip_final_snapshot: true
+ wait: false
+ ignore_errors: yes
+
+ - name: Remove the point in time restored DB
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: absent
+ skip_final_snapshot: true
+ wait: false
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_sgroups/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_sgroups/aliases
new file mode 100644
index 000000000..777fbe40a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_sgroups/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+time=15m
+rds_instance
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_sgroups/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_sgroups/defaults/main.yml
new file mode 100644
index 000000000..5540ffb89
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_sgroups/defaults/main.yml
@@ -0,0 +1,5 @@
+instance_id: ansible-test-{{ tiny_prefix }}
+username: test
+password: test12345678
+db_instance_class: db.t3.micro
+allocated_storage: 20
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_sgroups/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_sgroups/tasks/main.yml
new file mode 100644
index 000000000..761f71d2a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_sgroups/tasks/main.yml
@@ -0,0 +1,332 @@
+- name: rds_instance / sgroups integration tests
+ collections:
+ - community.aws
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+
+ - name: create a VPC
+ ec2_vpc_net:
+ name: '{{ resource_prefix }}-vpc'
+ state: present
+ cidr_block: 10.122.122.128/26
+ tags:
+ Name: '{{ resource_prefix }}-vpc'
+ Description: created by rds_instance integration tests
+ register: vpc_result
+
+ - name: create subnets
+ ec2_vpc_subnet:
+ cidr: '{{ item.cidr }}'
+ az: '{{ item.zone }}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ tags:
+ Name: '{{ resource_prefix }}-subnet'
+ Description: created by rds_instance integration tests
+ state: present
+ register: subnets_result
+ loop:
+ - {cidr: 10.122.122.128/28, zone: '{{ aws_region }}a'}
+ - {cidr: 10.122.122.144/28, zone: '{{ aws_region }}b'}
+ - {cidr: 10.122.122.160/28, zone: '{{ aws_region }}c'}
+
+ - name: Create security groups
+ ec2_group:
+ name: '{{ item }}'
+ description: created by rds_instance integration tests
+ state: present
+ register: sgs_result
+ loop:
+ - '{{ resource_prefix }}-sg-1'
+ - '{{ resource_prefix }}-sg-2'
+ - '{{ resource_prefix }}-sg-3'
+
+ - name: Ensure the resource doesn't exist
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: absent
+ skip_final_snapshot: true
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ ignore_errors: yes
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Create a DB instance in the VPC with two security groups - check_mode
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ engine: mariadb
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ vpc_security_group_ids:
+ - '{{ sgs_result.results.0.group_id }}'
+ - '{{ sgs_result.results.1.group_id }}'
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: Create a DB instance in the VPC with two security groups
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ engine: mariadb
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ vpc_security_group_ids:
+ - '{{ sgs_result.results.0.group_id }}'
+ - '{{ sgs_result.results.1.group_id }}'
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - result.db_instance_identifier == '{{ instance_id }}'
+ - result.vpc_security_groups | selectattr('status', 'in', ['active', 'adding'])
+ | list | length == 2
+
+ - name: Create a DB instance in the VPC with two security groups (idempotence) -
+ check_mode
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ engine: mariadb
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ vpc_security_group_ids:
+ - '{{ sgs_result.results.0.group_id }}'
+ - '{{ sgs_result.results.1.group_id }}'
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - not result.changed
+
+ - name: Create a DB instance in the VPC with two security groups (idempotence)
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ engine: mariadb
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ vpc_security_group_ids:
+ - '{{ sgs_result.results.0.group_id }}'
+ - '{{ sgs_result.results.1.group_id }}'
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ - result.db_instance_identifier == '{{ instance_id }}'
+ - result.vpc_security_groups | selectattr('status', 'in', ['active', 'adding'])
+ | list | length == 2
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Add a new security group without purge - check_mode
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ vpc_security_group_ids:
+ - '{{ sgs_result.results.2.group_id }}'
+ apply_immediately: true
+ purge_security_groups: false
+ check_mode: true
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - result.db_instance_identifier == '{{ instance_id }}'
+
+ - name: Add a new security group without purge
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ vpc_security_group_ids:
+ - '{{ sgs_result.results.2.group_id }}'
+ apply_immediately: true
+ purge_security_groups: false
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - result.db_instance_identifier == '{{ instance_id }}'
+ - result.vpc_security_groups | selectattr('status', 'in', ['active', 'adding'])
+ | list | length == 3
+
+ - name: Add a new security group without purge (idempotence) - check_mode
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ vpc_security_group_ids:
+ - '{{ sgs_result.results.2.group_id }}'
+ apply_immediately: true
+ purge_security_groups: false
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - not result.changed
+ - result.db_instance_identifier == '{{ instance_id }}'
+
+ - name: Add a new security group without purge (idempotence)
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ vpc_security_group_ids:
+ - '{{ sgs_result.results.2.group_id }}'
+ apply_immediately: true
+ purge_security_groups: false
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ - result.db_instance_identifier == '{{ instance_id }}'
+ - result.vpc_security_groups | selectattr('status', 'in', ['active', 'adding'])
+ | list | length == 3
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Add a security group with purge - check_mode
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ vpc_security_group_ids:
+ - '{{ sgs_result.results.2.group_id }}'
+ apply_immediately: true
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: Add a security group with purge
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ vpc_security_group_ids:
+ - '{{ sgs_result.results.2.group_id }}'
+ apply_immediately: true
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - result.db_instance_identifier == '{{ instance_id }}'
+ - result.vpc_security_groups | selectattr('status', 'in', ['active', 'adding'])
+ | list | length == 1
+ - result.vpc_security_groups | selectattr('status', 'equalto', 'removing') |
+ list | length == 2
+
+ - name: Add a security group with purge (idempotence) - check_mode
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ vpc_security_group_ids:
+ - '{{ sgs_result.results.2.group_id }}'
+ apply_immediately: true
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - not result.changed
+
+ - name: Add a security group with purge (idempotence)
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ vpc_security_group_ids:
+ - '{{ sgs_result.results.2.group_id }}'
+ apply_immediately: true
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ - result.db_instance_identifier == '{{ instance_id }}'
+ - result.vpc_security_groups | selectattr('status', 'in', ['active', 'adding'])
+ | list | length == 1
+
+ always:
+
+ - name: Ensure the resource doesn't exist
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: absent
+ skip_final_snapshot: true
+ register: result
+ ignore_errors: yes
+
+ - name: Remove security groups
+ ec2_group:
+ name: '{{ item }}'
+ description: created by rds_instance integration tests
+ state: absent
+ register: sgs_result
+ loop:
+ - '{{ resource_prefix }}-sg-1'
+ - '{{ resource_prefix }}-sg-2'
+ - '{{ resource_prefix }}-sg-3'
+ ignore_errors: yes
+ retries: 30
+ until: sgs_result is not failed
+ delay: 10
+
+ - name: remove subnets
+ ec2_vpc_subnet:
+ cidr: '{{ item.cidr }}'
+ az: '{{ item.zone }}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ tags:
+ Name: '{{ resource_prefix }}-subnet'
+ Description: created by rds_instance integration tests
+ state: absent
+ register: subnets
+ ignore_errors: yes
+ retries: 30
+ until: subnets is not failed
+ delay: 10
+ loop:
+ - {cidr: 10.122.122.128/28, zone: '{{ aws_region }}a'}
+ - {cidr: 10.122.122.144/28, zone: '{{ aws_region }}b'}
+ - {cidr: 10.122.122.160/28, zone: '{{ aws_region }}c'}
+ - {cidr: 10.122.122.176/28, zone: '{{ aws_region }}d'}
+
+ - name: Delete VPC
+ ec2_vpc_net:
+ name: '{{ resource_prefix }}-vpc'
+ state: absent
+ cidr_block: 10.122.122.128/26
+ tags:
+ Name: '{{ resource_prefix }}-vpc'
+ Description: created by rds_instance integration tests
+ register: vpc_result
+ ignore_errors: yes
+ retries: 30
+ until: vpc_result is not failed
+ delay: 10
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/aliases
new file mode 100644
index 000000000..df5ff67a2
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/aliases
@@ -0,0 +1,5 @@
+time=25m
+
+cloud/aws
+
+rds_snapshot_info
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/defaults/main.yml
new file mode 100644
index 000000000..b480137fc
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/defaults/main.yml
@@ -0,0 +1,14 @@
+---
+# defaults file for rds_instance_snapshot
+
+# Create RDS instance
+instance_id: '{{ resource_prefix }}-instance'
+username: 'testrdsusername'
+password: "{{ lookup('password', '/dev/null') }}"
+db_instance_class: db.t3.micro
+allocated_storage: 10
+engine: 'mariadb'
+mariadb_engine_version: 10.6.10
+
+# Create snapshot
+snapshot_id: '{{ instance_id }}-snapshot'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/tasks/main.yml
new file mode 100644
index 000000000..c639291a5
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/tasks/main.yml
@@ -0,0 +1,505 @@
+---
+- module_defaults:
+ group/aws:
+ region: "{{ aws_region }}"
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ collections:
+ - community.aws
+ - amazon.aws
+
+ block:
+ - name: Create a source mariadb instance
+ rds_instance:
+ id: "{{ instance_id }}"
+ state: present
+ engine: "{{ engine}}"
+ engine_version: "{{ mariadb_engine_version }}"
+ allow_major_version_upgrade: true
+ username: "{{ username }}"
+ password: "{{ password }}"
+ db_instance_class: "{{ db_instance_class }}"
+ allocated_storage: "{{ allocated_storage }}"
+ register: _result_create_instance
+
+ - assert:
+ that:
+ - _result_create_instance.changed
+ - _result_create_instance.db_instance_identifier == "{{ instance_id }}"
+
+ - name: Get all RDS snapshots for the existing instance
+ rds_snapshot_info:
+ db_instance_identifier: "{{ instance_id }}"
+ register: _result_instance_snapshot_info
+
+ - assert:
+ that:
+ - _result_instance_snapshot_info is successful
+ - _result_instance_snapshot_info.snapshots | length == 1
+
+ - name: Take a snapshot of the existing RDS instance (CHECK_MODE)
+ rds_instance_snapshot:
+ state: present
+ db_instance_identifier: "{{ instance_id }}"
+ db_snapshot_identifier: "{{ snapshot_id }}"
+ check_mode: yes
+ register: _result_instance_snapshot
+
+ - assert:
+ that:
+ - _result_instance_snapshot.changed
+
+ - name: Take a snapshot of the existing RDS instance
+ rds_instance_snapshot:
+ state: present
+ db_instance_identifier: "{{ instance_id }}"
+ db_snapshot_identifier: "{{ snapshot_id }}"
+ wait: true
+ register: _result_instance_snapshot
+
+ - assert:
+ that:
+ - _result_instance_snapshot.changed
+ - "'availability_zone' in _result_instance_snapshot"
+ - "'instance_create_time' in _result_instance_snapshot"
+ - "'db_instance_identifier' in _result_instance_snapshot"
+ - _result_instance_snapshot.db_instance_identifier == "{{ instance_id }}"
+ - "'db_snapshot_identifier' in _result_instance_snapshot"
+ - _result_instance_snapshot.db_snapshot_identifier == "{{ snapshot_id }}"
+ - "'db_snapshot_arn' in _result_instance_snapshot"
+ - "'dbi_resource_id' in _result_instance_snapshot"
+ - "'encrypted' in _result_instance_snapshot"
+ - "'engine' in _result_instance_snapshot"
+ - _result_instance_snapshot.engine == "{{ engine }}"
+ - "'engine_version' in _result_instance_snapshot"
+ - _result_instance_snapshot.engine_version == "{{ mariadb_engine_version }}"
+ - "'iam_database_authentication_enabled' in _result_instance_snapshot"
+ - "'license_model' in _result_instance_snapshot"
+ - "'master_username' in _result_instance_snapshot"
+ - _result_instance_snapshot.master_username == "{{ username }}"
+ - "'snapshot_create_time' in _result_instance_snapshot"
+ - "'snapshot_type' in _result_instance_snapshot"
+ - "'status' in _result_instance_snapshot"
+ - _result_instance_snapshot.status == "available"
+ - "'snapshot_type' in _result_instance_snapshot"
+ - _result_instance_snapshot.snapshot_type == "manual"
+ - "'status' in _result_instance_snapshot"
+ - "'storage_type' in _result_instance_snapshot"
+ - _result_instance_snapshot.storage_type == "gp2"
+ - "'tags' in _result_instance_snapshot"
+ - "'vpc_id' in _result_instance_snapshot"
+
+ - name: Take a snapshot of the existing RDS instance (CHECK_MODE - IDEMPOTENCE)
+ rds_instance_snapshot:
+ state: present
+ db_instance_identifier: "{{ instance_id }}"
+ db_snapshot_identifier: "{{ snapshot_id }}"
+ check_mode: yes
+ register: _result_instance_snapshot
+
+ - assert:
+ that:
+ - not _result_instance_snapshot.changed
+
+ - name: Take a snapshot of the existing RDS instance (IDEMPOTENCE)
+ rds_instance_snapshot:
+ state: present
+ db_instance_identifier: "{{ instance_id }}"
+ db_snapshot_identifier: "{{ snapshot_id }}"
+ wait: true
+ register: _result_instance_snapshot
+
+ - assert:
+ that:
+ - not _result_instance_snapshot.changed
+ - "'availability_zone' in _result_instance_snapshot"
+ - "'instance_create_time' in _result_instance_snapshot"
+ - "'db_instance_identifier' in _result_instance_snapshot"
+ - _result_instance_snapshot.db_instance_identifier == "{{ instance_id }}"
+ - "'db_snapshot_identifier' in _result_instance_snapshot"
+ - _result_instance_snapshot.db_snapshot_identifier == "{{ snapshot_id }}"
+ - "'db_snapshot_arn' in _result_instance_snapshot"
+ - "'dbi_resource_id' in _result_instance_snapshot"
+ - "'encrypted' in _result_instance_snapshot"
+ - "'engine' in _result_instance_snapshot"
+ - _result_instance_snapshot.engine == "{{ engine }}"
+ - "'engine_version' in _result_instance_snapshot"
+ - _result_instance_snapshot.engine_version == "{{ mariadb_engine_version }}"
+ - "'iam_database_authentication_enabled' in _result_instance_snapshot"
+ - "'license_model' in _result_instance_snapshot"
+ - "'master_username' in _result_instance_snapshot"
+ - _result_instance_snapshot.master_username == "{{ username }}"
+ - "'snapshot_create_time' in _result_instance_snapshot"
+ - "'snapshot_type' in _result_instance_snapshot"
+ - "'status' in _result_instance_snapshot"
+ - _result_instance_snapshot.status == "available"
+ - "'snapshot_type' in _result_instance_snapshot"
+ - _result_instance_snapshot.snapshot_type == "manual"
+ - "'status' in _result_instance_snapshot"
+ - "'storage_type' in _result_instance_snapshot"
+ - _result_instance_snapshot.storage_type == "gp2"
+ - "'tags' in _result_instance_snapshot"
+ - "'vpc_id' in _result_instance_snapshot"
+
+ - name: Get information about the existing DB snapshot
+ rds_snapshot_info:
+ db_snapshot_identifier: "{{ snapshot_id }}"
+ register: _result_instance_snapshot_info
+
+ - assert:
+ that:
+ - _result_instance_snapshot_info is successful
+ - _result_instance_snapshot_info.snapshots[0].db_instance_identifier == "{{ instance_id }}"
+ - _result_instance_snapshot_info.snapshots[0].db_snapshot_identifier == "{{ snapshot_id }}"
+
+ - name: Take another snapshot of the existing RDS instance
+ rds_instance_snapshot:
+ state: present
+ db_instance_identifier: "{{ instance_id }}"
+ db_snapshot_identifier: "{{ snapshot_id }}-b"
+ wait: true
+ register: _result_instance_snapshot
+
+ - assert:
+ that:
+ - _result_instance_snapshot.changed
+ - "'availability_zone' in _result_instance_snapshot"
+ - "'instance_create_time' in _result_instance_snapshot"
+ - "'db_instance_identifier' in _result_instance_snapshot"
+ - _result_instance_snapshot.db_instance_identifier == "{{ instance_id }}"
+ - "'db_snapshot_identifier' in _result_instance_snapshot"
+ - _result_instance_snapshot.db_snapshot_identifier == "{{ snapshot_id }}-b"
+ - "'db_snapshot_arn' in _result_instance_snapshot"
+ - "'dbi_resource_id' in _result_instance_snapshot"
+ - "'encrypted' in _result_instance_snapshot"
+ - "'engine' in _result_instance_snapshot"
+ - _result_instance_snapshot.engine == "{{ engine }}"
+ - "'engine_version' in _result_instance_snapshot"
+ - _result_instance_snapshot.engine_version == "{{ mariadb_engine_version }}"
+ - "'iam_database_authentication_enabled' in _result_instance_snapshot"
+ - "'license_model' in _result_instance_snapshot"
+ - "'master_username' in _result_instance_snapshot"
+ - _result_instance_snapshot.master_username == "{{ username }}"
+ - "'snapshot_create_time' in _result_instance_snapshot"
+ - "'snapshot_type' in _result_instance_snapshot"
+ - "'status' in _result_instance_snapshot"
+ - _result_instance_snapshot.status == "available"
+ - "'snapshot_type' in _result_instance_snapshot"
+ - _result_instance_snapshot.snapshot_type == "manual"
+ - "'status' in _result_instance_snapshot"
+ - "'storage_type' in _result_instance_snapshot"
+ - _result_instance_snapshot.storage_type == "gp2"
+ - "'tags' in _result_instance_snapshot"
+ - "'vpc_id' in _result_instance_snapshot"
+
+ - name: Get all snapshots for the existing RDS instance
+ rds_snapshot_info:
+ db_instance_identifier: "{{ instance_id }}"
+ register: _result_instance_snapshot_info
+
+ - assert:
+ that:
+ - _result_instance_snapshot_info is successful
+ #- _result_instance_snapshot_info.snapshots | length == 3
+
+ - name: Delete existing DB instance snapshot (CHECK_MODE)
+ rds_instance_snapshot:
+ state: absent
+ db_snapshot_identifier: "{{ snapshot_id }}-b"
+ register: _result_delete_snapshot
+ check_mode: yes
+
+ - assert:
+ that:
+ - _result_delete_snapshot.changed
+
+ - name: Delete the existing DB instance snapshot
+ rds_instance_snapshot:
+ state: absent
+ db_snapshot_identifier: "{{ snapshot_id }}-b"
+ register: _result_delete_snapshot
+
+ - assert:
+ that:
+ - _result_delete_snapshot.changed
+
+ - name: Delete existing DB instance snapshot (CHECK_MODE - IDEMPOTENCE)
+ rds_instance_snapshot:
+ state: absent
+ db_snapshot_identifier: "{{ snapshot_id }}-b"
+ register: _result_delete_snapshot
+ check_mode: yes
+
+ - assert:
+ that:
+ - not _result_delete_snapshot.changed
+
+ - name: Delete the existing DB instance snapshot (IDEMPOTENCE)
+ rds_instance_snapshot:
+ state: absent
+ db_snapshot_identifier: "{{ snapshot_id }}-b"
+ register: _result_delete_snapshot
+
+ - assert:
+ that:
+ - not _result_delete_snapshot.changed
+
+ - name: Take another snapshot of the existing RDS instance and assign tags
+ rds_instance_snapshot:
+ state: present
+ db_instance_identifier: "{{ instance_id }}"
+ db_snapshot_identifier: "{{ snapshot_id }}-b"
+ wait: true
+ tags:
+ tag_one: '{{ snapshot_id }}-b One'
+ "Tag Two": 'two {{ snapshot_id }}-b'
+ register: _result_instance_snapshot
+
+ - assert:
+ that:
+ - _result_instance_snapshot.changed
+ - "'availability_zone' in _result_instance_snapshot"
+ - "'instance_create_time' in _result_instance_snapshot"
+ - "'db_instance_identifier' in _result_instance_snapshot"
+ - _result_instance_snapshot.db_instance_identifier == "{{ instance_id }}"
+ - "'db_snapshot_identifier' in _result_instance_snapshot"
+ - _result_instance_snapshot.db_snapshot_identifier == "{{ snapshot_id }}-b"
+ - "'db_snapshot_arn' in _result_instance_snapshot"
+ - "'dbi_resource_id' in _result_instance_snapshot"
+ - "'encrypted' in _result_instance_snapshot"
+ - "'engine' in _result_instance_snapshot"
+ - _result_instance_snapshot.engine == "{{ engine }}"
+ - "'engine_version' in _result_instance_snapshot"
+ - _result_instance_snapshot.engine_version == "{{ mariadb_engine_version }}"
+ - "'iam_database_authentication_enabled' in _result_instance_snapshot"
+ - "'license_model' in _result_instance_snapshot"
+ - "'master_username' in _result_instance_snapshot"
+ - _result_instance_snapshot.master_username == "{{ username }}"
+ - "'snapshot_create_time' in _result_instance_snapshot"
+ - "'snapshot_type' in _result_instance_snapshot"
+ - "'status' in _result_instance_snapshot"
+ - _result_instance_snapshot.status == "available"
+ - "'snapshot_type' in _result_instance_snapshot"
+ - _result_instance_snapshot.snapshot_type == "manual"
+ - "'status' in _result_instance_snapshot"
+ - "'storage_type' in _result_instance_snapshot"
+ - _result_instance_snapshot.storage_type == "gp2"
+ - "'tags' in _result_instance_snapshot"
+ - _result_instance_snapshot.tags | length == 2
+ - _result_instance_snapshot.tags["tag_one"] == "{{ snapshot_id }}-b One"
+ - _result_instance_snapshot.tags["Tag Two"] == "two {{ snapshot_id }}-b"
+ - "'vpc_id' in _result_instance_snapshot"
+
+ - name: Attempt to take another snapshot of the existing RDS instance and assign tags (idempotence)
+ rds_instance_snapshot:
+ state: present
+ db_instance_identifier: "{{ instance_id }}"
+ db_snapshot_identifier: "{{ snapshot_id }}-b"
+ wait: true
+ tags:
+ tag_one: '{{ snapshot_id }}-b One'
+ "Tag Two": 'two {{ snapshot_id }}-b'
+ register: _result_instance_snapshot
+
+ - assert:
+ that:
+ - not _result_instance_snapshot.changed
+
+ - name: Take another snapshot of the existing RDS instance and update tags
+ rds_instance_snapshot:
+ state: present
+ db_instance_identifier: "{{ instance_id }}"
+ db_snapshot_identifier: "{{ snapshot_id }}-b"
+ tags:
+ tag_three: '{{ snapshot_id }}-b Three'
+ "Tag Two": 'two {{ snapshot_id }}-b'
+ register: _result_instance_snapshot
+
+ - assert:
+ that:
+ - _result_instance_snapshot.changed
+ - "'availability_zone' in _result_instance_snapshot"
+ - "'instance_create_time' in _result_instance_snapshot"
+ - "'db_instance_identifier' in _result_instance_snapshot"
+ - _result_instance_snapshot.db_instance_identifier == "{{ instance_id }}"
+ - "'db_snapshot_identifier' in _result_instance_snapshot"
+ - _result_instance_snapshot.db_snapshot_identifier == "{{ snapshot_id }}-b"
+ - "'db_snapshot_arn' in _result_instance_snapshot"
+ - "'dbi_resource_id' in _result_instance_snapshot"
+ - "'encrypted' in _result_instance_snapshot"
+ - "'engine' in _result_instance_snapshot"
+ - _result_instance_snapshot.engine == "{{ engine }}"
+ - "'engine_version' in _result_instance_snapshot"
+ - _result_instance_snapshot.engine_version == "{{ mariadb_engine_version }}"
+ - "'iam_database_authentication_enabled' in _result_instance_snapshot"
+ - "'license_model' in _result_instance_snapshot"
+ - "'master_username' in _result_instance_snapshot"
+ - _result_instance_snapshot.master_username == "{{ username }}"
+ - "'snapshot_create_time' in _result_instance_snapshot"
+ - "'snapshot_type' in _result_instance_snapshot"
+ - "'status' in _result_instance_snapshot"
+ - _result_instance_snapshot.status == "available"
+ - "'snapshot_type' in _result_instance_snapshot"
+ - _result_instance_snapshot.snapshot_type == "manual"
+ - "'status' in _result_instance_snapshot"
+ - "'storage_type' in _result_instance_snapshot"
+ - _result_instance_snapshot.storage_type == "gp2"
+ - "'tags' in _result_instance_snapshot"
+ - _result_instance_snapshot.tags | length == 2
+ - _result_instance_snapshot.tags["tag_three"] == "{{ snapshot_id }}-b Three"
+ - _result_instance_snapshot.tags["Tag Two"] == "two {{ snapshot_id }}-b"
+ - "'vpc_id' in _result_instance_snapshot"
+
+ - name: Take another snapshot of the existing RDS instance and update tags without purge
+ rds_instance_snapshot:
+ state: present
+ db_instance_identifier: "{{ instance_id }}"
+ db_snapshot_identifier: "{{ snapshot_id }}-b"
+ purge_tags: no
+ tags:
+ tag_one: '{{ snapshot_id }}-b One'
+ register: _result_instance_snapshot
+
+ - assert:
+ that:
+ - _result_instance_snapshot.changed
+ - "'availability_zone' in _result_instance_snapshot"
+ - "'instance_create_time' in _result_instance_snapshot"
+ - "'db_instance_identifier' in _result_instance_snapshot"
+ - _result_instance_snapshot.db_instance_identifier == "{{ instance_id }}"
+ - "'db_snapshot_identifier' in _result_instance_snapshot"
+ - _result_instance_snapshot.db_snapshot_identifier == "{{ snapshot_id }}-b"
+ - "'db_snapshot_arn' in _result_instance_snapshot"
+ - "'dbi_resource_id' in _result_instance_snapshot"
+ - "'encrypted' in _result_instance_snapshot"
+ - "'engine' in _result_instance_snapshot"
+ - _result_instance_snapshot.engine == "{{ engine }}"
+ - "'engine_version' in _result_instance_snapshot"
+ - _result_instance_snapshot.engine_version == "{{ mariadb_engine_version }}"
+ - "'iam_database_authentication_enabled' in _result_instance_snapshot"
+ - "'license_model' in _result_instance_snapshot"
+ - "'master_username' in _result_instance_snapshot"
+ - _result_instance_snapshot.master_username == "{{ username }}"
+ - "'snapshot_create_time' in _result_instance_snapshot"
+ - "'snapshot_type' in _result_instance_snapshot"
+ - "'status' in _result_instance_snapshot"
+ - _result_instance_snapshot.status == "available"
+ - "'snapshot_type' in _result_instance_snapshot"
+ - _result_instance_snapshot.snapshot_type == "manual"
+ - "'status' in _result_instance_snapshot"
+ - "'storage_type' in _result_instance_snapshot"
+ - _result_instance_snapshot.storage_type == "gp2"
+ - "'tags' in _result_instance_snapshot"
+ - _result_instance_snapshot.tags | length == 3
+ - _result_instance_snapshot.tags["tag_one"] == "{{ snapshot_id }}-b One"
+ - _result_instance_snapshot.tags["Tag Two"] == "two {{ snapshot_id }}-b"
+ - _result_instance_snapshot.tags["tag_three"] == "{{ snapshot_id }}-b Three"
+ - "'vpc_id' in _result_instance_snapshot"
+
+ - name: Take another snapshot of the existing RDS instance and do not specify any tag to ensure previous tags are not removed
+ rds_instance_snapshot:
+ state: present
+ db_instance_identifier: "{{ instance_id }}"
+ db_snapshot_identifier: "{{ snapshot_id }}-b"
+ register: _result_instance_snapshot
+
+ - assert:
+ that:
+ - not _result_instance_snapshot.changed
+
+ # ------------------------------------------------------------------------------------------
+ # Test copying a snapshot
+ ### Note - copying a snapshot from a different region is supported, but not in CI runs,
+ ### because the aws-terminator only terminates resources in one region.
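+ ### For reference only, a cross-region copy could look roughly like the commented-out sketch
+ ### below. It is not executed by this test; the source_region parameter name, the region value
+ ### and the target snapshot id are illustrative assumptions, not verified module options.
+ # - name: Copy a snapshot from another region (illustrative sketch only)
+ #   rds_instance_snapshot:
+ #     id: "{{ snapshot_id }}-xregion-copy"
+ #     source_id: "<ARN of the source snapshot in the source region>"
+ #     source_region: us-east-2
+ #     copy_tags: yes
+ #     wait: true
+ #   register: _result_cross_region_copy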
+
+ - set_fact:
+ _snapshot_arn: "{{ _result_instance_snapshot.db_snapshot_arn }}"
+
+ - name: Copy a snapshot (check mode)
+ rds_instance_snapshot:
+ id: "{{ snapshot_id }}-copy"
+ source_id: "{{ snapshot_id }}-b"
+ copy_tags: yes
+ wait: true
+ register: _result_instance_snapshot
+ check_mode: yes
+
+ - assert:
+ that:
+ - _result_instance_snapshot.changed
+
+ - name: Copy a snapshot
+ rds_instance_snapshot:
+ id: "{{ snapshot_id }}-copy"
+ source_id: "{{ snapshot_id }}-b"
+ copy_tags: yes
+ wait: true
+ register: _result_instance_snapshot
+
+ - assert:
+ that:
+ - _result_instance_snapshot.changed
+ - _result_instance_snapshot.db_instance_identifier == "{{ instance_id }}"
+ - _result_instance_snapshot.source_db_snapshot_identifier == "{{ _snapshot_arn }}"
+ - _result_instance_snapshot.db_snapshot_identifier == "{{ snapshot_id }}-copy"
+ - "'tags' in _result_instance_snapshot"
+ - _result_instance_snapshot.tags | length == 3
+ - _result_instance_snapshot.tags["tag_one"] == "{{ snapshot_id }}-b One"
+ - _result_instance_snapshot.tags["Tag Two"] == "two {{ snapshot_id }}-b"
+ - _result_instance_snapshot.tags["tag_three"] == "{{ snapshot_id }}-b Three"
+
+ - name: Copy a snapshot (idempotence - check mode)
+ rds_instance_snapshot:
+ id: "{{ snapshot_id }}-copy"
+ source_id: "{{ snapshot_id }}-b"
+ copy_tags: yes
+ wait: true
+ register: _result_instance_snapshot
+ check_mode: yes
+
+ - assert:
+ that:
+ - not _result_instance_snapshot.changed
+
+ - name: Copy a snapshot (idempotence)
+ rds_instance_snapshot:
+ id: "{{ snapshot_id }}-copy"
+ source_id: "{{ snapshot_id }}-b"
+ copy_tags: yes
+ wait: true
+ register: _result_instance_snapshot
+
+ - assert:
+ that:
+ - not _result_instance_snapshot.changed
+ - _result_instance_snapshot.db_instance_identifier == "{{ instance_id }}"
+ - _result_instance_snapshot.source_db_snapshot_identifier == "{{ _snapshot_arn }}"
+ - _result_instance_snapshot.db_snapshot_identifier == "{{ snapshot_id }}-copy"
+ - "'tags' in _result_instance_snapshot"
+ - _result_instance_snapshot.tags | length == 3
+ - _result_instance_snapshot.tags["tag_one"] == "{{ snapshot_id }}-b One"
+ - _result_instance_snapshot.tags["Tag Two"] == "two {{ snapshot_id }}-b"
+ - _result_instance_snapshot.tags["tag_three"] == "{{ snapshot_id }}-b Three"
+
+ always:
+ - name: Delete the existing DB instance snapshots
+ rds_instance_snapshot:
+ state: absent
+ db_snapshot_identifier: "{{ item }}"
+ wait: false
+ register: _result_delete_snapshot
+ ignore_errors: true
+ loop:
+ - "{{ snapshot_id }}"
+ - "{{ snapshot_id }}-b"
+ - "{{ snapshot_id }}-copy"
+
+ - name: Delete the existing RDS instance without creating a final snapshot
+ rds_instance:
+ state: absent
+ instance_id: "{{ instance_id }}"
+ skip_final_snapshot: True
+ wait: false
+ register: _result_delete_instance
+ ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/vars/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/vars/main.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/vars/main.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot_mgmt/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot_mgmt/aliases
new file mode 100644
index 000000000..c4ee3f5a6
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot_mgmt/aliases
@@ -0,0 +1,4 @@
+cloud/aws
+rds_instance_info
+time=30m
+rds_instance
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot_mgmt/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot_mgmt/defaults/main.yml
new file mode 100644
index 000000000..d2ebe7f18
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot_mgmt/defaults/main.yml
@@ -0,0 +1,9 @@
+instance_id: ansible-test-{{ tiny_prefix }}
+modified_instance_id: '{{ instance_id }}-updated'
+username: test
+password: test12345678
+db_instance_class: db.t3.micro
+allocated_storage: 20
+
+# For snapshot tests
+snapshot_id: '{{ instance_id }}-ss'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot_mgmt/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot_mgmt/tasks/main.yml
new file mode 100644
index 000000000..f8ac5d5f9
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot_mgmt/tasks/main.yml
@@ -0,0 +1,224 @@
+- name: rds_instance / snapshot_mgmt integration tests
+ collections:
+ - community.aws
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+ - name: Ensure the resource doesn't exist
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: absent
+ skip_final_snapshot: true
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ ignore_errors: yes
+
+ - name: Create a mariadb instance
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ engine: mariadb
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ tags:
+ Name: '{{ instance_id }}'
+ Created_by: Ansible rds_instance tests
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - result.db_instance_identifier == '{{ instance_id }}'
+ - result.tags | length == 2
+ - result.tags.Name == '{{ instance_id }}'
+ - result.tags.Created_by == 'Ansible rds_instance tests'
+
+ - name: Create a snapshot
+ rds_instance_snapshot:
+ instance_id: '{{ instance_id }}'
+ snapshot_id: '{{ snapshot_id }}'
+ state: present
+ wait: yes
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - result.db_instance_identifier == "{{ instance_id }}"
+ - result.db_snapshot_identifier == "{{ snapshot_id }}"
+
+ # ------------------------------------------------------------------------------------------
+ # Test restoring db from snapshot
+
+ - name: Restore DB from snapshot - check_mode
+ rds_instance:
+ id: '{{ snapshot_id }}'
+ creation_source: snapshot
+ snapshot_identifier: '{{ snapshot_id }}'
+ engine: mariadb
+ state: present
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: Restore DB from snapshot
+ rds_instance:
+ id: '{{ snapshot_id }}'
+ creation_source: snapshot
+ snapshot_identifier: '{{ snapshot_id }}'
+ engine: mariadb
+ state: present
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - result.db_instance_identifier == "{{ snapshot_id }}"
+ - result.tags | length == 2
+ - result.tags.Name == "{{ instance_id }}"
+ - result.tags.Created_by == 'Ansible rds_instance tests'
+ - result.db_instance_status == 'available'
+
+ - name: Restore DB from snapshot (idempotence) - check_mode
+ rds_instance:
+ id: '{{ snapshot_id }}'
+ creation_source: snapshot
+ snapshot_identifier: '{{ snapshot_id }}'
+ engine: mariadb
+ state: present
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - not result.changed
+
+ - name: Restore DB from snapshot (idempotence)
+ rds_instance:
+ id: '{{ snapshot_id }}'
+ creation_source: snapshot
+ snapshot_identifier: '{{ snapshot_id }}'
+ engine: mariadb
+ state: present
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ - result.db_instance_identifier == "{{ snapshot_id }}"
+ - result.tags | length == 2
+ - result.tags.Name == "{{ instance_id }}"
+ - result.tags.Created_by == 'Ansible rds_instance tests'
+ - result.db_instance_status == 'available'
+
+ # ------------------------------------------------------------------------------------------
+ # Test final snapshot on deletion
+
+ - name: Ensure instance exists prior to deleting
+ rds_instance_info:
+ db_instance_identifier: '{{ instance_id }}'
+ register: db_info
+
+ - assert:
+ that:
+ - db_info.instances | length == 1
+
+ - name: Delete the instance keeping snapshot - check_mode
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: absent
+ final_snapshot_identifier: '{{ instance_id }}'
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: Delete the instance keeping snapshot
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: absent
+ final_snapshot_identifier: '{{ instance_id }}'
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - result.final_snapshot.db_instance_identifier == '{{ instance_id }}'
+
+ - name: Check that snapshot exists
+ rds_snapshot_info:
+ db_snapshot_identifier: '{{ instance_id }}'
+ register: result
+
+ - assert:
+ that:
+ - result.snapshots | length == 1
+ - result.snapshots.0.engine == 'mariadb'
+
+ - name: Ensure instance was deleted
+ rds_instance_info:
+ db_instance_identifier: '{{ instance_id }}'
+ register: db_info
+
+ - assert:
+ that:
+ - db_info.instances | length == 0
+
+ - name: Delete the instance (idempotence) - check_mode
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: absent
+ skip_final_snapshot: true
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - not result.changed
+
+ - name: Delete the instance (idempotence)
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: absent
+ skip_final_snapshot: true
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+
+ always:
+ - name: Remove snapshots
+ rds_instance_snapshot:
+ db_snapshot_identifier: '{{ item }}'
+ state: absent
+ wait: false
+ ignore_errors: yes
+ with_items:
+ - '{{ instance_id }}'
+ - '{{ snapshot_id }}'
+
+ - name: Remove DB instances
+ rds_instance:
+ id: '{{ item }}'
+ state: absent
+ skip_final_snapshot: true
+ wait: false
+ ignore_errors: yes
+ with_items:
+ - '{{ instance_id }}'
+ - '{{ snapshot_id }}'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_states/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_states/aliases
new file mode 100644
index 000000000..c4ee3f5a6
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_states/aliases
@@ -0,0 +1,4 @@
+cloud/aws
+rds_instance_info
+time=30m
+rds_instance
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_states/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_states/defaults/main.yml
new file mode 100644
index 000000000..5540ffb89
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_states/defaults/main.yml
@@ -0,0 +1,5 @@
+instance_id: ansible-test-{{ tiny_prefix }}
+username: test
+password: test12345678
+db_instance_class: db.t3.micro
+allocated_storage: 20
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_states/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_states/tasks/main.yml
new file mode 100644
index 000000000..fdcfcbf8a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_states/tasks/main.yml
@@ -0,0 +1,320 @@
+- name: rds_instance / states integration tests
+ collections:
+ - community.aws
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+ - name: Ensure the resource doesn't exist
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: absent
+ skip_final_snapshot: true
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ ignore_errors: yes
+
+ - name: Create a mariadb instance - check_mode
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ engine: mariadb
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ deletion_protection: true
+ tags:
+ Name: '{{ instance_id }}'
+ Created_by: Ansible rds_instance tests
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: Create a mariadb instance
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ engine: mariadb
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ deletion_protection: true
+ tags:
+ Name: '{{ instance_id }}'
+ Created_by: Ansible rds_instance tests
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - result.db_instance_identifier == '{{ instance_id }}'
+ - result.tags | length == 2
+ - result.tags.Name == '{{ instance_id }}'
+ - result.tags.Created_by == 'Ansible rds_instance tests'
+ - result.deletion_protection == True
+
+ - name: Create a mariadb instance (idempotence) - check_mode
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ engine: mariadb
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ deletion_protection: true
+ tags:
+ Name: '{{ instance_id }}'
+ Created_by: Ansible rds_instance tests
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - not result.changed
+
+ - name: Create a mariadb instance (idempotence)
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ engine: mariadb
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ deletion_protection: true
+ tags:
+ Name: '{{ instance_id }}'
+ Created_by: Ansible rds_instance tests
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ - result.db_instance_identifier == '{{ instance_id }}'
+ - result.tags | length == 2
+ - result.tags.Name == '{{ instance_id }}'
+ - result.tags.Created_by == 'Ansible rds_instance tests'
+ - result.deletion_protection == True
+
+ # ------------------------------------------------------------------------------------------
+ # Test stopping / rebooting instances
+
+ - name: Reboot a stopped instance - check_mode
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: rebooted
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: Reboot a stopped instance
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: rebooted
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Stop the instance - check_mode
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: stopped
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: Stop the instance
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: stopped
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: Stop the instance (idempotence) - check_mode
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: stopped
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - not result.changed
+
+ - name: Stop the instance (idempotence)
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: stopped
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+
+ # ------------------------------------------------------------------------------------------
+
+ - name: Start the instance - check_mode
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: started
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: Start the instance
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: started
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: Start the instance (idempotence) - check_mode
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: started
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - not result.changed
+
+ - name: Start the instance (idempotence)
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: started
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+
+ # ------------------------------------------------------------------------------------------
+ # Test deletion protection / deletion
+
+ - name: Ensure instance exists prior to deleting
+ rds_instance_info:
+ db_instance_identifier: '{{ instance_id }}'
+ register: db_info
+
+ - assert:
+ that:
+ - db_info.instances | length == 1
+
+ - name: Attempt to delete DB instance with deletion protection (should fail)
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: absent
+ skip_final_snapshot: true
+ register: result
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - result.failed
+
+ - name: Turn off deletion protection
+ rds_instance:
+ id: '{{ instance_id }}'
+ deletion_protection: false
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - result.deletion_protection == False
+
+ - name: Delete the instance - check_mode
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: absent
+ skip_final_snapshot: true
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: Delete the instance
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: absent
+ skip_final_snapshot: true
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: Ensure instance was deleted
+ rds_instance_info:
+ db_instance_identifier: '{{ instance_id }}'
+ register: db_info
+
+ - assert:
+ that:
+ - db_info.instances | length == 0
+
+ - name: Delete the instance (idempotence) - check_mode
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: absent
+ skip_final_snapshot: true
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - not result.changed
+
+ - name: Delete the instance (idempotence)
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: absent
+ skip_final_snapshot: true
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+
+ always:
+ - name: Remove DB instance
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: absent
+ skip_final_snapshot: true
+ wait: false
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/aliases
new file mode 100644
index 000000000..777fbe40a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+time=15m
+rds_instance
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/defaults/main.yml
new file mode 100644
index 000000000..d9fb41aa7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/defaults/main.yml
@@ -0,0 +1,7 @@
+instance_id: ansible-test-{{ tiny_prefix }}
+instance_id_gp3: ansible-test-{{ tiny_prefix }}-gp3
+modified_instance_id: '{{ instance_id }}-updated'
+username: test
+password: test12345678
+db_instance_class: db.t3.micro
+allocated_storage: 20
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/tasks/main.yml
new file mode 100644
index 000000000..14c1872d6
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/tasks/main.yml
@@ -0,0 +1,202 @@
+- name: rds_instance / tagging integration tests
+ collections:
+ - community.aws
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+ - name: Test tagging db with storage type gp3
+ import_tasks: test_tagging_gp3.yml
+
+ - name: Ensure the resource doesn't exist
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: absent
+ skip_final_snapshot: true
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ ignore_errors: yes
+
+ # Test invalid options
+ - name: Create a DB instance with an invalid engine
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ engine: thisisnotavalidengine
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ register: result
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result.failed
+ - '"value of engine must be one of" in result.msg'
+
+ # Test creation, adding tags and enabling encryption
+ - name: Create a mariadb instance
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ engine: mariadb
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ storage_encrypted: true
+ tags:
+ Name: '{{ instance_id }}'
+ Created_by: Ansible rds_instance tests
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - result.db_instance_identifier == '{{ instance_id }}'
+ - result.tags | length == 2
+ - result.tags.Name == '{{ instance_id }}'
+ - result.tags.Created_by == 'Ansible rds_instance tests'
+ - result.kms_key_id
+ - result.storage_encrypted == true
+
+ - name: Test idempotency omitting tags - check_mode
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ engine: mariadb
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - not result.changed
+
+ - name: Test idempotency omitting tags
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ engine: mariadb
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ - result.db_instance_identifier
+ - result.tags | length == 2
+
+ - name: Idempotence with minimal options
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ - result.db_instance_identifier
+ - result.tags | length == 2
+
+ - name: Test tags are not purged if purge_tags is False
+ rds_instance:
+ db_instance_identifier: '{{ instance_id }}'
+ state: present
+ engine: mariadb
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ tags: {}
+ purge_tags: false
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ - result.tags | length == 2
+
+ - name: Add a tag and remove a tag - check_mode
+ rds_instance:
+ db_instance_identifier: '{{ instance_id }}'
+ state: present
+ tags:
+ Name: '{{ instance_id }}-new'
+ Created_by: Ansible rds_instance tests
+ purge_tags: true
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: Add a tag and remove a tag
+ rds_instance:
+ db_instance_identifier: '{{ instance_id }}'
+ state: present
+ tags:
+ Name: '{{ instance_id }}-new'
+ Created_by: Ansible rds_instance tests
+ purge_tags: true
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - result.tags | length == 2
+ - result.tags.Name == '{{ instance_id }}-new'
+
+ - name: Add a tag and remove a tag (idempotence) - check_mode
+ rds_instance:
+ db_instance_identifier: '{{ instance_id }}'
+ state: present
+ tags:
+ Name: '{{ instance_id }}-new'
+ Created_by: Ansible rds_instance tests
+ purge_tags: true
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - not result.changed
+
+ - name: Add a tag and remove a tag (idempotence)
+ rds_instance:
+ db_instance_identifier: '{{ instance_id }}'
+ state: present
+ tags:
+ Name: '{{ instance_id }}-new'
+ Created_by: Ansible rds_instance tests
+ purge_tags: true
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ - result.tags | length == 2
+ - result.tags.Name == '{{ instance_id }}-new'
+
+ always:
+ - name: Remove DB instance
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: absent
+ skip_final_snapshot: true
+ wait: false
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/tasks/test_tagging_gp3.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/tasks/test_tagging_gp3.yml
new file mode 100644
index 000000000..5d4e6c883
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/tasks/test_tagging_gp3.yml
@@ -0,0 +1,190 @@
+- block:
+ - name: Ensure the resource doesn't exist
+ rds_instance:
+ id: '{{ instance_id_gp3 }}'
+ state: absent
+ skip_final_snapshot: true
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ ignore_errors: yes
+
+ # Test invalid options
+ - name: Create a DB instance with an invalid engine
+ rds_instance:
+ id: '{{ instance_id_gp3 }}'
+ state: present
+ engine: thisisnotavalidengine
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ register: result
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result.failed
+ - '"value of engine must be one of" in result.msg'
+
+ # Test creation, adding tags and enabling encryption
+ - name: Create a mariadb instance
+ rds_instance:
+ id: '{{ instance_id_gp3 }}'
+ state: present
+ engine: mariadb
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ storage_encrypted: true
+ tags:
+ Name: '{{ instance_id_gp3 }}'
+ Created_by: Ansible rds_instance tests
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - result.db_instance_identifier == '{{ instance_id_gp3 }}'
+ - result.tags | length == 2
+ - result.tags.Name == '{{ instance_id_gp3 }}'
+ - result.tags.Created_by == 'Ansible rds_instance tests'
+ - result.kms_key_id
+ - result.storage_encrypted == true
+
+ - name: Test idempotency omitting tags - check_mode
+ rds_instance:
+ id: '{{ instance_id_gp3 }}'
+ state: present
+ engine: mariadb
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - not result.changed
+
+ - name: Test idempotency omitting tags
+ rds_instance:
+ id: '{{ instance_id_gp3 }}'
+ state: present
+ engine: mariadb
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ - result.db_instance_identifier == '{{ instance_id_gp3 }}'
+ - result.tags | length == 2
+
+ - name: Idempotence with minimal options
+ rds_instance:
+ id: '{{ instance_id_gp3 }}'
+ state: present
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ - result.db_instance_identifier == '{{ instance_id_gp3 }}'
+ - result.tags | length == 2
+
+ - name: Test tags are not purged if purge_tags is False
+ rds_instance:
+ db_instance_identifier: '{{ instance_id_gp3 }}'
+ state: present
+ engine: mariadb
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ tags: {}
+ purge_tags: false
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ - result.tags | length == 2
+
+ - name: Add a tag and remove a tag - check_mode
+ rds_instance:
+ db_instance_identifier: '{{ instance_id_gp3 }}'
+ state: present
+ tags:
+ Name: '{{ instance_id_gp3 }}-new'
+ Created_by: Ansible rds_instance tests
+ purge_tags: true
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: Add a tag and remove a tag
+ rds_instance:
+ db_instance_identifier: '{{ instance_id_gp3 }}'
+ state: present
+ tags:
+ Name: '{{ instance_id_gp3 }}-new'
+ Created_by: Ansible rds_instance tests
+ purge_tags: true
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - result.tags | length == 2
+ - result.tags.Name == '{{ instance_id_gp3 }}-new'
+
+ - name: Add a tag and remove a tag (idempotence) - check_mode
+ rds_instance:
+ db_instance_identifier: '{{ instance_id_gp3 }}'
+ state: present
+ tags:
+ Name: '{{ instance_id_gp3 }}-new'
+ Created_by: Ansible rds_instance tests
+ purge_tags: true
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - not result.changed
+
+ - name: Add a tag and remove a tag (idempotence)
+ rds_instance:
+ db_instance_identifier: '{{ instance_id_gp3 }}'
+ state: present
+ tags:
+ Name: '{{ instance_id_gp3 }}-new'
+ Created_by: Ansible rds_instance tests
+ purge_tags: true
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ - result.tags | length == 2
+ - result.tags.Name == '{{ instance_id_gp3 }}-new'
+
+ always:
+ - name: Remove DB instance
+ rds_instance:
+ id: '{{ instance_id_gp3 }}'
+ state: absent
+ skip_final_snapshot: true
+ wait: false
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_upgrade/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_upgrade/aliases
new file mode 100644
index 000000000..777fbe40a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_upgrade/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+time=15m
+rds_instance
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_upgrade/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_upgrade/defaults/main.yml
new file mode 100644
index 000000000..ff9bc3b47
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_upgrade/defaults/main.yml
@@ -0,0 +1,10 @@
+instance_id: ansible-test-{{ tiny_prefix }}
+modified_instance_id: '{{ instance_id }}-updated'
+username: test
+password: test12345678
+db_instance_class: db.t3.micro
+allocated_storage: 20
+
+# For mariadb tests
+mariadb_engine_version: 10.5.17
+mariadb_engine_version_2: 10.6.10
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_upgrade/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_upgrade/tasks/main.yml
new file mode 100644
index 000000000..5a2112543
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_upgrade/tasks/main.yml
@@ -0,0 +1,128 @@
+- name: rds_instance / upgrade integration tests
+ collections:
+ - community.aws
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+ - name: Ensure the resource doesn't exist
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: absent
+ skip_final_snapshot: true
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ ignore_errors: yes
+
+ - name: Create a mariadb instance
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ engine: mariadb
+ engine_version: '{{ mariadb_engine_version }}'
+ allow_major_version_upgrade: true
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - result.db_instance_identifier == '{{ instance_id }}'
+
+ # Test upgrade of DB instance
+
+ - name: Upgrade a mariadb instance - check_mode
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ engine: mariadb
+ engine_version: '{{ mariadb_engine_version_2 }}'
+ allow_major_version_upgrade: true
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ apply_immediately: true
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: Upgrade a mariadb instance
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ engine: mariadb
+ engine_version: '{{ mariadb_engine_version_2 }}'
+ allow_major_version_upgrade: true
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ apply_immediately: true
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - '"engine_version" in result.pending_modified_values or result.engine_version
+ == mariadb_engine_version_2'
+
+ - name: Idempotence upgrading a mariadb instance - check_mode
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ engine: mariadb
+ engine_version: '{{ mariadb_engine_version_2 }}'
+ allow_major_version_upgrade: true
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ register: result
+ check_mode: yes
+
+ ### Specifying allow_major_version_upgrade with check_mode will always result in changed=True
+ ### since it's not returned in describe_db_instances api call
+ # - assert:
+ # that:
+ # - not result.changed
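+ ### One possible workaround, shown as an illustrative commented-out sketch (not part of this
+ ### test run): repeat the idempotence check without allow_major_version_upgrade, so every
+ ### remaining parameter is one that describe_db_instances does report; asserting
+ ### `not result.changed` should then be safe, though this has not been verified here.
+ # - name: Idempotence upgrading a mariadb instance without allow_major_version_upgrade - check_mode
+ #   rds_instance:
+ #     id: '{{ instance_id }}'
+ #     state: present
+ #     engine: mariadb
+ #     engine_version: '{{ mariadb_engine_version_2 }}'
+ #     username: '{{ username }}'
+ #     password: '{{ password }}'
+ #     db_instance_class: '{{ db_instance_class }}'
+ #     allocated_storage: '{{ allocated_storage }}'
+ #   register: result
+ #   check_mode: yes
+ #
+ # - assert:
+ #     that:
+ #       - not result.changed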
+
+ - name: Idempotence upgrading a mariadb instance
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: present
+ engine: mariadb
+ engine_version: '{{ mariadb_engine_version_2 }}'
+ allow_major_version_upgrade: true
+ username: '{{ username }}'
+ password: '{{ password }}'
+ db_instance_class: '{{ db_instance_class }}'
+ allocated_storage: '{{ allocated_storage }}'
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+ - '"engine_version" in result.pending_modified_values or result.engine_version
+ == mariadb_engine_version_2'
+
+ always:
+ - name: Delete the instance
+ rds_instance:
+ id: '{{ instance_id }}'
+ state: absent
+ skip_final_snapshot: true
+ wait: false
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/aliases
new file mode 100644
index 000000000..658684afb
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+
+rds_option_group_info \ No newline at end of file
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/defaults/main.yml
new file mode 100644
index 000000000..d99a37964
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/defaults/main.yml
@@ -0,0 +1,17 @@
+option_group_name: '{{ resource_prefix }}rds-option-group'
+engine_name: mysql
+major_engine_version: 5.6
+option_group_description: '{{ resource_prefix }}rds-option-group test'
+instance_id: '{{ resource_prefix }}'
+username: test
+password: test12345678
+db_instance_class: db.t2.small
+storage_encrypted_db_instance_class: db.t2.small
+allocated_storage: 20
+vpc_name: '{{ resource_prefix }}-vpc'
+vpc_seed: '{{ resource_prefix }}'
+vpc_cidr: 10.0.0.0/16
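+# The seeded random filter below yields a deterministic third octet per resource_prefix,
+# so repeated runs of the same job reuse the same subnet CIDR.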
+subnet_cidr: 10.0.{{ 256 | random(seed=vpc_seed) }}.0/24
+sg_1_name: '{{ resource_prefix }}-sg-1'
+sg_2_name: '{{ resource_prefix }}-sg-2'
+sg_3_name: '{{ resource_prefix }}-sg-3'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/tasks/main.yml
new file mode 100644
index 000000000..72981cd63
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/tasks/main.yml
@@ -0,0 +1,948 @@
+- name: rds_option_group tests
+ module_defaults:
+ group/aws:
+ region: '{{ aws_region }}'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+
+
+ block:
+ - name: create a VPC
+ ec2_vpc_net:
+ name: '{{ vpc_name }}'
+ state: present
+ cidr_block: '{{ vpc_cidr }}'
+ register: vpc_result
+
+ - name: Assert success
+ assert:
+ that:
+ - vpc_result is successful
+ - '"vpc" in vpc_result'
+ - '"cidr_block" in vpc_result.vpc'
+ - vpc_result.vpc.cidr_block == vpc_cidr
+ - '"id" in vpc_result.vpc'
+ - vpc_result.vpc.id.startswith("vpc-")
+ - '"state" in vpc_result.vpc'
+ - vpc_result.vpc.state == 'available'
+ - '"tags" in vpc_result.vpc'
+
+ - name: 'set fact: VPC ID'
+ set_fact:
+ vpc_id: '{{ vpc_result.vpc.id }}'
+
+ - name: create subnet
+ ec2_vpc_subnet:
+ cidr: '{{ subnet_cidr}}'
+ vpc_id: '{{ vpc_id }}'
+ state: present
+ register: subnet_result
+
+ - name: Assert success
+ assert:
+ that:
+ - subnet_result is successful
+ - '"subnet" in subnet_result'
+ - '"cidr_block" in subnet_result.subnet'
+ - subnet_result.subnet.cidr_block == subnet_cidr
+ - '"id" in subnet_result.subnet'
+ - subnet_result.subnet.id.startswith("subnet-")
+ - '"state" in subnet_result.subnet'
+ - subnet_result.subnet.state == 'available'
+ - '"tags" in subnet_result.subnet'
+ - subnet_result.subnet.vpc_id == vpc_id
+
+ - name: 'set fact: VPC subnet ID'
+ set_fact:
+ subnet_id: '{{ subnet_result.subnet.id }}'
+
+
+ - name: Create security groups
+ ec2_group:
+ name: '{{ item }}'
+ description: created by rds_instance integration tests
+ state: present
+ vpc_id: '{{ vpc_id }}'
+ register: sgs_result
+ loop:
+ - '{{ sg_1_name }}'
+ - '{{ sg_2_name }}'
+ - '{{ sg_3_name }}'
+
+ - name: Assert success
+ assert:
+ that:
+ - sgs_result is successful
+
+ - name: 'set fact: security groups ID'
+ set_fact:
+ sg_1: '{{ sgs_result.results.0.group_id }}'
+ sg_2: '{{ sgs_result.results.1.group_id }}'
+ sg_3: '{{ sgs_result.results.2.group_id }}'
+
+
+ - name: List all the option groups - CHECK_MODE
+ rds_option_group_info:
+ register: option_groups_result
+ check_mode: true
+
+ - name: Assert success - CHECK_MODE
+ assert:
+ that:
+ - option_groups_result is successful
+
+
+ - name: List all the option groups
+ rds_option_group_info:
+ register: option_groups_result
+
+ - name: Assert success
+ assert:
+ that:
+ - option_groups_result is successful
+
+ - name: Create an RDS Mysql option group - CHECK_MODE
+ rds_option_group:
+ state: present
+ option_group_name: '{{ option_group_name }}'
+ engine_name: '{{ engine_name }}'
+ major_engine_version: '{{ major_engine_version }}'
+ option_group_description: '{{ option_group_description }}'
+ apply_immediately: true
+ options:
+ - option_name: MEMCACHED
+ port: 11211
+ vpc_security_group_memberships:
+ - '{{ sg_1 }}'
+ option_settings:
+ - name: MAX_SIMULTANEOUS_CONNECTIONS
+ value: '20'
+ - name: CHUNK_SIZE_GROWTH_FACTOR
+ value: '1.25'
+ check_mode: true
+ register: new_rds_mysql_option_group
+
+ - name: Assert success - CHECK_MODE
+ assert:
+ that:
+ - new_rds_mysql_option_group.changed
+
+
+ - name: Create an RDS Mysql option group
+ rds_option_group:
+ state: present
+ option_group_name: '{{ option_group_name }}'
+ engine_name: '{{ engine_name }}'
+ major_engine_version: '{{ major_engine_version }}'
+ option_group_description: '{{ option_group_description }}'
+ apply_immediately: true
+ options:
+ - option_name: MEMCACHED
+ port: 11211
+ vpc_security_group_memberships:
+ - '{{ sg_1 }}'
+ option_settings:
+ - name: MAX_SIMULTANEOUS_CONNECTIONS
+ value: '20'
+ - name: CHUNK_SIZE_GROWTH_FACTOR
+ value: '1.25'
+ register: new_rds_mysql_option_group
+
+ - assert:
+ that:
+ - new_rds_mysql_option_group.changed
+ - "'engine_name' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.engine_name == "{{ engine_name }}"
+ - "'major_engine_version' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.major_engine_version == "{{ major_engine_version
+ }}"
+ - "'option_group_arn' in new_rds_mysql_option_group"
+ - "'option_group_description' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.option_group_description == "{{ option_group_description
+ }}"
+ - "'option_group_name' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.option_group_name == "{{ option_group_name }}"
+ - "'vpc_id' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.vpc_id == vpc_id
+ - "'options' in new_rds_mysql_option_group"
+ - (new_rds_mysql_option_group.options | length) > 0
+ - "'option_name' in option"
+ - option.option_name == "MEMCACHED"
+ - "'permanent' in option"
+ - "'persistent' in option"
+ - "'port' in option"
+ - option.port == 11211
+ - "'vpc_security_group_memberships' in option"
+ - (option.vpc_security_group_memberships | length) == 1
+ - option.vpc_security_group_memberships[0].vpc_security_group_id == "{{ sg_1
+ }}"
+ - "'option_settings' in option"
+ - (option_settings | length) > 0
+ - option_settings | selectattr('name','equalto','MAX_SIMULTANEOUS_CONNECTIONS')
+ | list | count > 0
+ - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR')
+ | list | count > 0
+ vars:
+ option: '{{ new_rds_mysql_option_group.options[0] }}'
+ option_settings: '{{ new_rds_mysql_option_group.options[0].option_settings }}'
+
+
+ - name: List specific option group
+ rds_option_group_info:
+ option_group_name: '{{ option_group_name }}'
+ register: option_groups_result
+
+ - name: Assert success
+ assert:
+ that:
+ - option_groups_result is successful
+ - (option_groups_result.result | length) == 1
+ - "'engine_name' in option_groups_list"
+ - option_groups_list.engine_name == "{{ engine_name }}"
+ - "'major_engine_version' in option_groups_list"
+ - option_groups_list.major_engine_version == "{{ major_engine_version }}"
+ - "'option_group_arn' in option_groups_list"
+ - "'option_group_description' in option_groups_list"
+ - option_groups_list.option_group_description == "{{ option_group_description
+ }}"
+ - "'option_group_name' in option_groups_list"
+ - option_groups_list.option_group_name == "{{ option_group_name }}"
+ - "'vpc_id' in option_groups_list"
+ - new_rds_mysql_option_group.vpc_id == vpc_id
+ - "'options' in option_groups_list"
+ - (option_groups_list.options | length) > 0
+ - "'option_name' in options"
+ - options.option_name == "MEMCACHED"
+ - "'permanent' in options"
+ - "'persistent' in options"
+ - "'port' in options"
+ - options.port == 11211
+ - "'vpc_security_group_memberships' in options"
+ - (options.vpc_security_group_memberships | length) == 1
+ - options.vpc_security_group_memberships[0].vpc_security_group_id == "{{ sg_1
+ }}"
+ - "'option_settings' in options"
+ - (options.option_settings | length) > 0
+ vars:
+ option_groups_list: '{{ option_groups_result.result[0] }}'
+ options: '{{ option_groups_result.result[0].options[0] }}'
+
+
+ - name: Create an RDS Mysql option group (idempotency) - CHECK_MODE
+ rds_option_group:
+ state: present
+ option_group_name: '{{ option_group_name }}'
+ engine_name: '{{ engine_name }}'
+ major_engine_version: '{{ major_engine_version }}'
+ option_group_description: '{{ option_group_description }}'
+ apply_immediately: true
+ options:
+ - option_name: MEMCACHED
+ port: 11211
+ vpc_security_group_memberships:
+ - '{{ sg_1 }}'
+ option_settings:
+ - name: MAX_SIMULTANEOUS_CONNECTIONS
+ value: '20'
+ - name: CHUNK_SIZE_GROWTH_FACTOR
+ value: '1.25'
+ check_mode: true
+ register: new_rds_mysql_option_group
+
+ - name: Assert success - CHECK_MODE
+ assert:
+ that:
+ - not new_rds_mysql_option_group.changed
+ - "'engine_name' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.engine_name == "{{ engine_name }}"
+ - "'major_engine_version' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.major_engine_version == "{{ major_engine_version
+ }}"
+ - "'option_group_arn' in new_rds_mysql_option_group"
+ - "'option_group_description' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.option_group_description == "{{ option_group_description
+ }}"
+ - "'option_group_name' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.option_group_name == "{{ option_group_name }}"
+ - "'vpc_id' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.vpc_id == vpc_id
+ - "'options' in new_rds_mysql_option_group"
+ - (new_rds_mysql_option_group.options | length) > 0
+ - "'option_name' in option"
+ - option.option_name == "MEMCACHED"
+ - "'permanent' in option"
+ - "'persistent' in option"
+ - "'port' in option"
+ - option.port == 11211
+ - "'vpc_security_group_memberships' in option"
+ - (option.vpc_security_group_memberships | length) == 1
+ - option.vpc_security_group_memberships[0].vpc_security_group_id == "{{ sg_1
+ }}"
+ - "'option_settings' in option"
+ - (option_settings | length) > 0
+ - option_settings | selectattr('name','equalto','MAX_SIMULTANEOUS_CONNECTIONS')
+ | list | count > 0
+ - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR')
+ | list | count > 0
+ vars:
+ option: '{{ new_rds_mysql_option_group.options[0] }}'
+ option_settings: '{{ new_rds_mysql_option_group.options[0].option_settings }}'
+
+ - name: Create an RDS Mysql option group (idempotency)
+ rds_option_group:
+ state: present
+ option_group_name: '{{ option_group_name }}'
+ engine_name: '{{ engine_name }}'
+ major_engine_version: '{{ major_engine_version }}'
+ option_group_description: '{{ option_group_description }}'
+ apply_immediately: true
+ options:
+ - option_name: MEMCACHED
+ port: 11211
+ vpc_security_group_memberships:
+ - '{{ sg_1 }}'
+ option_settings:
+ - name: MAX_SIMULTANEOUS_CONNECTIONS
+ value: '20'
+ - name: CHUNK_SIZE_GROWTH_FACTOR
+ value: '1.25'
+ register: new_rds_mysql_option_group
+
+ - assert:
+ that:
+ - not new_rds_mysql_option_group.changed
+ - "'engine_name' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.engine_name == "{{ engine_name }}"
+ - "'major_engine_version' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.major_engine_version == "{{ major_engine_version
+ }}"
+ - "'option_group_arn' in new_rds_mysql_option_group"
+ - "'option_group_description' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.option_group_description == "{{ option_group_description
+ }}"
+ - "'option_group_name' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.option_group_name == "{{ option_group_name }}"
+ - "'vpc_id' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.vpc_id == vpc_id
+ - "'options' in new_rds_mysql_option_group"
+ - (new_rds_mysql_option_group.options | length) > 0
+ - "'option_name' in option"
+ - option.option_name == "MEMCACHED"
+ - "'permanent' in option"
+ - "'persistent' in option"
+ - "'port' in option"
+ - option.port == 11211
+ - "'vpc_security_group_memberships' in option"
+ - (option.vpc_security_group_memberships | length) == 1
+ - option.vpc_security_group_memberships[0].vpc_security_group_id == "{{ sg_1
+ }}"
+ - "'option_settings' in option"
+ - (option_settings | length) > 0
+ - option_settings | selectattr('name','equalto','MAX_SIMULTANEOUS_CONNECTIONS')
+ | list | count > 0
+ - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR')
+ | list | count > 0
+ vars:
+ option: '{{ new_rds_mysql_option_group.options[0] }}'
+ option_settings: '{{ new_rds_mysql_option_group.options[0].option_settings }}'
+
+
+ - name: List option groups matching a specific engine_name and major_engine_version
+ rds_option_group_info:
+ engine_name: '{{ engine_name }}'
+ major_engine_version: '{{ major_engine_version }}'
+ register: option_groups_result
+
+ - name: Assert success
+ assert:
+ that:
+ - option_groups_result is successful
+ - (option_groups_result.result | length) > 0
+
+
+ - name: Create an RDS Mysql option group - apply different changes (expected changed=true)
+ rds_option_group:
+ state: present
+ option_group_name: '{{ option_group_name }}'
+ engine_name: '{{ engine_name }}'
+ major_engine_version: '{{ major_engine_version }}'
+ option_group_description: '{{ option_group_description }}'
+ apply_immediately: true
+ options:
+ - option_name: MEMCACHED
+ port: 11211
+ vpc_security_group_memberships:
+ - '{{ sg_1 }}'
+ - '{{ sg_2 }}'
+ - '{{ sg_3 }}'
+ option_settings:
+ - name: MAX_SIMULTANEOUS_CONNECTIONS
+ value: '30'
+ register: new_rds_mysql_option_group
+
+ - assert:
+ that:
+ - new_rds_mysql_option_group.changed
+ - "'engine_name' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.engine_name == "{{ engine_name }}"
+ - "'major_engine_version' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.major_engine_version == "{{ major_engine_version
+ }}"
+ - "'option_group_arn' in new_rds_mysql_option_group"
+ - "'option_group_description' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.option_group_description == "{{ option_group_description
+ }}"
+ - "'option_group_name' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.option_group_name == "{{ option_group_name }}"
+ - "'vpc_id' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.vpc_id == vpc_id
+ - "'options' in new_rds_mysql_option_group"
+ - (new_rds_mysql_option_group.options | length) > 0
+ - "'option_name' in option"
+ - option.option_name == "MEMCACHED"
+ - "'permanent' in option"
+ - "'persistent' in option"
+ - "'port' in option"
+ - option.port == 11211
+ - "'vpc_security_group_memberships' in option"
+ - (option.vpc_security_group_memberships | length) == 3
+ - "'option_settings' in option"
+ - (option_settings | length) > 0
+ - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR')
+ | list | count > 0
+ vars:
+ option: '{{ new_rds_mysql_option_group.options[0] }}'
+ option_settings: '{{ new_rds_mysql_option_group.options[0].option_settings }}'
+
+ - name: Get info about an option group - CHECK_MODE
+ rds_option_group_info:
+ option_group_name: '{{ option_group_name }}'
+ check_mode: true
+ register: option_groups_result
+
+ - name: Assert success - CHECK_MODE
+ assert:
+ that:
+ - option_groups_result is successful
+ - (option_groups_result.result | length) == 1
+ - "'engine_name' in option_groups_list"
+ - option_groups_list.engine_name == "{{ engine_name }}"
+ - "'major_engine_version' in option_groups_list"
+ - option_groups_list.major_engine_version == "{{ major_engine_version }}"
+ - "'option_group_arn' in option_groups_list"
+ - "'option_group_description' in option_groups_list"
+ - option_groups_list.option_group_description == "{{ option_group_description
+ }}"
+ - "'option_group_name' in option_groups_list"
+ - option_groups_list.option_group_name == "{{ option_group_name }}"
+ - "'vpc_id' in option_groups_list"
+ - option_groups_list.vpc_id == vpc_id
+ - "'options' in option_groups_list"
+ - (option_groups_list.options | length) > 0
+ - "'option_name' in options"
+ - options.option_name == "MEMCACHED"
+ - "'permanent' in options"
+ - "'persistent' in options"
+ - "'port' in options"
+ - options.port == 11211
+ - "'vpc_security_group_memberships' in options"
+ - (options.vpc_security_group_memberships | length) == 3
+ - "'option_settings' in options"
+ - (options.option_settings | length) > 0
+ vars:
+ option_groups_list: '{{ option_groups_result.result[0] }}'
+ options: '{{ option_groups_result.result[0].options[0] }}'
+
+
+ - name: RDS Mysql option group - apply tags - CHECK_MODE
+ rds_option_group:
+ state: present
+ option_group_name: '{{ option_group_name }}'
+ engine_name: '{{ engine_name }}'
+ major_engine_version: '{{ major_engine_version }}'
+ option_group_description: '{{ option_group_description }}'
+ apply_immediately: true
+ options:
+ - option_name: MEMCACHED
+ port: 11211
+ vpc_security_group_memberships:
+ - '{{ sg_1 }}'
+ - '{{ sg_2 }}'
+ - '{{ sg_3 }}'
+ option_settings:
+ - name: CHUNK_SIZE_GROWTH_FACTOR
+ value: '1.2'
+ tags:
+ tag_one: '{{ option_group_name }} One'
+ Tag Two: two {{ option_group_name }}
+ check_mode: true
+ register: new_rds_mysql_option_group
+
+ - name: Assert success - CHECK_MODE
+ assert:
+ that:
+ - new_rds_mysql_option_group.changed
+ - "'engine_name' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.engine_name == "{{ engine_name }}"
+ - "'major_engine_version' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.major_engine_version == "{{ major_engine_version
+ }}"
+ - "'option_group_arn' in new_rds_mysql_option_group"
+ - "'option_group_description' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.option_group_description == "{{ option_group_description
+ }}"
+ - "'option_group_name' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.option_group_name == "{{ option_group_name }}"
+ - "'vpc_id' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.vpc_id == vpc_id
+ - "'tags' in new_rds_mysql_option_group"
+ - "'options' in new_rds_mysql_option_group"
+ - (new_rds_mysql_option_group.options | length) > 0
+ - "'option_name' in option"
+ - option.option_name == "MEMCACHED"
+ - "'permanent' in option"
+ - "'persistent' in option"
+ - "'port' in option"
+ - option.port == 11211
+ - "'vpc_security_group_memberships' in option"
+ - (option.vpc_security_group_memberships | length) == 3
+ - "'option_settings' in option"
+ - (option_settings | length) > 0
+ - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR')
+ | list | count > 0
+ vars:
+ option: '{{ new_rds_mysql_option_group.options[0] }}'
+ option_settings: '{{ new_rds_mysql_option_group.options[0].option_settings }}'
+ - name: RDS Mysql option group - apply tags
+ rds_option_group:
+ state: present
+ option_group_name: '{{ option_group_name }}'
+ engine_name: '{{ engine_name }}'
+ major_engine_version: '{{ major_engine_version }}'
+ option_group_description: '{{ option_group_description }}'
+ apply_immediately: true
+ options:
+ - option_name: MEMCACHED
+ port: 11211
+ vpc_security_group_memberships:
+ - '{{ sg_1 }}'
+ - '{{ sg_2 }}'
+ - '{{ sg_3 }}'
+ option_settings:
+ - name: CHUNK_SIZE_GROWTH_FACTOR
+ value: '1.2'
+ tags:
+ tag_one: '{{ option_group_name }} One'
+ Tag Two: two {{ option_group_name }}
+ register: new_rds_mysql_option_group
+
+ - assert:
+ that:
+ - new_rds_mysql_option_group.changed
+ - "'engine_name' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.engine_name == "{{ engine_name }}"
+ - "'major_engine_version' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.major_engine_version == "{{ major_engine_version
+ }}"
+ - "'option_group_arn' in new_rds_mysql_option_group"
+ - "'option_group_description' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.option_group_description == "{{ option_group_description
+ }}"
+ - "'option_group_name' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.option_group_name == "{{ option_group_name }}"
+ - "'vpc_id' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.vpc_id == vpc_id
+ - "'tags' in new_rds_mysql_option_group"
+ - (new_rds_mysql_option_group.tags | length) == 2
+ - new_rds_mysql_option_group.tags["tag_one"] == "{{ option_group_name }} One"
+ - new_rds_mysql_option_group.tags["Tag Two"] == "two {{ option_group_name }}"
+ - "'options' in new_rds_mysql_option_group"
+ - (new_rds_mysql_option_group.options | length) > 0
+ - "'option_name' in option"
+ - option.option_name == "MEMCACHED"
+ - "'permanent' in option"
+ - "'persistent' in option"
+ - "'port' in option"
+ - option.port == 11211
+ - "'vpc_security_group_memberships' in option"
+ - (option.vpc_security_group_memberships | length) == 3
+ - "'option_settings' in option"
+ - (option_settings | length) > 0
+ - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR')
+ | list | count > 0
+ vars:
+ option: '{{ new_rds_mysql_option_group.options[0] }}'
+ option_settings: '{{ new_rds_mysql_option_group.options[0].option_settings }}'
+
+
+ - name: RDS Mysql option group - apply tags (idempotency)
+ rds_option_group:
+ state: present
+ option_group_name: '{{ option_group_name }}'
+ engine_name: '{{ engine_name }}'
+ major_engine_version: '{{ major_engine_version }}'
+ option_group_description: '{{ option_group_description }}'
+ apply_immediately: true
+ options:
+ - option_name: MEMCACHED
+ port: 11211
+ vpc_security_group_memberships:
+ - '{{ sg_1 }}'
+ - '{{ sg_2 }}'
+ - '{{ sg_3 }}'
+ option_settings:
+ - name: CHUNK_SIZE_GROWTH_FACTOR
+ value: '1.2'
+ tags:
+ tag_one: '{{ option_group_name }} One'
+ Tag Two: two {{ option_group_name }}
+ register: new_rds_mysql_option_group
+
+ - assert:
+ that:
+ - not new_rds_mysql_option_group.changed
+ - "'engine_name' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.engine_name == "{{ engine_name }}"
+ - "'major_engine_version' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.major_engine_version == "{{ major_engine_version
+ }}"
+ - "'option_group_arn' in new_rds_mysql_option_group"
+ - "'option_group_description' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.option_group_description == "{{ option_group_description
+ }}"
+ - "'option_group_name' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.option_group_name == "{{ option_group_name }}"
+ - "'vpc_id' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.vpc_id == vpc_id
+ - "'tags' in new_rds_mysql_option_group"
+ - (new_rds_mysql_option_group.tags | length) == 2
+ - new_rds_mysql_option_group.tags["tag_one"] == "{{ option_group_name }} One"
+ - new_rds_mysql_option_group.tags["Tag Two"] == "two {{ option_group_name }}"
+ - "'options' in new_rds_mysql_option_group"
+ - (new_rds_mysql_option_group.options | length) > 0
+ - "'option_name' in option"
+ - option.option_name == "MEMCACHED"
+ - "'permanent' in option"
+ - "'persistent' in option"
+ - "'port' in option"
+ - option.port == 11211
+ - "'vpc_security_group_memberships' in option"
+ - (option.vpc_security_group_memberships | length) == 3
+ - "'option_settings' in option"
+ - (option_settings | length) > 0
+ - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR')
+ | list | count > 0
+ vars:
+ option: '{{ new_rds_mysql_option_group.options[0] }}'
+ option_settings: '{{ new_rds_mysql_option_group.options[0].option_settings }}'
+
+
+ - name: RDS Mysql option group - update tags
+ rds_option_group:
+ state: present
+ option_group_name: '{{ option_group_name }}'
+ engine_name: '{{ engine_name }}'
+ major_engine_version: '{{ major_engine_version }}'
+ option_group_description: '{{ option_group_description }}'
+ apply_immediately: true
+ options:
+ - option_name: MEMCACHED
+ port: 11211
+ vpc_security_group_memberships:
+ - '{{ sg_1 }}'
+ - '{{ sg_2 }}'
+ - '{{ sg_3 }}'
+ option_settings:
+ - name: CHUNK_SIZE_GROWTH_FACTOR
+ value: '1.2'
+ tags:
+ tag_three: '{{ option_group_name }} Three'
+ Tag Two: two {{ option_group_name }}
+ register: new_rds_mysql_option_group
+
+ - assert:
+ that:
+ - new_rds_mysql_option_group.changed
+ - "'engine_name' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.engine_name == "{{ engine_name }}"
+ - "'major_engine_version' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.major_engine_version == "{{ major_engine_version
+ }}"
+ - "'option_group_arn' in new_rds_mysql_option_group"
+ - "'option_group_description' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.option_group_description == "{{ option_group_description
+ }}"
+ - "'option_group_name' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.option_group_name == "{{ option_group_name }}"
+ - "'vpc_id' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.vpc_id == vpc_id
+ - "'tags' in new_rds_mysql_option_group"
+ - (new_rds_mysql_option_group.tags | length) == 2
+ - new_rds_mysql_option_group.tags["tag_three"] == "{{ option_group_name }} Three"
+ - new_rds_mysql_option_group.tags["Tag Two"] == "two {{ option_group_name }}"
+ - "'options' in new_rds_mysql_option_group"
+ - (new_rds_mysql_option_group.options | length) > 0
+ - "'option_name' in option"
+ - option.option_name == "MEMCACHED"
+ - "'permanent' in option"
+ - "'persistent' in option"
+ - "'port' in option"
+ - option.port == 11211
+ - "'vpc_security_group_memberships' in option"
+ - (option.vpc_security_group_memberships | length) == 3
+ - "'option_settings' in option"
+ - (option_settings | length) > 0
+ - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR')
+ | list | count > 0
+ vars:
+ option: '{{ new_rds_mysql_option_group.options[0] }}'
+ option_settings: '{{ new_rds_mysql_option_group.options[0].option_settings }}'
+
+
+ - name: RDS Mysql option group - update tags without purge (expected changed=true)
+ rds_option_group:
+ state: present
+ option_group_name: '{{ option_group_name }}'
+ engine_name: '{{ engine_name }}'
+ major_engine_version: '{{ major_engine_version }}'
+ option_group_description: '{{ option_group_description }}'
+ apply_immediately: true
+ options:
+ - option_name: MEMCACHED
+ port: 11211
+ vpc_security_group_memberships:
+ - '{{ sg_1 }}'
+ - '{{ sg_2 }}'
+ - '{{ sg_3 }}'
+ option_settings:
+ - name: CHUNK_SIZE_GROWTH_FACTOR
+ value: '1.2'
+ purge_tags: no
+ tags:
+ tag_one: '{{ option_group_name }} One'
+ register: new_rds_mysql_option_group
+
+ - assert:
+ that:
+ - new_rds_mysql_option_group.changed
+ - "'engine_name' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.engine_name == "{{ engine_name }}"
+ - "'major_engine_version' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.major_engine_version == "{{ major_engine_version
+ }}"
+ - "'option_group_arn' in new_rds_mysql_option_group"
+ - "'option_group_description' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.option_group_description == "{{ option_group_description
+ }}"
+ - "'option_group_name' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.option_group_name == "{{ option_group_name }}"
+ - "'vpc_id' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.vpc_id == vpc_id
+ - "'tags' in new_rds_mysql_option_group"
+ - (new_rds_mysql_option_group.tags | length) == 3
+ - new_rds_mysql_option_group.tags["Tag Two"] == "two {{ option_group_name }}"
+ - new_rds_mysql_option_group.tags["tag_one"] == "{{ option_group_name }} One"
+ - new_rds_mysql_option_group.tags["tag_three"] == "{{ option_group_name }} Three"
+ - "'options' in new_rds_mysql_option_group"
+ - (new_rds_mysql_option_group.options | length) > 0
+ - "'option_name' in option"
+ - option.option_name == "MEMCACHED"
+ - "'permanent' in option"
+ - "'persistent' in option"
+ - "'port' in option"
+ - option.port == 11211
+ - "'vpc_security_group_memberships' in option"
+ - (option.vpc_security_group_memberships | length) == 3
+ - "'option_settings' in option"
+ - (option_settings | length) > 0
+ - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR')
+ | list | count > 0
+ vars:
+ option: '{{ new_rds_mysql_option_group.options[0] }}'
+ option_settings: '{{ new_rds_mysql_option_group.options[0].option_settings }}'
+
+
+ - name: RDS Mysql option group - update with CamelCase tags (expected changed=true)
+ rds_option_group:
+ state: present
+ option_group_name: '{{ option_group_name }}'
+ engine_name: '{{ engine_name }}'
+ major_engine_version: '{{ major_engine_version }}'
+ option_group_description: '{{ option_group_description }}'
+ apply_immediately: true
+ options:
+ - option_name: MEMCACHED
+ port: 11211
+ vpc_security_group_memberships:
+ - '{{ sg_1 }}'
+ - '{{ sg_2 }}'
+ - '{{ sg_3 }}'
+ option_settings:
+ - name: CHUNK_SIZE_GROWTH_FACTOR
+ value: '1.2'
+ tags:
+ lowercase spaced: hello cruel world
+ Title Case: Hello Cruel World
+ CamelCase: SimpleCamelCase
+ snake_case: simple_snake_case
+ register: new_rds_mysql_option_group
+
+ - assert:
+ that:
+ - new_rds_mysql_option_group.changed
+ - "'engine_name' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.engine_name == "{{ engine_name }}"
+ - "'major_engine_version' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.major_engine_version == "{{ major_engine_version
+ }}"
+ - "'option_group_arn' in new_rds_mysql_option_group"
+ - "'option_group_description' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.option_group_description == "{{ option_group_description
+ }}"
+ - "'option_group_name' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.option_group_name == "{{ option_group_name }}"
+ - "'vpc_id' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.vpc_id == vpc_id
+ - "'tags' in new_rds_mysql_option_group"
+ - (new_rds_mysql_option_group.tags | length) == 4
+ - new_rds_mysql_option_group.tags["lowercase spaced"] == 'hello cruel world'
+ - new_rds_mysql_option_group.tags["Title Case"] == 'Hello Cruel World'
+ - new_rds_mysql_option_group.tags["CamelCase"] == 'SimpleCamelCase'
+ - new_rds_mysql_option_group.tags["snake_case"] == 'simple_snake_case'
+ - "'options' in new_rds_mysql_option_group"
+ - (new_rds_mysql_option_group.options | length) > 0
+ - "'option_name' in option"
+ - option.option_name == "MEMCACHED"
+ - "'permanent' in option"
+ - "'persistent' in option"
+ - "'port' in option"
+ - option.port == 11211
+ - "'vpc_security_group_memberships' in option"
+ - (option.vpc_security_group_memberships | length) == 3
+ - "'option_settings' in option"
+ - (option_settings | length) > 0
+ - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR')
+ | list | count > 0
+ vars:
+ option: '{{ new_rds_mysql_option_group.options[0] }}'
+ option_settings: '{{ new_rds_mysql_option_group.options[0].option_settings }}'
+
+ - name: RDS Mysql option group - do not specify any tag to ensure previous tags
+ are not removed
+ rds_option_group:
+ state: present
+ option_group_name: '{{ option_group_name }}'
+ engine_name: '{{ engine_name }}'
+ major_engine_version: '{{ major_engine_version }}'
+ option_group_description: '{{ option_group_description }}'
+ apply_immediately: true
+ options:
+ - option_name: MEMCACHED
+ port: 11211
+ vpc_security_group_memberships:
+ - '{{ sg_1 }}'
+ - '{{ sg_2 }}'
+ - '{{ sg_3 }}'
+ option_settings:
+ - name: CHUNK_SIZE_GROWTH_FACTOR
+ value: '1.2'
+ register: new_rds_mysql_option_group
+
+ - assert:
+ that:
+ - not new_rds_mysql_option_group.changed
+ - "'engine_name' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.engine_name == "{{ engine_name }}"
+ - "'major_engine_version' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.major_engine_version == "{{ major_engine_version
+ }}"
+ - "'option_group_arn' in new_rds_mysql_option_group"
+ - "'option_group_description' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.option_group_description == "{{ option_group_description
+ }}"
+ - "'option_group_name' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.option_group_name == "{{ option_group_name }}"
+ - "'vpc_id' in new_rds_mysql_option_group"
+ - new_rds_mysql_option_group.vpc_id == vpc_id
+ - "'tags' in new_rds_mysql_option_group"
+ - (new_rds_mysql_option_group.tags | length) == 4
+ - new_rds_mysql_option_group.tags["lowercase spaced"] == 'hello cruel world'
+ - new_rds_mysql_option_group.tags["Title Case"] == 'Hello Cruel World'
+ - new_rds_mysql_option_group.tags["CamelCase"] == 'SimpleCamelCase'
+ - new_rds_mysql_option_group.tags["snake_case"] == 'simple_snake_case'
+ - "'options' in new_rds_mysql_option_group"
+ - (new_rds_mysql_option_group.options | length) > 0
+ - "'option_name' in option"
+ - option.option_name == "MEMCACHED"
+ - "'permanent' in option"
+ - "'persistent' in option"
+ - "'port' in option"
+ - option.port == 11211
+ - "'vpc_security_group_memberships' in option"
+ - (option.vpc_security_group_memberships | length) == 3
+ - "'option_settings' in option"
+ - (option_settings | length) > 0
+ - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR')
+ | list | count > 0
+ vars:
+ option: '{{ new_rds_mysql_option_group.options[0] }}'
+ option_settings: '{{ new_rds_mysql_option_group.options[0].option_settings }}'
+
+ - name: Delete an RDS Mysql option group - CHECK_MODE
+ rds_option_group:
+ state: absent
+ option_group_name: '{{ option_group_name }}'
+ check_mode: true
+ register: deleted_rds_mysql_option_group
+
+ - name: Assert success - CHECK_MODE
+ assert:
+ that:
+ - deleted_rds_mysql_option_group.changed
+
+
+ - name: Delete an RDS Mysql option group
+ rds_option_group:
+ state: absent
+ option_group_name: '{{ option_group_name }}'
+ register: deleted_rds_mysql_option_group
+
+ - name: Assert success
+ assert:
+ that:
+ - deleted_rds_mysql_option_group.changed
+
+
+ always:
+
+ - name: Delete an RDS Mysql option group
+ rds_option_group:
+ state: absent
+ option_group_name: '{{ option_group_name }}'
+ register: deleted_rds_mysql_option_group
+ ignore_errors: yes
+
+ - name: Remove security groups
+ ec2_group:
+ name: '{{ item }}'
+ description: created by rds_instance integration tests
+ state: absent
+ register: sgs_result
+ loop:
+ - '{{ sg_1_name }}'
+ - '{{ sg_2_name }}'
+ - '{{ sg_3_name }}'
+ ignore_errors: yes
+
+ - name: Remove subnet
+ ec2_vpc_subnet:
+ cidr: '{{ subnet_cidr }}'
+ vpc_id: '{{ vpc_id }}'
+ state: absent
+ ignore_errors: yes
+
+ - name: Delete VPC
+ ec2_vpc_net:
+ name: '{{ vpc_name }}'
+ cidr_block: '{{ vpc_cidr }}'
+ state: absent
+ purge_cidrs: yes
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/aliases
new file mode 100644
index 000000000..4ef4b2067
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/aliases
@@ -0,0 +1 @@
+cloud/aws
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/defaults/main.yml
new file mode 100644
index 000000000..d9636646b
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/defaults/main.yml
@@ -0,0 +1,29 @@
+rds_param_group:
+ name: '{{ resource_prefix }}rds-param-group'
+ description: Test group for rds_param_group Ansible module
+ engine: postgres9.6
+
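+# A parameter list with more than 20 entries, used by the "more than 20
+# parameters" tests in tasks/main.yml to exercise how the module handles long
+# parameter lists.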
+rds_long_param_list:
+ application_name: Test
+ logging_collector: on
+ log_directory: /var/log/postgresql
+ log_filename: postgresql.log.%Y-%m-%d-%H
+ log_file_mode: 0600
+ event_source: RDS
+ log_min_messages: INFO
+ log_min_duration_statement: 500
+ log_rotation_age: 60
+ debug_print_parse: on
+ debug_print_rewritten: on
+ debug_print_plan: on
+ debug_pretty_print: on
+ log_checkpoints: on
+ log_connections: on
+ log_disconnections: on
+ log_duration: on
+ log_error_verbosity: VERBOSE
+ log_lock_waits: on
+ log_temp_files: 10K
+ log_timezone: UTC
+ log_statement: all
+ log_replication_commands: on
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/tasks/main.yml
new file mode 100644
index 000000000..889bf876a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/tasks/main.yml
@@ -0,0 +1,517 @@
+# A Note about ec2 environment variable name preference:
+# - EC2_URL -> AWS_URL
+# - EC2_ACCESS_KEY -> AWS_ACCESS_KEY_ID -> AWS_ACCESS_KEY
+# - EC2_SECRET_KEY -> AWS_SECRET_ACCESS_KEY -> AWS_SECRET_KEY
+# - EC2_REGION -> AWS_REGION
+#
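+# As a sketch only (variable names taken from the list above; the values shown
+# are placeholders, not real credentials), a manual run outside CI could export
+# for example:
+#
+#   AWS_ACCESS_KEY_ID=<access key id>
+#   AWS_SECRET_ACCESS_KEY=<secret key>
+#   AWS_REGION=<region, e.g. us-east-1>
+#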
+# TODO - name: test 'region' parameter
+# TODO - name: test 'state=absent' parameter for existing key
+# TODO - name: test 'state=absent' parameter for missing key
+# TODO - name: test 'validate_certs' parameter
+
+# ============================================================
+
+- name: rds_param_group tests
+ module_defaults:
+ group/aws:
+ ec2_access_key: '{{ aws_access_key }}'
+ ec2_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ ec2_region }}'
+ block:
+
+ # ============================================================
+ - name: test empty parameter group - CHECK_MODE
+ rds_param_group:
+ name: '{{ rds_param_group.name }}'
+ engine: '{{ rds_param_group.engine }}'
+ description: '{{ rds_param_group.description }}'
+ state: present
+ check_mode: true
+ register: result
+
+ - name: assert rds parameter group changed - CHECK_MODE
+ assert:
+ that:
+ - result.changed
+
+ - name: test empty parameter group
+ rds_param_group:
+ name: '{{ rds_param_group.name }}'
+ engine: '{{ rds_param_group.engine }}'
+ description: '{{ rds_param_group.description }}'
+ state: present
+ register: result
+
+ - name: assert rds parameter group changed
+ assert:
+ that:
+ - result.changed
+ - '"db_parameter_group_arn" in result'
+ - "'{{ result.db_parameter_group_name | lower }}' == '{{ rds_param_group.name\
+ \ | lower }}'"
+ - '"description" in result'
+ - result.tags == {}
+
+ # ============================================================
+ - name: test empty parameter group with no arguments changes nothing - CHECK_MODE
+ rds_param_group:
+ name: '{{ rds_param_group.name }}'
+ engine: '{{ rds_param_group.engine }}'
+ description: '{{ rds_param_group.description }}'
+ state: present
+ check_mode: true
+ register: result
+
+ - name: assert no change when running empty parameter group a second time - CHECK_MODE
+ assert:
+ that:
+ - not result.changed
+
+ - name: test empty parameter group with no arguments changes nothing
+ rds_param_group:
+ name: '{{ rds_param_group.name }}'
+ engine: '{{ rds_param_group.engine }}'
+ description: '{{ rds_param_group.description }}'
+ state: present
+ register: result
+
+ - name: assert no change when running empty parameter group a second time
+ assert:
+ that:
+ - not result.changed
+
+ # ============================================================
+ - name: test adding numeric tag - CHECK_MODE
+ rds_param_group:
+ name: '{{ rds_param_group.name }}'
+ engine: '{{ rds_param_group.engine }}'
+ description: '{{ rds_param_group.description }}'
+ state: present
+ tags:
+ Environment: test
+ Test: 123
+ check_mode: true
+ register: result
+
+ - name: assert adding a numeric tag silently converts it to a string - CHECK_MODE
+ assert:
+ that:
+ - result.changed
+
+ - name: test adding numeric tag
+ rds_param_group:
+ name: '{{ rds_param_group.name }}'
+ engine: '{{ rds_param_group.engine }}'
+ description: '{{ rds_param_group.description }}'
+ state: present
+ tags:
+ Environment: test
+ Test: 123
+ register: result
+
+ - name: assert adding a numeric tag silently converts it to a string
+ assert:
+ that:
+ - result.changed
+ - '"db_parameter_group_arn" in result'
+ - "'{{ result.db_parameter_group_name | lower }}' == '{{ rds_param_group.name\
+ \ | lower }}'"
+ - '"description" in result'
+ - '"tags" in result'
+ - result.tags | length == 2
+ - result.tags["Environment"] == 'test'
+ - result.tags["Test"] == '123'
+
+ # ============================================================
+ - name: test tagging existing group - CHECK_MODE
+ rds_param_group:
+ name: '{{ rds_param_group.name }}'
+ engine: '{{ rds_param_group.engine }}'
+ description: '{{ rds_param_group.description }}'
+ state: present
+ tags:
+ Environment: test
+ Test: '123'
+ NewTag: hello
+ check_mode: true
+ register: result
+
+ - name: assert tagging existing group changes it and adds tags - CHECK_MODE
+ assert:
+ that:
+ - result.changed
+
+ - name: test tagging existing group
+ rds_param_group:
+ name: '{{ rds_param_group.name }}'
+ engine: '{{ rds_param_group.engine }}'
+ description: '{{ rds_param_group.description }}'
+ state: present
+ tags:
+ Environment: test
+ Test: '123'
+ NewTag: hello
+ register: result
+
+ - name: assert tagging existing group changes it and adds tags
+ assert:
+ that:
+ - result.changed
+ - '"db_parameter_group_arn" in result'
+ - "'{{ result.db_parameter_group_name | lower }}' == '{{ rds_param_group.name\
+ \ | lower }}'"
+ - '"description" in result'
+ - '"tags" in result'
+ - result.tags | length == 3
+ - result.tags["Environment"] == 'test'
+ - result.tags["Test"] == '123'
+ - result.tags["NewTag"] == 'hello'
+
+ # ============================================================
+ - name: test repeating tagging existing group - CHECK_MODE
+ rds_param_group:
+ name: '{{ rds_param_group.name }}'
+ engine: '{{ rds_param_group.engine }}'
+ description: '{{ rds_param_group.description }}'
+ state: present
+ tags:
+ Environment: test
+ Test: '123'
+ NewTag: hello
+ check_mode: true
+ register: result
+
+ - name: assert repeating tagging of existing group does not change it - CHECK_MODE
+ assert:
+ that:
+ - not result.changed
+ - '"db_parameter_group_arn" in result'
+ - "'{{ result.db_parameter_group_name | lower }}' == '{{ rds_param_group.name\
+ \ | lower }}'"
+ - '"description" in result'
+ - '"tags" in result'
+ - result.tags | length == 3
+ - result.tags["Environment"] == 'test'
+ - result.tags["Test"] == '123'
+ - result.tags["NewTag"] == 'hello'
+
+ - name: test repeating tagging existing group
+ rds_param_group:
+ name: '{{ rds_param_group.name }}'
+ engine: '{{ rds_param_group.engine }}'
+ description: '{{ rds_param_group.description }}'
+ state: present
+ tags:
+ Environment: test
+ Test: '123'
+ NewTag: hello
+ register: result
+
+ - name: assert repeating tagging of existing group does not change it
+ assert:
+ that:
+ - not result.changed
+ - '"db_parameter_group_arn" in result'
+ - "'{{ result.db_parameter_group_name | lower }}' == '{{ rds_param_group.name\
+ \ | lower }}'"
+ - '"description" in result'
+ - '"tags" in result'
+ - result.tags | length == 3
+ - result.tags["Environment"] == 'test'
+ - result.tags["Test"] == '123'
+ - result.tags["NewTag"] == 'hello'
+
+ # ============================================================
+ - name: test deleting tags from existing group - CHECK_MODE
+ rds_param_group:
+ name: '{{ rds_param_group.name }}'
+ engine: '{{ rds_param_group.engine }}'
+ description: '{{ rds_param_group.description }}'
+ state: present
+ tags:
+ Environment: test
+ purge_tags: yes
+ check_mode: true
+ register: result
+
+ - name: assert removing tags from existing group changes it - CHECK_MODE
+ assert:
+ that:
+ - result.changed
+
+ - name: test deleting tags from existing group
+ rds_param_group:
+ name: '{{ rds_param_group.name }}'
+ engine: '{{ rds_param_group.engine }}'
+ description: '{{ rds_param_group.description }}'
+ state: present
+ tags:
+ Environment: test
+ purge_tags: yes
+ register: result
+
+ - name: assert removing tags from existing group changes it
+ assert:
+ that:
+ - result.changed
+ - '"db_parameter_group_arn" in result'
+ - "'{{ result.db_parameter_group_name | lower }}' == '{{ rds_param_group.name\
+ \ | lower }}'"
+ - '"description" in result'
+ - '"tags" in result'
+ - result.tags | length == 1
+ - result.tags["Environment"] == 'test'
+
+ # ============================================================
+ - name: test state=absent with engine defined (expect changed=true) - CHECK_MODE
+ rds_param_group:
+ name: '{{ rds_param_group.name }}'
+ engine: '{{ rds_param_group.engine }}'
+ state: absent
+ check_mode: true
+ register: result
+
+ - name: assert state=absent with engine defined (expect changed=true) - CHECK_MODE
+ assert:
+ that:
+ - result.changed
+
+ - name: test state=absent with engine defined (expect changed=true)
+ rds_param_group:
+ name: '{{ rds_param_group.name }}'
+ engine: '{{ rds_param_group.engine }}'
+ state: absent
+ register: result
+
+ - name: assert state=absent with engine defined (expect changed=true)
+ assert:
+ that:
+ - result.changed
+
+ # ============================================================
+ - name: test creating group with parameters - CHECK_MODE
+ rds_param_group:
+ name: '{{ rds_param_group.name }}'
+ engine: '{{ rds_param_group.engine }}'
+ description: '{{ rds_param_group.description }}'
+ state: present
+ params:
+ log_directory: /var/log/postgresql
+ log_statement: all
+ log_duration: on
+ this_param_does_not_exist: oh_no
+ tags:
+ Environment: test
+ Test: '123'
+ check_mode: true
+ register: result
+
+ - name: assert creating a new group with parameters changes it - CHECK_MODE
+ assert:
+ that:
+ - result.changed
+
+ - name: test creating group with parameters
+ rds_param_group:
+ name: '{{ rds_param_group.name }}'
+ engine: '{{ rds_param_group.engine }}'
+ description: '{{ rds_param_group.description }}'
+ state: present
+ params:
+ log_directory: /var/log/postgresql
+ log_statement: all
+ log_duration: on
+ this_param_does_not_exist: oh_no
+ tags:
+ Environment: test
+ Test: '123'
+ register: result
+
+ - name: assert creating a new group with parameters changes it
+ assert:
+ that:
+ - result.changed
+ - '"db_parameter_group_arn" in result'
+ - "'{{ result.db_parameter_group_name | lower }}' == '{{ rds_param_group.name\
+ \ | lower }}'"
+ - '"description" in result'
+ - '"tags" in result'
+ - result.tags | length == 2
+ - result.tags["Environment"] == 'test'
+ - result.tags["Test"] == '123'
+ - result.errors|length == 2
+
+ # ============================================================
+ - name: test repeating group with parameters - CHECK_MODE
+ rds_param_group:
+ name: '{{ rds_param_group.name }}'
+ engine: '{{ rds_param_group.engine }}'
+ description: '{{ rds_param_group.description }}'
+ state: present
+ params:
+ log_directory: /var/log/postgresql
+ log_statement: all
+ log_duration: on
+ this_param_does_not_exist: oh_no
+ tags:
+ Environment: test
+ Test: '123'
+ check_mode: true
+ register: result
+
+ - name: assert repeating group with parameters does not change it - CHECK_MODE
+ assert:
+ that:
+ - not result.changed
+
+ - name: test repeating group with parameters
+ rds_param_group:
+ name: '{{ rds_param_group.name }}'
+ engine: '{{ rds_param_group.engine }}'
+ description: '{{ rds_param_group.description }}'
+ state: present
+ params:
+ log_directory: /var/log/postgresql
+ log_statement: all
+ log_duration: on
+ this_param_does_not_exist: oh_no
+ tags:
+ Environment: test
+ Test: '123'
+ register: result
+
+ - name: assert repeating group with parameters does not change it
+ assert:
+ that:
+ - not result.changed
+ - '"db_parameter_group_arn" in result'
+ - "'{{ result.db_parameter_group_name | lower }}' == '{{ rds_param_group.name\
+ \ | lower }}'"
+ - '"description" in result'
+ - '"tags" in result'
+ - result.tags | length == 2
+ - result.tags["Environment"] == 'test'
+ - result.tags["Test"] == '123'
+ - result.errors|length == 2
+
+ # ============================================================
+ - name: test state=absent with engine defined (expect changed=true) - CHECK_MODE
+ rds_param_group:
+ name: '{{ rds_param_group.name }}'
+ engine: '{{ rds_param_group.engine }}'
+ state: absent
+ check_mode: true
+ register: result
+
+ - name: assert state=absent with engine defined (expect changed=true) - CHECK_MODE
+ assert:
+ that:
+ - result.changed
+
+ - name: test state=absent with engine defined (expect changed=true)
+ rds_param_group:
+ name: '{{ rds_param_group.name }}'
+ engine: '{{ rds_param_group.engine }}'
+ state: absent
+ register: result
+
+ - name: assert state=absent with engine defined (expect changed=true)
+ assert:
+ that:
+ - result.changed
+
+ # ============================================================
+ - name: test repeating state=absent (expect changed=false) - CHECK_MODE
+ rds_param_group:
+ name: '{{ rds_param_group.name }}'
+ engine: '{{ rds_param_group.engine }}'
+ state: absent
+ register: result
+ check_mode: true
+ ignore_errors: true
+
+ - name: assert repeating state=absent (expect changed=false) - CHECK_MODE
+ assert:
+ that:
+ - not result.changed
+
+ - name: test repeating state=absent (expect changed=false)
+ rds_param_group:
+ name: '{{ rds_param_group.name }}'
+ engine: '{{ rds_param_group.engine }}'
+ state: absent
+ register: result
+ ignore_errors: true
+
+ - name: assert repeating state=absent (expect changed=false)
+ assert:
+ that:
+ - not result.changed
+
+ # ============================================================
+ - name: test creating group with more than 20 parameters - CHECK_MODE
+ rds_param_group:
+ name: '{{ rds_param_group.name }}'
+ engine: '{{ rds_param_group.engine }}'
+ description: '{{ rds_param_group.description }}'
+ params: '{{ rds_long_param_list }}'
+ state: present
+ check_mode: true
+ register: result
+
+ - name: assert creating a new group with many parameters changes it - CHECK_MODE
+ assert:
+ that:
+ - result.changed
+
+ - name: test creating group with more than 20 parameters
+ rds_param_group:
+ name: '{{ rds_param_group.name }}'
+ engine: '{{ rds_param_group.engine }}'
+ description: '{{ rds_param_group.description }}'
+ params: '{{ rds_long_param_list }}'
+ state: present
+ register: result
+
+ - name: assert creating a new group with many parameters changes it
+ assert:
+ that:
+ - result.changed
+
+ # ============================================================
+ - name: test repeating group with more than 20 parameters - CHECK_MODE
+ rds_param_group:
+ name: '{{ rds_param_group.name }}'
+ engine: '{{ rds_param_group.engine }}'
+ description: '{{ rds_param_group.description }}'
+ params: '{{ rds_long_param_list }}'
+ state: present
+ check_mode: true
+ register: result
+
+ - name: assert repeating a group with many parameters does not change it - CHECK_MODE
+ assert:
+ that:
+ - not result.changed
+
+ - name: test repeating group with more than 20 parameters
+ rds_param_group:
+ name: '{{ rds_param_group.name }}'
+ engine: '{{ rds_param_group.engine }}'
+ description: '{{ rds_param_group.description }}'
+ params: '{{ rds_long_param_list }}'
+ state: present
+ register: result
+
+ - name: assert repeating a group with many parameters does not change it
+ assert:
+ that:
+ - not result.changed
+
+ always:
+ # ============================================================
+ - name: test state=absent (expect changed=true)
+ rds_param_group:
+ name: '{{ rds_param_group.name }}'
+ state: absent
+ register: result
+ ignore_errors: true
+
+ - name: assert state=absent (expect changed=true)
+ assert:
+ that:
+ - result.changed
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/aliases
new file mode 100644
index 000000000..4ef4b2067
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/aliases
@@ -0,0 +1 @@
+cloud/aws
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/defaults/main.yml
new file mode 100644
index 000000000..156c9f903
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/defaults/main.yml
@@ -0,0 +1,9 @@
+vpc_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.0.0/16
+subnet_a: 10.{{ 256 | random(seed=resource_prefix) }}.10.0/24
+subnet_b: 10.{{ 256 | random(seed=resource_prefix) }}.11.0/24
+subnet_c: 10.{{ 256 | random(seed=resource_prefix) }}.12.0/24
+subnet_d: 10.{{ 256 | random(seed=resource_prefix) }}.13.0/24
+
+group_description: 'Created by integration test : {{ resource_prefix }}'
+group_description_changed: 'Created by integration test : {{ resource_prefix }} -
+ changed'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/main.yml
new file mode 100644
index 000000000..207b150af
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/main.yml
@@ -0,0 +1,112 @@
+# Tests for rds_subnet_group
+#
+# Note: (From Amazon's documentation)
+# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/rds.html#RDS.Client.modify_db_subnet_group
+# DB subnet groups must contain at least one subnet in at least two AZs in the
+# AWS Region.
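+#
+# A minimal compliant call therefore needs at least two subnets spread across
+# two different AZs, e.g. (a sketch only; the name, description and subnet IDs
+# are illustrative placeholders):
+#
+#   - rds_subnet_group:
+#       state: present
+#       name: example-db-subnet-group
+#       description: spans two availability zones
+#       subnets:
+#         - subnet-aaaa1111   # subnet in the first AZ
+#         - subnet-bbbb2222   # subnet in a second AZ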
+
+- module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+
+ # ============================================================
+
+ - name: Fetch AZ availability
+ aws_az_info:
+ register: az_info
+
+ - name: Assert that we have multiple AZs available to us
+ assert:
+ that: az_info.availability_zones | length >= 2
+
+ - name: Pick AZs
+ set_fact:
+ az_one: '{{ az_info.availability_zones[0].zone_name }}'
+ az_two: '{{ az_info.availability_zones[1].zone_name }}'
+
+ # ============================================================
+
+ - name: Create a VPC
+ ec2_vpc_net:
+ state: present
+ cidr_block: '{{ vpc_cidr }}'
+ name: '{{ resource_prefix }}'
+ register: vpc
+
+ - name: Create subnets
+ ec2_vpc_subnet:
+ state: present
+ cidr: '{{ item.cidr }}'
+ az: '{{ item.az }}'
+ vpc_id: '{{ vpc.vpc.id }}'
+ tags:
+ Name: '{{ item.name }}'
+ with_items:
+ - cidr: '{{ subnet_a }}'
+ az: '{{ az_one }}'
+ name: '{{ resource_prefix }}-subnet-a'
+ - cidr: '{{ subnet_b }}'
+ az: '{{ az_two }}'
+ name: '{{ resource_prefix }}-subnet-b'
+ - cidr: '{{ subnet_c }}'
+ az: '{{ az_one }}'
+ name: '{{ resource_prefix }}-subnet-c'
+ - cidr: '{{ subnet_d }}'
+ az: '{{ az_two }}'
+ name: '{{ resource_prefix }}-subnet-d'
+ register: subnets
+
+ - set_fact:
+ subnet_ids: '{{ subnets.results | map(attribute="subnet.id") | list }}'
+
+ # ============================================================
+
+ - include_tasks: params.yml
+
+ - include_tasks: tests.yml
+
+ # ============================================================
+
+ always:
+ - name: Remove subnet group
+ rds_subnet_group:
+ state: absent
+ name: '{{ resource_prefix }}'
+ ignore_errors: yes
+
+ - name: Remove subnets
+ ec2_vpc_subnet:
+ state: absent
+ cidr: '{{ item.cidr }}'
+ vpc_id: '{{ vpc.vpc.id }}'
+ with_items:
+ - cidr: '{{ subnet_a }}'
+ name: '{{ resource_prefix }}-subnet-a'
+ - cidr: '{{ subnet_b }}'
+ name: '{{ resource_prefix }}-subnet-b'
+ - cidr: '{{ subnet_c }}'
+ name: '{{ resource_prefix }}-subnet-c'
+ - cidr: '{{ subnet_d }}'
+ name: '{{ resource_prefix }}-subnet-d'
+ ignore_errors: yes
+ register: removed_subnets
+ until: removed_subnets is succeeded
+ retries: 5
+ delay: 5
+
+ - name: Remove the VPC
+ ec2_vpc_net:
+ state: absent
+ cidr_block: '{{ vpc_cidr }}'
+ name: '{{ resource_prefix }}'
+ ignore_errors: yes
+ register: removed_vpc
+ until: removed_vpc is success
+ retries: 5
+ delay: 5
+
+ # ============================================================
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/params.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/params.yml
new file mode 100644
index 000000000..109703f38
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/params.yml
@@ -0,0 +1,29 @@
+# Try creating without a description
+- name: Create a subnet group (no description)
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ subnets:
+ - '{{ subnet_ids[0] }}'
+ - '{{ subnet_ids[1] }}'
+ ignore_errors: yes
+ register: create_missing_param
+- assert:
+ that:
+ - create_missing_param is failed
+ - "'description' in create_missing_param.msg"
+ - "'state is present but all of the following are missing' in create_missing_param.msg"
+
+# Try creating without subnets
+- name: Create a subnet group (no subnets)
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description }}'
+ ignore_errors: yes
+ register: create_missing_param
+- assert:
+ that:
+ - create_missing_param is failed
+ - "'subnets' in create_missing_param.msg"
+ - "'state is present but all of the following are missing' in create_missing_param.msg"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/tests.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/tests.yml
new file mode 100644
index 000000000..ce710ed3b
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/tests.yml
@@ -0,0 +1,675 @@
+# ============================================================
+# Basic creation
+- name: Create a subnet group - CHECK_MODE
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description }}'
+ subnets:
+ - '{{ subnet_ids[0] }}'
+ - '{{ subnet_ids[1] }}'
+ check_mode: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: Create a subnet group
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description }}'
+ subnets:
+ - '{{ subnet_ids[0] }}'
+ - '{{ subnet_ids[1] }}'
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.subnet_group.description == group_description
+ - result.subnet_group.name == resource_prefix
+ - result.subnet_group.vpc_id == vpc.vpc.id
+ - result.subnet_group.subnet_ids | length == 2
+ - subnet_ids[0] in result.subnet_group.subnet_ids
+ - subnet_ids[1] in result.subnet_group.subnet_ids
+
+- name: Create a subnet group (idempotency) - CHECK_MODE
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description }}'
+ subnets:
+ - '{{ subnet_ids[0] }}'
+ - '{{ subnet_ids[1] }}'
+ check_mode: true
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Create a subnet group (idempotency)
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description }}'
+ subnets:
+ - '{{ subnet_ids[0] }}'
+ - '{{ subnet_ids[1] }}'
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.subnet_group.description == group_description
+ - result.subnet_group.name == resource_prefix
+ - result.subnet_group.vpc_id == vpc.vpc.id
+ - result.subnet_group.subnet_ids | length == 2
+ - subnet_ids[0] in result.subnet_group.subnet_ids
+ - subnet_ids[1] in result.subnet_group.subnet_ids
+
+# ============================================================
+# Update description
+- name: Update subnet group description - CHECK_MODE
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description_changed }}'
+ subnets:
+ - '{{ subnet_ids[0] }}'
+ - '{{ subnet_ids[1] }}'
+ check_mode: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: Update subnet group description
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description_changed }}'
+ subnets:
+ - '{{ subnet_ids[0] }}'
+ - '{{ subnet_ids[1] }}'
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.subnet_group.description == group_description_changed
+ - result.subnet_group.name == resource_prefix
+ - result.subnet_group.vpc_id == vpc.vpc.id
+ - result.subnet_group.subnet_ids | length == 2
+ - subnet_ids[0] in result.subnet_group.subnet_ids
+ - subnet_ids[1] in result.subnet_group.subnet_ids
+
+- name: Update subnet group description (idempotency) - CHECK_MODE
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description_changed }}'
+ subnets:
+ - '{{ subnet_ids[0] }}'
+ - '{{ subnet_ids[1] }}'
+ check_mode: true
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Update subnet group description (idempotency)
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description_changed }}'
+ subnets:
+ - '{{ subnet_ids[0] }}'
+ - '{{ subnet_ids[1] }}'
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.subnet_group.description == group_description_changed
+ - result.subnet_group.name == resource_prefix
+ - result.subnet_group.vpc_id == vpc.vpc.id
+ - result.subnet_group.subnet_ids | length == 2
+ - subnet_ids[0] in result.subnet_group.subnet_ids
+ - subnet_ids[1] in result.subnet_group.subnet_ids
+
+- name: Restore subnet group description - CHECK_MODE
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description }}'
+ subnets:
+ - '{{ subnet_ids[0] }}'
+ - '{{ subnet_ids[1] }}'
+ check_mode: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: Restore subnet group description
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description }}'
+ subnets:
+ - '{{ subnet_ids[0] }}'
+ - '{{ subnet_ids[1] }}'
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.subnet_group.description == group_description
+ - result.subnet_group.name == resource_prefix
+ - result.subnet_group.vpc_id == vpc.vpc.id
+ - result.subnet_group.subnet_ids | length == 2
+ - subnet_ids[0] in result.subnet_group.subnet_ids
+ - subnet_ids[1] in result.subnet_group.subnet_ids
+
+# ============================================================
+# Update subnets
+- name: Update subnet group list - CHECK_MODE
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description }}'
+ subnets:
+ - '{{ subnet_ids[2] }}'
+ - '{{ subnet_ids[3] }}'
+ check_mode: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: Update subnet group list
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description }}'
+ subnets:
+ - '{{ subnet_ids[2] }}'
+ - '{{ subnet_ids[3] }}'
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.subnet_group.description == group_description
+ - result.subnet_group.name == resource_prefix
+ - result.subnet_group.vpc_id == vpc.vpc.id
+ - result.subnet_group.subnet_ids | length == 2
+ - subnet_ids[2] in result.subnet_group.subnet_ids
+ - subnet_ids[3] in result.subnet_group.subnet_ids
+
+- name: Update subnet group list (idempotency) - CHECK_MODE
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description }}'
+ subnets:
+ - '{{ subnet_ids[2] }}'
+ - '{{ subnet_ids[3] }}'
+ check_mode: true
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Update subnet group list (idempotency)
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description }}'
+ subnets:
+ - '{{ subnet_ids[2] }}'
+ - '{{ subnet_ids[3] }}'
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.subnet_group.description == group_description
+ - result.subnet_group.name == resource_prefix
+ - result.subnet_group.vpc_id == vpc.vpc.id
+ - result.subnet_group.subnet_ids | length == 2
+ - subnet_ids[2] in result.subnet_group.subnet_ids
+ - subnet_ids[3] in result.subnet_group.subnet_ids
+
+- name: Add more subnets to subnet group list - CHECK_MODE
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description }}'
+ subnets:
+ - '{{ subnet_ids[0] }}'
+ - '{{ subnet_ids[1] }}'
+ - '{{ subnet_ids[2] }}'
+ - '{{ subnet_ids[3] }}'
+ check_mode: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: Add more subnets to subnet group list
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description }}'
+ subnets:
+ - '{{ subnet_ids[0] }}'
+ - '{{ subnet_ids[1] }}'
+ - '{{ subnet_ids[2] }}'
+ - '{{ subnet_ids[3] }}'
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.subnet_group.description == group_description
+ - result.subnet_group.name == resource_prefix
+ - result.subnet_group.vpc_id == vpc.vpc.id
+ - result.subnet_group.subnet_ids | length == 4
+ - subnet_ids[0] in result.subnet_group.subnet_ids
+ - subnet_ids[1] in result.subnet_group.subnet_ids
+ - subnet_ids[2] in result.subnet_group.subnet_ids
+ - subnet_ids[3] in result.subnet_group.subnet_ids
+
+- name: Add more members to subnet group list (idempotency) - CHECK_MODE
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description }}'
+ subnets:
+ - '{{ subnet_ids[0] }}'
+ - '{{ subnet_ids[1] }}'
+ - '{{ subnet_ids[2] }}'
+ - '{{ subnet_ids[3] }}'
+ check_mode: true
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Add more members to subnet group list (idempotency)
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description }}'
+ subnets:
+ - '{{ subnet_ids[0] }}'
+ - '{{ subnet_ids[1] }}'
+ - '{{ subnet_ids[2] }}'
+ - '{{ subnet_ids[3] }}'
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.subnet_group.description == group_description
+ - result.subnet_group.name == resource_prefix
+ - result.subnet_group.vpc_id == vpc.vpc.id
+ - result.subnet_group.subnet_ids | length == 4
+ - subnet_ids[0] in result.subnet_group.subnet_ids
+ - subnet_ids[1] in result.subnet_group.subnet_ids
+ - subnet_ids[2] in result.subnet_group.subnet_ids
+ - subnet_ids[3] in result.subnet_group.subnet_ids
+
+# ============================================================
+# Add tags to the subnet group
+- name: Update subnet group with tags - CHECK_MODE
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description_changed }}'
+ subnets:
+ - '{{ subnet_ids[0] }}'
+ - '{{ subnet_ids[1] }}'
+ tags:
+ tag_one: '{{ resource_prefix }} One'
+ Tag Two: two {{ resource_prefix }}
+ check_mode: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: Update subnet group with tags
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description_changed }}'
+ subnets:
+ - '{{ subnet_ids[0] }}'
+ - '{{ subnet_ids[1] }}'
+ tags:
+ tag_one: '{{ resource_prefix }} One'
+ Tag Two: two {{ resource_prefix }}
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.subnet_group.description == group_description_changed
+ - result.subnet_group.name == resource_prefix
+ - result.subnet_group.vpc_id == vpc.vpc.id
+ - result.subnet_group.subnet_ids | length == 2
+ - subnet_ids[0] in result.subnet_group.subnet_ids
+ - subnet_ids[1] in result.subnet_group.subnet_ids
+ - '"tags" in result.subnet_group'
+ - result.subnet_group.tags | length == 2
+ - result.subnet_group.tags["tag_one"] == '{{ resource_prefix }} One'
+ - result.subnet_group.tags["Tag Two"] == 'two {{ resource_prefix }}'
+
+- name: Update subnet group with tags (idempotency) - CHECK_MODE
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description_changed }}'
+ subnets:
+ - '{{ subnet_ids[0] }}'
+ - '{{ subnet_ids[1] }}'
+ tags:
+ tag_one: '{{ resource_prefix }} One'
+ Tag Two: two {{ resource_prefix }}
+ check_mode: true
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Update subnet group with tags (idempotency)
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description_changed }}'
+ subnets:
+ - '{{ subnet_ids[0] }}'
+ - '{{ subnet_ids[1] }}'
+ tags:
+ tag_one: '{{ resource_prefix }} One'
+ Tag Two: two {{ resource_prefix }}
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.subnet_group.description == group_description_changed
+ - result.subnet_group.name == resource_prefix
+ - result.subnet_group.vpc_id == vpc.vpc.id
+ - result.subnet_group.subnet_ids | length == 2
+ - subnet_ids[0] in result.subnet_group.subnet_ids
+ - subnet_ids[1] in result.subnet_group.subnet_ids
+ - '"tags" in result.subnet_group'
+ - result.subnet_group.tags | length == 2
+ - result.subnet_group.tags["tag_one"] == '{{ resource_prefix }} One'
+ - result.subnet_group.tags["Tag Two"] == 'two {{ resource_prefix }}'
+
+- name: Update (add/remove) tags - CHECK_MODE
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description_changed }}'
+ subnets:
+ - '{{ subnet_ids[0] }}'
+ - '{{ subnet_ids[1] }}'
+ tags:
+ tag_three: '{{ resource_prefix }} Three'
+ Tag Two: two {{ resource_prefix }}
+ check_mode: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: Update (add/remove) tags
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description_changed }}'
+ subnets:
+ - '{{ subnet_ids[0] }}'
+ - '{{ subnet_ids[1] }}'
+ tags:
+ tag_three: '{{ resource_prefix }} Three'
+ Tag Two: two {{ resource_prefix }}
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.subnet_group.description == group_description_changed
+ - result.subnet_group.name == resource_prefix
+ - result.subnet_group.vpc_id == vpc.vpc.id
+ - result.subnet_group.subnet_ids | length == 2
+ - subnet_ids[0] in result.subnet_group.subnet_ids
+ - subnet_ids[1] in result.subnet_group.subnet_ids
+ - '"tags" in result.subnet_group'
+ - result.subnet_group.tags | length == 2
+ - result.subnet_group.tags["tag_three"] == '{{ resource_prefix }} Three'
+ - result.subnet_group.tags["Tag Two"] == 'two {{ resource_prefix }}'
+
+- name: Update tags without purge - CHECK_MODE
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description_changed }}'
+ subnets:
+ - '{{ subnet_ids[0] }}'
+ - '{{ subnet_ids[1] }}'
+ purge_tags: false
+ tags:
+ tag_one: '{{ resource_prefix }} One'
+ check_mode: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: Update tags without purge
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description_changed }}'
+ subnets:
+ - '{{ subnet_ids[0] }}'
+ - '{{ subnet_ids[1] }}'
+ purge_tags: false
+ tags:
+ tag_one: '{{ resource_prefix }} One'
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.subnet_group.description == group_description_changed
+ - result.subnet_group.name == resource_prefix
+ - result.subnet_group.vpc_id == vpc.vpc.id
+ - result.subnet_group.subnet_ids | length == 2
+ - subnet_ids[0] in result.subnet_group.subnet_ids
+ - subnet_ids[1] in result.subnet_group.subnet_ids
+ - '"tags" in result.subnet_group'
+ - result.subnet_group.tags | length == 3
+ - result.subnet_group.tags["tag_three"] == '{{ resource_prefix }} Three'
+ - result.subnet_group.tags["Tag Two"] == 'two {{ resource_prefix }}'
+ - result.subnet_group.tags["tag_one"] == '{{ resource_prefix }} One'
+
+- name: Remove all the tags - CHECK_MODE
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description_changed }}'
+ subnets:
+ - '{{ subnet_ids[0] }}'
+ - '{{ subnet_ids[1] }}'
+ tags: {}
+ check_mode: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: Remove all the tags
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description_changed }}'
+ subnets:
+ - '{{ subnet_ids[0] }}'
+ - '{{ subnet_ids[1] }}'
+ tags: {}
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.subnet_group.description == group_description_changed
+ - result.subnet_group.name == resource_prefix
+ - result.subnet_group.vpc_id == vpc.vpc.id
+ - result.subnet_group.subnet_ids | length == 2
+ - subnet_ids[0] in result.subnet_group.subnet_ids
+ - subnet_ids[1] in result.subnet_group.subnet_ids
+ - '"tags" in result.subnet_group'
+
+- name: Update with CamelCase tags - CHECK_MODE
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description_changed }}'
+ subnets:
+ - '{{ subnet_ids[0] }}'
+ - '{{ subnet_ids[1] }}'
+ tags:
+ lowercase spaced: hello cruel world
+ Title Case: Hello Cruel World
+ CamelCase: SimpleCamelCase
+ snake_case: simple_snake_case
+ check_mode: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: Update with CamelCase tags
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description_changed }}'
+ subnets:
+ - '{{ subnet_ids[0] }}'
+ - '{{ subnet_ids[1] }}'
+ tags:
+ lowercase spaced: hello cruel world
+ Title Case: Hello Cruel World
+ CamelCase: SimpleCamelCase
+ snake_case: simple_snake_case
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.subnet_group.description == group_description_changed
+ - result.subnet_group.name == resource_prefix
+ - result.subnet_group.vpc_id == vpc.vpc.id
+ - result.subnet_group.subnet_ids | length == 2
+ - subnet_ids[0] in result.subnet_group.subnet_ids
+ - subnet_ids[1] in result.subnet_group.subnet_ids
+ - '"tags" in result.subnet_group'
+ - result.subnet_group.tags | length == 4
+ - result.subnet_group.tags["lowercase spaced"] == 'hello cruel world'
+ - result.subnet_group.tags["Title Case"] == 'Hello Cruel World'
+ - result.subnet_group.tags["CamelCase"] == 'SimpleCamelCase'
+ - result.subnet_group.tags["snake_case"] == 'simple_snake_case'
+
+- name: Do not specify any tag to ensure previous tags are not removed
+ rds_subnet_group:
+ state: present
+ name: '{{ resource_prefix }}'
+ description: '{{ group_description_changed }}'
+ subnets:
+ - '{{ subnet_ids[0] }}'
+ - '{{ subnet_ids[1] }}'
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.subnet_group.description == group_description_changed
+ - result.subnet_group.name == resource_prefix
+ - result.subnet_group.vpc_id == vpc.vpc.id
+ - result.subnet_group.subnet_ids | length == 2
+ - subnet_ids[0] in result.subnet_group.subnet_ids
+ - subnet_ids[1] in result.subnet_group.subnet_ids
+ - '"tags" in result.subnet_group'
+ - result.subnet_group.tags | length == 4
+ - result.subnet_group.tags["lowercase spaced"] == 'hello cruel world'
+ - result.subnet_group.tags["Title Case"] == 'Hello Cruel World'
+ - result.subnet_group.tags["CamelCase"] == 'SimpleCamelCase'
+ - result.subnet_group.tags["snake_case"] == 'simple_snake_case'
+
+
+# ============================================================
+# Deletion
+- name: Delete a subnet group - CHECK_MODE
+ rds_subnet_group:
+ state: absent
+ name: '{{ resource_prefix }}'
+ check_mode: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: Delete a subnet group
+ rds_subnet_group:
+ state: absent
+ name: '{{ resource_prefix }}'
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: Delete a subnet group (idempotency) - CHECK_MODE
+ rds_subnet_group:
+ state: absent
+ name: '{{ resource_prefix }}'
+ check_mode: true
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Delete a subnet group (idempotency)
+ rds_subnet_group:
+ state: absent
+ name: '{{ resource_prefix }}'
+ register: result
+
+- assert:
+ that:
+ - result is not changed
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53/aliases b/ansible_collections/amazon/aws/tests/integration/targets/route53/aliases
new file mode 100644
index 000000000..c6a082944
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/route53/aliases
@@ -0,0 +1,4 @@
+cloud/aws
+
+route53_info
+module_utils_route53
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/route53/defaults/main.yml
new file mode 100644
index 000000000..cc0d3b78d
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/route53/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+# defaults file for route53 tests
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/route53/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/route53/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/route53/tasks/main.yml
new file mode 100644
index 000000000..08ec59d93
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/route53/tasks/main.yml
@@ -0,0 +1,1126 @@
+# tasks file for Route53 integration tests
+
+- set_fact:
+ zone_one: '{{ resource_prefix | replace("-", "") }}.one.ansible.test.'
+ zone_two: '{{ resource_prefix | replace("-", "") }}.two.ansible.test.'
+- debug:
+ msg: Set zones {{ zone_one }} and {{ zone_two }}
+
+- name: Test basics (new zone, A and AAAA records)
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ amazon.aws.route53:
+ # Route53 is explicitly a global service
+ region:
+ block:
+ - name: create VPC
+ ec2_vpc_net:
+ cidr_block: 192.0.2.0/24
+ name: '{{ resource_prefix }}_vpc'
+ state: present
+ register: vpc
+
+ - name: Create a zone
+ route53_zone:
+ zone: '{{ zone_one }}'
+ comment: Created in Ansible test {{ resource_prefix }}
+ tags:
+ TestTag: '{{ resource_prefix }}.z1'
+ register: z1
+ - assert:
+ that:
+ - z1 is success
+ - z1 is changed
+ - z1.comment == 'Created in Ansible test {{ resource_prefix }}'
+ - z1.tags.TestTag == '{{ resource_prefix }}.z1'
+
+ - name: Get zone details
+ route53_info:
+ query: hosted_zone
+ hosted_zone_id: '{{ z1.zone_id }}'
+ hosted_zone_method: details
+ register: hosted_zones
+ - name: Assert newly created hosted zone only has NS and SOA records
+ assert:
+ that:
+ - hosted_zones.HostedZone.ResourceRecordSetCount == 2
+
+ - name: Create a second zone
+ route53_zone:
+ zone: '{{ zone_two }}'
+ vpc_id: '{{ vpc.vpc.id }}'
+ vpc_region: '{{ aws_region }}'
+ comment: Created in Ansible test {{ resource_prefix }}
+ tags:
+ TestTag: '{{ resource_prefix }}.z2'
+ register: z2
+ - assert:
+ that:
+ - z2 is success
+ - z2 is changed
+ - z2.comment == 'Created in Ansible test {{ resource_prefix }}'
+ - z2.tags.TestTag == '{{ resource_prefix }}.z2'
+
+ - name: Get zone details
+ route53_info:
+ query: hosted_zone
+ hosted_zone_id: '{{ z2.zone_id }}'
+ hosted_zone_method: details
+ register: hosted_zones
+
+ - name: Assert newly created hosted zone only has NS and SOA records and is private
+ assert:
+ that:
+ - hosted_zones.HostedZone.ResourceRecordSetCount == 2
+ - hosted_zones.HostedZone.Config.PrivateZone
+
+ # Ensure that we can use the non-paginated list_by_name method with max_items
+ - name: Get zone 1 details only
+ route53_info:
+ query: hosted_zone
+ hosted_zone_method: list_by_name
+ dns_name: '{{ zone_one }}'
+ max_items: 1
+ register: list_by_name_result
+
+ - name: Assert that we found exactly one zone when querying by name
+ assert:
+ that:
+ - list_by_name_result.HostedZones | length == 1
+ - list_by_name_result.HostedZones[0].Name == '{{ zone_one }}'
+
+ - name: Create A record using zone fqdn
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: qdn_test.{{ zone_one }}
+ type: A
+ value: 192.0.2.1
+ register: qdn
+ - assert:
+ that:
+ - qdn is not failed
+ - qdn is changed
+
+ - name: Get A record using "get" method of route53 module
+ route53:
+ state: get
+ zone: '{{ zone_one }}'
+ record: qdn_test.{{ zone_one }}
+ type: A
+ register: get_result
+ - name: Check boto3-style get data
+ assert:
+ that:
+ - get_result.nameservers | length > 0
+ - get_result.resource_record_sets | length == 1
+ - '"name" in record_set'
+ - record_set.name == qdn_record
+ - '"resource_records" in record_set'
+ - record_set.resource_records | length == 1
+ - '"value" in record_set.resource_records[0]'
+ - record_set.resource_records[0].value == '192.0.2.1'
+ - '"ttl" in record_set'
+ - record_set.ttl == 3600
+ - '"type" in record_set'
+ - record_set.type == 'A'
+ vars:
+ record_set: '{{ get_result.resource_record_sets[0] }}'
+ qdn_record: qdn_test.{{ zone_one }}
+
+ - name: Check boto3 compat get data
+ assert:
+ that:
+ - '"set" in get_result'
+ - '"Name" in record_set'
+ - record_set.Name == qdn_record
+ - '"ResourceRecords" in record_set'
+ - record_set.ResourceRecords | length == 1
+ - '"Value" in record_set.ResourceRecords[0]'
+ - record_set.ResourceRecords[0].Value == '192.0.2.1'
+ - '"TTL" in record_set'
+ - record_set.TTL == 3600
+ - record_set.Type == 'A'
+ vars:
+ record_set: '{{ get_result.set }}'
+ qdn_record: qdn_test.{{ zone_one }}
+
+ - name: Check boto2 compat get data
+ assert:
+ that:
+ - '"set" in get_result'
+ - '"alias" in record_set'
+ - record_set.alias == False
+ - '"failover" in record_set'
+ - '"health_check" in record_set'
+ - '"hosted_zone_id" in record_set'
+ - record_set.hosted_zone_id == z1.zone_id
+ - '"identifier" in record_set'
+ - '"record" in record_set'
+ - record_set.record == qdn_record
+ - '"ttl" in record_set'
+ - record_set.ttl == "3600"
+ - '"type" in record_set'
+ - record_set.type == 'A'
+ - '"value" in record_set'
+ - record_set.value == '192.0.2.1'
+ - '"values" in record_set'
+ - record_set['values'] | length == 1
+ - record_set['values'][0] == '192.0.2.1'
+ - '"weight" in record_set'
+ - '"zone" in record_set'
+ - record_set.zone == zone_one
+ vars:
+ record_set: '{{ get_result.set }}'
+ qdn_record: qdn_test.{{ zone_one }}
+
+ ## Test A recordset creation and order adjustments
+ - name: Create same A record using zone non-qualified domain
+ route53:
+ state: present
+ zone: '{{ zone_one[:-1] }}'
+ record: qdn_test.{{ zone_one[:-1] }}
+ type: A
+ value: 192.0.2.1
+ register: non_qdn
+ - assert:
+ that:
+ - non_qdn is not failed
+ - non_qdn is not changed
+
+ - name: Create A record using zone ID
+ route53:
+ state: present
+ hosted_zone_id: '{{ z1.zone_id }}'
+ record: zid_test.{{ zone_one }}
+ type: A
+ value: 192.0.2.1
+ register: zid
+ - assert:
+ that:
+ - zid is not failed
+ - zid is changed
+
+ - name: Create a multi-value A record with values in different order
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: order_test.{{ zone_one }}
+ type: A
+ value:
+ - 192.0.2.2
+ - 192.0.2.1
+ register: mv_a_record
+ - assert:
+ that:
+ - mv_a_record is not failed
+ - mv_a_record is changed
+
+ - name: Create same multi-value A record with values in different order
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: order_test.{{ zone_one }}
+ type: A
+ value:
+ - 192.0.2.2
+ - 192.0.2.1
+ register: mv_a_record
+ - assert:
+ that:
+ - mv_a_record is not failed
+ - mv_a_record is not changed
+
+ # Get resulting A record and ensure max_items is applied
+ - name: get Route53 A record information
+ route53_info:
+ type: A
+ query: record_sets
+ hosted_zone_id: '{{ z1.zone_id }}'
+ start_record_name: order_test.{{ zone_one }}
+ max_items: 1
+ register: records
+
+ - assert:
+ that:
+ - records.ResourceRecordSets|length == 1
+ - records.ResourceRecordSets[0].ResourceRecords|length == 2
+ - records.ResourceRecordSets[0].ResourceRecords[0].Value == '192.0.2.2'
+ - records.ResourceRecordSets[0].ResourceRecords[1].Value == '192.0.2.1'
+
+ - name: Remove a member from multi-value A record with values in different order
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: order_test.{{ zone_one }}
+ type: A
+ value:
+ - 192.0.2.2
+ register: del_a_record
+ ignore_errors: true
+ - name: This should fail, because `overwrite` is false
+ assert:
+ that:
+ - del_a_record is failed
+
+ - name: Remove a member from multi-value A record with values in different order
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: order_test.{{ zone_one }}
+ overwrite: true
+ type: A
+ value:
+ - 192.0.2.2
+ register: del_a_record
+ ignore_errors: true
+
+ - name: This should not fail, because `overwrite` is true
+ assert:
+ that:
+ - del_a_record is not failed
+ - del_a_record is changed
+
+ - name: get Route53 zone A record information
+ route53_info:
+ type: A
+ query: record_sets
+ hosted_zone_id: '{{ z1.zone_id }}'
+ start_record_name: order_test.{{ zone_one }}
+ max_items: 50
+ register: records
+
+ - assert:
+ that:
+ - records.ResourceRecordSets|length == 3
+ - records.ResourceRecordSets[0].ResourceRecords|length == 1
+ - records.ResourceRecordSets[0].ResourceRecords[0].Value == '192.0.2.2'
+
+ ## Test CNAME record creation and info retrieval
+ - name: Create CNAME record
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ type: CNAME
+ record: cname_test.{{ zone_one }}
+ value: order_test.{{ zone_one }}
+ register: cname_record
+
+ - assert:
+ that:
+ - cname_record is not failed
+ - cname_record is changed
+
+ - name: Get Route53 CNAME record information
+ route53_info:
+ type: CNAME
+ query: record_sets
+ hosted_zone_id: '{{ z1.zone_id }}'
+ start_record_name: cname_test.{{ zone_one }}
+ max_items: 1
+ register: cname_records
+
+ - assert:
+ that:
+ - cname_records.ResourceRecordSets|length == 1
+ - cname_records.ResourceRecordSets[0].ResourceRecords|length == 1
+ - cname_records.ResourceRecordSets[0].ResourceRecords[0].Value == "order_test.{{
+ zone_one }}"
+
+ ## Test CAA record creation
+ - name: Create a LetsEncrypt CAA record
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: '{{ zone_one }}'
+ type: CAA
+ value:
+ - 0 issue "letsencrypt.org;"
+ - 0 issuewild "letsencrypt.org;"
+ overwrite: true
+ register: caa
+ - assert:
+ that:
+ - caa is not failed
+ - caa is changed
+
+ - name: Re-create the same LetsEncrypt CAA record
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: '{{ zone_one }}'
+ type: CAA
+ value:
+ - 0 issue "letsencrypt.org;"
+ - 0 issuewild "letsencrypt.org;"
+ overwrite: true
+ register: caa
+ - assert:
+ that:
+ - caa is not failed
+ - caa is not changed
+
+ - name: Re-create the same LetsEncrypt CAA record in opposite-order
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: '{{ zone_one }}'
+ type: CAA
+ value:
+ - 0 issuewild "letsencrypt.org;"
+ - 0 issue "letsencrypt.org;"
+ overwrite: true
+ register: caa
+ - name: This should not be changed, as CAA records are not order sensitive
+ assert:
+ that:
+ - caa is not failed
+ - caa is not changed
+
+ - name: Create an A record for a wildcard prefix
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: '*.wildcard_test.{{ zone_one }}'
+ type: A
+ value:
+ - 192.0.2.1
+ register: wc_a_record
+ - assert:
+ that:
+ - wc_a_record is not failed
+ - wc_a_record is changed
+
+ - name: Create an A record for a wildcard prefix (idempotency)
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: '*.wildcard_test.{{ zone_one }}'
+ type: A
+ value:
+ - 192.0.2.1
+ register: wc_a_record
+ - assert:
+ that:
+ - wc_a_record is not failed
+ - wc_a_record is not changed
+
+ - name: Create an A record for a wildcard prefix (change)
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: '*.wildcard_test.{{ zone_one }}'
+ type: A
+ value:
+ - 192.0.2.2
+ overwrite: true
+ register: wc_a_record
+ - assert:
+ that:
+ - wc_a_record is not failed
+ - wc_a_record is changed
+
+ - name: Delete an A record for a wildcard prefix
+ route53:
+ state: absent
+ zone: '{{ zone_one }}'
+ record: '*.wildcard_test.{{ zone_one }}'
+ type: A
+ value:
+ - 192.0.2.2
+ register: wc_a_record
+ - assert:
+ that:
+ - wc_a_record is not failed
+ - wc_a_record is changed
+ - wc_a_record.diff.after == {}
+
+ - name: create a record with different TTL
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: localhost.{{ zone_one }}
+ type: A
+ value: 127.0.0.1
+ ttl: 30
+ register: ttl30
+ - name: check return values
+ assert:
+ that:
+ - ttl30.diff.resource_record_sets[0].ttl == 30
+ - ttl30 is changed
+
+ - name: delete previous record without mentioning ttl and value
+ route53:
+ state: absent
+ zone: '{{ zone_one }}'
+ record: localhost.{{ zone_one }}
+ type: A
+ register: ttl30
+ - name: check if record is deleted
+ assert:
+ that:
+ - ttl30 is changed
+
+ - name: idempotent delete of previous record without mentioning ttl and value
+ route53:
+ state: absent
+ zone: '{{ zone_one }}'
+ record: localhost.{{ zone_one }}
+ type: A
+ register: ttl30
+ - name: check that deleting an already-deleted record reports no change
+ assert:
+ that:
+ - ttl30 is not changed
+
+ # Tests on zone two (private zone)
+ - name: Create A record using zone fqdn
+ route53:
+ state: present
+ zone: '{{ zone_two }}'
+ record: qdn_test.{{ zone_two }}
+ type: A
+ value: 192.0.2.1
+ private_zone: true
+ register: qdn
+ - assert:
+ that:
+ - qdn is not failed
+ - qdn is changed
+
+ - name: Get A record using 'get' method of route53 module
+ route53:
+ state: get
+ zone: '{{ zone_two }}'
+ record: qdn_test.{{ zone_two }}
+ type: A
+ private_zone: true
+ register: get_result
+ - assert:
+ that:
+ - get_result.nameservers|length > 0
+ - get_result.set.Name == "qdn_test.{{ zone_two }}"
+ - get_result.set.ResourceRecords[0].Value == "192.0.2.1"
+ - get_result.set.Type == "A"
+
+ - name: Get a record that does not exist
+ route53:
+ state: get
+ zone: '{{ zone_two }}'
+ record: notfound.{{ zone_two }}
+ type: A
+ private_zone: true
+ register: get_result
+ - assert:
+ that:
+ - get_result.nameservers|length > 0
+ - get_result.set|length == 0
+ - get_result.resource_record_sets|length == 0
+
+ - name: Create same A record using zone non-qualified domain
+ route53:
+ state: present
+ zone: '{{ zone_two[:-1] }}'
+ record: qdn_test.{{ zone_two[:-1] }}
+ type: A
+ value: 192.0.2.1
+ private_zone: true
+ register: non_qdn
+ - assert:
+ that:
+ - non_qdn is not failed
+ - non_qdn is not changed
+
+ - name: Create A record using zone ID
+ route53:
+ state: present
+ hosted_zone_id: '{{ z2.zone_id }}'
+ record: zid_test.{{ zone_two }}
+ type: A
+ value: 192.0.2.2
+ private_zone: true
+ register: zid
+ - assert:
+ that:
+ - zid is not failed
+ - zid is changed
+
+ - name: Create A record using zone fqdn and vpc_id
+ route53:
+ state: present
+ zone: '{{ zone_two }}'
+ record: qdn_test_vpc.{{ zone_two }}
+ type: A
+ value: 192.0.2.3
+ private_zone: true
+ vpc_id: '{{ vpc.vpc.id }}'
+ register: qdn
+ - assert:
+ that:
+ - qdn is not failed
+ - qdn is changed
+
+ - name: Create A record using zone ID and vpc_id
+ route53:
+ state: present
+ hosted_zone_id: '{{ z2.zone_id }}'
+ record: zid_test_vpc.{{ zone_two }}
+ type: A
+ value: 192.0.2.4
+ private_zone: true
+ vpc_id: '{{ vpc.vpc.id }}'
+ register: zid
+ - assert:
+ that:
+ - zid is not failed
+ - zid is changed
+
+ - name: Create an Alias record
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: alias.{{ zone_one }}
+ type: A
+ alias: true
+ alias_hosted_zone_id: '{{ z1.zone_id }}'
+ value: zid_test.{{ zone_one }}
+ overwrite: true
+ register: alias_record
+ - name: This should be changed
+ assert:
+ that:
+ - alias_record is not failed
+ - alias_record is changed
+
+ - name: Re-Create an Alias record
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: alias.{{ zone_one }}
+ type: A
+ alias: true
+ alias_hosted_zone_id: '{{ z1.zone_id }}'
+ value: zid_test.{{ zone_one }}
+ overwrite: true
+ register: alias_record
+ - name: This should not be changed
+ assert:
+ that:
+ - alias_record is not failed
+ - alias_record is not changed
+
+ - name: Create a weighted record
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: weighted.{{ zone_one }}
+ type: CNAME
+ value: zid_test.{{ zone_one }}
+ overwrite: true
+ identifier: host1@www
+ weight: 100
+ region: '{{ omit }}'
+ register: weighted_record
+ - name: This should be changed
+ assert:
+ that:
+ - weighted_record is not failed
+ - weighted_record is changed
+
+ - name: Re-Create a weighted record
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: weighted.{{ zone_one }}
+ type: CNAME
+ value: zid_test.{{ zone_one }}
+ overwrite: true
+ identifier: host1@www
+ weight: 100
+ region: '{{ omit }}'
+ register: weighted_record
+ - name: This should not be changed
+ assert:
+ that:
+ - weighted_record is not failed
+ - weighted_record is not changed
+
+ - name: Create a zero weighted record
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: zero_weighted.{{ zone_one }}
+ type: CNAME
+ value: zid_test.{{ zone_one }}
+ overwrite: true
+ identifier: host1@www
+ weight: 0
+ region: '{{ omit }}'
+ register: weighted_record
+ - name: This should be changed
+ assert:
+ that:
+ - weighted_record is not failed
+ - weighted_record is changed
+
+ - name: Re-Create a zero weighted record
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: zero_weighted.{{ zone_one }}
+ type: CNAME
+ value: zid_test.{{ zone_one }}
+ overwrite: true
+ identifier: host1@www
+ weight: 0
+ region: '{{ omit }}'
+ register: weighted_record
+ - name: This should not be changed
+ assert:
+ that:
+ - weighted_record is not failed
+ - weighted_record is not changed
+
+# Test Geo Location - Continent Code
+ - name: Create a record with geo_location - continent_code (check_mode)
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: geo-test-1.{{ zone_one }}
+ identifier: geohost1@www
+ type: A
+ value: 127.0.0.1
+ ttl: 30
+ geo_location:
+ continent_code: NA
+ check_mode: true
+ register: create_geo_continent_check_mode
+ - assert:
+ that:
+ - create_geo_continent_check_mode is changed
+ - create_geo_continent_check_mode is not failed
+ - '"route53:ChangeResourceRecordSets" not in create_geo_continent_check_mode.resource_actions'
+
+ - name: Create a record with geo_location - continent_code
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: geo-test-1.{{ zone_one }}
+ identifier: geohost1@www
+ type: A
+ value: 127.0.0.1
+ ttl: 30
+ geo_location:
+ continent_code: NA
+ register: create_geo_continent
+ # Get resulting A record and ensure geo_location parameters are applied
+ - name: get Route53 A record information
+ route53_info:
+ type: A
+ query: record_sets
+ hosted_zone_id: '{{ z1.zone_id }}'
+ start_record_name: geo-test-1.{{ zone_one }}
+ max_items: 1
+ register: result
+
+ - assert:
+ that:
+ - create_geo_continent is changed
+ - create_geo_continent is not failed
+ - '"route53:ChangeResourceRecordSets" in create_geo_continent.resource_actions'
+ - result.ResourceRecordSets[0].GeoLocation.ContinentCode == "NA"
+
+ - name: Create a record with geo_location - continent_code (idempotency)
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: geo-test-1.{{ zone_one }}
+ identifier: geohost1@www
+ type: A
+ value: 127.0.0.1
+ ttl: 30
+ geo_location:
+ continent_code: NA
+ register: create_geo_continent_idem
+ - assert:
+ that:
+ - create_geo_continent_idem is not changed
+ - create_geo_continent_idem is not failed
+ - '"route53:ChangeResourceRecordSets" not in create_geo_continent_idem.resource_actions'
+
+ - name: Create a record with geo_location - continent_code (idempotency - check_mode)
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: geo-test-1.{{ zone_one }}
+ identifier: geohost1@www
+ type: A
+ value: 127.0.0.1
+ ttl: 30
+ geo_location:
+ continent_code: NA
+ check_mode: true
+ register: create_geo_continent_idem_check
+
+ - assert:
+ that:
+ - create_geo_continent_idem_check is not changed
+ - create_geo_continent_idem_check is not failed
+ - '"route53:ChangeResourceRecordSets" not in create_geo_continent_idem_check.resource_actions'
+
+# Test Geo Location - Country Code
+ - name: Create a record with geo_location - country_code (check_mode)
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: geo-test-2.{{ zone_one }}
+ identifier: geohost2@www
+ type: A
+ value: 127.0.0.1
+ ttl: 30
+ geo_location:
+ country_code: US
+ check_mode: true
+ register: create_geo_country_check_mode
+ - assert:
+ that:
+ - create_geo_country_check_mode is changed
+ - create_geo_country_check_mode is not failed
+ - '"route53:ChangeResourceRecordSets" not in create_geo_country_check_mode.resource_actions'
+
+ - name: Create a record with geo_location - country_code
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: geo-test-2.{{ zone_one }}
+ identifier: geohost2@www
+ type: A
+ value: 127.0.0.1
+ ttl: 30
+ geo_location:
+ country_code: US
+ register: create_geo_country
+ # Get resulting A record and ensure geo_location parameters are applied
+ - name: get Route53 A record information
+ route53_info:
+ type: A
+ query: record_sets
+ hosted_zone_id: '{{ z1.zone_id }}'
+ start_record_name: geo-test-2.{{ zone_one }}
+ max_items: 1
+ register: result
+ - assert:
+ that:
+ - create_geo_country is changed
+ - create_geo_country is not failed
+ - '"route53:ChangeResourceRecordSets" in create_geo_country.resource_actions'
+ - result.ResourceRecordSets[0].GeoLocation.CountryCode == "US"
+
+ - name: Create a record with geo_location - country_code (idempotency)
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: geo-test-2.{{ zone_one }}
+ identifier: geohost2@www
+ type: A
+ value: 127.0.0.1
+ ttl: 30
+ geo_location:
+ country_code: US
+ register: create_geo_country_idem
+ - assert:
+ that:
+ - create_geo_country_idem is not changed
+ - create_geo_country_idem is not failed
+ - '"route53:ChangeResourceRecordSets" not in create_geo_country_idem.resource_actions'
+
+ - name: Create a record with geo_location - country_code (idempotency - check_mode)
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: geo-test-2.{{ zone_one }}
+ identifier: geohost2@www
+ type: A
+ value: 127.0.0.1
+ ttl: 30
+ geo_location:
+ country_code: US
+ check_mode: true
+ register: create_geo_country_idem_check
+
+ - assert:
+ that:
+ - create_geo_country_idem_check is not changed
+ - create_geo_country_idem_check is not failed
+ - '"route53:ChangeResourceRecordSets" not in create_geo_country_idem_check.resource_actions'
+
+# Test Geo Location - Subdivision Code
+ - name: Create a record with geo_location - subdivision_code (check_mode)
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: geo-test-3.{{ zone_one }}
+ identifier: geohost3@www
+ type: A
+ value: 127.0.0.1
+ ttl: 30
+ geo_location:
+ country_code: US
+ subdivision_code: TX
+ check_mode: true
+ register: create_geo_subdivision_check_mode
+ - assert:
+ that:
+ - create_geo_subdivision_check_mode is changed
+ - create_geo_subdivision_check_mode is not failed
+ - '"route53:ChangeResourceRecordSets" not in create_geo_subdivision_check_mode.resource_actions'
+
+ - name: Create a record with geo_location - subdivision_code
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: geo-test-3.{{ zone_one }}
+ identifier: geohost3@www
+ type: A
+ value: 127.0.0.1
+ ttl: 30
+ geo_location:
+ country_code: US
+ subdivision_code: TX
+ register: create_geo_subdivision
+ # Get resulting A record and ensure geo_location parameters are applied
+ - name: get Route53 A record information
+ route53_info:
+ type: A
+ query: record_sets
+ hosted_zone_id: '{{ z1.zone_id }}'
+ start_record_name: geo-test-3.{{ zone_one }}
+ max_items: 1
+ register: result
+ - assert:
+ that:
+ - create_geo_subdivision is changed
+ - create_geo_subdivision is not failed
+ - '"route53:ChangeResourceRecordSets" in create_geo_subdivision.resource_actions'
+ - result.ResourceRecordSets[0].GeoLocation.CountryCode == "US"
+ - result.ResourceRecordSets[0].GeoLocation.SubdivisionCode == "TX"
+
+ - name: Create a record with geo_location - subdivision_code (idempotency)
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: geo-test-3.{{ zone_one }}
+ identifier: geohost3@www
+ type: A
+ value: 127.0.0.1
+ ttl: 30
+ geo_location:
+ country_code: US
+ subdivision_code: TX
+ register: create_geo_subdivision_idem
+ - assert:
+ that:
+ - create_geo_subdivision_idem is not changed
+ - create_geo_subdivision_idem is not failed
+ - '"route53:ChangeResourceRecordSets" not in create_geo_subdivision_idem.resource_actions'
+
+ - name: Create a record with geo_location - subdivision_code (idempotency - check_mode)
+ route53:
+ state: present
+ zone: '{{ zone_one }}'
+ record: geo-test-3.{{ zone_one }}
+ identifier: geohost3@www
+ type: A
+ value: 127.0.0.1
+ ttl: 30
+ geo_location:
+ country_code: US
+ subdivision_code: TX
+ check_mode: true
+ register: create_geo_subdivision_idem_check
+
+ - assert:
+ that:
+ - create_geo_subdivision_idem_check is not changed
+ - create_geo_subdivision_idem_check is not failed
+ - '"route53:ChangeResourceRecordSets" not in create_geo_subdivision_idem_check.resource_actions'
+
+# Cleanup -----------------------------------------------------
+
+ always:
+
+ - name: delete a record with geo_location - continent_code
+ route53:
+ state: absent
+ zone: '{{ zone_one }}'
+ record: geo-test-1.{{ zone_one }}
+ identifier: geohost1@www
+ type: A
+ value: 127.0.0.1
+ ttl: 30
+ geo_location:
+ continent_code: NA
+ ignore_errors: true
+
+ - name: delete a record with geo_location - country_code
+ route53:
+ state: absent
+ zone: '{{ zone_one }}'
+ record: geo-test-2.{{ zone_one }}
+ identifier: geohost2@www
+ type: A
+ value: 127.0.0.1
+ ttl: 30
+ geo_location:
+ country_code: US
+ ignore_errors: true
+
+ - name: delete a record with geo_location - subdivision_code
+ route53:
+ state: absent
+ zone: '{{ zone_one }}'
+ record: geo-test-3.{{ zone_one }}
+ identifier: geohost3@www
+ type: A
+ value: 127.0.0.1
+ ttl: 30
+ geo_location:
+ country_code: US
+ subdivision_code: TX
+ ignore_errors: true
+
+ - route53_info:
+ query: record_sets
+ hosted_zone_id: '{{ z1.zone_id }}'
+ register: z1_records
+
+ - name: Loop over A/AAAA/CNAME/CAA alias records and delete them
+ route53:
+ state: absent
+ alias: true
+ alias_hosted_zone_id: '{{ item.AliasTarget.HostedZoneId }}'
+ zone: '{{ zone_one }}'
+ record: '{{ item.Name }}'
+ type: '{{ item.Type }}'
+ value: '{{ item.AliasTarget.DNSName }}'
+ ignore_errors: true
+ loop: '{{ z1_records.ResourceRecordSets | selectattr("Type", "in", ["A", "AAAA",
+ "CNAME", "CAA"]) | list }}'
+ when:
+ - '"AliasTarget" in item'
+
+ - name: Loop over A/AAAA/CNAME/CAA records with a set identifier and delete them
+ route53:
+ state: absent
+ zone: '{{ zone_one }}'
+ record: '{{ item.Name }}'
+ type: '{{ item.Type }}'
+ value: '{{ item.ResourceRecords | map(attribute="Value") | join(",") }}'
+ weight: '{{ item.Weight | default(omit) }}'
+ identifier: '{{ item.SetIdentifier }}'
+ region: '{{ omit }}'
+ ignore_errors: true
+ loop: '{{ z1_records.ResourceRecordSets | selectattr("Type", "in", ["A", "AAAA",
+ "CNAME", "CAA"]) | list }}'
+ when:
+ - '"ResourceRecords" in item'
+ - '"SetIdentifier" in item'
+
+ - name: Loop over remaining A/AAAA/CNAME/CAA records and delete them
+ route53:
+ state: absent
+ zone: '{{ zone_one }}'
+ record: '{{ item.Name }}'
+ type: '{{ item.Type }}'
+ value: '{{ item.ResourceRecords | map(attribute="Value") | join(",") }}'
+ ignore_errors: true
+ loop: '{{ z1_records.ResourceRecordSets | selectattr("Type", "in", ["A", "AAAA",
+ "CNAME", "CAA"]) | list }}'
+ when:
+ - '"ResourceRecords" in item'
+
+ - route53_info:
+ query: record_sets
+ hosted_zone_id: '{{ z2.zone_id }}'
+ register: z2_records
+
+ - name: Loop over A/AAAA/CNAME/CAA alias records and delete them
+ route53:
+ state: absent
+ alias: true
+ alias_hosted_zone_id: '{{ item.AliasTarget.HostedZoneId }}'
+ zone: '{{ zone_two }}'
+ record: '{{ item.Name }}'
+ type: '{{ item.Type }}'
+ value: '{{ item.AliasTarget.DNSName }}'
+ private_zone: true
+ ignore_errors: true
+ loop: '{{ z2_records.ResourceRecordSets | selectattr("Type", "in", ["A", "AAAA",
+ "CNAME", "CAA"]) | list }}'
+ when:
+ - '"AliasTarget" in item'
+
+ - name: Loop over A/AAAA/CNAME/CAA records with a set identifier and delete them
+ route53:
+ state: absent
+ zone: '{{ zone_two }}'
+ record: '{{ item.Name }}'
+ type: '{{ item.Type }}'
+ value: '{{ item.ResourceRecords | map(attribute="Value") | join(",") }}'
+ identifier: '{{ item.SetIdentifier }}'
+ region: '{{ omit }}'
+ private_zone: true
+ ignore_errors: true
+ loop: '{{ z2_records.ResourceRecordSets | selectattr("Type", "in", ["A", "AAAA",
+ "CNAME", "CAA"]) | list }}'
+ when:
+ - '"ResourceRecords" in item'
+ - '"SetIdentifier" in item'
+
+ - name: Loop over remaining A/AAAA/CNAME/CAA records and delete them
+ route53:
+ state: absent
+ zone: '{{ zone_two }}'
+ record: '{{ item.Name }}'
+ type: '{{ item.Type }}'
+ value: '{{ item.ResourceRecords | map(attribute="Value") | join(",") }}'
+ private_zone: true
+ ignore_errors: true
+ loop: '{{ z2_records.ResourceRecordSets | selectattr("Type", "in", ["A", "AAAA",
+ "CNAME", "CAA"]) | list }}'
+ when:
+ - '"ResourceRecords" in item'
+
+ - name: Delete test zone one {{ zone_one }}
+ route53_zone:
+ state: absent
+ zone: '{{ zone_one }}'
+ register: delete_one
+ ignore_errors: true
+ retries: 10
+ until: delete_one is not failed
+
+ - name: Delete test zone two {{ zone_two }}
+ route53_zone:
+ state: absent
+ zone: '{{ zone_two }}'
+ register: delete_two
+ ignore_errors: true
+ retries: 10
+ until: delete_two is not failed
+
+ - name: destroy VPC
+ ec2_vpc_net:
+ cidr_block: 192.0.2.0/24
+ name: '{{ resource_prefix }}_vpc'
+ state: absent
+ register: remove_vpc
+ retries: 10
+ delay: 5
+ until: remove_vpc is success
+ ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53/vars/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/route53/vars/main.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/route53/vars/main.yml
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/aliases b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/aliases
new file mode 100644
index 000000000..4ef4b2067
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/aliases
@@ -0,0 +1 @@
+cloud/aws
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/defaults/main.yml
new file mode 100644
index 000000000..769e5079d
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/defaults/main.yml
@@ -0,0 +1,36 @@
+---
+# route53_health_check integration tests
+#
+# Module uses the following as an 'ID'
+# (the real ID is automatically assigned after creation)
+# - ip_address
+# - fqdn
+# - port
+# - type
+# - request_interval
+
+# ip_address: we allocate an EIP due to Route53 restrictions
+fqdn: '{{ tiny_prefix }}.route53-health.ansible.test'
+fqdn_1: '{{ tiny_prefix }}-1.route53-health.ansible.test'
+port: 8080
+type: 'TCP'
+request_interval: 30
+
+# modifiable
+# - resource_path
+# - string_match
+# - failure_threshold
+
+failure_threshold: 5
+failure_threshold_updated: 1
+
+# For resource_path we need an HTTP/HTTPS type check
+# for string_match we need an _STR_MATCH type
+type_https_match: 'HTTPS_STR_MATCH'
+type_http_match: 'HTTP_STR_MATCH'
+type_http: 'HTTP'
+resource_path: '/health.php'
+resource_path_1: '/new-health.php'
+resource_path_updated: '/healthz'
+string_match: 'Hello'
+string_match_updated: 'Hello World'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/meta/main.yml
new file mode 100644
index 000000000..1471b11f6
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_ec2_facts
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/create_multiple_health_checks.yml b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/create_multiple_health_checks.yml
new file mode 100644
index 000000000..42bdb6562
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/create_multiple_health_checks.yml
@@ -0,0 +1,134 @@
+---
+- block:
+ - name: 'Create multiple HTTP health checks with different resource_path - check_mode'
+ route53_health_check:
+ state: present
+ name: '{{ tiny_prefix }}-{{ item }}-test-hc-delete-if-found'
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_http }}'
+ resource_path: '{{ item }}'
+ use_unique_names: true
+ register: create_check
+ check_mode: true
+ with_items:
+ - '{{ resource_path }}'
+ - '{{ resource_path_1 }}'
+
+ - name: 'Check result - Create multiple HTTP health checks - check_mode'
+ assert:
+ that:
+ - create_check is not failed
+ - create_check is changed
+ - '"route53:CreateHealthCheck" not in create_check.results[0].resource_actions'
+ - '"route53:CreateHealthCheck" not in create_check.results[1].resource_actions'
+
+ - name: 'Create multiple HTTP health checks with different resource_path'
+ route53_health_check:
+ state: present
+ name: '{{ tiny_prefix }}-{{ item }}-test-hc-delete-if-found'
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_http }}'
+ resource_path: '{{ item }}'
+ use_unique_names: true
+ register: create_result
+ with_items:
+ - '{{ resource_path }}'
+ - '{{ resource_path_1 }}'
+
+ - name: Get IDs for the health checks created in the above task
+ set_fact:
+ health_check_1_id: "{{ create_result.results[0].health_check.id }}"
+ health_check_2_id: "{{ create_result.results[1].health_check.id }}"
+
+ - name: Get health_check 1 info
+ amazon.aws.route53_info:
+ query: health_check
+ health_check_id: "{{ health_check_1_id }}"
+ health_check_method: details
+ register: health_check_1_info
+
+ - name: Get health_check 2 info
+ amazon.aws.route53_info:
+ query: health_check
+ health_check_id: "{{ health_check_2_id }}"
+ health_check_method: details
+ register: health_check_2_info
+
+ - name: 'Check result - Create multiple HTTP health checks'
+ assert:
+ that:
+ - create_result is not failed
+ - create_result is changed
+ - '"route53:UpdateHealthCheck" not in create_result.results[0].resource_actions'
+ - '"route53:UpdateHealthCheck" not in create_result.results[1].resource_actions'
+ - health_check_1_id != health_check_2_id
+ - health_check_1_info.HealthCheck.HealthCheckConfig.ResourcePath == '{{ resource_path }}'
+ - health_check_2_info.HealthCheck.HealthCheckConfig.ResourcePath == '{{ resource_path_1 }}'
+
+ - name: 'Create multiple HTTP health checks with different resource_path - idempotency - check_mode'
+ route53_health_check:
+ state: present
+ name: '{{ tiny_prefix }}-{{ item }}-test-hc-delete-if-found'
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_http }}'
+ resource_path: '{{ item }}'
+ use_unique_names: true
+ register: create_idem_check
+ check_mode: true
+ with_items:
+ - '{{ resource_path }}'
+ - '{{ resource_path_1 }}'
+
+ - name: 'Check result - Create multiple HTTP health checks - idempotency - check_mode'
+ assert:
+ that:
+ - create_idem_check is not failed
+ - create_idem_check is not changed
+ - '"route53:CreateHealthCheck" not in create_idem_check.results[0].resource_actions'
+ - '"route53:CreateHealthCheck" not in create_idem_check.results[1].resource_actions'
+ - '"route53:UpdateHealthCheck" not in create_idem_check.results[0].resource_actions'
+ - '"route53:UpdateHealthCheck" not in create_idem_check.results[1].resource_actions'
+
+ - name: 'Create multiple HTTP health checks with different resource_path - idempotency'
+ route53_health_check:
+ state: present
+ name: '{{ tiny_prefix }}-{{ item }}-test-hc-delete-if-found'
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_http }}'
+ resource_path: '{{ item }}'
+ use_unique_names: true
+ register: create_idem
+ with_items:
+ - '{{ resource_path }}'
+ - '{{ resource_path_1 }}'
+
+ - name: 'Check result - Create multiple HTTP health checks - idempotency'
+ assert:
+ that:
+ - create_idem is not failed
+ - create_idem is not changed
+ - '"route53:CreateHealthCheck" not in create_idem.results[0].resource_actions'
+ - '"route53:CreateHealthCheck" not in create_idem.results[1].resource_actions'
+ - '"route53:UpdateHealthCheck" not in create_idem.results[0].resource_actions'
+ - '"route53:UpdateHealthCheck" not in create_idem.results[1].resource_actions'
+
+ always:
+ # Cleanup starts here
+ - name: 'Delete multiple HTTP health checks with different resource_path'
+ route53_health_check:
+ state: absent
+ name: '{{ tiny_prefix }}-{{ item }}-test-hc-delete-if-found'
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_http }}'
+ resource_path: '{{ item }}'
+ use_unique_names: true
+ register: delete_result
+ with_items:
+ - '{{ resource_path }}'
+ - '{{ resource_path_1 }}'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/main.yml
new file mode 100644
index 000000000..1b1ecd805
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/main.yml
@@ -0,0 +1,1822 @@
+---
+# route53_health_check integration tests
+#
+# Module uses the following as an 'ID'
+# (the real ID is automatically assigned after creation)
+# - ip_address
+# - fqdn
+# - port
+# - type (immutable)
+# - request_interval (immutable)
+#
+# modifiable
+# - resource_path
+# - string_match
+# - failure_threshold
+# - disabled
+#
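+# Illustrative sketch only (not part of the original test flow): the task below
+# shows how the identity fields listed above combine into a single
+# route53_health_check call. The 192.0.2.10 address is an assumed documentation
+# placeholder (TEST-NET-1), and the task is skipped unconditionally so it never
+# touches AWS during the integration run.
+- name: Example (never executed) - health check identified by ip_address/port/type/request_interval
+  route53_health_check:
+    state: present
+    ip_address: 192.0.2.10
+    port: 8080
+    type: TCP
+    request_interval: 30
+    failure_threshold: 3
+  when: false
+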
+- module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+ # Route53 can only test against routable IPs. Request an EIP so some poor
+ # soul doesn't get randomly hit by our testing.
+ - name: Allocate an EIP we can test against
+ ec2_eip:
+ state: present
+ register: eip
+
+ - set_fact:
+ ip_address: '{{ eip.public_ip }}'
+
+ - name: Run tests for creating multiple health checks with name as unique identifier
+ include_tasks: create_multiple_health_checks.yml
+
+ - name: Run tests for update and delete health check by ID
+ include_tasks: update_delete_by_id.yml
+
+ # Minimum possible definition
+ - name: 'Create a TCP health check - check_mode'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type }}'
+ register: create_check
+ check_mode: true
+
+ - name: 'Check result - Create a TCP health check - check_mode'
+ assert:
+ that:
+ - create_check is successful
+ - create_check is changed
+
+ - name: 'Create a TCP health check'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type }}'
+ register: create_check
+
+ - name: 'Check result - Create a TCP health check'
+ assert:
+ that:
+ - create_check is successful
+ - create_check is changed
+ - '"health_check" in create_check'
+ - '"id" in _health_check'
+ - '"action" in _health_check'
+ - '"health_check_version" in _health_check'
+ - '"tags" in _health_check'
+ - create_check.health_check.action == 'create'
+ - '"health_check_config" in create_check.health_check'
+ - '"type" in _check_config'
+ - '"disabled" in _check_config'
+ - '"failure_threshold" in _check_config'
+ - '"request_interval" in _check_config'
+ - '"fully_qualified_domain_name" not in _check_config'
+ - '"ip_address" in _check_config'
+ - '"port" in _check_config'
+ - '"resource_path" not in _check_config'
+ - '"search_string" not in _check_config'
+ - _check_config.disabled == false
+ - _check_config.type == 'TCP'
+ - _check_config.failure_threshold == 3
+ - _check_config.request_interval == 30
+ - _check_config.ip_address == ip_address
+ - _check_config.port == port
+ vars:
+ _health_check: '{{ create_check.health_check }}'
+ _check_config: '{{ _health_check.health_check_config }}'
+
+ - set_fact:
+ tcp_check_id: '{{ create_check.health_check.id }}'
+
+ - name: 'Create a TCP health check - idempotency - check_mode'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type }}'
+ register: create_check
+ check_mode: true
+
+ - name: 'Check result - Create a TCP health check - idempotency - check_mode'
+ assert:
+ that:
+ - create_check is successful
+ - create_check is not changed
+
+ - name: 'Create a TCP health check - idempotency'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type }}'
+ register: create_check
+
+ - name: 'Check result - Create a TCP health check - idempotency'
+ assert:
+ that:
+ - create_check is successful
+ - create_check is not changed
+ - '"health_check" in create_check'
+ - '"id" in create_check.health_check'
+ - _health_check.id == tcp_check_id
+ - '"id" in _health_check'
+ - '"action" in _health_check'
+ - '"health_check_version" in _health_check'
+ - '"tags" in _health_check'
+ - create_check.health_check.action is none
+ - '"health_check_config" in create_check.health_check'
+ - '"type" in _check_config'
+ - '"disabled" in _check_config'
+ - '"failure_threshold" in _check_config'
+ - '"request_interval" in _check_config'
+ - '"fully_qualified_domain_name" not in _check_config'
+ - '"ip_address" in _check_config'
+ - '"port" in _check_config'
+ - '"resource_path" not in _check_config'
+ - '"search_string" not in _check_config'
+ - _check_config.disabled == false
+ - _check_config.type == 'TCP'
+ - _check_config.request_interval == 30
+ - _check_config.failure_threshold == 3
+ - _check_config.ip_address == ip_address
+ - _check_config.port == port
+ vars:
+ _health_check: '{{ create_check.health_check }}'
+ _check_config: '{{ _health_check.health_check_config }}'
+
+ # Update an attribute
+ - name: 'Update TCP health check - set threshold - check_mode'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type }}'
+ failure_threshold: '{{ failure_threshold_updated }}'
+ register: update_threshold
+ check_mode: true
+
+ - name: 'Check result - Update TCP health check - set threshold - check_mode'
+ assert:
+ that:
+ - update_threshold is successful
+ - update_threshold is changed
+
+ - name: 'Update TCP health check - set threshold'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type }}'
+ failure_threshold: '{{ failure_threshold_updated }}'
+ register: update_threshold
+
+ - name: 'Check result - Update TCP health check - set threshold'
+ assert:
+ that:
+ - update_threshold is successful
+ - update_threshold is changed
+ - '"health_check" in update_threshold'
+ - '"id" in _health_check'
+ - _health_check.id == tcp_check_id
+ - '"action" in _health_check'
+ - '"health_check_version" in _health_check'
+ - '"tags" in _health_check'
+ - create_check.health_check.action is none
+ - '"health_check_config" in create_check.health_check'
+ - '"type" in _check_config'
+ - '"disabled" in _check_config'
+ - '"failure_threshold" in _check_config'
+ - '"request_interval" in _check_config'
+ - '"fully_qualified_domain_name" not in _check_config'
+ - '"ip_address" in _check_config'
+ - '"port" in _check_config'
+ - '"resource_path" not in _check_config'
+ - '"search_string" not in _check_config'
+ - _check_config.disabled == false
+ - _check_config.type == 'TCP'
+ - _check_config.request_interval == 30
+ - _check_config.failure_threshold == failure_threshold_updated
+ - _check_config.ip_address == ip_address
+ - _check_config.port == port
+ vars:
+ _health_check: '{{ update_threshold.health_check }}'
+ _check_config: '{{ _health_check.health_check_config }}'
+
+ - name: 'Update TCP health check - set threshold - idempotency - check_mode'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type }}'
+ failure_threshold: '{{ failure_threshold_updated }}'
+ register: update_threshold
+ check_mode: true
+
+ - name: 'Check result - Update TCP health check - set threshold - idempotency - check_mode'
+ assert:
+ that:
+ - update_threshold is successful
+ - update_threshold is not changed
+
+ - name: 'Update TCP health check - set threshold - idempotency'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type }}'
+ failure_threshold: '{{ failure_threshold_updated }}'
+ register: update_threshold
+
+ - name: 'Check result - Update TCP health check - set threshold - idempotency'
+ assert:
+ that:
+ - update_threshold is successful
+ - update_threshold is not changed
+ - '"health_check" in update_threshold'
+ - '"id" in _health_check'
+ - _health_check.id == tcp_check_id
+ - '"action" in _health_check'
+ - '"health_check_version" in _health_check'
+ - '"tags" in _health_check'
+ - create_check.health_check.action is none
+ - '"health_check_config" in create_check.health_check'
+ - '"type" in _check_config'
+ - '"disabled" in _check_config'
+ - '"failure_threshold" in _check_config'
+ - '"request_interval" in _check_config'
+ - '"fully_qualified_domain_name" not in _check_config'
+ - '"ip_address" in _check_config'
+ - '"port" in _check_config'
+ - '"resource_path" not in _check_config'
+ - '"search_string" not in _check_config'
+ - _check_config.disabled == false
+ - _check_config.type == 'TCP'
+ - _check_config.request_interval == 30
+ - _check_config.failure_threshold == failure_threshold_updated
+ - _check_config.ip_address == ip_address
+ - _check_config.port == port
+ vars:
+ _health_check: '{{ update_threshold.health_check }}'
+ _check_config: '{{ _health_check.health_check_config }}'
+
+ - name: 'Update TCP health check - set disabled - check_mode'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type }}'
+ disabled: true
+ register: update_disabled
+ check_mode: true
+
+ - name: 'Check result - Update TCP health check - set disabled - check_mode'
+ assert:
+ that:
+ - update_disabled is successful
+ - update_disabled is changed
+
+ - name: 'Update TCP health check - set disabled'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type }}'
+ disabled: true
+ register: update_disabled
+
+ - name: 'Check result - Update TCP health check - set disabled'
+ assert:
+ that:
+ - update_disabled is successful
+ - update_disabled is changed
+ - '"health_check" in update_disabled'
+ - '"id" in _health_check'
+ - _health_check.id == tcp_check_id
+ - '"action" in _health_check'
+ - '"health_check_version" in _health_check'
+ - '"tags" in _health_check'
+ - create_check.health_check.action is none
+ - '"health_check_config" in create_check.health_check'
+ - '"type" in _check_config'
+ - '"disabled" in _check_config'
+ - '"failure_threshold" in _check_config'
+ - '"request_interval" in _check_config'
+ - '"fully_qualified_domain_name" not in _check_config'
+ - '"ip_address" in _check_config'
+ - '"port" in _check_config'
+ - '"resource_path" not in _check_config'
+ - '"search_string" not in _check_config'
+ - _check_config.disabled == true
+ - _check_config.type == 'TCP'
+ - _check_config.request_interval == 30
+ - _check_config.failure_threshold == failure_threshold_updated
+ - _check_config.ip_address == ip_address
+ - _check_config.port == port
+ vars:
+ _health_check: '{{ update_disabled.health_check }}'
+ _check_config: '{{ _health_check.health_check_config }}'
+
+ - name: 'Update TCP health check - set disabled - idempotency - check_mode'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type }}'
+ disabled: true
+ register: update_disabled
+ check_mode: true
+
+ - name: 'Check result - Update TCP health check - set disabled - idempotency - check_mode'
+ assert:
+ that:
+ - update_disabled is successful
+ - update_disabled is not changed
+
+ - name: 'Update TCP health check - set disabled - idempotency'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type }}'
+ disabled: true
+ register: update_disabled
+
+ - name: 'Check result - Update TCP health check - set disabled - idempotency'
+ assert:
+ that:
+ - update_disabled is successful
+ - update_disabled is not changed
+ - '"health_check" in update_disabled'
+ - '"id" in _health_check'
+ - _health_check.id == tcp_check_id
+ - '"action" in _health_check'
+ - '"health_check_version" in _health_check'
+ - '"tags" in _health_check'
+ - create_check.health_check.action is none
+ - '"health_check_config" in create_check.health_check'
+ - '"type" in _check_config'
+ - '"disabled" in _check_config'
+ - '"failure_threshold" in _check_config'
+ - '"request_interval" in _check_config'
+ - '"fully_qualified_domain_name" not in _check_config'
+ - '"ip_address" in _check_config'
+ - '"port" in _check_config'
+ - '"resource_path" not in _check_config'
+ - '"search_string" not in _check_config'
+ - _check_config.disabled == true
+ - _check_config.type == 'TCP'
+ - _check_config.request_interval == 30
+ - _check_config.failure_threshold == failure_threshold_updated
+ - _check_config.ip_address == ip_address
+ - _check_config.port == port
+ vars:
+ _health_check: '{{ update_disabled.health_check }}'
+ _check_config: '{{ _health_check.health_check_config }}'
+
+ - name: 'Update TCP health check - set tags - check_mode'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type }}'
+ tags:
+ CamelCase: CamelCaseValue
+ snake_case: snake_case_value
+ "with space": Some value
+ purge_tags: false
+ register: update_tags
+ check_mode: true
+
+ - name: 'Check result - Update TCP health check - set tags - check_mode'
+ assert:
+ that:
+ - update_tags is successful
+ - update_tags is changed
+
+ - name: 'Update TCP health check - set tags'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type }}'
+ tags:
+ CamelCase: CamelCaseValue
+ snake_case: snake_case_value
+ "with space": Some value
+ purge_tags: false
+ register: update_tags
+
+ - name: 'Check result - Update TCP health check - set tags'
+ assert:
+ that:
+ - update_tags is successful
+ - update_tags is changed
+ - '"health_check" in update_tags'
+ - '"id" in _health_check'
+ - _health_check.id == tcp_check_id
+ - '"action" in _health_check'
+ - '"health_check_version" in _health_check'
+ - '"tags" in _health_check'
+ - '"CamelCase" in _health_check.tags'
+ - _health_check.tags['CamelCase'] == 'CamelCaseValue'
+ - '"snake_case" in _health_check.tags'
+ - _health_check.tags['snake_case'] == 'snake_case_value'
+ - '"with space" in _health_check.tags'
+ - _health_check.tags['with space'] == 'Some value'
+ - create_check.health_check.action is none
+ - '"health_check_config" in create_check.health_check'
+ - '"type" in _check_config'
+ - '"disabled" in _check_config'
+ - '"failure_threshold" in _check_config'
+ - '"request_interval" in _check_config'
+ - '"fully_qualified_domain_name" not in _check_config'
+ - '"ip_address" in _check_config'
+ - '"port" in _check_config'
+ - '"resource_path" not in _check_config'
+ - '"search_string" not in _check_config'
+ - _check_config.disabled == true
+ - _check_config.type == 'TCP'
+ - _check_config.request_interval == 30
+ - _check_config.failure_threshold == failure_threshold_updated
+ - _check_config.ip_address == ip_address
+ - _check_config.port == port
+ vars:
+ _health_check: '{{ update_tags.health_check }}'
+ _check_config: '{{ _health_check.health_check_config }}'
+
+ - name: 'Update TCP health check - set tags - idempotency - check_mode'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type }}'
+ tags:
+ CamelCase: CamelCaseValue
+ snake_case: snake_case_value
+ "with space": Some value
+ purge_tags: false
+ register: update_tags
+ check_mode: true
+
+ - name: 'Check result - Update TCP health check - set tags - idempotency - check_mode'
+ assert:
+ that:
+ - update_tags is successful
+ - update_tags is not changed
+
+ - name: 'Update TCP health check - set tags - idempotency'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type }}'
+ tags:
+ CamelCase: CamelCaseValue
+ snake_case: snake_case_value
+ "with space": Some value
+ purge_tags: false
+ register: update_tags
+
+ - name: 'Check result - Update TCP health check - set tags - idempotency'
+ assert:
+ that:
+ - update_tags is successful
+ - update_tags is not changed
+ - '"health_check" in update_tags'
+ - '"id" in _health_check'
+ - _health_check.id == tcp_check_id
+ - '"action" in _health_check'
+ - '"health_check_version" in _health_check'
+ - '"tags" in _health_check'
+ - '"CamelCase" in _health_check.tags'
+ - _health_check.tags['CamelCase'] == 'CamelCaseValue'
+ - '"snake_case" in _health_check.tags'
+ - _health_check.tags['snake_case'] == 'snake_case_value'
+ - '"with space" in _health_check.tags'
+ - _health_check.tags['with space'] == 'Some value'
+ - create_check.health_check.action is none
+ - '"health_check_config" in create_check.health_check'
+ - '"type" in _check_config'
+ - '"disabled" in _check_config'
+ - '"failure_threshold" in _check_config'
+ - '"request_interval" in _check_config'
+ - '"fully_qualified_domain_name" not in _check_config'
+ - '"ip_address" in _check_config'
+ - '"port" in _check_config'
+ - '"resource_path" not in _check_config'
+ - '"search_string" not in _check_config'
+ - _check_config.disabled == true
+ - _check_config.type == 'TCP'
+ - _check_config.request_interval == 30
+ - _check_config.failure_threshold == failure_threshold_updated
+ - _check_config.ip_address == ip_address
+ - _check_config.port == port
+ vars:
+ _health_check: '{{ update_tags.health_check }}'
+ _check_config: '{{ _health_check.health_check_config }}'
+
+ - name: 'Update TCP health check - add tags - check_mode'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type }}'
+ tags:
+ anotherTag: anotherValue
+ purge_tags: false
+ register: add_tags
+ check_mode: true
+
+ - name: 'Check result - Update TCP health check - add tags - check_mode'
+ assert:
+ that:
+ - add_tags is successful
+ - add_tags is changed
+
+ - name: 'Update TCP health check - add tags'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type }}'
+ tags:
+ anotherTag: anotherValue
+ purge_tags: false
+ register: add_tags
+
+ - name: 'Check result - Update TCP health check - add tags'
+ assert:
+ that:
+ - add_tags is successful
+ - add_tags is changed
+ - '"health_check" in add_tags'
+ - '"id" in _health_check'
+ - _health_check.id == tcp_check_id
+ - '"action" in _health_check'
+ - '"health_check_version" in _health_check'
+ - '"tags" in _health_check'
+ - '"CamelCase" in _health_check.tags'
+ - _health_check.tags['CamelCase'] == 'CamelCaseValue'
+ - '"snake_case" in _health_check.tags'
+ - _health_check.tags['snake_case'] == 'snake_case_value'
+ - '"with space" in _health_check.tags'
+ - _health_check.tags['with space'] == 'Some value'
+ - '"anotherTag" in _health_check.tags'
+ - _health_check.tags['anotherTag'] == 'anotherValue'
+ - create_check.health_check.action is none
+ - '"health_check_config" in create_check.health_check'
+ - '"type" in _check_config'
+ - '"disabled" in _check_config'
+ - '"failure_threshold" in _check_config'
+ - '"request_interval" in _check_config'
+ - '"fully_qualified_domain_name" not in _check_config'
+ - '"ip_address" in _check_config'
+ - '"port" in _check_config'
+ - '"resource_path" not in _check_config'
+ - '"search_string" not in _check_config'
+ - _check_config.disabled == true
+ - _check_config.type == 'TCP'
+ - _check_config.request_interval == 30
+ - _check_config.failure_threshold == failure_threshold_updated
+ - _check_config.ip_address == ip_address
+ - _check_config.port == port
+ vars:
+ _health_check: '{{ add_tags.health_check }}'
+ _check_config: '{{ _health_check.health_check_config }}'
+
+ - name: 'Update TCP health check - add tags - idempotency - check_mode'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type }}'
+ tags:
+ anotherTag: anotherValue
+ purge_tags: false
+ register: add_tags
+ check_mode: true
+
+ - name: 'Check result - Update TCP health check - add tags - idempotency - check_mode'
+ assert:
+ that:
+ - add_tags is successful
+ - add_tags is not changed
+
+ - name: 'Update TCP health check - add tags - idempotency'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type }}'
+ tags:
+ anotherTag: anotherValue
+ purge_tags: false
+ register: add_tags
+
+ - name: 'Check result - Update TCP health check - add tags - idempotency'
+ assert:
+ that:
+ - add_tags is successful
+ - add_tags is not changed
+ - '"health_check" in add_tags'
+ - '"id" in _health_check'
+ - _health_check.id == tcp_check_id
+ - '"action" in _health_check'
+ - '"health_check_version" in _health_check'
+ - '"tags" in _health_check'
+ - '"CamelCase" in _health_check.tags'
+ - _health_check.tags['CamelCase'] == 'CamelCaseValue'
+ - '"snake_case" in _health_check.tags'
+ - _health_check.tags['snake_case'] == 'snake_case_value'
+ - '"with space" in _health_check.tags'
+ - _health_check.tags['with space'] == 'Some value'
+ - '"anotherTag" in _health_check.tags'
+ - _health_check.tags['anotherTag'] == 'anotherValue'
+ - create_check.health_check.action is none
+ - '"health_check_config" in create_check.health_check'
+ - '"type" in _check_config'
+ - '"disabled" in _check_config'
+ - '"failure_threshold" in _check_config'
+ - '"request_interval" in _check_config'
+ - '"fully_qualified_domain_name" not in _check_config'
+ - '"ip_address" in _check_config'
+ - '"port" in _check_config'
+ - '"resource_path" not in _check_config'
+ - '"search_string" not in _check_config'
+ - _check_config.disabled == true
+ - _check_config.type == 'TCP'
+ - _check_config.request_interval == 30
+ - _check_config.failure_threshold == failure_threshold_updated
+ - _check_config.ip_address == ip_address
+ - _check_config.port == port
+ vars:
+ _health_check: '{{ add_tags.health_check }}'
+ _check_config: '{{ _health_check.health_check_config }}'
+
+ - name: 'Update TCP health check - purge tags - check_mode'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type }}'
+ tags:
+ anotherTag: anotherValue
+ purge_tags: true
+ register: purge_tags
+ check_mode: true
+
+ - name: 'Check result - Update TCP health check - purge tags - check_mode'
+ assert:
+ that:
+ - purge_tags is successful
+ - purge_tags is changed
+
+ - name: 'Update TCP health check - purge tags'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type }}'
+ tags:
+ anotherTag: anotherValue
+ purge_tags: true
+ register: purge_tags
+
+ - name: 'Check result - Update TCP health check - purge tags'
+ assert:
+ that:
+ - purge_tags is successful
+ - purge_tags is changed
+ - '"health_check" in purge_tags'
+ - '"id" in _health_check'
+ - _health_check.id == tcp_check_id
+ - '"action" in _health_check'
+ - '"health_check_version" in _health_check'
+ - '"tags" in _health_check'
+ - '"CamelCase" not in _health_check.tags'
+ - '"snake_case" not in _health_check.tags'
+ - '"with space" not in _health_check.tags'
+ - '"anotherTag" in _health_check.tags'
+ - _health_check.tags['anotherTag'] == 'anotherValue'
+ - create_check.health_check.action is none
+ - '"health_check_config" in create_check.health_check'
+ - '"type" in _check_config'
+ - '"disabled" in _check_config'
+ - '"failure_threshold" in _check_config'
+ - '"request_interval" in _check_config'
+ - '"fully_qualified_domain_name" not in _check_config'
+ - '"ip_address" in _check_config'
+ - '"port" in _check_config'
+ - '"resource_path" not in _check_config'
+ - '"search_string" not in _check_config'
+ - _check_config.disabled == true
+ - _check_config.type == 'TCP'
+ - _check_config.request_interval == 30
+ - _check_config.failure_threshold == failure_threshold_updated
+ - _check_config.ip_address == ip_address
+ - _check_config.port == port
+ vars:
+ _health_check: '{{ purge_tags.health_check }}'
+ _check_config: '{{ _health_check.health_check_config }}'
+
+ - name: 'Update TCP health check - purge tags - idempotency - check_mode'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type }}'
+ tags:
+ anotherTag: anotherValue
+ purge_tags: true
+ register: purge_tags
+ check_mode: true
+
+ - name: 'Check result - Update TCP health check - purge tags - idempotency - check_mode'
+ assert:
+ that:
+ - purge_tags is successful
+ - purge_tags is not changed
+
+ - name: 'Update TCP health check - purge tags - idempotency'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type }}'
+ tags:
+ anotherTag: anotherValue
+ purge_tags: true
+ register: purge_tags
+
+ - name: 'Check result - Update TCP health check - purge tags - idempotency'
+ assert:
+ that:
+ - purge_tags is successful
+ - purge_tags is not changed
+ - '"health_check" in purge_tags'
+ - '"id" in _health_check'
+ - _health_check.id == tcp_check_id
+ - '"action" in _health_check'
+ - '"health_check_version" in _health_check'
+ - '"tags" in _health_check'
+ - '"CamelCase" not in _health_check.tags'
+ - '"snake_case" not in _health_check.tags'
+ - '"with space" not in _health_check.tags'
+ - '"anotherTag" in _health_check.tags'
+ - _health_check.tags['anotherTag'] == 'anotherValue'
+ - create_check.health_check.action is none
+ - '"health_check_config" in create_check.health_check'
+ - '"type" in _check_config'
+ - '"disabled" in _check_config'
+ - '"failure_threshold" in _check_config'
+ - '"request_interval" in _check_config'
+ - '"fully_qualified_domain_name" not in _check_config'
+ - '"ip_address" in _check_config'
+ - '"port" in _check_config'
+ - '"resource_path" not in _check_config'
+ - '"search_string" not in _check_config'
+ - _check_config.disabled == true
+ - _check_config.type == 'TCP'
+ - _check_config.request_interval == 30
+ - _check_config.failure_threshold == failure_threshold_updated
+ - _check_config.ip_address == ip_address
+ - _check_config.port == port
+ vars:
+ _health_check: '{{ purge_tags.health_check }}'
+ _check_config: '{{ _health_check.health_check_config }}'
+
+ # Delete the check
+ - name: 'Delete TCP health check - check_mode'
+ route53_health_check:
+ state: absent
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type }}'
+ register: delete_tcp
+ check_mode: true
+
+ - name: 'Check result - Delete TCP health check - check_mode'
+ assert:
+ that:
+ - delete_tcp is successful
+ - delete_tcp is changed
+
+ - name: 'Delete TCP health check'
+ route53_health_check:
+ state: absent
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type }}'
+ register: delete_tcp
+
+ - name: 'Check result - Delete TCP health check'
+ assert:
+ that:
+ - delete_tcp is successful
+ - delete_tcp is changed
+
+ - name: 'Delete TCP health check - idempotency - check_mode'
+ route53_health_check:
+ state: absent
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type }}'
+ register: delete_tcp
+ check_mode: true
+
+ - name: 'Check result - Delete TCP health check - idempotency - check_mode'
+ assert:
+ that:
+ - delete_tcp is successful
+ - delete_tcp is not changed
+
+ - name: 'Delete TCP health check - idempotency'
+ route53_health_check:
+ state: absent
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type }}'
+ register: delete_tcp
+
+ - name: 'Check result - Delete TCP health check - idempotency'
+ assert:
+ that:
+ - delete_tcp is successful
+ - delete_tcp is not changed
+
+ # Create an HTTPS_STR_MATCH health check so we can try out more settings
+ - name: 'Create an HTTPS_STR_MATCH health check - check_mode'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_https_match }}'
+ fqdn: '{{ fqdn }}'
+ request_interval: '{{ request_interval }}'
+ string_match: '{{ string_match }}'
+ register: create_match
+ check_mode: true
+
+ - name: 'Check result - Create an HTTPS_STR_MATCH health check - check_mode'
+ assert:
+ that:
+ - create_match is successful
+ - create_match is changed
+
+ - name: 'Create an HTTPS_STR_MATCH health check'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_https_match }}'
+ fqdn: '{{ fqdn }}'
+ request_interval: '{{ request_interval }}'
+ string_match: '{{ string_match }}'
+ register: create_match
+
+ - name: 'Check result - Create an HTTPS_STR_MATCH health check'
+ assert:
+ that:
+ - create_match is successful
+ - create_match is changed
+ - '"health_check" in create_match'
+ - '"id" in _health_check'
+ - _health_check.id != tcp_check_id
+ - '"action" in _health_check'
+ - '"health_check_version" in _health_check'
+ - '"tags" in _health_check'
+ - create_check.health_check.action is none
+ - '"health_check_config" in create_check.health_check'
+ - '"type" in _check_config'
+ - '"disabled" in _check_config'
+ - '"failure_threshold" in _check_config'
+ - '"request_interval" in _check_config'
+ - '"fully_qualified_domain_name" in _check_config'
+ - '"ip_address" in _check_config'
+ - '"port" in _check_config'
+ - '"resource_path" not in _check_config'
+ - '"search_string" in _check_config'
+ - _check_config.disabled == false
+ - _check_config.type == 'HTTPS_STR_MATCH'
+ - _check_config.request_interval == request_interval
+ - _check_config.failure_threshold == 3
+ - _check_config.fully_qualified_domain_name == fqdn
+ - _check_config.ip_address == ip_address
+ - _check_config.port == port
+ - _check_config.search_string == string_match
+ vars:
+ _health_check: '{{ create_match.health_check }}'
+ _check_config: '{{ _health_check.health_check_config }}'
+
+ - set_fact:
+ match_check_id: '{{ create_match.health_check.id }}'
+
+ - name: 'Create an HTTPS_STR_MATCH health check - idempotency - check_mode'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_https_match }}'
+ fqdn: '{{ fqdn }}'
+ request_interval: '{{ request_interval }}'
+ string_match: '{{ string_match }}'
+ register: create_match
+ check_mode: true
+
+ - name: 'Check result - Create an HTTPS_STR_MATCH health check - idempotency - check_mode'
+ assert:
+ that:
+ - create_match is successful
+ - create_match is not changed
+
+ - name: 'Create an HTTPS_STR_MATCH health check - idempotency'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_https_match }}'
+ fqdn: '{{ fqdn }}'
+ request_interval: '{{ request_interval }}'
+ string_match: '{{ string_match }}'
+ register: create_match
+
+ - name: 'Check result - Create an HTTPS_STR_MATCH health check - idempotency'
+ assert:
+ that:
+ - create_match is successful
+ - create_match is not changed
+ - '"health_check" in create_match'
+ - '"id" in _health_check'
+ - _health_check.id == match_check_id
+ - '"action" in _health_check'
+ - '"health_check_version" in _health_check'
+ - '"tags" in _health_check'
+ - create_check.health_check.action is none
+ - '"health_check_config" in create_check.health_check'
+ - '"type" in _check_config'
+ - '"disabled" in _check_config'
+ - '"failure_threshold" in _check_config'
+ - '"request_interval" in _check_config'
+ - '"fully_qualified_domain_name" in _check_config'
+ - '"ip_address" in _check_config'
+ - '"port" in _check_config'
+ - '"resource_path" not in _check_config'
+ - '"search_string" in _check_config'
+ - _check_config.disabled == false
+ - _check_config.type == type_https_match
+ - _check_config.request_interval == request_interval
+ - _check_config.failure_threshold == 3
+ - _check_config.fully_qualified_domain_name == fqdn
+ - _check_config.ip_address == ip_address
+ - _check_config.port == port
+ - _check_config.search_string == string_match
+ vars:
+ _health_check: '{{ create_match.health_check }}'
+ _check_config: '{{ _health_check.health_check_config }}'
+
+ - name: 'Update HTTPS health check - set resource_path - check_mode'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_https_match }}'
+ fqdn: '{{ fqdn }}'
+ request_interval: '{{ request_interval }}'
+ resource_path: '{{ resource_path }}'
+ register: update_resource_path
+ check_mode: true
+
+ - name: 'Check result - Update HTTPS health check - set resource_path - check_mode'
+ assert:
+ that:
+ - update_resource_path is successful
+ - update_resource_path is changed
+
+ - name: 'Update HTTPS health check - set resource_path'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_https_match }}'
+ fqdn: '{{ fqdn }}'
+ request_interval: '{{ request_interval }}'
+ resource_path: '{{ resource_path }}'
+ register: update_resource_path
+
+ - name: 'Check result - Update HTTPS health check - set resource_path'
+ assert:
+ that:
+ - update_resource_path is successful
+ - update_resource_path is changed
+ - '"health_check" in update_resource_path'
+ - '"id" in _health_check'
+ - _health_check.id == match_check_id
+ - '"action" in _health_check'
+ - '"health_check_version" in _health_check'
+ - '"tags" in _health_check'
+ - create_check.health_check.action is none
+ - '"health_check_config" in create_check.health_check'
+ - '"type" in _check_config'
+ - '"disabled" in _check_config'
+ - '"failure_threshold" in _check_config'
+ - '"request_interval" in _check_config'
+ - '"fully_qualified_domain_name" in _check_config'
+ - '"ip_address" in _check_config'
+ - '"port" in _check_config'
+ - '"resource_path" in _check_config'
+ - '"search_string" in _check_config'
+ - _check_config.disabled == false
+ - _check_config.type == type_https_match
+ - _check_config.request_interval == request_interval
+ - _check_config.failure_threshold == 3
+ - _check_config.fully_qualified_domain_name == fqdn
+ - _check_config.ip_address == ip_address
+ - _check_config.port == port
+ - _check_config.resource_path == resource_path
+ - _check_config.search_string == string_match
+ vars:
+ _health_check: '{{ update_resource_path.health_check }}'
+ _check_config: '{{ _health_check.health_check_config }}'
+
+ - name: 'Update HTTPS health check - set resource_path - idempotency - check_mode'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_https_match }}'
+ fqdn: '{{ fqdn }}'
+ request_interval: '{{ request_interval }}'
+ resource_path: '{{ resource_path }}'
+ register: update_resource_path
+ check_mode: true
+
+ - name: 'Check result - Update HTTPS health check - set resource_path - idempotency - check_mode'
+ assert:
+ that:
+ - update_resource_path is successful
+ - update_resource_path is not changed
+
+ - name: 'Update HTTPS health check - set resource_path - idempotency'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_https_match }}'
+ fqdn: '{{ fqdn }}'
+ request_interval: '{{ request_interval }}'
+ resource_path: '{{ resource_path }}'
+ register: update_resource_path
+
+ - name: 'Check result - Update HTTPS health check - set resource_path - idempotency'
+ assert:
+ that:
+ - update_resource_path is successful
+ - update_resource_path is not changed
+ - '"health_check" in update_resource_path'
+ - '"id" in _health_check'
+ - _health_check.id == match_check_id
+ - '"action" in _health_check'
+ - '"health_check_version" in _health_check'
+ - '"tags" in _health_check'
+ - create_check.health_check.action is none
+ - '"health_check_config" in create_check.health_check'
+ - '"type" in _check_config'
+ - '"disabled" in _check_config'
+ - '"failure_threshold" in _check_config'
+ - '"request_interval" in _check_config'
+ - '"fully_qualified_domain_name" in _check_config'
+ - '"ip_address" in _check_config'
+ - '"port" in _check_config'
+ - '"resource_path" in _check_config'
+ - '"search_string" in _check_config'
+ - _check_config.disabled == false
+ - _check_config.type == type_https_match
+ - _check_config.request_interval == request_interval
+ - _check_config.failure_threshold == 3
+ - _check_config.fully_qualified_domain_name == fqdn
+ - _check_config.ip_address == ip_address
+ - _check_config.port == port
+ - _check_config.resource_path == resource_path
+ - _check_config.search_string == string_match
+ vars:
+ _health_check: '{{ update_resource_path.health_check }}'
+ _check_config: '{{ _health_check.health_check_config }}'
+
+ - name: 'Update HTTPS health check - set string_match - check_mode'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_https_match }}'
+ fqdn: '{{ fqdn }}'
+ request_interval: '{{ request_interval }}'
+ string_match: '{{ string_match_updated }}'
+ register: update_string_match
+ check_mode: true
+
+ - name: 'Check result - Update HTTPS health check - set string_match - check_mode'
+ assert:
+ that:
+ - update_string_match is successful
+ - update_string_match is changed
+
+ - name: 'Update HTTPS health check - set string_match'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_https_match }}'
+ fqdn: '{{ fqdn }}'
+ request_interval: '{{ request_interval }}'
+ string_match: '{{ string_match_updated }}'
+ register: update_string_match
+
+ - name: 'Check result - Update HTTPS health check - set string_match'
+ assert:
+ that:
+ - update_string_match is successful
+ - update_string_match is changed
+ - '"health_check" in update_string_match'
+ - '"id" in _health_check'
+ - _health_check.id == match_check_id
+ - '"action" in _health_check'
+ - '"health_check_version" in _health_check'
+ - '"tags" in _health_check'
+ - create_check.health_check.action is none
+ - '"health_check_config" in create_check.health_check'
+ - '"type" in _check_config'
+ - '"disabled" in _check_config'
+ - '"failure_threshold" in _check_config'
+ - '"request_interval" in _check_config'
+ - '"fully_qualified_domain_name" in _check_config'
+ - '"ip_address" in _check_config'
+ - '"port" in _check_config'
+ - '"resource_path" in _check_config'
+ - '"search_string" in _check_config'
+ - _check_config.disabled == false
+ - _check_config.type == type_https_match
+ - _check_config.request_interval == request_interval
+ - _check_config.failure_threshold == 3
+ - _check_config.fully_qualified_domain_name == fqdn
+ - _check_config.ip_address == ip_address
+ - _check_config.port == port
+ - _check_config.resource_path == resource_path
+ - _check_config.search_string == string_match_updated
+ vars:
+ _health_check: '{{ update_string_match.health_check }}'
+ _check_config: '{{ _health_check.health_check_config }}'
+
+ - name: 'Update HTTPS health check - set string_match - idempotency - check_mode'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_https_match }}'
+ fqdn: '{{ fqdn }}'
+ request_interval: '{{ request_interval }}'
+ string_match: '{{ string_match_updated }}'
+ register: update_string_match
+ check_mode: true
+
+ - name: 'Check result - Update HTTPS health check - set string_match - idempotency - check_mode'
+ assert:
+ that:
+ - update_string_match is successful
+ - update_string_match is not changed
+
+ - name: 'Update HTTPS health check - set string_match - idempotency'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_https_match }}'
+ fqdn: '{{ fqdn }}'
+ request_interval: '{{ request_interval }}'
+ string_match: '{{ string_match_updated }}'
+ register: update_string_match
+
+ - name: 'Check result - Update HTTPS health check - set string_match - idempotency'
+ assert:
+ that:
+ - update_string_match is successful
+ - update_string_match is not changed
+ - '"health_check" in update_string_match'
+ - '"id" in _health_check'
+ - _health_check.id == match_check_id
+ - '"action" in _health_check'
+ - '"health_check_version" in _health_check'
+ - '"tags" in _health_check'
+ - create_check.health_check.action is none
+ - '"health_check_config" in create_check.health_check'
+ - '"type" in _check_config'
+ - '"disabled" in _check_config'
+ - '"failure_threshold" in _check_config'
+ - '"request_interval" in _check_config'
+ - '"fully_qualified_domain_name" in _check_config'
+ - '"ip_address" in _check_config'
+ - '"port" in _check_config'
+ - '"resource_path" in _check_config'
+ - '"search_string" in _check_config'
+ - _check_config.disabled == false
+ - _check_config.type == type_https_match
+ - _check_config.request_interval == request_interval
+ - _check_config.failure_threshold == 3
+ - _check_config.fully_qualified_domain_name == fqdn
+ - _check_config.ip_address == ip_address
+ - _check_config.port == port
+ - _check_config.resource_path == resource_path
+ - _check_config.search_string == string_match_updated
+ vars:
+ _health_check: '{{ update_string_match.health_check }}'
+ _check_config: '{{ _health_check.health_check_config }}'
+
+ # Test deletion
+ - name: 'Delete HTTPS health check - check_mode'
+ route53_health_check:
+ state: absent
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_https_match }}'
+ fqdn: '{{ fqdn }}'
+ request_interval: '{{ request_interval }}'
+ register: delete_match
+ check_mode: true
+
+ - name: 'Check result - Delete HTTPS health check - check_mode'
+ assert:
+ that:
+ - delete_match is successful
+ - delete_match is changed
+
+ - name: 'Delete HTTPS health check'
+ route53_health_check:
+ state: absent
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_https_match }}'
+ fqdn: '{{ fqdn }}'
+ request_interval: '{{ request_interval }}'
+ register: delete_match
+
+ - name: 'Check result - Delete HTTPS health check'
+ assert:
+ that:
+ - delete_match is successful
+ - delete_match is changed
+
+ - name: 'Delete HTTPS health check - idempotency - check_mode'
+ route53_health_check:
+ state: absent
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_https_match }}'
+ fqdn: '{{ fqdn }}'
+ request_interval: '{{ request_interval }}'
+ register: delete_match
+ check_mode: true
+
+ - name: 'Check result - Delete HTTPS health check - idempotency - check_mode'
+ assert:
+ that:
+ - delete_match is successful
+ - delete_match is not changed
+
+ - name: 'Delete HTTPS health check - idempotency'
+ route53_health_check:
+ state: absent
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_https_match }}'
+ fqdn: '{{ fqdn }}'
+ request_interval: '{{ request_interval }}'
+ register: delete_match
+
+ - name: 'Check result - Delete HTTPS health check - idempotency'
+ assert:
+ that:
+ - delete_match is successful
+ - delete_match is not changed
+
+ # Create an HTTP health check with lots of settings we can update
+ - name: 'Create Complex health check - check_mode'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_http_match }}'
+ fqdn: '{{ fqdn }}'
+ request_interval: '{{ request_interval }}'
+ string_match: '{{ string_match }}'
+ resource_path: '{{ resource_path }}'
+ failure_threshold: '{{ failure_threshold }}'
+ disabled: true
+ tags:
+ CamelCase: CamelCaseValue
+ snake_case: snake_case_value
+ "with space": Some value
+ purge_tags: false
+ register: create_complex
+ check_mode: true
+
+ - name: 'Check result - Create Complex health check - check_mode'
+ assert:
+ that:
+ - create_complex is successful
+ - create_complex is changed
+
+ - name: 'Create Complex health check'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_http_match }}'
+ fqdn: '{{ fqdn }}'
+ request_interval: '{{ request_interval }}'
+ string_match: '{{ string_match }}'
+ resource_path: '{{ resource_path }}'
+ failure_threshold: '{{ failure_threshold }}'
+ disabled: true
+ tags:
+ CamelCase: CamelCaseValue
+ snake_case: snake_case_value
+ "with space": Some value
+ purge_tags: false
+ register: create_complex
+
+ - name: 'Check result - Create Complex health check'
+ assert:
+ that:
+ - create_complex is successful
+ - create_complex is changed
+ - '"health_check" in create_complex'
+ - '"id" in _health_check'
+ - _health_check.id != tcp_check_id
+ - _health_check.id != match_check_id
+ - '"action" in _health_check'
+ - '"health_check_version" in _health_check'
+ - '"tags" in _health_check'
+ - '"CamelCase" in _health_check.tags'
+ - _health_check.tags['CamelCase'] == 'CamelCaseValue'
+ - '"snake_case" in _health_check.tags'
+ - _health_check.tags['snake_case'] == 'snake_case_value'
+ - '"with space" in _health_check.tags'
+ - _health_check.tags['with space'] == 'Some value'
+ - create_check.health_check.action is none
+ - '"health_check_config" in create_check.health_check'
+ - '"type" in _check_config'
+ - '"disabled" in _check_config'
+ - '"failure_threshold" in _check_config'
+ - '"request_interval" in _check_config'
+ - '"fully_qualified_domain_name" in _check_config'
+ - '"ip_address" in _check_config'
+ - '"port" in _check_config'
+ - '"resource_path" in _check_config'
+ - '"search_string" in _check_config'
+ - _check_config.disabled == true
+ - _check_config.type == type_http_match
+ - _check_config.request_interval == request_interval
+ - _check_config.failure_threshold == failure_threshold
+ - _check_config.fully_qualified_domain_name == fqdn
+ - _check_config.ip_address == ip_address
+ - _check_config.port == port
+ - _check_config.resource_path == resource_path
+ - _check_config.search_string == string_match
+ vars:
+ _health_check: '{{ create_complex.health_check }}'
+ _check_config: '{{ _health_check.health_check_config }}'
+
+ - set_fact:
+ complex_check_id: '{{ create_complex.health_check.id }}'
+
+ - name: 'Create Complex health check - idempotency - check_mode'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_http_match }}'
+ fqdn: '{{ fqdn }}'
+ request_interval: '{{ request_interval }}'
+ string_match: '{{ string_match }}'
+ resource_path: '{{ resource_path }}'
+ failure_threshold: '{{ failure_threshold }}'
+ disabled: true
+ tags:
+ CamelCase: CamelCaseValue
+ snake_case: snake_case_value
+ "with space": Some value
+ purge_tags: false
+ register: create_complex
+ check_mode: true
+
+ - name: 'Check result - Create Complex health check - idempotency - check_mode'
+ assert:
+ that:
+ - create_complex is successful
+ - create_complex is not changed
+
+ - name: 'Create Complex health check - idempotency'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_http_match }}'
+ fqdn: '{{ fqdn }}'
+ request_interval: '{{ request_interval }}'
+ string_match: '{{ string_match }}'
+ resource_path: '{{ resource_path }}'
+ failure_threshold: '{{ failure_threshold }}'
+ disabled: true
+ tags:
+ CamelCase: CamelCaseValue
+ snake_case: snake_case_value
+ "with space": Some value
+ purge_tags: false
+ register: create_complex
+
+ - name: 'Check result - Create Complex health check - idempotency'
+ assert:
+ that:
+ - create_complex is successful
+ - create_complex is not changed
+ - '"health_check" in create_complex'
+ - '"id" in _health_check'
+ - _health_check.id == complex_check_id
+ - '"action" in _health_check'
+ - '"health_check_version" in _health_check'
+ - '"tags" in _health_check'
+ - '"CamelCase" in _health_check.tags'
+ - _health_check.tags['CamelCase'] == 'CamelCaseValue'
+ - '"snake_case" in _health_check.tags'
+ - _health_check.tags['snake_case'] == 'snake_case_value'
+ - '"with space" in _health_check.tags'
+ - _health_check.tags['with space'] == 'Some value'
+ - create_check.health_check.action is none
+ - '"health_check_config" in create_check.health_check'
+ - '"type" in _check_config'
+ - '"disabled" in _check_config'
+ - '"failure_threshold" in _check_config'
+ - '"request_interval" in _check_config'
+ - '"fully_qualified_domain_name" in _check_config'
+ - '"ip_address" in _check_config'
+ - '"port" in _check_config'
+ - '"resource_path" in _check_config'
+ - '"search_string" in _check_config'
+ - _check_config.disabled == true
+ - _check_config.type == type_http_match
+ - _check_config.request_interval == request_interval
+ - _check_config.failure_threshold == failure_threshold
+ - _check_config.fully_qualified_domain_name == fqdn
+ - _check_config.ip_address == ip_address
+ - _check_config.port == port
+ - _check_config.resource_path == resource_path
+ - _check_config.search_string == string_match
+ vars:
+ _health_check: '{{ create_complex.health_check }}'
+ _check_config: '{{ _health_check.health_check_config }}'
+
+ - name: 'Update Complex health check - check_mode'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_http_match }}'
+ fqdn: '{{ fqdn }}'
+ request_interval: '{{ request_interval }}'
+ string_match: '{{ string_match_updated }}'
+ resource_path: '{{ resource_path_updated }}'
+ failure_threshold: '{{ failure_threshold_updated }}'
+ register: update_complex
+ check_mode: true
+
+ - name: 'Check result - Update Complex health check - check_mode'
+ assert:
+ that:
+ - update_complex is successful
+ - update_complex is changed
+
+ - name: 'Update Complex health check'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_http_match }}'
+ fqdn: '{{ fqdn }}'
+ request_interval: '{{ request_interval }}'
+ string_match: '{{ string_match_updated }}'
+ resource_path: '{{ resource_path_updated }}'
+ failure_threshold: '{{ failure_threshold_updated }}'
+ register: update_complex
+
+ - name: 'Check result - Update Complex health check'
+ assert:
+ that:
+ - update_complex is successful
+ - update_complex is changed
+ - '"health_check" in update_complex'
+ - '"id" in _health_check'
+ - _health_check.id == complex_check_id
+ - '"action" in _health_check'
+ - '"health_check_version" in _health_check'
+ - '"tags" in _health_check'
+ - '"CamelCase" in _health_check.tags'
+ - _health_check.tags['CamelCase'] == 'CamelCaseValue'
+ - '"snake_case" in _health_check.tags'
+ - _health_check.tags['snake_case'] == 'snake_case_value'
+ - '"with space" in _health_check.tags'
+ - _health_check.tags['with space'] == 'Some value'
+ - create_check.health_check.action is none
+ - '"health_check_config" in create_check.health_check'
+ - '"type" in _check_config'
+ - '"disabled" in _check_config'
+ - '"failure_threshold" in _check_config'
+ - '"request_interval" in _check_config'
+ - '"fully_qualified_domain_name" in _check_config'
+ - '"ip_address" in _check_config'
+ - '"port" in _check_config'
+ - '"resource_path" in _check_config'
+ - '"search_string" in _check_config'
+ - _check_config.disabled == true
+ - _check_config.type == type_http_match
+ - _check_config.request_interval == request_interval
+ - _check_config.failure_threshold == failure_threshold_updated
+ - _check_config.fully_qualified_domain_name == fqdn
+ - _check_config.ip_address == ip_address
+ - _check_config.port == port
+ - _check_config.resource_path == resource_path_updated
+ - _check_config.search_string == string_match_updated
+ vars:
+ _health_check: '{{ update_complex.health_check }}'
+ _check_config: '{{ _health_check.health_check_config }}'
+
+ - name: 'Update Complex health check - idempotency - check_mode'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_http_match }}'
+ fqdn: '{{ fqdn }}'
+ request_interval: '{{ request_interval }}'
+ string_match: '{{ string_match_updated }}'
+ resource_path: '{{ resource_path_updated }}'
+ failure_threshold: '{{ failure_threshold_updated }}'
+ register: update_complex
+ check_mode: true
+
+ - name: 'Check result - Update Complex health check - idempotency - check_mode'
+ assert:
+ that:
+ - update_complex is successful
+ - update_complex is not changed
+
+ - name: 'Update Complex health check - idempotency'
+ route53_health_check:
+ state: present
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_http_match }}'
+ fqdn: '{{ fqdn }}'
+ request_interval: '{{ request_interval }}'
+ string_match: '{{ string_match_updated }}'
+ resource_path: '{{ resource_path_updated }}'
+ failure_threshold: '{{ failure_threshold_updated }}'
+ register: update_complex
+
+ - name: 'Check result - Update Complex health check - idempotency'
+ assert:
+ that:
+ - update_complex is successful
+ - update_complex is not changed
+ - '"health_check" in update_complex'
+ - '"id" in _health_check'
+ - _health_check.id == complex_check_id
+ - '"action" in _health_check'
+ - '"health_check_version" in _health_check'
+ - '"tags" in _health_check'
+ - '"CamelCase" in _health_check.tags'
+ - _health_check.tags['CamelCase'] == 'CamelCaseValue'
+ - '"snake_case" in _health_check.tags'
+ - _health_check.tags['snake_case'] == 'snake_case_value'
+ - '"with space" in _health_check.tags'
+ - _health_check.tags['with space'] == 'Some value'
+ - create_check.health_check.action is none
+ - '"health_check_config" in create_check.health_check'
+ - '"type" in _check_config'
+ - '"disabled" in _check_config'
+ - '"failure_threshold" in _check_config'
+ - '"request_interval" in _check_config'
+ - '"fully_qualified_domain_name" in _check_config'
+ - '"ip_address" in _check_config'
+ - '"port" in _check_config'
+ - '"resource_path" in _check_config'
+ - '"search_string" in _check_config'
+ - _check_config.disabled == true
+ - _check_config.type == type_http_match
+ - _check_config.request_interval == request_interval
+ - _check_config.failure_threshold == failure_threshold_updated
+ - _check_config.fully_qualified_domain_name == fqdn
+ - _check_config.ip_address == ip_address
+ - _check_config.port == port
+ - _check_config.resource_path == resource_path_updated
+ - _check_config.search_string == string_match_updated
+ vars:
+ _health_check: '{{ update_complex.health_check }}'
+ _check_config: '{{ _health_check.health_check_config }}'
+
+ - name: 'Delete Complex health check - check_mode'
+ route53_health_check:
+ state: absent
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_http_match }}'
+ fqdn: '{{ fqdn }}'
+ request_interval: '{{ request_interval }}'
+ register: delete_complex
+ check_mode: true
+
+ - name: 'Check result - Delete Complex health check - check_mode'
+ assert:
+ that:
+ - delete_complex is successful
+ - delete_complex is changed
+
+ - name: 'Delete Complex health check'
+ route53_health_check:
+ state: absent
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_http_match }}'
+ fqdn: '{{ fqdn }}'
+ request_interval: '{{ request_interval }}'
+ register: delete_complex
+
+ - name: 'Check result - Delete Complex health check'
+ assert:
+ that:
+ - delete_complex is successful
+ - delete_complex is changed
+
+ - name: 'Delete Complex health check - idempotency - check_mode'
+ route53_health_check:
+ state: absent
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_http_match }}'
+ fqdn: '{{ fqdn }}'
+ request_interval: '{{ request_interval }}'
+ register: delete_complex
+ check_mode: true
+
+ - name: 'Check result - Delete Complex health check - idempotency - check_mode'
+ assert:
+ that:
+ - delete_complex is successful
+ - delete_complex is not changed
+
+ - name: 'Delete Complex health check - idempotency'
+ route53_health_check:
+ state: absent
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_http_match }}'
+ fqdn: '{{ fqdn }}'
+ request_interval: '{{ request_interval }}'
+ register: delete_complex
+
+ - name: 'Check result - Delete Complex health check - idempotency'
+ assert:
+ that:
+ - delete_complex is successful
+ - delete_complex is not changed
+
+ # Latency measurement tests - measure_latency can only be set at creation time
+ - name: 'Create a TCP health check with latency graphs enabled'
+ route53_health_check:
+ state: present
+ health_check_name: '{{ tiny_prefix }}-hc-latency-graph'
+ use_unique_names: true
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type }}'
+ measure_latency: true
+ register: create_check
+
+ - name: Get health check info
+ amazon.aws.route53_info:
+ query: health_check
+ health_check_id: "{{ create_check.health_check.id }}"
+ health_check_method: details
+ register: health_check_info
+
+ - name: 'Check result - Create a TCP health check with latency graphs enabled'
+ assert:
+ that:
+ - create_check is successful
+ - create_check is changed
+ - health_check_info.health_check.health_check_config.measure_latency == true
+
+ - pause:
+ seconds: 20
+
+ # test route53_info for health_check_method=status
+ - name: Get health check status
+ amazon.aws.route53_info:
+ query: health_check
+ health_check_id: "{{ create_check.health_check.id }}"
+ health_check_method: status
+ register: health_check_status_info
+
+ - assert:
+ that:
+ - health_check_status_info is not failed
+ - '"health_check_observations" in health_check_status_info'
+
+ # test route53_info for health_check_method=failure_reason
+ - name: Get health check failure_reason
+ amazon.aws.route53_info:
+ query: health_check
+ health_check_id: "{{ create_check.health_check.id }}"
+ health_check_method: failure_reason
+ register: health_check_failure_reason_info
+
+ - assert:
+ that:
+ - health_check_failure_reason_info is not failed
+ - '"health_check_observations" in health_check_failure_reason_info'
+
+
+ - name: 'Update above health check to disable latency graphs - immutable setting, expect no change'
+ route53_health_check:
+ state: present
+ health_check_name: '{{ tiny_prefix }}-hc-latency-graph'
+ use_unique_names: true
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type }}'
+ measure_latency: false
+ register: update_check
+
+ - name: 'Check result - Update TCP health check to disable latency graphs'
+ assert:
+ that:
+ - update_check is successful
+ - update_check is not changed
+ - health_check_info.health_check.health_check_config.measure_latency == true
+
+ always:
+
+ ################################################
+ # TEARDOWN STARTS HERE
+ ################################################
+
+ - name: 'Delete TCP health check with latency graphs enabled'
+ route53_health_check:
+ state: absent
+ health_check_name: '{{ tiny_prefix }}-hc-latency-graph'
+ use_unique_names: true
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type }}'
+ measure_latency: true
+ ignore_errors: true
+
+ - name: 'Delete TCP health check'
+ route53_health_check:
+ state: absent
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type }}'
+ ignore_errors: true
+
+ - name: 'Delete HTTPS health check'
+ route53_health_check:
+ state: absent
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_https_match }}'
+ fqdn: '{{ fqdn }}'
+ request_interval: '{{ request_interval }}'
+ ignore_errors: true
+
+ - name: 'Delete Complex health check'
+ route53_health_check:
+ state: absent
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_http_match }}'
+ fqdn: '{{ fqdn }}'
+ request_interval: '{{ request_interval }}'
+ ignore_errors: true
+
+ - name: release EIP
+ ec2_eip:
+ state: absent
+ public_ip: '{{ ip_address }}'
+ ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/update_delete_by_id.yml b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/update_delete_by_id.yml
new file mode 100644
index 000000000..e4d242a20
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/update_delete_by_id.yml
@@ -0,0 +1,303 @@
+---
+- block:
+ - name: 'Create HTTP health check for use in this test'
+ route53_health_check:
+ state: present
+ name: '{{ tiny_prefix }}-test-update-delete-by-id'
+ ip_address: '{{ ip_address }}'
+ port: '{{ port }}'
+ type: '{{ type_http }}'
+ resource_path: '{{ resource_path }}'
+ fqdn: '{{ fqdn }}'
+ use_unique_names: true
+ register: create_result
+
+ - name: 'Check result - Create HTTP health check'
+ assert:
+ that:
+ - create_result is not failed
+ - create_result is changed
+ - '"route53:CreateHealthCheck" in create_result.resource_actions'
+
+ - name: Get ID of the health check created in the above task
+ set_fact:
+ health_check_id: "{{ create_result.health_check.id }}"
+
+ - name: Get health_check info
+ amazon.aws.route53_info:
+ query: health_check
+ health_check_id: "{{ health_check_id }}"
+ health_check_method: details
+ register: health_check_info
+
+ # Update Health Check by ID Tests
+ - name: 'Update Health Check by ID - Update Port - check_mode'
+ route53_health_check:
+ id: "{{ health_check_id }}"
+ port: 8888
+ register: update_result
+ check_mode: true
+
+ - name: 'Check result - Update Health Check Port - check_mode'
+ assert:
+ that:
+ - update_result is not failed
+ - update_result is changed
+ - '"route53:UpdateHealthCheck" not in update_result.resource_actions'
+
+ - name: 'Update Health Check by ID - Update Port'
+ route53_health_check:
+ id: "{{ health_check_id }}"
+ port: 8888
+ register: update_result
+
+ - name: Get health_check info
+ amazon.aws.route53_info:
+ query: health_check
+ health_check_id: "{{ health_check_id }}"
+ health_check_method: details
+ register: health_check_info
+
+ - name: 'Check result - Update Health Check Port'
+ assert:
+ that:
+ - update_result is not failed
+ - update_result is changed
+ - health_check_info.HealthCheck.HealthCheckConfig.Port == 8888
+
+
+ - name: 'Update Health Check by ID - Update Port - idempotency - check_mode'
+ route53_health_check:
+ id: "{{ health_check_id }}"
+ port: 8888
+ register: update_result
+ check_mode: true
+
+ - name: 'Check result - Update Health Check Port - idempotency - check_mode'
+ assert:
+ that:
+ - update_result is not failed
+ - update_result is not changed
+ - '"route53:UpdateHealthCheck" not in update_result.resource_actions'
+
+ - name: 'Update Health Check by ID - Update Port - idempotency'
+ route53_health_check:
+ id: "{{ health_check_id }}"
+ port: 8888
+ register: update_result
+
+ - name: 'Check result - Update Health Check Port - idempotency'
+ assert:
+ that:
+ - update_result is not failed
+ - update_result is not changed
+ - '"route53:UpdateHealthCheck" not in update_result.resource_actions'
+
+ ##
+ - name: 'Update Health Check by ID - Update IP address and FQDN - check_mode'
+ route53_health_check:
+ id: "{{ health_check_id }}"
+ ip_address: 1.2.3.4
+ fqdn: '{{ fqdn_1 }}'
+ register: update_result
+ check_mode: true
+
+ - name: 'Check result - Update Health Check IP address and FQDN - check_mode'
+ assert:
+ that:
+ - update_result is not failed
+ - update_result is changed
+ - '"route53:UpdateHealthCheck" not in update_result.resource_actions'
+
+ - name: 'Update Health Check by ID - Update IP address and FQDN'
+ route53_health_check:
+ id: "{{ health_check_id }}"
+ ip_address: 1.2.3.4
+ fqdn: '{{ fqdn_1 }}'
+ register: update_result
+
+ - name: Get health_check info
+ amazon.aws.route53_info:
+ query: health_check
+ health_check_id: "{{ health_check_id }}"
+ health_check_method: details
+ register: health_check_info
+
+ - name: 'Check result - Update Health Check IP address and FQDN'
+ assert:
+ that:
+ - update_result is not failed
+ - update_result is changed
+ - health_check_info.HealthCheck.HealthCheckConfig.IPAddress == '1.2.3.4'
+ - health_check_info.HealthCheck.HealthCheckConfig.FullyQualifiedDomainName == fqdn_1
+
+
+ - name: 'Update Health Check by ID - Update IP address and FQDN - idempotency - check_mode'
+ route53_health_check:
+ id: "{{ health_check_id }}"
+ ip_address: 1.2.3.4
+ fqdn: '{{ fqdn_1 }}'
+ register: update_result
+ check_mode: true
+
+ - name: 'Check result - Update Health Check IP address and FQDN - idempotency - check_mode'
+ assert:
+ that:
+ - update_result is not failed
+ - update_result is not changed
+ - '"route53:UpdateHealthCheck" not in update_result.resource_actions'
+
+ - name: 'Update Health Check by ID - Update IP address and FQDN - idempotency'
+ route53_health_check:
+ id: "{{ health_check_id }}"
+ ip_address: 1.2.3.4
+ fqdn: '{{ fqdn_1 }}'
+ register: update_result
+
+ - name: 'Check result - Update Health Check IP address and FQDN - idempotency'
+ assert:
+ that:
+ - update_result is not failed
+ - update_result is not changed
+ - '"route53:UpdateHealthCheck" not in update_result.resource_actions'
+
+ # Update Health Check (Port) by name
+
+ - name: 'Update Health Check by name - Update Port - check_mode'
+ route53_health_check:
+ state: present
+ port: 8080
+ type: '{{ type_http }}'
+ fqdn: '{{ fqdn }}'
+ health_check_name: '{{ tiny_prefix }}-test-update-delete-by-id'
+ use_unique_names: true
+ register: update_result
+ check_mode: true
+
+ - name: 'Check result - Update Health Check Port - check_mode'
+ assert:
+ that:
+ - update_result is not failed
+ - update_result is changed
+ - '"route53:UpdateHealthCheck" not in update_result.resource_actions'
+
+ - name: 'Update Health Check by name - Update Port'
+ route53_health_check:
+ state: present
+ port: 8080
+ type: '{{ type_http }}'
+ fqdn: '{{ fqdn }}'
+ health_check_name: '{{ tiny_prefix }}-test-update-delete-by-id'
+ use_unique_names: true
+ register: update_result
+
+ - name: Get health_check info
+ amazon.aws.route53_info:
+ query: health_check
+ health_check_id: "{{ health_check_id }}"
+ health_check_method: details
+ register: health_check_info
+
+ - name: 'Check result - Update Health Check Port'
+ assert:
+ that:
+ - update_result is not failed
+ - update_result is changed
+ - health_check_info.HealthCheck.HealthCheckConfig.Port == 8080
+
+ - name: 'Update Health Check by name - Update Port - idempotency - check_mode'
+ route53_health_check:
+ state: present
+ port: 8080
+ type: '{{ type_http }}'
+ fqdn: '{{ fqdn }}'
+ health_check_name: '{{ tiny_prefix }}-test-update-delete-by-id'
+ use_unique_names: true
+ register: update_result
+ check_mode: true
+
+ - name: 'Check result - Update Health Check Port - idempotency - check_mode'
+ assert:
+ that:
+ - update_result is not failed
+ - update_result is not changed
+ - '"route53:UpdateHealthCheck" not in update_result.resource_actions'
+
+ - name: 'Update Health Check by name - Update Port - idempotency'
+ route53_health_check:
+ state: present
+ port: 8080
+ type: '{{ type_http }}'
+ fqdn: '{{ fqdn }}'
+ health_check_name: '{{ tiny_prefix }}-test-update-delete-by-id'
+ use_unique_names: true
+ register: update_result
+
+ - name: 'Check result - Update Health Check Port - idempotency'
+ assert:
+ that:
+ - update_result is not failed
+ - update_result is not changed
+ - '"route53:UpdateHealthCheck" not in update_result.resource_actions'
+
+ # Delete Health Check by ID Tests
+ - name: Delete Health check by ID - check_mode
+ route53_health_check:
+ state: absent
+ id: "{{ health_check_id }}"
+ register: delete_result
+ check_mode: true
+
+ - name: 'Check result - Delete Health Check by ID - check_mode'
+ assert:
+ that:
+ - delete_result is not failed
+ - delete_result is changed
+ - '"route53:DeleteHealthCheck" not in delete_result.resource_actions'
+
+ - name: Delete Health check by ID
+ route53_health_check:
+ state: absent
+ id: "{{ health_check_id }}"
+ register: delete_result
+
+ - name: 'Check result - Delete Health Check by ID'
+ assert:
+ that:
+ - delete_result is not failed
+ - delete_result is changed
+ - '"route53:DeleteHealthCheck" in delete_result.resource_actions'
+
+ - name: Delete Health check by ID - idempotency - check_mode
+ route53_health_check:
+ state: absent
+ id: "{{ health_check_id }}"
+ register: delete_result
+ check_mode: true
+
+ - name: 'Check result - Delete Health Check by ID - idempotency - check_mode'
+ assert:
+ that:
+ - delete_result is not failed
+ - delete_result is not changed
+ - '"route53:DeleteHealthCheck" not in delete_result.resource_actions'
+
+ - name: Delete Health check by ID - idempotency
+ route53_health_check:
+ state: absent
+ id: "{{ health_check_id }}"
+ register: delete_result
+
+ - name: 'Check result - Delete Health Check by ID - idempotency'
+ assert:
+ that:
+ - delete_result is not failed
+ - delete_result is not changed
+ - '"route53:DeleteHealthCheck" not in delete_result.resource_actions'
+
+ # cleanup
+ always:
+ - name: Delete Health check by ID
+ route53_health_check:
+ state: absent
+ id: "{{ health_check_id }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53_zone/aliases b/ansible_collections/amazon/aws/tests/integration/targets/route53_zone/aliases
new file mode 100644
index 000000000..4ef4b2067
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/route53_zone/aliases
@@ -0,0 +1 @@
+cloud/aws
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53_zone/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/route53_zone/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/route53_zone/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53_zone/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/route53_zone/tasks/main.yml
new file mode 100644
index 000000000..4aea981d0
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/route53_zone/tasks/main.yml
@@ -0,0 +1,617 @@
+---
+- name: 'route53_zone integration tests'
+ collections:
+ - amazon.aws
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+
+ # ============================================================
+
+ - name: Create VPC for use in testing
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ cidr_block: 10.22.32.0/23
+ tags:
+ Name: Ansible ec2_instance Testing VPC
+ tenancy: default
+ register: testing_vpc
+
+ # ============================================================
+ - name: Create a public zone
+ route53_zone:
+ zone: "{{ resource_prefix }}.public"
+ comment: original comment
+ state: present
+ tags:
+ TestTag: "{{ resource_prefix }}"
+ another_tag: "{{ resource_prefix }} again"
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - output.comment == 'original comment'
+ - output.name == '{{ resource_prefix }}.public.'
+ - output.tags.TestTag == '{{ resource_prefix }}'
+ - output.tags.another_tag == '{{ resource_prefix }} again'
+ - not output.private_zone
+
+ # ============================================================
+ - name: Create a public zone (CHECK MODE)
+ route53_zone:
+ zone: "{{ resource_prefix }}.check.public"
+ comment: original comment
+ state: present
+ tags:
+ TestTag: "{{ resource_prefix }}"
+ another_tag: "{{ resource_prefix }} again"
+ register: output
+ check_mode: yes
+
+ - assert:
+ that:
+ - output.changed
+ - output.comment == 'original comment'
+ - output.name == '{{ resource_prefix }}.check.public.'
+ - output.tags.TestTag == '{{ resource_prefix }}'
+ - output.tags.another_tag == '{{ resource_prefix }} again'
+ - not output.private_zone
+
+ # ============================================================
+ - name: Do an idempotent update of a public zone
+ route53_zone:
+ zone: "{{ resource_prefix }}.public"
+ comment: original comment
+ state: present
+ tags:
+ TestTag: "{{ resource_prefix }}"
+ another_tag: "{{ resource_prefix }} again"
+ register: output
+
+ - assert:
+ that:
+ - not output.changed
+ - output.comment == 'original comment'
+ - output.name == '{{ resource_prefix }}.public.'
+ - output.tags.TestTag == '{{ resource_prefix }}'
+ - output.tags.another_tag == '{{ resource_prefix }} again'
+ - not output.private_zone
+
+ - name: Do an idempotent update of a public zone (CHECK MODE)
+ route53_zone:
+ zone: "{{ resource_prefix }}.public"
+ comment: original comment
+ state: present
+ tags:
+ TestTag: "{{ resource_prefix }}"
+ another_tag: "{{ resource_prefix }} again"
+ register: output
+ check_mode: yes
+
+ - assert:
+ that:
+ - not output.changed
+ - output.comment == 'original comment'
+ - output.name == '{{ resource_prefix }}.public.'
+ - output.tags.TestTag == '{{ resource_prefix }}'
+ - output.tags.another_tag == '{{ resource_prefix }} again'
+ - not output.private_zone
+
+ # ============================================================
+ - name: Modify tags on a public zone
+ route53_zone:
+ zone: "{{ resource_prefix }}.public"
+ comment: original comment
+ state: present
+ tags:
+ AnotherTag: "{{ resource_prefix }}.anothertag"
+ purge_tags: true
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - "'TestTag' not in output.tags"
+ - output.tags.AnotherTag == '{{ resource_prefix }}.anothertag'
+
+ # ============================================================
+ - name: Update comment and remove tags of a public zone
+ route53_zone:
+ zone: "{{ resource_prefix }}.public"
+ comment: updated comment
+ state: present
+ purge_tags: true
+ tags: {}
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - output.result.comment == "updated comment"
+ - not output.tags
+
+ - name: Update comment and remove tags of a public zone (CHECK MODE)
+ route53_zone:
+ zone: "{{ resource_prefix }}.public"
+ comment: updated comment for check
+ state: present
+ purge_tags: true
+ tags: {}
+ register: output
+ check_mode: yes
+
+ - assert:
+ that:
+ - output.changed
+ - output.result.comment == "updated comment for check"
+ - not output.tags
+
+ # ============================================================
+ - name: Delete public zone (CHECK MODE)
+ route53_zone:
+ zone: "{{ resource_prefix }}.public"
+ state: absent
+ register: output
+ check_mode: yes
+
+ - assert:
+ that:
+ - output.changed
+ - "'Successfully deleted' in output.result"
+
+ - name: Delete public zone
+ route53_zone:
+ zone: "{{ resource_prefix }}.public"
+ state: absent
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - "'Successfully deleted' in output.result"
+
+ # ============================================================
+ - name: Create a private zone (CHECK MODE)
+ route53_zone:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ vpc_region: "{{ aws_region }}"
+ zone: "{{ resource_prefix }}.private"
+ comment: original comment
+ state: present
+ register: output
+ check_mode: yes
+
+ - assert:
+ that:
+ - output.changed
+
+ - name: Create a private zone
+ route53_zone:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ vpc_region: "{{ aws_region }}"
+ zone: "{{ resource_prefix }}.private"
+ comment: original comment
+ state: present
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ # ============================================================
+ - name: Idempotent update of a private zone
+ route53_zone:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ vpc_region: "{{ aws_region }}"
+ zone: "{{ resource_prefix }}.private"
+ comment: original comment
+ state: present
+ register: output
+
+ - assert:
+ that:
+ - not output.changed
+ - "'There is already a private hosted zone in the same region with the same VPC' in output.msg"
+
+ - name: Idempotent update of a private zone (CHECK MODE)
+ route53_zone:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ vpc_region: "{{ aws_region }}"
+ zone: "{{ resource_prefix }}.private"
+ comment: original comment
+ state: present
+ register: output
+ check_mode: yes
+
+ - assert:
+ that:
+ - not output.changed
+ - "'There is already a private hosted zone in the same region with the same VPC' in output.msg"
+
+ # ============================================================
+ - name: Update private zone comment
+ route53_zone:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ vpc_region: "{{ aws_region }}"
+ zone: "{{ resource_prefix }}.private"
+ comment: updated_comment
+ state: present
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - output.result.comment == "updated_comment"
+
+ - name: Update private zone comment (CHECK MODE)
+ route53_zone:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ vpc_region: "{{ aws_region }}"
+ zone: "{{ resource_prefix }}.private"
+ comment: updated_comment check
+ state: present
+ register: output
+ check_mode: yes
+
+ - assert:
+ that:
+ - output.changed
+ - output.result.comment == "updated_comment check"
+
+ # ============================================================
+ - name: Try to delete private zone without setting vpc_id and vpc_region
+ route53_zone:
+ zone: "{{ resource_prefix }}.private"
+ state: absent
+ register: output
+
+ - assert:
+ that:
+ - not output.changed
+ - "output.result == 'No zone to delete.'"
+
+ - name: Try to delete private zone without setting vpc_id and vpc_region (CHECK MODE)
+ route53_zone:
+ zone: "{{ resource_prefix }}.private"
+ state: absent
+ register: output
+ check_mode: yes
+
+ - assert:
+ that:
+ - not output.changed
+ - "output.result == 'No zone to delete.'"
+
+ # ============================================================
+ - name: Try to delete a public zone that does not exist
+ route53_zone:
+ zone: "{{ resource_prefix }}.publicfake"
+ comment: original comment
+ state: absent
+ register: output
+
+ - assert:
+ that:
+ - not output.changed
+ - "output.result == 'No zone to delete.'"
+
+ - name: Try to delete a public zone that does not exist (CHECK MODE)
+ route53_zone:
+ zone: "{{ resource_prefix }}.publicfake"
+ comment: original comment
+ state: absent
+ register: output
+ check_mode: yes
+
+ - assert:
+ that:
+ - not output.changed
+ - "output.result == 'No zone to delete.'"
+
+ # ============================================================
+ - name: Delete private zone (CHECK MODE)
+ route53_zone:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ vpc_region: "{{ aws_region }}"
+ zone: "{{ resource_prefix }}.private"
+ state: absent
+ register: output
+ check_mode: yes
+
+ - assert:
+ that:
+ - output.changed
+ - "'Successfully deleted' in output.result"
+
+ - name: Delete private zone
+ route53_zone:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ vpc_region: "{{ aws_region }}"
+ zone: "{{ resource_prefix }}.private"
+ state: absent
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - "'Successfully deleted' in output.result"
+
+ # ============================================================
+ - name: Create a private zone (new format) (CHECK MODE)
+ route53_zone:
+ vpcs:
+ - id: "{{ testing_vpc.vpc.id }}"
+ region: "{{ aws_region }}"
+ zone: "{{ resource_prefix }}.private"
+ comment: original comment
+ state: present
+ register: output
+ check_mode: yes
+
+ - assert:
+ that:
+ - output.changed
+
+ - name: Create a private zone (new format)
+ route53_zone:
+ vpcs:
+ - id: "{{ testing_vpc.vpc.id }}"
+ region: "{{ aws_region }}"
+ zone: "{{ resource_prefix }}.private"
+ comment: original comment
+ state: present
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+
+ # ============================================================
+ - name: Idempotent update of a private zone (new format) (CHECK MODE)
+ route53_zone:
+ vpcs:
+ - id: "{{ testing_vpc.vpc.id }}"
+ region: "{{ aws_region }}"
+ zone: "{{ resource_prefix }}.private"
+ comment: original comment
+ state: present
+ register: output
+ check_mode: yes
+
+ - assert:
+ that:
+ - not output.changed
+ - "'There is already a private hosted zone in the same region with the same VPC' in output.msg"
+
+ - name: Idempotent update of a private zone (new format)
+ route53_zone:
+ vpcs:
+ - id: "{{ testing_vpc.vpc.id }}"
+ region: "{{ aws_region }}"
+ zone: "{{ resource_prefix }}.private"
+ comment: original comment
+ state: present
+ register: output
+
+ - assert:
+ that:
+ - not output.changed
+ - "'There is already a private hosted zone in the same region with the same VPC' in output.msg"
+
+ # ============================================================
+ - name: Update a private zone comment (new format) (CHECK MODE)
+ route53_zone:
+ vpcs:
+ - id: "{{ testing_vpc.vpc.id }}"
+ region: "{{ aws_region }}"
+ zone: "{{ resource_prefix }}.private"
+ comment: new comment
+ state: present
+ register: output
+ check_mode: yes
+
+ - assert:
+ that:
+ - output.changed
+
+ - name: Update a private zone comment (new format)
+ route53_zone:
+ vpcs:
+ - id: "{{ testing_vpc.vpc.id }}"
+ region: "{{ aws_region }}"
+ zone: "{{ resource_prefix }}.private"
+ comment: new comment
+ state: present
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+
+ # ============================================================
+ - name: Delete private zone (new format) (CHECK MODE)
+ route53_zone:
+ vpcs:
+ - id: "{{ testing_vpc.vpc.id }}"
+ region: "{{ aws_region }}"
+ zone: "{{ resource_prefix }}.private"
+ state: absent
+ register: output
+ check_mode: yes
+
+ - assert:
+ that:
+ - output.changed
+ - "'Successfully deleted' in output.result"
+
+ - name: Delete private zone (new format)
+ route53_zone:
+ vpcs:
+ - id: "{{ testing_vpc.vpc.id }}"
+ region: "{{ aws_region }}"
+ zone: "{{ resource_prefix }}.private"
+ state: absent
+ register: output
+
+ # ============================================================
+ - block:
+ - name: Create second VPC for use in testing
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc2"
+ cidr_block: 10.22.34.0/23
+ tags:
+ Name: Ansible ec2_instance Testing VPC 2
+ tenancy: default
+ register: second_testing_vpc
+
+ - name: Create a private zone with multiple VPCs (CHECK MODE)
+ route53_zone:
+ vpcs:
+ - id: "{{ testing_vpc.vpc.id }}"
+ region: "{{ aws_region }}"
+ - id: "{{ second_testing_vpc.vpc.id }}"
+ region: "{{ aws_region }}"
+ zone: "{{ resource_prefix }}.private"
+ comment: original comment
+ state: present
+ register: output
+ check_mode: yes
+
+ - assert:
+ that:
+ - output.changed
+
+ - name: Create a private zone with multiple VPCs
+ route53_zone:
+ vpcs:
+ - id: "{{ testing_vpc.vpc.id }}"
+ region: "{{ aws_region }}"
+ - id: "{{ second_testing_vpc.vpc.id }}"
+ region: "{{ aws_region }}"
+ zone: "{{ resource_prefix }}.private"
+ comment: original comment
+ state: present
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - output.vpc_id == testing_vpc.vpc.id # The first one for backwards compatibility
+ - output.vpc_region == aws_region
+ - (output.vpcs | length) == 2
+ - output.vpcs.1.id == second_testing_vpc.vpc.id
+ - output.vpcs.1.region == aws_region
+
+ # ============================================================
+ - name: Delete private zone with multiple VPCs (CHECK MODE)
+ route53_zone:
+ vpcs:
+ - id: "{{ testing_vpc.vpc.id }}"
+ region: "{{ aws_region }}"
+ - id: "{{ second_testing_vpc.vpc.id }}"
+ region: "{{ aws_region }}"
+ zone: "{{ resource_prefix }}.private"
+ state: absent
+ register: output
+ check_mode: yes
+
+ - assert:
+ that:
+ - output.changed
+ - "'Successfully deleted' in output.result"
+
+ - name: Delete private zone with multiple VPCs
+ route53_zone:
+ vpcs:
+ - id: "{{ testing_vpc.vpc.id }}"
+ region: "{{ aws_region }}"
+ - id: "{{ second_testing_vpc.vpc.id }}"
+ region: "{{ aws_region }}"
+ zone: "{{ resource_prefix }}.private"
+ state: absent
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - "'Successfully deleted' in output.result"
+
+ always:
+ - name: Delete the second VPC used for testing
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc2"
+ cidr_block: 10.22.34.0/23
+ state: absent
+
+ # ============================================================
+ - name: Create a public zone
+ route53_zone:
+ zone: "{{ resource_prefix }}.public2"
+ comment: this is an example
+ state: present
+ register: new_zone
+
+ # Delete zone using its id
+ - name: Delete zone using attribute hosted_zone_id (CHECK MODE)
+ route53_zone:
+ zone: "{{ resource_prefix }}.public2"
+ hosted_zone_id: "{{new_zone.zone_id}}"
+ state: absent
+ register: output
+ check_mode: yes
+
+ - assert:
+ that:
+ - output.changed
+ - "'Successfully deleted' in output.result"
+
+ - name: Delete zone using attribute hosted_zone_id
+ route53_zone:
+ zone: "{{ resource_prefix }}.public2"
+ hosted_zone_id: "{{new_zone.zone_id}}"
+ state: absent
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - "'Successfully deleted' in output.result"
+
+ # ============================================================
+ always:
+ - name: Ensure public zones are deleted
+ route53_zone:
+ zone: "{{ item }}"
+ state: absent
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+ with_items:
+ - "{{ resource_prefix }}.public"
+ - "{{ resource_prefix }}.public2"
+
+ - name: Ensure private zone is deleted
+ route53_zone:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ vpc_region: "{{ aws_region }}"
+ zone: "{{ resource_prefix }}.private"
+ state: absent
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+ - name: Remove the VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ cidr_block: 10.22.32.0/23
+ state: absent
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/aliases b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/aliases
new file mode 100644
index 000000000..4ef4b2067
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/aliases
@@ -0,0 +1 @@
+cloud/aws
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/inventory b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/inventory
new file mode 100644
index 000000000..db31e4a9b
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/inventory
@@ -0,0 +1,17 @@
+[tests]
+ownership_controls
+missing
+simple
+complex
+dotted
+tags
+encryption_kms
+encryption_bucket_key
+encryption_sse
+public_access
+acl
+object_lock
+
+[all:vars]
+ansible_connection=local
+ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/main.yml
new file mode 100644
index 000000000..22fc0d64f
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/main.yml
@@ -0,0 +1,12 @@
+---
+# Beware: most of our tests here are run in parallel.
+# To add new tests you'll need to add a new host to the inventory and a matching
+# '{{ inventory_hostname }}'.yml file in roles/s3_bucket/tasks/
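+#
+# As a rough sketch (the "lifecycle" host name below is hypothetical, not an
+# existing test), adding a new test means one extra line in the [tests] group of
+# the inventory file plus a matching task file following the usual pattern:
+#
+#   # inventory
+#   [tests]
+#   lifecycle
+#
+#   # roles/s3_bucket/tasks/lifecycle.yml
+#   ---
+#   - name: 'Run lifecycle tests'
+#     block:
+#       - set_fact:
+#           local_bucket_name: "{{ bucket_name | hash('md5') }}-lifecycle"
+#       # test tasks go here
+#     always:
+#       - name: 'Ensure all buckets are deleted'
+#         s3_bucket:
+#           name: '{{ local_bucket_name }}'
+#           state: absent
+#         ignore_errors: yes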
+
+# VPC should get cleaned up once all hosts have run
+- hosts: all
+ gather_facts: no
+ strategy: free
+ #serial: 10
+ roles:
+ - s3_bucket
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/meta/main.yml
new file mode 100644
index 000000000..67c81ac7f
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - role: setup_botocore_pip
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/defaults/main.yml
new file mode 100644
index 000000000..ef5c13907
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+bucket_name: '{{ resource_prefix }}'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/acl.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/acl.yml
new file mode 100644
index 000000000..f924af173
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/acl.yml
@@ -0,0 +1,68 @@
+---
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ - set_fact:
+ local_bucket_name: "{{ bucket_name | hash('md5')}}acl"
+
+ - name: 'Create a simple bucket'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ object_ownership: BucketOwnerPreferred
+ public_access:
+ block_public_acls: true
+ block_public_policy: true
+ ignore_public_acls: true
+ restrict_public_buckets: true
+
+ - name: 'Update bucket ACL, new value = private'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ acl: private
+ state: present
+ register: private_acl
+
+ - assert:
+ that:
+ - private_acl.changed
+
+ - name: 'Update bucket ACL, new value = public-read'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ acl: public-read
+ state: present
+ ignore_errors: true
+ register: public_read_acl
+
+ - assert:
+ that:
+ - public_read_acl is failed
+
+ - name: 'Update bucket ACL, new value = public-read (public ACLs unblocked)'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ acl: public-read
+ state: present
+ public_access:
+ block_public_acls: false
+ block_public_policy: true
+ ignore_public_acls: true
+ restrict_public_buckets: true
+ ignore_errors: true
+ register: public_read_acl
+
+ - assert:
+ that:
+ - public_read_acl.changed
+
+ # ============================================================
+ always:
+ - name: Ensure all buckets are deleted
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: absent
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/complex.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/complex.yml
new file mode 100644
index 000000000..8b8a8bdca
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/complex.yml
@@ -0,0 +1,150 @@
+---
+- block:
+ - set_fact:
+ local_bucket_name: "{{ bucket_name | hash('md5')}}complex"
+ - name: 'Create more complex s3_bucket'
+ s3_bucket:
+ name: "{{ local_bucket_name }}"
+ state: present
+ policy: "{{ lookup('template','policy.json') }}"
+ requester_pays: yes
+ versioning: yes
+ public_access:
+ block_public_acls: false
+ tags:
+ example: tag1
+ another: tag2
+ register: output
+
+ - assert:
+ that:
+ - output is changed
+ - output.name == '{{ local_bucket_name }}'
+ - output.requester_pays
+ - output.versioning.MfaDelete == 'Disabled'
+ - output.versioning.Versioning == 'Enabled'
+ - output.tags.example == 'tag1'
+ - output.tags.another == 'tag2'
+ - output.policy.Statement[0].Action == 's3:GetObject'
+ - output.policy.Statement[0].Effect == 'Allow'
+ - output.policy.Statement[0].Principal == '*'
+ - output.policy.Statement[0].Resource == 'arn:aws:s3:::{{ local_bucket_name }}/*'
+ - output.policy.Statement[0].Sid == 'AddPerm'
+
+ # ============================================================
+
+ - name: 'Pause to help with s3 bucket eventual consistency'
+ wait_for:
+ timeout: 10
+ delegate_to: localhost
+
+ - name: 'Try to update the same complex s3_bucket'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ policy: "{{ lookup('template','policy.json') }}"
+ requester_pays: yes
+ versioning: yes
+ tags:
+ example: tag1
+ another: tag2
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+ - output.name == '{{ local_bucket_name }}'
+ - output.requester_pays
+ - output.versioning.MfaDelete == 'Disabled'
+ - output.versioning.Versioning == 'Enabled'
+ - output.tags.example == 'tag1'
+ - output.tags.another == 'tag2'
+ - output.policy.Statement[0].Action == 's3:GetObject'
+ - output.policy.Statement[0].Effect == 'Allow'
+ - output.policy.Statement[0].Principal == '*'
+ - output.policy.Statement[0].Resource == 'arn:aws:s3:::{{ local_bucket_name }}/*'
+ - output.policy.Statement[0].Sid == 'AddPerm'
+
+ # ============================================================
+ - name: 'Update bucket policy on complex bucket'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ policy: "{{ lookup('template','policy-updated.json') }}"
+ requester_pays: yes
+ versioning: yes
+ tags:
+ example: tag1
+ another: tag2
+ register: output
+
+ - assert:
+ that:
+ - output is changed
+ - output.policy.Statement[0].Action == 's3:GetObject'
+ - output.policy.Statement[0].Effect == 'Deny'
+ - output.policy.Statement[0].Principal.AWS == '*'
+ - output.policy.Statement[0].Resource == 'arn:aws:s3:::{{ local_bucket_name }}/*'
+ - output.policy.Statement[0].Sid == 'AddPerm'
+
+ # ============================================================
+
+ - name: 'Pause to help with s3 bucket eventual consistency'
+ wait_for:
+ timeout: 10
+ delegate_to: localhost
+
+ - name: Update attributes for s3_bucket
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ policy: "{{ lookup('template','policy.json') }}"
+ requester_pays: no
+ versioning: no
+ tags:
+ example: tag1-updated
+ another: tag2
+ register: output
+
+ - assert:
+ that:
+ - output is changed
+ - output.name == '{{ local_bucket_name }}'
+ - not output.requester_pays
+ - output.versioning.MfaDelete == 'Disabled'
+ - output.versioning.Versioning in ['Suspended', 'Disabled']
+ - output.tags.example == 'tag1-updated'
+ - output.tags.another == 'tag2'
+ - output.policy.Statement[0].Action == 's3:GetObject'
+ - output.policy.Statement[0].Effect == 'Allow'
+ - output.policy.Statement[0].Principal == '*'
+ - output.policy.Statement[0].Resource == 'arn:aws:s3:::{{ local_bucket_name }}/*'
+ - output.policy.Statement[0].Sid == 'AddPerm'
+
+ - name: 'Delete complex test bucket'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: absent
+ register: output
+
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Re-delete complex test bucket'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: absent
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+
+ # ============================================================
+ always:
+ - name: 'Ensure all buckets are deleted'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: absent
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/dotted.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/dotted.yml
new file mode 100644
index 000000000..1461b51bc
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/dotted.yml
@@ -0,0 +1,55 @@
+---
+- block:
+ - name: 'Ensure bucket_name contains a .'
+ set_fact:
+ local_bucket_name: "{{ bucket_name | hash('md5')}}.dotted"
+
+
+ # ============================================================
+ #
+ - name: 'Create bucket with dot in name'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ register: output
+
+ - assert:
+ that:
+ - output is changed
+ - output.name == '{{ local_bucket_name }}'
+
+
+ # ============================================================
+
+ - name: 'Pause to help with s3 bucket eventual consistency'
+ wait_for:
+ timeout: 10
+ delegate_to: localhost
+
+ - name: 'Delete s3_bucket with dot in name'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: absent
+ register: output
+
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Re-delete s3_bucket with dot in name'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: absent
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+
+ # ============================================================
+ always:
+ - name: 'Ensure all buckets are deleted'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: absent
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_bucket_key.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_bucket_key.yml
new file mode 100644
index 000000000..66a54c1e0
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_bucket_key.yml
@@ -0,0 +1,103 @@
+---
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ - name: Set facts for encryption_bucket_key test
+ set_fact:
+ local_bucket_name: "{{ bucket_name | hash('md5') }}-bucket-key"
+ # ============================================================
+
+ - name: "Create a simple bucket"
+ s3_bucket:
+ name: "{{ local_bucket_name }}"
+ state: present
+ register: output
+
+ - name: "Enable aws:kms encryption with KMS master key"
+ s3_bucket:
+ name: "{{ local_bucket_name }}"
+ state: present
+ encryption: "aws:kms"
+ register: output
+
+ - name: "Enable bucket key for bucket with aws:kms encryption"
+ s3_bucket:
+ name: "{{ local_bucket_name }}"
+ state: present
+ encryption: "aws:kms"
+ bucket_key_enabled: true
+ register: output
+
+ - name: "Assert for 'Enable bucket key for bucket with aws:kms encryption'"
+ assert:
+ that:
+ - output.changed
+ - output.encryption
+
+ - name: "Re-enable bucket key for bucket with aws:kms encryption (idempotent)"
+ s3_bucket:
+ name: "{{ local_bucket_name }}"
+ encryption: "aws:kms"
+ bucket_key_enabled: true
+ register: output
+
+ - name: "Assert for 'Re-enable bucket key for bucket with aws:kms encryption (idempotent)'"
+ assert:
+ that:
+ - not output.changed
+ - output.encryption
+
+ ## # ============================================================
+ ##
+ ## AWS S3 no longer supports disabling S3 encryption
+ ## https://docs.aws.amazon.com/AmazonS3/latest/userguide/default-encryption-faq.html
+ ##
+ ## - name: Disable encryption from bucket
+ ## s3_bucket:
+ ## name: "{{ local_bucket_name }}"
+ ## encryption: none
+ ## bucket_key_enabled: false
+ ## register: output
+ ##
+ ## - name: Assert for 'Disable encryption from bucket'
+ ## assert:
+ ## that:
+ ## - output.changed
+ ## - not output.encryption
+ ##
+ ## - name: Disable encryption from bucket (idempotent)
+ ## s3_bucket:
+ ## name: "{{ local_bucket_name }}"
+ ## bucket_key_enabled: true
+ ## register: output
+ ##
+ ## - name: Assert for 'Disable encryption from bucket (idempotent)'
+ ## assert:
+ ## that:
+ ## - output is not changed
+ ## - not output.encryption
+ ##
+ ## # ============================================================
+
+ - name: Delete encryption test s3 bucket
+ s3_bucket:
+ name: "{{ local_bucket_name }}"
+ state: absent
+ register: output
+
+ - name: Assert for 'Delete encryption test s3 bucket'
+ assert:
+ that:
+ - output.changed
+
+ # ============================================================
+ always:
+ - name: Ensure all buckets are deleted
+ s3_bucket:
+ name: "{{ local_bucket_name }}"
+ state: absent
+ failed_when: false
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_kms.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_kms.yml
new file mode 100644
index 000000000..75cdb4c6f
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_kms.yml
@@ -0,0 +1,92 @@
+---
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ - set_fact:
+ local_bucket_name: "{{ bucket_name | hash('md5')}}e-kms"
+ # ============================================================
+
+ - name: 'Create a simple bucket'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ register: output
+
+ - name: 'Enable aws:kms encryption with KMS master key'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ encryption: "aws:kms"
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - output.encryption
+ - output.encryption.SSEAlgorithm == 'aws:kms'
+
+ - name: 'Re-enable aws:kms encryption with KMS master key (idempotent)'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ encryption: "aws:kms"
+ register: output
+
+ - assert:
+ that:
+ - not output.changed
+ - output.encryption
+ - output.encryption.SSEAlgorithm == 'aws:kms'
+
+ ## # ============================================================
+ ##
+ ## AWS S3 no longer supports disabling S3 encryption
+ ## https://docs.aws.amazon.com/AmazonS3/latest/userguide/default-encryption-faq.html
+ ##
+ ## - name: Disable encryption from bucket
+ ## s3_bucket:
+ ## name: '{{ local_bucket_name }}'
+ ## state: present
+ ## encryption: "none"
+ ## register: output
+ ##
+ ## - assert:
+ ## that:
+ ## - output.changed
+ ## - not output.encryption
+ ##
+ ## - name: Disable encryption from bucket
+ ## s3_bucket:
+ ## name: '{{ local_bucket_name }}'
+ ## state: present
+ ## encryption: "none"
+ ## register: output
+ ##
+ ## - assert:
+ ## that:
+ ## - output is not changed
+ ## - not output.encryption
+ ##
+ ## # ============================================================
+
+ - name: Delete encryption test s3 bucket
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: absent
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+
+ # ============================================================
+ always:
+ - name: Ensure all buckets are deleted
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: absent
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_sse.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_sse.yml
new file mode 100644
index 000000000..60ee26009
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_sse.yml
@@ -0,0 +1,93 @@
+---
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ - set_fact:
+ local_bucket_name: "{{ bucket_name | hash('md5')}}e-sse"
+ # ============================================================
+
+ - name: 'Create a simple bucket'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ register: output
+
+ - name: 'Enable AES256 encryption'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ encryption: 'AES256'
+ register: output
+
+ - assert:
+ that:
+ # SSE is now enabled by default
+ # - output.changed
+ - output.encryption
+ - output.encryption.SSEAlgorithm == 'AES256'
+
+ - name: 'Re-enable AES256 encryption (idempotency)'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ encryption: 'AES256'
+ register: output
+
+ - assert:
+ that:
+ - not output.changed
+ - output.encryption
+ - output.encryption.SSEAlgorithm == 'AES256'
+
+ ## # ============================================================
+ ##
+ ## AWS S3 no longer supports disabling S3 encryption
+ ## https://docs.aws.amazon.com/AmazonS3/latest/userguide/default-encryption-faq.html
+ ##
+ ## - name: Disable encryption from bucket
+ ## s3_bucket:
+ ## name: '{{ local_bucket_name }}'
+ ## state: present
+ ## encryption: "none"
+ ## register: output
+ ##
+ ## - assert:
+ ## that:
+ ## - output.changed
+ ## - not output.encryption
+ ##
+ ## - name: Disable encryption from bucket
+ ## s3_bucket:
+ ## name: '{{ local_bucket_name }}'
+ ## state: present
+ ## encryption: "none"
+ ## register: output
+ ##
+ ## - assert:
+ ## that:
+ ## - output is not changed
+ ## - not output.encryption
+ ##
+ ## # ============================================================
+
+ - name: Delete encryption test s3 bucket
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: absent
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+
+ # ============================================================
+ always:
+ - name: Ensure all buckets are deleted
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: absent
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/main.yml
new file mode 100644
index 000000000..8eba03ba1
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/main.yml
@@ -0,0 +1,20 @@
+---
+# Beware: most of our tests here are run in parallel.
+# To add new tests you'll need to add a new host to the inventory and a matching
+ # '{{ inventory_hostname }}'.yml file in roles/s3_bucket/tasks/
+#
+# ###############################################################################
+
+- name: "Wrap up all tests and setup AWS credentials"
+ module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ - debug:
+ msg: "{{ inventory_hostname }} start: {{ lookup('pipe','date') }}"
+ - include_tasks: '{{ inventory_hostname }}.yml'
+ - debug:
+ msg: "{{ inventory_hostname }} finish: {{ lookup('pipe','date') }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/missing.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/missing.yml
new file mode 100644
index 000000000..eaac3ea79
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/missing.yml
@@ -0,0 +1,28 @@
+---
+- name: 'Attempt to delete non-existent buckets'
+ block:
+ - set_fact:
+ local_bucket_name: "{{ bucket_name | hash('md5')}}-missing"
+ # ============================================================
+ #
+ # While in theory the 'simple' test case covers this, there are
+ # ways in which eventual consistency could catch us out.
+ #
+ - name: 'Delete non-existent s3_bucket (never created)'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: absent
+ register: output
+
+ - assert:
+ that:
+ - output is success
+ - output is not changed
+
+ # ============================================================
+ always:
+ - name: 'Ensure all buckets are deleted'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: absent
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/object_lock.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/object_lock.yml
new file mode 100644
index 000000000..9140a566b
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/object_lock.yml
@@ -0,0 +1,131 @@
+---
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ - set_fact:
+ local_bucket_name: "{{ bucket_name | hash('md5')}}-objectlock"
+
+ # ============================================================
+
+ - name: 'Create a simple bucket'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - not output.object_lock_enabled
+
+ - name: 'Re-disable object lock (idempotency)'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ object_lock_enabled: false
+ register: output
+
+ - assert:
+ that:
+ - not output.changed
+ - not output.object_lock_enabled
+
+ - name: 'Enable object lock'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ object_lock_enabled: true
+ register: output
+ ignore_errors: true
+
+ - assert:
+ that:
+ - output is failed
+
+ - name: Delete test s3 bucket
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: absent
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+
+ # ============================================================
+
+ - name: 'Create a bucket with object lock enabled'
+ s3_bucket:
+ name: '{{ local_bucket_name }}-2'
+ state: present
+ object_lock_enabled: true
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - output.object_lock_enabled
+
+ - name: 'Disable object lock'
+ s3_bucket:
+ name: '{{ local_bucket_name }}-2'
+ state: present
+ object_lock_enabled: false
+ register: output
+ ignore_errors: true
+
+ - assert:
+ that:
+ - output is failed
+
+ - name: 'Re-Enable object lock (idempotency)'
+ s3_bucket:
+ name: '{{ local_bucket_name }}-2'
+ state: present
+ object_lock_enabled: true
+ register: output
+
+ - assert:
+ that:
+ - not output.changed
+ - output.object_lock_enabled
+
+ - name: 'Touch bucket with object lock enabled (idempotency)'
+ s3_bucket:
+ name: '{{ local_bucket_name }}-2'
+ state: present
+ object_lock_enabled: true
+ register: output
+
+ - assert:
+ that:
+ - not output.changed
+ - output.object_lock_enabled
+
+ - name: Delete test s3 bucket
+ s3_bucket:
+ name: '{{ local_bucket_name }}-2'
+ state: absent
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+
+ # ============================================================
+ always:
+ - name: Ensure all buckets are deleted
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: absent
+ ignore_errors: yes
+
+ - name: Ensure all buckets are deleted
+ s3_bucket:
+ name: '{{ local_bucket_name }}-2'
+ state: absent
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/ownership_controls.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/ownership_controls.yml
new file mode 100644
index 000000000..683ff0659
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/ownership_controls.yml
@@ -0,0 +1,143 @@
+---
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ - set_fact:
+ local_bucket_name: "{{ bucket_name | hash('md5')}}ownership"
+
+ - name: 'Create a simple bucket with a bad value for ownership controls'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ object_ownership: default
+ ignore_errors: true
+ register: output
+
+ - assert:
+ that:
+ - output.failed
+
+ - name: 'Create bucket without object_ownership controls set'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ ignore_errors: true
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - not output.object_ownership|bool
+
+ - name: delete s3 bucket
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: absent
+
+ - name: 'create s3 bucket with object ownership controls'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ object_ownership: ObjectWriter
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - output.object_ownership
+ - output.object_ownership == 'ObjectWriter'
+
+ - name: 'update s3 bucket ownership preferred controls'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ object_ownership: BucketOwnerPreferred
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - output.object_ownership
+ - output.object_ownership == 'BucketOwnerPreferred'
+
+ - name: 'test idempotency update s3 bucket ownership preferred controls'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ object_ownership: BucketOwnerPreferred
+ register: output
+
+ - assert:
+ that:
+ - output.changed is false
+ - output.object_ownership
+ - output.object_ownership == 'BucketOwnerPreferred'
+
+ - name: 'update s3 bucket ownership enforced controls'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ object_ownership: BucketOwnerEnforced
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - output.object_ownership
+ - output.object_ownership == 'BucketOwnerEnforced'
+
+ - name: 'test idempotency update s3 bucket ownership enforced controls'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ object_ownership: BucketOwnerEnforced
+ register: output
+
+ - assert:
+ that:
+ - output.changed is false
+ - output.object_ownership
+ - output.object_ownership == 'BucketOwnerEnforced'
+
+ - name: 'delete s3 bucket ownership controls'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ delete_object_ownership: true
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - not output.object_ownership|bool
+
+ - name: 'delete s3 bucket ownership controls once again (idempotency)'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ delete_object_ownership: true
+ register: idempotency
+
+ - assert:
+ that:
+ - not idempotency.changed
+ - not idempotency.object_ownership|bool
+
+ # ============================================================
+ always:
+ - name: delete s3 bucket ownership controls
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ delete_object_ownership: true
+ ignore_errors: yes
+
+ - name: Ensure all buckets are deleted
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: absent
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/public_access.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/public_access.yml
new file mode 100644
index 000000000..743a2ce4d
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/public_access.yml
@@ -0,0 +1,115 @@
+---
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ - set_fact:
+ local_bucket_name: "{{ bucket_name | hash('md5')}}-public"
+ # ============================================================
+
+ - name: 'Create a simple bucket with public access block configuration'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ public_access:
+ block_public_acls: true
+ block_public_policy: true
+ ignore_public_acls: true
+ restrict_public_buckets: true
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - output.public_access_block
+ - output.public_access_block.BlockPublicAcls
+ - output.public_access_block.BlockPublicPolicy
+ - output.public_access_block.IgnorePublicAcls
+ - output.public_access_block.RestrictPublicBuckets
+
+ - name: 'Re-configure public access block configuration'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ public_access:
+ block_public_acls: true
+ block_public_policy: false
+ ignore_public_acls: true
+ restrict_public_buckets: false
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - output.public_access_block
+ - output.public_access_block.BlockPublicAcls
+ - not output.public_access_block.BlockPublicPolicy
+ - output.public_access_block.IgnorePublicAcls
+ - not output.public_access_block.RestrictPublicBuckets
+
+ - name: 'Re-configure public access block configuration (idempotency)'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ public_access:
+ block_public_acls: true
+ block_public_policy: false
+ ignore_public_acls: true
+ restrict_public_buckets: false
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+ - output.public_access_block
+ - output.public_access_block.BlockPublicAcls
+ - not output.public_access_block.BlockPublicPolicy
+ - output.public_access_block.IgnorePublicAcls
+ - not output.public_access_block.RestrictPublicBuckets
+
+ - name: 'Delete public access block configuration'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ delete_public_access: true
+ register: output
+
+ - assert:
+ that:
+ - output is changed
+ - not output.public_access_block|bool
+
+ - name: 'Delete public access block configuration (idempotency)'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ delete_public_access: true
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+ - not output.public_access_block|bool
+
+ # ============================================================
+
+ - name: Delete testing s3 bucket
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: absent
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+
+ # ============================================================
+ always:
+ - name: Ensure all buckets are deleted
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: absent
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/simple.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/simple.yml
new file mode 100644
index 000000000..7a2f3a4e2
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/simple.yml
@@ -0,0 +1,67 @@
+---
+- name: 'Run simple tests'
+ block:
+ - set_fact:
+ local_bucket_name: "{{ bucket_name | hash('md5')}}-simple"
+ # Note: s3_bucket doesn't support check_mode
+
+ # ============================================================
+ - name: 'Create a simple s3_bucket'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ register: output
+
+ - assert:
+ that:
+ - output is success
+ - output is changed
+ - output.name == '{{ local_bucket_name }}'
+ - not output.requester_pays
+ - output.public_access is undefined
+
+ # ============================================================
+ - name: 'Try to update the simple bucket with the same values'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ register: output
+
+ - assert:
+ that:
+ - output is success
+ - output is not changed
+ - output.name == '{{ local_bucket_name }}'
+ - not output.requester_pays
+
+ # ============================================================
+ - name: 'Delete the simple s3_bucket'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: absent
+ register: output
+
+ - assert:
+ that:
+ - output is success
+ - output is changed
+
+ # ============================================================
+ - name: 'Re-delete the simple s3_bucket (idempotency)'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: absent
+ register: output
+
+ - assert:
+ that:
+ - output is success
+ - output is not changed
+
+ # ============================================================
+ always:
+ - name: 'Ensure all buckets are deleted'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: absent
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/tags.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/tags.yml
new file mode 100644
index 000000000..1df4e5c9c
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/tags.yml
@@ -0,0 +1,257 @@
+---
+- name: 'Run tagging tests'
+ block:
+ - set_fact:
+ local_bucket_name: "{{ bucket_name | hash('md5')}}-tags"
+ # ============================================================
+ - name: 'Create simple s3_bucket for testing tagging'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - output.name == '{{ local_bucket_name }}'
+
+ # ============================================================
+
+ - name: 'Add tags to s3 bucket'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ tags:
+ example: tag1
+ another: tag2
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - output.name == '{{ local_bucket_name }}'
+ - output.tags.example == 'tag1'
+ - output.tags.another == 'tag2'
+
+ - name: 'Re-Add tags to s3 bucket'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ tags:
+ example: tag1
+ another: tag2
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+ - output.name == '{{ local_bucket_name }}'
+ - output.tags.example == 'tag1'
+ - output.tags.another == 'tag2'
+
+ # ============================================================
+
+ - name: Remove a tag from an s3_bucket
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ tags:
+ example: tag1
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - output.name == '{{ local_bucket_name }}'
+ - output.tags.example == 'tag1'
+ - "'another' not in output.tags"
+
+ - name: Re-remove the tag from an s3_bucket
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ tags:
+ example: tag1
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+ - output.name == '{{ local_bucket_name }}'
+ - output.tags.example == 'tag1'
+ - "'another' not in output.tags"
+
+ ## ============================================================
+
+ #- name: 'Pause to help with s3 bucket eventual consistency'
+ # wait_for:
+ # timeout: 10
+ # delegate_to: localhost
+
+ ## ============================================================
+
+ - name: 'Add a tag for s3_bucket with purge_tags False'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ purge_tags: no
+ tags:
+ anewtag: here
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - output.name == '{{ local_bucket_name }}'
+ - output.tags.example == 'tag1'
+ - output.tags.anewtag == 'here'
+
+ - name: 'Re-add a tag for s3_bucket with purge_tags False'
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ purge_tags: no
+ tags:
+ anewtag: here
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+ - output.name == '{{ local_bucket_name }}'
+ - output.tags.example == 'tag1'
+ - output.tags.anewtag == 'here'
+
+ ## ============================================================
+
+ #- name: 'Pause to help with s3 bucket eventual consistency'
+ # wait_for:
+ # timeout: 10
+ # delegate_to: localhost
+
+ ## ============================================================
+
+ - name: Update a tag for s3_bucket with purge_tags False
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ purge_tags: no
+ tags:
+ anewtag: next
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - output.name == '{{ local_bucket_name }}'
+ - output.tags.example == 'tag1'
+ - output.tags.anewtag == 'next'
+
+ - name: Re-update a tag for s3_bucket with purge_tags False
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ purge_tags: no
+ tags:
+ anewtag: next
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+ - output.name == '{{ local_bucket_name }}'
+ - output.tags.example == 'tag1'
+ - output.tags.anewtag == 'next'
+
+ ## ============================================================
+
+ #- name: 'Pause to help with s3 bucket eventual consistency'
+ # wait_for:
+ # timeout: 10
+ # delegate_to: localhost
+
+ ## ============================================================
+
+ - name: Pass empty tags dict for s3_bucket with purge_tags False
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ purge_tags: no
+ tags: {}
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+ - output.name == '{{ local_bucket_name }}'
+ - output.tags.example == 'tag1'
+ - output.tags.anewtag == 'next'
+
+ ## ============================================================
+
+ #- name: 'Pause to help with s3 bucket eventual consistency'
+ # wait_for:
+ # timeout: 10
+ # delegate_to: localhost
+
+ ## ============================================================
+
+ - name: Do not specify any tag to ensure previous tags are not removed
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ register: output
+
+ - assert:
+ that:
+ - not output.changed
+ - output.name == '{{ local_bucket_name }}'
+ - output.tags.example == 'tag1'
+
+ # ============================================================
+
+ - name: Remove all tags
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ tags: {}
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - output.name == '{{ local_bucket_name }}'
+ - output.tags == {}
+
+ - name: Re-remove all tags
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: present
+ tags: {}
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+ - output.name == '{{ local_bucket_name }}'
+ - output.tags == {}
+
+ # ============================================================
+
+ - name: Delete bucket
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: absent
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+
+ # ============================================================
+ always:
+ - name: Ensure all buckets are deleted
+ s3_bucket:
+ name: '{{ local_bucket_name }}'
+ state: absent
+ ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/templates/policy-updated.json b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/templates/policy-updated.json
new file mode 100644
index 000000000..e0b10273f
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/templates/policy-updated.json
@@ -0,0 +1,12 @@
+{
+ "Version":"2012-10-17",
+ "Statement":[
+ {
+ "Sid":"AddPerm",
+ "Effect":"Deny",
+ "Principal": {"AWS": "*"},
+ "Action":["s3:GetObject"],
+ "Resource":["arn:aws:s3:::{{local_bucket_name}}/*"]
+ }
+ ]
+}
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/templates/policy.json b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/templates/policy.json
new file mode 100644
index 000000000..0f7c4968f
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/templates/policy.json
@@ -0,0 +1,12 @@
+{
+ "Version":"2012-10-17",
+ "Statement":[
+ {
+ "Sid":"AddPerm",
+ "Effect":"Allow",
+ "Principal": "*",
+ "Action":["s3:GetObject"],
+ "Resource":["arn:aws:s3:::{{local_bucket_name}}/*"]
+ }
+ ]
+}
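A minimal sketch of how a role task might render one of these templates into the s3_bucket policy option; the template lookup pattern mirrors the one used by the s3_object tests further down in this patch, and the task itself is illustrative only:

- name: Apply the rendered bucket policy (illustrative sketch)
  s3_bucket:
    name: '{{ local_bucket_name }}'
    state: present
    policy: "{{ lookup('template', 'policy.json') }}"
  register: output

- assert:
    that:
      - output.changed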
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/runme.sh b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/runme.sh
new file mode 100755
index 000000000..aa324772b
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/runme.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+#
+# Beware: most of our tests here are run in parallel.
+# To add new tests you'll need to add a new host to the inventory and a matching
+# '{{ inventory_hostname }}'.yml file in roles/s3_bucket/tasks/
+
+
+set -eux
+
+export ANSIBLE_ROLES_PATH=../
+
+ansible-playbook main.yml -i inventory "$@"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/aliases b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/aliases
new file mode 100644
index 000000000..d34fac48d
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+aws_s3
+s3_object_info
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/defaults/main.yml
new file mode 100644
index 000000000..d408adb49
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+# defaults file for s3
+bucket_name: '{{ resource_prefix | hash("md5") }}'
+bucket_name_acl: "{{ bucket_name + '-with-acl' }}"
+bucket_name_with_dot: "{{ bucket_name + '.bucket' }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/files/hello.txt b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/files/hello.txt
new file mode 100644
index 000000000..8ab686eaf
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/files/hello.txt
@@ -0,0 +1 @@
+Hello, World!
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/files/test.png b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/files/test.png
new file mode 100644
index 000000000..1dc64bab8
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/files/test.png
Binary files differ
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/meta/main.yml
new file mode 100644
index 000000000..60f81883a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/meta/main.yml
@@ -0,0 +1,6 @@
+dependencies:
+ - setup_remote_tmp_dir
+ # required for s3.get_object_attributes
+ - role: setup_botocore_pip
+ vars:
+ botocore_version: '1.24.7'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_object.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_object.yml
new file mode 100644
index 000000000..aff38eba1
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_object.yml
@@ -0,0 +1,135 @@
+- block:
+ - name: define bucket name used for tests
+ set_fact:
+ copy_bucket:
+ src: "{{ bucket_name }}-copysrc"
+ dst: "{{ bucket_name }}-copydst"
+
+ - name: create bucket source
+ s3_object:
+ bucket: "{{ copy_bucket.src }}"
+ mode: create
+
+ - name: Create content
+ set_fact:
+ content: "{{ lookup('password', '/dev/null chars=ascii_letters,digits,hexdigits,punctuation') }}"
+
+ - name: Put a content in the source bucket
+ s3_object:
+ bucket: "{{ copy_bucket.src }}"
+ mode: put
+ content: "{{ content }}"
+ object: source.txt
+ tags:
+ ansible_release: '2.0.0'
+ ansible_team: cloud
+ retries: 3
+ delay: 3
+ register: put_result
+ until: "put_result.msg == 'PUT operation complete'"
+
+ - name: Copy the content of the source bucket into dest bucket
+ s3_object:
+ bucket: "{{ copy_bucket.dst }}"
+ mode: copy
+ object: destination.txt
+ copy_src:
+ bucket: "{{ copy_bucket.src }}"
+ object: source.txt
+
+ - name: Get the content copied into {{ copy_bucket.dst }}
+ s3_object:
+ bucket: "{{ copy_bucket.dst }}"
+ mode: getstr
+ object: destination.txt
+ register: copy_content
+
+ - name: assert that the content is matching with the source
+ assert:
+ that:
+ - content == copy_content.contents
+
+ - name: Get the download url for object copied into {{ copy_bucket.dst }}
+ s3_object:
+ bucket: "{{ copy_bucket.dst }}"
+ mode: geturl
+ object: destination.txt
+ register: copy_url
+
+ - name: assert that tags are the same in the destination bucket
+ assert:
+ that:
+ - put_result.tags == copy_url.tags
+
+ - name: Copy the same content from the source bucket into dest bucket (idempotency)
+ s3_object:
+ bucket: "{{ copy_bucket.dst }}"
+ mode: copy
+ object: destination.txt
+ copy_src:
+ bucket: "{{ copy_bucket.src }}"
+ object: source.txt
+ register: copy_idempotency
+
+ - name: assert that no change was made
+ assert:
+ that:
+ - copy_idempotency is not changed
+ - "copy_idempotency.msg == 'ETag from source and destination are the same'"
+
+ - name: Copy object with tags
+ s3_object:
+ bucket: "{{ copy_bucket.dst }}"
+ mode: copy
+ object: destination.txt
+ tags:
+ ansible_release: "2.0.1"
+ copy_src:
+ bucket: "{{ copy_bucket.src }}"
+ object: source.txt
+ register: copy_result
+
+ - name: assert that tags were updated
+ assert:
+ that:
+ - copy_result is changed
+ - copy_result.tags['ansible_release'] == '2.0.1'
+
+ - name: Copy object with tags (idempotency)
+ s3_object:
+ bucket: "{{ copy_bucket.dst }}"
+ mode: copy
+ object: destination.txt
+ tags:
+ ansible_release: "2.0.1"
+ copy_src:
+ bucket: "{{ copy_bucket.src }}"
+ object: source.txt
+ register: copy_result
+
+ - name: assert that no change was made
+ assert:
+ that:
+ - copy_result is not changed
+
+ - name: Copy from a nonexistent key should not succeed
+ s3_object:
+ bucket: "{{ copy_bucket.dst }}"
+ mode: copy
+ object: missing_key.txt
+ copy_src:
+ bucket: "{{ copy_bucket.src }}"
+ object: this_key_does_not_exist.txt
+ register: result
+
+ - name: Validate result when copying missing key
+ assert:
+ that:
+ - result is not changed
+ - 'result.msg == "Key this_key_does_not_exist.txt does not exist in bucket {{ copy_bucket.src }}."'
+
+ always:
+ - include_tasks: delete_bucket.yml
+ with_items:
+ - "{{ copy_bucket.dst }}"
+ - "{{ copy_bucket.src }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_object_acl_disabled_bucket.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_object_acl_disabled_bucket.yml
new file mode 100644
index 000000000..7fbd8b786
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_object_acl_disabled_bucket.yml
@@ -0,0 +1,111 @@
+- name: test copying objects to bucket with ACL disabled
+ block:
+ - name: Create a bucket with ACL disabled for the test
+ s3_bucket:
+ name: "{{ bucket_name }}-acl-disabled"
+ object_ownership: BucketOwnerEnforced
+ state: present
+ register: create_result
+
+ - name: Ensure bucket creation
+ assert:
+ that:
+ - create_result is changed
+ - create_result is not failed
+ - create_result.object_ownership == "BucketOwnerEnforced"
+
+ - name: Create content
+ set_fact:
+ content: "{{ lookup('password', '/dev/null chars=ascii_letters,digits,hexdigits,punctuation') }}"
+
+ - name: Create local acl_disabled_upload_test.txt
+ copy:
+ content: "{{ content }}"
+ dest: "{{ remote_tmp_dir }}/acl_disabled_upload_test.txt"
+
+ - name: Upload a file to the bucket (check_mode)
+ amazon.aws.s3_object:
+ bucket: "{{ bucket_name }}-acl-disabled"
+ src: "{{ remote_tmp_dir }}/acl_disabled_upload_test.txt"
+ object: "acl_disabled_upload_test.txt"
+ mode: put
+ check_mode: true
+ register: upload_file_result
+
+ - assert:
+ that:
+ - upload_file_result is changed
+ - upload_file_result is not failed
+ - upload_file_result.msg == "PUT operation skipped - running in check mode"
+ - '"s3:PutObject" not in upload_file_result.resource_actions'
+
+ - name: Upload a file to the bucket
+ amazon.aws.s3_object:
+ bucket: "{{ bucket_name }}-acl-disabled"
+ src: "{{ remote_tmp_dir }}/acl_disabled_upload_test.txt"
+ object: "acl_disabled_upload_test.txt"
+ mode: put
+ register: upload_file_result
+
+ - assert:
+ that:
+ - upload_file_result is changed
+ - upload_file_result is not failed
+ - upload_file_result.msg == "PUT operation complete"
+ - '"s3:PutObject" in upload_file_result.resource_actions'
+
+ - name: Upload a file to the bucket (check_mode - idempotency)
+ amazon.aws.s3_object:
+ bucket: "{{ bucket_name }}-acl-disabled"
+ src: "{{ remote_tmp_dir }}/acl_disabled_upload_test.txt"
+ object: "acl_disabled_upload_test.txt"
+ mode: put
+ check_mode: true
+ register: upload_file_result
+
+ - assert:
+ that:
+ - upload_file_result is not changed
+ - upload_file_result is not failed
+ - upload_file_result.msg != "PUT operation complete"
+ - '"s3:PutObject" not in upload_file_result.resource_actions'
+
+ - name: Upload a file to the bucket (idempotency)
+ amazon.aws.s3_object:
+ bucket: "{{ bucket_name }}-acl-disabled"
+ src: "{{ remote_tmp_dir }}/acl_disabled_upload_test.txt"
+ object: "acl_disabled_upload_test.txt"
+ mode: put
+ register: upload_file_result
+
+ - assert:
+ that:
+ - upload_file_result is not changed
+ - upload_file_result is not failed
+ - upload_file_result.msg != "PUT operation complete"
+ - '"s3:PutObject" not in upload_file_result.resource_actions'
+
+ always:
+
+ - name: Delete the file in the bucket
+ amazon.aws.s3_object:
+ bucket: "{{ bucket_name }}-acl-disabled"
+ src: "{{ remote_tmp_dir }}/acl_disabled_upload_test.txt"
+ object: "acl_disabled_upload_test.txt"
+ mode: delobj
+ retries: 3
+ delay: 3
+ ignore_errors: true
+
+ - name: Delete bucket created in this test
+ s3_bucket:
+ name: "{{ bucket_name }}-acl-disabled"
+ object_ownership: BucketOwnerEnforced
+ state: absent
+ register: delete_result
+
+ - name: Ensure bucket deletion
+ assert:
+ that:
+ - delete_result is changed
+ - delete_result is not failed
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/delete_bucket.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/delete_bucket.yml
new file mode 100644
index 000000000..d285c7a95
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/delete_bucket.yml
@@ -0,0 +1,25 @@
+- name: delete bucket at the end of Integration tests
+ block:
+ - name: list bucket object
+ s3_object:
+ bucket: "{{ item }}"
+ mode: list
+ register: objects
+ ignore_errors: true
+
+ - name: remove objects from bucket
+ s3_object:
+ bucket: "{{ item }}"
+ mode: delobj
+ object: "{{ obj }}"
+ with_items: "{{ objects.s3_keys }}"
+ loop_control:
+ loop_var: obj
+ when: "'s3_keys' in objects"
+ ignore_errors: true
+
+ - name: delete the bucket
+ s3_object:
+ bucket: "{{ item }}"
+ mode: delete
+ ignore_errors: true
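delete_bucket.yml is parameterised on item, so callers loop over it with include_tasks; a minimal sketch of the consuming pattern, matching the always blocks used elsewhere in this target:

- include_tasks: delete_bucket.yml
  with_items:
    - "{{ bucket_name }}"
    - "{{ bucket_name_with_dot }}"
    - "{{ bucket_name_acl }}"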
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/main.yml
new file mode 100644
index 000000000..e85fd7886
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/main.yml
@@ -0,0 +1,1092 @@
+---
+# Integration tests for s3_object
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+
+ block:
+ - name: get ARN of calling user
+ aws_caller_info:
+ register: aws_caller_info
+
+ - name: register account id
+ set_fact:
+ aws_account: "{{ aws_caller_info.account }}"
+
+ - name: check that temp directory was made
+ assert:
+ that:
+ - remote_tmp_dir is defined
+
+ - name: Create content
+ set_fact:
+ content: "{{ lookup('password', '/dev/null chars=ascii_letters,digits,hexdigits,punctuation') }}"
+
+ - name: test create bucket without permissions
+ module_defaults: { group/aws: {} }
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: create
+ register: result
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is failed
+ - "result.msg != 'MODULE FAILURE'"
+
+ - name: test create bucket with an invalid name
+ s3_object:
+ bucket: "{{ bucket_name }}-"
+ mode: create
+ register: result
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is failed
+
+ - name: test create bucket
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: create
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: trying to create a bucket name that already exists
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: create
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: Create local upload.txt
+ copy:
+ content: "{{ content }}"
+ dest: "{{ remote_tmp_dir }}/upload.txt"
+
+ - name: stat the file
+ stat:
+ path: "{{ remote_tmp_dir }}/upload.txt"
+ get_checksum: yes
+ register: upload_file
+
+ - name: test putting an object in the bucket
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: put
+ src: "{{ remote_tmp_dir }}/upload.txt"
+ object: delete.txt
+ tags:
+ "lowercase spaced": "hello cruel world"
+ "Title Case": "Hello Cruel World"
+ retries: 3
+ delay: 3
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - result.msg == "PUT operation complete"
+
+ - name: List keys
+ amazon.aws.s3_object_info:
+ bucket_name: "{{ bucket_name }}"
+ register: list_keys_result
+
+ - name: Get s3 object info without specific details
+ amazon.aws.s3_object_info:
+ bucket_name: "{{ bucket_name }}"
+ object_name: "{{ list_keys_result.s3_keys[0] }}"
+ register: info_result
+
+ - assert:
+ that:
+ - info_result is not failed
+ - info_result is not changed
+ - '"object_info" in info_result'
+ - info_result.object_info[0] | length != 0
+ - '"object_data" in info_result.object_info[0]'
+ - '"e_tag" in info_result.object_info[0].object_data'
+ - '"last_modified" in info_result.object_info[0].object_data'
+ - '"content_type" in info_result.object_info[0].object_data'
+
+ - name: Get s3 object info with specific details
+ amazon.aws.s3_object_info:
+ bucket_name: "{{ bucket_name }}"
+ object_name: "{{ list_keys_result.s3_keys[0] }}"
+ object_details:
+ object_acl: true
+ object_tagging: true
+ object_lock_configuration: true
+ object_attributes: true
+ object_retention: false
+ attributes_list:
+ - ETag
+ - ObjectSize
+ - StorageClass
+ - Checksum
+ - ObjectParts
+ register: info_detail_result
+ vars:
+ ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
+
+ - assert:
+ that:
+ - info_detail_result is not failed
+ - info_detail_result is not changed
+ - '"object_info" in info_detail_result'
+ - info_detail_result.object_info[0] | length != 0
+ - '"object_acl" in info_detail_result.object_info[0]'
+ - '"object_tagging" in info_detail_result.object_info[0]'
+ - info_detail_result.object_info[0].object_tagging | length == 2
+ - '"object_legal_hold" not in info_detail_result.object_info[0]'
+ - '"object_lock_configuration" in info_detail_result.object_info[0]'
+ - '"object_attributes" in info_detail_result.object_info[0]'
+ - '"object_retention" not in info_detail_result.object_info[0]'
+ - '"e_tag" in info_result.object_info[0].object_data'
+ - '"last_modified" in info_result.object_info[0].object_data'
+ - '"content_type" in info_result.object_info[0].object_data'
+
+ - name: test using s3_object with async
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: put
+ src: "{{ remote_tmp_dir }}/upload.txt"
+ object: delete.txt
+ register: test_async
+ async: 30
+ poll: 0
+
+ - name: ensure it completed
+ async_status:
+ jid: "{{ test_async.ansible_job_id }}"
+ register: status
+ until: status is finished
+ retries: 30
+
+ - name: test put with overwrite=different and unmodified object
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: put
+ src: "{{ remote_tmp_dir }}/upload.txt"
+ object: delete.txt
+ retries: 3
+ delay: 3
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: check that roles file lookups work as expected
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: put
+ src: hello.txt
+ object: delete.txt
+ retries: 3
+ delay: 3
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - result.msg == "PUT operation complete"
+
+ # s3_object (and its old alias) use an action plugin to support using the
+ # 'file' lookup path or a remote path. Keeping this working is dependent on
+ # having a redirect for both the module and the action plugin
+ - name: check that roles file lookups work as expected when using old name
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: put
+ src: hello.txt
+ object: delete.txt
+ overwrite: always
+ retries: 3
+ delay: 3
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - result.msg == "PUT operation complete"
+
+ - name: test put with overwrite=never
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: put
+ src: "{{ remote_tmp_dir }}/upload.txt"
+ object: delete.txt
+ overwrite: never
+ retries: 3
+ delay: 3
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: test put with overwrite=different and modified object
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: put
+ src: "{{ remote_tmp_dir }}/upload.txt"
+ object: delete.txt
+ overwrite: different
+ retries: 3
+ delay: 3
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: test put with overwrite=always
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: put
+ src: "{{ remote_tmp_dir }}/upload.txt"
+ object: delete.txt
+ overwrite: always
+ retries: 3
+ delay: 3
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: test get object
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: get
+ dest: "{{ remote_tmp_dir }}/download.txt"
+ object: delete.txt
+ retries: 3
+ delay: 3
+ register: result
+ until: "result.msg == 'GET operation complete'"
+
+ - name: stat the file so we can compare the checksums
+ stat:
+ path: "{{ remote_tmp_dir }}/download.txt"
+ get_checksum: yes
+ register: download_file
+
+ - assert:
+ that:
+ - upload_file.stat.checksum == download_file.stat.checksum
+
+ - name: test get with overwrite=different and identical files
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: get
+ dest: "{{ remote_tmp_dir }}/download.txt"
+ object: delete.txt
+ retries: 3
+ delay: 3
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: modify destination
+ copy:
+ dest: "{{ remote_tmp_dir }}/download.txt"
+ src: hello.txt
+
+ - name: test get with overwrite=never
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: get
+ dest: "{{ remote_tmp_dir }}/download.txt"
+ object: delete.txt
+ overwrite: never
+ retries: 3
+ delay: 3
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: test get with overwrite=different and modified file
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: get
+ dest: "{{ remote_tmp_dir }}/download.txt"
+ object: delete.txt
+ retries: 3
+ delay: 3
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: test get with overwrite=always
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: get
+ dest: "{{ remote_tmp_dir }}/download.txt"
+ object: delete.txt
+ overwrite: always
+ retries: 3
+ delay: 3
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: test get with overwrite=latest and identical files
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: get
+ dest: "{{ remote_tmp_dir }}/download.txt"
+ object: delete.txt
+ overwrite: latest
+ retries: 3
+ delay: 3
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: modify mtime for local file to past
+ shell: touch -mt 197001010900.00 "{{ remote_tmp_dir }}/download.txt"
+
+ - name: test get with overwrite=latest and files that mtimes are different
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: get
+ dest: "{{ remote_tmp_dir }}/download.txt"
+ object: delete.txt
+ overwrite: latest
+ retries: 3
+ delay: 3
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: test geturl of the object
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: geturl
+ object: delete.txt
+ retries: 3
+ delay: 3
+ register: result
+ until: result is changed
+
+ - assert:
+ that:
+ - "'Download url:' in result.msg"
+ - result is changed
+
+ - name: test geturl of the object with sigv4
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: geturl
+ sig_v4: true
+ object: delete.txt
+ retries: 3
+ delay: 3
+ register: result
+ until: result is changed
+
+ - assert:
+ that:
+ - "'Download url:' in result.msg"
+ - result is changed
+
+ - name: test getstr of the object
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: getstr
+ object: delete.txt
+ retries: 3
+ delay: 3
+ register: result
+
+ - assert:
+ that:
+ - result.msg == "GET operation complete"
+ - result.contents == content
+
+ - name: test list to get all objects in the bucket
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: list
+ retries: 3
+ delay: 3
+ register: result
+
+ - assert:
+ that:
+ - "'delete.txt' in result.s3_keys"
+ - result.msg == "LIST operation complete"
+
+ - name: test delobj to just delete an object in the bucket
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: delobj
+ object: delete.txt
+ retries: 3
+ delay: 3
+ register: result
+
+ - assert:
+ that:
+ - "'Object deleted from bucket' in result.msg"
+ - result is changed
+
+ - name: test putting an encrypted object in the bucket
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: put
+ src: "{{ remote_tmp_dir }}/upload.txt"
+ encrypt: yes
+ object: delete_encrypt.txt
+ retries: 3
+ delay: 3
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - result.msg == "PUT operation complete"
+
+ - name: test get encrypted object
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: get
+ dest: "{{ remote_tmp_dir }}/download_encrypted.txt"
+ object: delete_encrypt.txt
+ retries: 3
+ delay: 3
+ register: result
+ until: "result.msg == 'GET operation complete'"
+
+ - name: stat the file so we can compare the checksums
+ stat:
+ path: "{{ remote_tmp_dir }}/download_encrypted.txt"
+ get_checksum: yes
+ register: download_file
+
+ - assert:
+ that:
+ - upload_file.stat.checksum == download_file.stat.checksum
+
+ - name: delete encrypted file
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: delobj
+ object: delete_encrypt.txt
+ retries: 3
+ delay: 3
+
+ - name: test putting an aws:kms encrypted object in the bucket
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: put
+ src: "{{ remote_tmp_dir }}/upload.txt"
+ encrypt: yes
+ encryption_mode: aws:kms
+ object: delete_encrypt_kms.txt
+ retries: 3
+ delay: 3
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - result.msg == "PUT operation complete"
+
+ - name: test get KMS encrypted object
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: get
+ dest: "{{ remote_tmp_dir }}/download_kms.txt"
+ object: delete_encrypt_kms.txt
+ retries: 3
+ delay: 3
+ register: result
+ until: "result.msg == 'GET operation complete'"
+
+ - name: get the stat of the file so we can compare the checksums
+ stat:
+ path: "{{ remote_tmp_dir }}/download_kms.txt"
+ get_checksum: yes
+ register: download_file
+
+ - assert:
+ that:
+ - upload_file.stat.checksum == download_file.stat.checksum
+
+ # FIXME - could use a test that checks uploaded file is *actually* aws:kms encrypted
+
+ - name: delete KMS encrypted file
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: delobj
+ object: delete_encrypt_kms.txt
+ retries: 3
+ delay: 3
+
+ # FIXME: could use a test that checks non standard KMS key
+ # but that would require ability to create and remove such keys.
+ # PRs exist for that, but propose deferring until after merge.
+
+ - name: test creation of empty path
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: create
+ object: foo/bar/baz/
+ retries: 3
+ delay: 3
+ register: result
+
+ - assert:
+ that:
+ - "'Virtual directory foo/bar/baz/ created' in result.msg"
+ - result is changed
+
+ - name: test deletion of empty path
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: delobj
+ object: foo/bar/baz/
+ retries: 3
+ delay: 3
+
+ - name: test delete bucket
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: delete
+ register: result
+ retries: 3
+ delay: 3
+ until: result is changed
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: test create a bucket with a dot in the name
+ s3_object:
+ bucket: "{{ bucket_name_with_dot }}"
+ mode: create
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: test delete a bucket with a dot in the name
+ s3_object:
+ bucket: "{{ bucket_name_with_dot }}"
+ mode: delete
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: test delete a nonexistent bucket
+ s3_object:
+ bucket: "{{ bucket_name_with_dot }}"
+ mode: delete
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: make tempfile 4 GB for OSX
+ command:
+ _raw_params: "dd if=/dev/zero of={{ remote_tmp_dir }}/largefile bs=1m count=4096"
+ when: ansible_distribution == 'MacOSX'
+
+ - name: make tempfile 4 GB for linux
+ command:
+ _raw_params: "dd if=/dev/zero of={{ remote_tmp_dir }}/largefile bs=1M count=4096"
+ when: ansible_system == 'Linux'
+
+ - name: test multipart download - platform specific
+ block:
+ - name: make a bucket to upload the file
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: create
+
+ - name: upload the file to the bucket
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: put
+ src: "{{ remote_tmp_dir }}/largefile"
+ object: multipart.txt
+
+ - name: download file once
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: get
+ dest: "{{ remote_tmp_dir }}/download.txt"
+ object: multipart.txt
+ overwrite: different
+ retries: 3
+ delay: 3
+ until: "result.msg == 'GET operation complete'"
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: download file again
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: get
+ dest: "{{ remote_tmp_dir }}/download.txt"
+ object: multipart.txt
+ overwrite: different
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+ when: ansible_system == 'Linux' or ansible_distribution == 'MacOSX'
+
+ - name: make a bucket with the bucket-owner-full-control ACL
+ s3_bucket:
+ name: "{{ bucket_name_acl }}"
+ state: present
+ policy: "{{ lookup('template', 'policy.json.j2') }}"
+ register: bucket_with_policy
+
+ - assert:
+ that:
+ - bucket_with_policy is changed
+
+ # # XXX Doesn't fail... ( ? Eventual consistency ? )
+ # - name: fail to upload the file to the bucket with an ACL
+ # s3_object:
+ # bucket: "{{ bucket_name_acl }}"
+ # mode: put
+ # src: "{{ tmpdir.path }}/upload.txt"
+ # object: file-with-permissions.txt
+ # permission: private
+ # ignore_nonexistent_bucket: True
+ # register: upload_private
+ # ignore_errors: True
+ #
+ # - assert:
+ # that:
+ # - upload_private is failed
+
+ - name: upload the file to the bucket with an ACL
+ s3_object:
+ bucket: "{{ bucket_name_acl }}"
+ mode: put
+ src: "{{ remote_tmp_dir }}/upload.txt"
+ object: file-with-permissions.txt
+ permission: bucket-owner-full-control
+ ignore_nonexistent_bucket: True
+ register: upload_owner
+
+ - assert:
+ that:
+ - upload_owner is changed
+
+ - name: create an object from static content
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ object: put-content.txt
+ mode: put
+ content: >-
+ test content
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: ensure idempotency on static content
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ object: put-content.txt
+ mode: put
+ content: >-
+ test content
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: fetch test content
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: getstr
+ object: put-content.txt
+ register: result
+
+ - assert:
+ that:
+ - result.contents == "test content"
+
+ - set_fact:
+ put_template_text: test template
+
+ - name: create an object from a template
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ object: put-template.txt
+ mode: put
+ content: "{{ lookup('template', 'templates/put-template.txt.j2')|replace('\n', '') }}"
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: create an object from a template (idempotency)
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ object: put-template.txt
+ mode: put
+ content: "{{ lookup('template', 'templates/put-template.txt.j2')|replace('\n', '') }}"
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: fetch template content
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: getstr
+ object: put-template.txt
+ register: result
+
+ - assert:
+ that:
+ - result.contents == "template:test template"
+
+ # at present, there is no lookup that can process binary data, so we use slurp instead
+ - slurp:
+ src: "{{ role_path }}/files/test.png"
+ register: put_binary
+
+ - name: create an object from binary data
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ object: put-binary.bin
+ mode: put
+ content_base64: "{{ put_binary.content }}"
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: create an object from binary data (idempotency)
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ object: put-binary.bin
+ mode: put
+ content_base64: "{{ put_binary.content }}"
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: fetch binary content
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: get
+ dest: "{{ remote_tmp_dir }}/download_binary.bin"
+ object: put-binary.bin
+ register: result
+
+ - name: stat the files so we can compare the checksums
+ stat:
+ path: "{{ item }}"
+ get_checksum: yes
+ loop:
+ - "{{ role_path }}/files/test.png"
+ - "{{ remote_tmp_dir }}/download_binary.bin"
+ register: binary_files
+
+ - assert:
+ that:
+ - binary_files.results[0].stat.checksum == binary_files.results[1].stat.checksum
+
+ - include_tasks: copy_object.yml
+
+ - include_tasks: copy_object_acl_disabled_bucket.yml
+
+ # ============================================================
+ - name: 'Run tagging tests'
+ block:
+ # ============================================================
+ - name: create an object from static content
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ object: put-content.txt
+ mode: put
+ content: >-
+ test content
+ tags:
+ tag_one: '{{ resource_prefix }} One'
+ "Tag Two": 'two {{ resource_prefix }}'
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - "'tags' in result"
+ - (result.tags | length) == 2
+ - result.tags["tag_one"] == '{{ resource_prefix }} One'
+ - result.tags["Tag Two"] == 'two {{ resource_prefix }}'
+
+ - name: ensure idempotency on static content
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ object: put-content.txt
+ mode: put
+ content: >-
+ test content
+ tags:
+ tag_one: '{{ resource_prefix }} One'
+ "Tag Two": 'two {{ resource_prefix }}'
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+ - "'tags' in result"
+ - (result.tags | length) == 2
+ - result.tags["tag_one"] == '{{ resource_prefix }} One'
+ - result.tags["Tag Two"] == 'two {{ resource_prefix }}'
+
+ - name: Remove a tag from an S3 object
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ object: put-content.txt
+ mode: put
+ content: >-
+ test content
+ tags:
+ tag_one: '{{ resource_prefix }} One'
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - "'tags' in result"
+ - (result.tags | length) == 1
+ - result.tags["tag_one"] == "{{ resource_prefix }} One"
+ - "'Tag Two' not in result.tags"
+
+ - name: Remove the tag from an S3 object (idempotency)
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ object: put-content.txt
+ mode: put
+ content: >-
+ test content
+ tags:
+ tag_one: '{{ resource_prefix }} One'
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+ - "'tags' in result"
+ - (result.tags | length) == 1
+ - result.tags["tag_one"] == "{{ resource_prefix }} One"
+ - "'Tag Two' not in result.tags"
+
+ - name: Add a tag for an S3 object with purge_tags False
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ object: put-content.txt
+ mode: put
+ content: >-
+ test content
+ tags:
+ tag_three: '{{ resource_prefix }} Three'
+ purge_tags: false
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - "'tags' in result"
+ - (result.tags | length) == 2
+ - result.tags["tag_three"] == '{{ resource_prefix }} Three'
+ - result.tags["tag_one"] == '{{ resource_prefix }} One'
+
+ - name: Add a tag for an S3 object with purge_tags False (idempotency)
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ object: put-content.txt
+ mode: put
+ content: >-
+ test content
+ tags:
+ tag_three: '{{ resource_prefix }} Three'
+ purge_tags: false
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+ - "'tags' in result"
+ - (result.tags | length) == 2
+ - result.tags["tag_three"] == '{{ resource_prefix }} Three'
+ - result.tags["tag_one"] == '{{ resource_prefix }} One'
+
+ - name: Update tags for an S3 object with purge_tags False
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ object: put-content.txt
+ mode: put
+ content: >-
+ test content
+ tags:
+ "TagFour": '{{ resource_prefix }} tag_four'
+ purge_tags: false
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - "'tags' in result"
+ - (result.tags | length) == 3
+ - result.tags["tag_one"] == '{{ resource_prefix }} One'
+ - result.tags["tag_three"] == '{{ resource_prefix }} Three'
+ - result.tags["TagFour"] == '{{ resource_prefix }} tag_four'
+
+ - name: Update tags for an S3 object with purge_tags False (idempotency)
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ object: put-content.txt
+ mode: put
+ content: >-
+ test content
+ tags:
+ "TagFour": '{{ resource_prefix }} tag_four'
+ purge_tags: false
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+ - "'tags' in result"
+ - (result.tags | length) == 3
+ - result.tags["tag_one"] == '{{ resource_prefix }} One'
+ - result.tags["tag_three"] == '{{ resource_prefix }} Three'
+ - result.tags["TagFour"] == '{{ resource_prefix }} tag_four'
+
+ - name: Specify empty tags for an S3 object with purge_tags False
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ object: put-content.txt
+ mode: put
+ content: >-
+ test content
+ tags: {}
+ purge_tags: false
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+ - "'tags' in result"
+ - (result.tags | length) == 3
+ - result.tags["tag_one"] == '{{ resource_prefix }} One'
+ - result.tags["tag_three"] == '{{ resource_prefix }} Three'
+ - result.tags["TagFour"] == '{{ resource_prefix }} tag_four'
+
+ - name: Do not specify any tag to ensure previous tags are not removed
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ object: put-content.txt
+ mode: put
+ content: >-
+ test content
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+ - "'tags' in result"
+ - (result.tags | length) == 3
+ - result.tags["tag_one"] == '{{ resource_prefix }} One'
+ - result.tags["tag_three"] == '{{ resource_prefix }} Three'
+ - result.tags["TagFour"] == '{{ resource_prefix }} tag_four'
+
+ - name: Remove all tags
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ object: put-content.txt
+ mode: put
+ overwrite: different
+ content: >-
+ test content
+ tags: {}
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - "'tags' in result"
+ - (result.tags | length) == 0
+
+ - name: Remove all tags (idempotency)
+ s3_object:
+ bucket: "{{ bucket_name }}"
+ object: put-content.txt
+ mode: put
+ content: >-
+ test content
+ tags: {}
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+ - "'tags' in result"
+ - (result.tags | length) == 0
+
+ always:
+
+ - name: delete temporary files
+ file:
+ state: absent
+ path: "{{ remote_tmp_dir }}"
+ ignore_errors: true
+
+ - include_tasks: delete_bucket.yml
+ with_items:
+ - "{{ bucket_name }}"
+ - "{{ bucket_name_with_dot }}"
+ - "{{ bucket_name_acl }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/templates/policy.json.j2 b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/templates/policy.json.j2
new file mode 100644
index 000000000..4af2e0713
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/templates/policy.json.j2
@@ -0,0 +1,21 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "Only allow writes to my bucket with bucket owner full control",
+ "Effect": "Allow",
+ "Principal": { "AWS":"{{ aws_account }}" },
+ "Action": [
+ "s3:PutObject"
+ ],
+ "Resource": [
+ "arn:aws:s3:::{{ bucket_name_acl }}/*"
+ ],
+ "Condition": {
+ "StringEquals": {
+ "s3:x-amz-acl": "bucket-owner-full-control"
+ }
+ }
+ }
+ ]
+}
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/templates/put-template.txt.j2 b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/templates/put-template.txt.j2
new file mode 100644
index 000000000..2a75e9f2d
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/templates/put-template.txt.j2
@@ -0,0 +1,2 @@
+template:
+{{ put_template_text }}
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/defaults/main.yml
new file mode 100644
index 000000000..16ad00270
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/defaults/main.yml
@@ -0,0 +1,2 @@
+default_botocore_version: '1.21.0'
+default_boto3_version: '1.18.0'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/handlers/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/handlers/main.yml
new file mode 100644
index 000000000..2536d1ac7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/handlers/main.yml
@@ -0,0 +1,2 @@
+- name: 'Delete temporary pip environment'
+ include_tasks: cleanup.yml
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/tasks/cleanup.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/tasks/cleanup.yml
new file mode 100644
index 000000000..25b3ec27e
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/tasks/cleanup.yml
@@ -0,0 +1,5 @@
+- name: 'Delete temporary pip environment'
+ file:
+ path: "{{ botocore_pip_directory }}"
+ state: absent
+ no_log: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/tasks/main.yml
new file mode 100644
index 000000000..1a0d7c4fb
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/tasks/main.yml
@@ -0,0 +1,43 @@
+- name: 'Ensure that we have virtualenv available to us'
+ pip:
+ name: virtualenv
+
+- name: 'Create temporary directory for pip environment'
+ tempfile:
+ path: /var/tmp
+ state: directory
+ prefix: botocore
+ suffix: .test
+ register: botocore_pip_directory
+ notify:
+ - 'Delete temporary pip environment'
+
+- name: 'Record temporary directory'
+ set_fact:
+ botocore_pip_directory: "{{ botocore_pip_directory.path }}"
+
+- set_fact:
+ botocore_virtualenv: "{{ botocore_pip_directory }}/virtualenv"
+ botocore_virtualenv_command: "{{ ansible_python_interpreter }} -m virtualenv"
+
+- set_fact:
+ botocore_virtualenv_interpreter: "{{ botocore_virtualenv }}/bin/python"
+
+- pip:
+ name:
+ - 'boto3{{ _boto3_comparison }}{{ _boto3_version }}'
+ - 'botocore{{ _botocore_comparison }}{{ _botocore_version }}'
+ - 'coverage<5'
+ virtualenv: "{{ botocore_virtualenv }}"
+ virtualenv_command: "{{ botocore_virtualenv_command }}"
+ virtualenv_site_packages: no
+ vars:
+ _boto3_version: '{{ boto3_version | default(default_boto3_version) }}'
+ _botocore_version: '{{ botocore_version | default(default_botocore_version) }}'
+ _is_default_boto3: '{{ _boto3_version == default_boto3_version }}'
+ _is_default_botocore: '{{ _botocore_version == default_botocore_version }}'
+ # Only set the default to >= if the other dep has been updated and the dep has not been set
+ _default_boto3_comparison: '{% if _is_default_boto3 and not _is_default_botocore %}>={% else %}=={% endif %}'
+ _default_botocore_comparison: '{% if _is_default_botocore and not _is_default_boto3 %}>={% else %}=={% endif %}'
+ _boto3_comparison: '{{ boto3_comparison | default(_default_boto3_comparison) }}'
+ _botocore_comparison: '{{ botocore_comparison | default(_default_botocore_comparison) }}'
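The role leaves botocore_virtualenv_interpreter pointing at the pinned virtualenv it builds; a minimal sketch of how a dependent target points a single task at it, assuming the target declares the setup_botocore_pip dependency as in the s3_object meta/main.yml above (the object key variable is a placeholder for illustration):

- amazon.aws.s3_object_info:
    bucket_name: "{{ bucket_name }}"
    object_name: "{{ object_key }}"    # placeholder object key
    object_details:
      object_attributes: true
      attributes_list:
        - ETag
  vars:
    ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"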
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/defaults/main.yml
new file mode 100644
index 000000000..172a10a20
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/defaults/main.yml
@@ -0,0 +1,4 @@
+ec2_ami_name: 'Fedora-Cloud-Base-*.x86_64*'
+# CentOS Community Platform Engineering (CPE)
+ec2_ami_owner_id: '125523088429'
+ec2_ami_ssh_user: 'fedora'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/tasks/main.yml
new file mode 100644
index 000000000..f41791073
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/tasks/main.yml
@@ -0,0 +1,53 @@
+---
+# Setup a couple of common facts about the AWS Region
+#
+# Information about availability zones
+# - ec2_availability_zone_names
+#
+# An EC2 AMI that can be used for spinning up Instances. We perform a search
+# rather than hardcode the IDs so we're not limited to specific Regions
+# - ec2_ami_id
+#
+- module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+
+ run_once: True
+ block:
+ # ============================================================
+
+ - name: Get available AZs
+ aws_az_info:
+ filters:
+ region-name: '{{ aws_region }}'
+ register: _az_info
+
+ - name: Pick an AZ
+ set_fact:
+ ec2_availability_zone_names: '{{ _az_info.availability_zones | selectattr("zone_name", "defined") | map(attribute="zone_name") | list }}'
+
+ # ============================================================
+
+ - name: Get a list of images
+ ec2_ami_info:
+ filters:
+ name: '{{ ec2_ami_name }}'
+ owner-id: '{{ ec2_ami_owner_id }}'
+ architecture: x86_64
+ virtualization-type: hvm
+ root-device-type: ebs
+ register: _images_info
+ # Very spammy
+ no_log: True
+
+ - name: Set Fact for latest AMI
+ vars:
+ latest_image: '{{ _images_info.images | sort(attribute="creation_date") | reverse | first }}'
+ set_fact:
+ ec2_ami_id: '{{ latest_image.image_id }}'
+ ec2_ami_details: '{{ latest_image }}'
+ ec2_ami_root_disk: '{{ latest_image.block_device_mappings[0].device_name }}'
+ ec2_ami_ssh_user: '{{ ec2_ami_ssh_user }}'
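A minimal sketch of a task consuming the facts this role sets; all of the variables referenced below come from the set_fact calls above:

- name: Show the AMI selected for this region (illustrative sketch)
  debug:
    msg: >-
      Using {{ ec2_ami_id }} ({{ ec2_ami_details.name }}) in
      {{ ec2_availability_zone_names[0] }}, root disk {{ ec2_ami_root_disk }},
      ssh user {{ ec2_ami_ssh_user }}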
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/defaults/main.yml
new file mode 100644
index 000000000..e73afad8f
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/defaults/main.yml
@@ -0,0 +1,24 @@
+---
+# defaults file for ec2_instance tests
+ec2_instance_test_name: 'ec2_instance'
+
+ec2_instance_owner: 'integration-run-{{ ec2_instance_test_name }}'
+ec2_instance_type: 't3.micro'
+ec2_instance_tag_TestId: '{{ resource_prefix }}-{{ ec2_instance_test_name }}'
+
+vpc_name: '{{ resource_prefix }}-{{ ec2_instance_test_name }}'
+vpc_seed: '{{ resource_prefix }}-{{ ec2_instance_test_name }}'
+
+vpc_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.0.0/16'
+
+subnet_a_az: '{{ ec2_availability_zone_names[0] }}'
+subnet_a_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.32.0/24'
+subnet_a_startswith: '10.{{ 256 | random(seed=vpc_seed) }}.32.'
+subnet_a_name: '{{ resource_prefix }}-{{ ec2_instance_test_name }}-a'
+subnet_b_az: '{{ ec2_availability_zone_names[1] }}'
+subnet_b_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.33.0/24'
+subnet_b_startswith: '10.{{ 256 | random(seed=vpc_seed) }}.33.'
+subnet_b_name: '{{ resource_prefix }}-{{ ec2_instance_test_name }}-b'
+
+security_group_name_1: '{{ resource_prefix }}-{{ ec2_instance_test_name }}-1'
+security_group_name_2: '{{ resource_prefix }}-{{ ec2_instance_test_name }}-2'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/handlers/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/handlers/main.yml
new file mode 100644
index 000000000..b8dee611d
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/handlers/main.yml
@@ -0,0 +1,2 @@
+- name: 'Delete ec2_instance environment'
+ include_tasks: cleanup.yml
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/tasks/cleanup.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/tasks/cleanup.yml
new file mode 100644
index 000000000..0a0aa1eed
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/tasks/cleanup.yml
@@ -0,0 +1,118 @@
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ - name: Set termination protection to false (so we can terminate instance) (cleanup)
+ ec2_instance:
+ filters:
+ instance-state-name: ['pending', 'running', 'stopping', 'stopped']
+ vpc-id: '{{ testing_vpc.vpc.id }}'
+ termination_protection: false
+ ignore_errors: yes
+
+ - name: "(Cleanup) Find all remaining Instances"
+ ec2_instance_info:
+ filters:
+ vpc-id: '{{ testing_vpc.vpc.id }}'
+ instance-state-name: ['pending', 'running', 'shutting-down', 'stopping', 'stopped']
+ register: instances
+
+ - name: "(Cleanup) Remove Instances (start)"
+ ec2_instance:
+ state: absent
+ instance_ids: '{{ item.instance_id }}'
+ wait: no
+ ignore_errors: yes
+ loop: '{{ instances.instances }}'
+
+ - name: "(Cleanup) Remove Instances (wait for completion)"
+ ec2_instance:
+ state: absent
+ instance_ids: '{{ item.instance_id }}'
+ filters:
+ instance-state-name: ['pending', 'running', 'shutting-down', 'stopping', 'stopped']
+ vpc-id: '{{ testing_vpc.vpc.id }}'
+ wait: yes
+ ignore_errors: yes
+ loop: '{{ instances.instances }}'
+
+ - name: "(Cleanup) Find all remaining ENIs"
+ ec2_eni_info:
+ filters:
+ vpc-id: "{{ testing_vpc.vpc.id }}"
+ register: enis
+
+ - name: "(Cleanup) delete all ENIs"
+ ec2_eni:
+ state: absent
+ eni_id: "{{ item.id }}"
+ register: eni_removed
+ until: eni_removed is not failed
+ with_items: "{{ enis.network_interfaces }}"
+ ignore_errors: yes
+ retries: 10
+
+ - name: "(Cleanup) Find all remaining Security Groups"
+ ec2_security_group_info:
+ filters:
+ vpc-id: '{{ testing_vpc.vpc.id }}'
+ register: security_groups
+
+ - name: "(Cleanup) Remove the security group rules"
+ ec2_security_group:
+ state: present
+ name: '{{ item.group_name }}'
+ description: '{{ item.description }}'
+ vpc_id: '{{ testing_vpc.vpc.id }}'
+ rules: []
+ egress_rules: []
+ loop: '{{ security_groups.security_groups }}'
+ register: sg_removed
+ until: sg_removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+ - name: "(Cleanup) Remove the security groups"
+ ec2_security_group:
+ state: absent
+ group_id: '{{ item.group_id }}'
+ loop: '{{ security_groups.security_groups }}'
+ when:
+ - item.group_name != 'default'
+ register: sg_removed
+ until: sg_removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+ - name: "(Cleanup) Find all remaining Subnets"
+ ec2_vpc_subnet_info:
+ filters:
+ vpc-id: '{{ testing_vpc.vpc.id }}'
+ register: subnets
+
+ - name: "(Cleanup) Remove subnets"
+ ec2_vpc_subnet:
+ state: absent
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: "{{ item.cidr_block }}"
+ register: removed
+ loop: '{{ subnets.subnets }}'
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+ - name: "(Cleanup) Remove the VPC"
+ ec2_vpc_net:
+ state: absent
+ name: "{{ vpc_name }}"
+ cidr_block: "{{ vpc_cidr }}"
+ tags:
+ Name: Ansible Testing VPC
+ tenancy: default
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/tasks/main.yml
new file mode 100644
index 000000000..fa12818c1
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/tasks/main.yml
@@ -0,0 +1,88 @@
+- run_once: '{{ setup_run_once | default("no") | bool }}'
+ module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ - name: "Create VPC for use in testing"
+ ec2_vpc_net:
+ state: present
+ name: "{{ vpc_name }}"
+ cidr_block: "{{ vpc_cidr }}"
+ tags:
+ Name: "{{ vpc_name }}"
+ tenancy: default
+ register: testing_vpc
+ notify:
+ - 'Delete ec2_instance environment'
+
+ - name: "Create default subnet in zone A"
+ ec2_vpc_subnet:
+ state: present
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: "{{ subnet_a_cidr }}"
+ az: "{{ subnet_a_az }}"
+ resource_tags:
+ Name: "{{ subnet_a_name }}"
+ register: testing_subnet_a
+
+ - name: "Create secondary subnet in zone B"
+ ec2_vpc_subnet:
+ state: present
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: "{{ subnet_b_cidr }}"
+ az: "{{ subnet_b_az }}"
+ resource_tags:
+ Name: "{{ subnet_b_name }}"
+ register: testing_subnet_b
+
+ - name: "create a security group with the vpc"
+ ec2_group:
+ state: present
+ name: "{{ security_group_name_1 }}"
+ description: a security group for ansible tests
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ rules:
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ register: sg
+
+ - name: "create secondary security group with the vpc"
+ ec2_group:
+ name: "{{ security_group_name_2 }}"
+ description: a secondary security group for ansible tests
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ rules:
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ register: sg2
+
+ - name: Preserve defaults for other roles
+ set_fact:
+ # Ensure variables are available outside of this role
+ vpc_cidr: '{{ vpc_cidr }}'
+ vpc_name: '{{ vpc_name }}'
+ subnet_a_az: '{{ subnet_a_az }}'
+ subnet_a_cidr: '{{ subnet_a_cidr }}'
+ subnet_a_startswith: '{{ subnet_a_startswith }}'
+ subnet_a_name: '{{ subnet_a_name }}'
+ subnet_b_az: '{{ subnet_b_az }}'
+ subnet_b_cidr: '{{ subnet_b_cidr }}'
+ subnet_b_startswith: '{{ subnet_b_startswith }}'
+ subnet_b_name: '{{ subnet_b_name }}'
+ security_group_name_1: '{{ security_group_name_1 }}'
+ security_group_name_2: '{{ security_group_name_2 }}'
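A minimal sketch of how a consuming test might launch an instance into the environment prepared above, assuming setup_ec2_facts has also run to provide ec2_ami_id; the instance name suffix is a placeholder:

- name: Launch an instance in the prepared VPC (illustrative sketch)
  ec2_instance:
    name: "{{ resource_prefix }}-env-smoke"    # placeholder instance name
    image_id: "{{ ec2_ami_id }}"
    instance_type: "{{ ec2_instance_type }}"
    vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
    security_group: "{{ security_group_name_1 }}"
    tags:
      TestId: "{{ ec2_instance_tag_TestId }}"
    state: running
    wait: true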
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml
new file mode 100644
index 000000000..229037c8b
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml
@@ -0,0 +1,5 @@
+- name: delete temporary directory
+ include_tasks: default-cleanup.yml
+
+- name: delete temporary directory (windows)
+ include_tasks: windows-cleanup.yml
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml
new file mode 100644
index 000000000..39872d749
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml
@@ -0,0 +1,5 @@
+- name: delete temporary directory
+ file:
+ path: "{{ remote_tmp_dir }}"
+ state: absent
+ no_log: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml
new file mode 100644
index 000000000..00877dca0
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml
@@ -0,0 +1,12 @@
+- name: create temporary directory
+ tempfile:
+ path: /var/tmp
+ state: directory
+ suffix: .test
+ register: remote_tmp_dir
+ notify:
+ - delete temporary directory
+
+- name: record temporary directory
+ set_fact:
+ remote_tmp_dir: "{{ remote_tmp_dir.path }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml
new file mode 100644
index 000000000..f8df391b5
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml
@@ -0,0 +1,10 @@
+- name: make sure we have the ansible_os_family and ansible_distribution_version facts
+ setup:
+ gather_subset: distribution
+ when: ansible_facts == {}
+
+- include_tasks: "{{ lookup('first_found', files) }}"
+ vars:
+ files:
+ - "{{ ansible_os_family | lower }}.yml"
+ - "default.yml"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows-cleanup.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows-cleanup.yml
new file mode 100644
index 000000000..32f372d0f
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows-cleanup.yml
@@ -0,0 +1,4 @@
+- name: delete temporary directory (windows)
+ ansible.windows.win_file:
+ path: '{{ remote_tmp_dir }}'
+ state: absent
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows.yml
new file mode 100644
index 000000000..317c146db
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows.yml
@@ -0,0 +1,10 @@
+- name: create temporary directory
+ register: remote_tmp_dir
+ notify:
+ - delete temporary directory (windows)
+ ansible.windows.win_tempfile:
+ state: directory
+ suffix: .test
+- name: record temporary directory
+ set_fact:
+ remote_tmp_dir: '{{ remote_tmp_dir.path }}'
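Dependent roles then reference the remote_tmp_dir fact set above regardless of platform. A minimal sketch of a hypothetical consumer task (not part of this patch):

- name: Drop a scratch file into the shared temporary directory (hypothetical consumer)
  copy:
    dest: "{{ remote_tmp_dir }}/scratch.txt"   # remote_tmp_dir is set by setup_remote_tmp_dir
    content: "integration test scratch data"
    mode: '0600'

The directory and anything written into it are removed by the role's cleanup handlers at the end of the play.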
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/files/ec2-fingerprint.py b/ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/files/ec2-fingerprint.py
new file mode 100644
index 000000000..827856386
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/files/ec2-fingerprint.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+"""
+Reads an OpenSSH Public key and spits out the 'AWS' MD5 sum
+The equivalent of
+
+ssh-keygen -f id_rsa.pub -e -m PKCS8 | openssl pkey -pubin -outform DER | openssl md5 -c | cut -f 2 -d ' '
+
+(but without needing the OpenSSL CLI)
+"""
+
+
+import hashlib
+import sys
+from cryptography.hazmat.primitives import serialization
+
+if len(sys.argv) < 2:  # sys.argv[0] is the script name; the key path is an optional first argument
+ ssh_public_key = "id_rsa.pub"
+else:
+ ssh_public_key = sys.argv[1]
+
+with open(ssh_public_key, "rb") as key_file:
+ public_key = serialization.load_ssh_public_key(
+ key_file.read(),
+ )
+pub_der = public_key.public_bytes(
+ encoding=serialization.Encoding.DER,
+ format=serialization.PublicFormat.SubjectPublicKeyInfo,
+)
+md5digest = hashlib.md5(pub_der).hexdigest()
+# Format the md5sum into the normal format
+pairs = zip(md5digest[::2], md5digest[1::2])
+md5string = ":".join(["".join(pair) for pair in pairs])
+
+print(md5string)
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/meta/main.yml
new file mode 100644
index 000000000..32cf5dda7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/tasks/main.yml
new file mode 100644
index 000000000..31bd2176e
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/tasks/main.yml
@@ -0,0 +1,71 @@
+# (c) 2014, James Laska <jlaska@ansible.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: create a temp dir
+ tempfile:
+ state: directory
+ register: sshkey_dir
+ tags:
+ - prepare
+
+- name: ensure script is available
+ copy:
+ src: ec2-fingerprint.py
+ dest: '{{ sshkey_dir.path }}/ec2-fingerprint.py'
+    mode: '0700'
+ tags:
+ - prepare
+
+- name: Set location of SSH keys
+ set_fact:
+ sshkey: '{{ sshkey_dir.path }}/key_one'
+ another_sshkey: '{{ sshkey_dir.path }}/key_two'
+ sshkey_pub: '{{ sshkey_dir.path }}/key_one.pub'
+ another_sshkey_pub: '{{ sshkey_dir.path }}/key_two.pub'
+
+- name: generate sshkey
+ shell: echo 'y' | ssh-keygen -P '' -f '{{ sshkey }}'
+ tags:
+ - prepare
+
+- name: record fingerprint
+ shell: '{{ sshkey_dir.path }}/ec2-fingerprint.py {{ sshkey_pub }}'
+ register: fingerprint
+ tags:
+ - prepare
+
+- name: generate another_sshkey
+  shell: echo 'y' | ssh-keygen -P '' -f '{{ another_sshkey }}'
+ tags:
+ - prepare
+
+- name: record another fingerprint
+ shell: '{{ sshkey_dir.path }}/ec2-fingerprint.py {{ another_sshkey_pub }}'
+ register: another_fingerprint
+ tags:
+ - prepare
+
+- name: set facts for future roles
+ set_fact:
+ # Public SSH keys (OpenSSH format)
+ key_material: "{{ lookup('file', sshkey_pub) }}"
+ another_key_material: "{{ lookup('file', another_sshkey_pub) }}"
+ # AWS 'fingerprint' (md5digest)
+ fingerprint: '{{ fingerprint.stdout }}'
+ another_fingerprint: '{{ another_fingerprint.stdout }}'
+ tags:
+ - prepare
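The key_material and fingerprint facts set above are intended for key-pair tests. As a hedged sketch (hypothetical consumer, assuming the amazon.aws ec2_key module and its returned key.fingerprint value), the fingerprint computed by ec2-fingerprint.py can be compared against what AWS reports for an imported key:

- name: Import the generated public key (hypothetical consumer)
  ec2_key:
    name: "example-ansible-test-key"     # hypothetical key pair name
    key_material: "{{ key_material }}"
  register: example_key

- name: Verify AWS reports the locally computed MD5 fingerprint
  assert:
    that:
      - example_key.key.fingerprint == fingerprint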