author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-14 20:03:01 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-14 20:03:01 +0000
commit     a453ac31f3428614cceb99027f8efbdb9258a40b (patch)
tree       f61f87408f32a8511cbd91799f9cececb53e0374 /collections-debian-merged/ansible_collections/amazon/aws/tests
parent     Initial commit. (diff)
download   ansible-a453ac31f3428614cceb99027f8efbdb9258a40b.tar.xz
           ansible-a453ac31f3428614cceb99027f8efbdb9258a40b.zip
Adding upstream version 2.10.7+merged+base+2.10.8+dfsg. (upstream/2.10.7+merged+base+2.10.8+dfsg, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'collections-debian-merged/ansible_collections/amazon/aws/tests')
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/.gitignore3
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/requirements.txt3
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/main.yml5
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/tasks/main.yml26
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/tasks/tests.yml183
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/tasks/main.yaml18
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_s3/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_s3/defaults/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_s3/files/hello.txt1
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_s3/files/test.pngbin0 -> 99 bytes
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_s3/meta/main.yml0
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_s3/tasks/main.yml663
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_s3/templates/put-template.txt.j22
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/defaults/main.yml8
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/files/cf_template.json37
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/tasks/main.yml463
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2/defaults/main.yml4
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2/tasks/main.yml208
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/defaults/main.yml11
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/tasks/main.yml490
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/vars/main.yml20
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_elb_lb/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_elb_lb/defaults/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_elb_lb/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_elb_lb/tasks/main.yml334
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_elb_lb/vars/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/defaults/main.yml14
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/main.yaml166
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_attachment.yaml204
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_deletion.yaml92
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_eni_basic_creation.yaml219
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_ipaddress_assign.yaml267
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_delete_on_termination.yaml166
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_source_dest_check.yaml74
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_tags.yaml213
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/defaults/main.yml7
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/credential_tests.yml161
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/data_validation.yml33
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/diff_mode.yml167
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/ec2_classic.yml86
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/egress_tests.yml177
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/group_info.yml96
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/ipv6_default_tests.yml90
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/main.yml1460
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/multi_account.yml124
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/multi_nested_target.yml213
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/numeric_protos.yml60
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/rule_group_create.yml126
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/defaults/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/meta/main.yml4
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/tasks/main.yml137
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/setup.yml141
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/teardown.yml70
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/test_metadata.yml16
-rwxr-xr-xcollections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/runme.sh22
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/templates/inventory.j220
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/defaults/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/tasks/main.yml277
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/defaults/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/tasks/main.yml143
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/vars/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/defaults/main.yml5
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/main.yml5
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/tasks/main.yml27
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/tasks/tests.yml546
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/defaults/main.yml5
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/tasks/main.yml931
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/defaults/main.yml5
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/tasks/main.yml1318
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/defaults/main.yml7
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/tasks/main.yml692
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/create_inventory_config.yml11
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/empty_inventory_config.yml9
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/populate_cache.yml63
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/setup.yml51
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/tear_down.yml31
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml9
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_inventory_cache.yml18
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory.yml86
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_concatenation.yml61
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml74
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_refresh_inventory.yml68
-rwxr-xr-xcollections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/runme.sh37
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory.yml.j214
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_cache.yml.j214
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_concatenation.yml.j215
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_constructed.yml.j222
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/test.aws_ec2.yml0
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/create_inventory_config.yml11
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/empty_inventory_config.yml9
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/populate_cache.yml56
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_invalid_aws_rds_inventory_config.yml9
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_cache.yml18
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory.yml76
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory_with_constructed.yml64
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_refresh_inventory.yml66
-rwxr-xr-xcollections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/runme.sh35
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory.j210
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_cache.j213
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_constructed.j217
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/test.aws_rds.yml0
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/inventory6
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/main.yml8
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/files/amazonroot.pem20
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/files/isrg-x1.pem31
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/library/example_module.py46
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/meta/main.yml5
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/ca_bundle.yml202
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/credentials.yml281
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/endpoints.yml123
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/main.yml12
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/profiles.yml57
-rwxr-xr-xcollections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/runme.sh16
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/setup.yml40
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/templates/boto_config.j25
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/templates/session_credentials.yml.j23
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/connect_to_aws.yml8
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/ec2_connect.yml8
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/inventory6
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/connect_to_aws/files/amazonroot.pem20
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/connect_to_aws/files/isrg-x1.pem31
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/connect_to_aws/library/example_module.py59
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/connect_to_aws/meta/main.yml5
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/connect_to_aws/tasks/credentials.yml212
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/connect_to_aws/tasks/endpoints.yml105
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/connect_to_aws/tasks/main.yml12
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/connect_to_aws/tasks/profiles.yml63
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/ec2_connect/files/amazonroot.pem20
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/ec2_connect/files/isrg-x1.pem31
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/ec2_connect/library/example_module.py51
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/ec2_connect/meta/main.yml5
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/ec2_connect/tasks/credentials.yml212
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/ec2_connect/tasks/endpoints.yml119
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/ec2_connect/tasks/main.yml12
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/ec2_connect/tasks/profiles.yml63
-rwxr-xr-xcollections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/runme.sh17
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/setup.yml40
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/templates/boto_config.j25
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/templates/session_credentials.yml.j23
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/inventory6
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/main.yml7
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/library/example_module.py39
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/meta/main.yml5
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/tasks/main.yml36
-rwxr-xr-xcollections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/runme.sh8
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/prepare_tests/tasks/main.yml0
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/inventory13
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/main.yml12
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/meta/main.yml4
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/defaults/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/meta/main.yml4
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/complex.yml146
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/dotted.yml54
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_kms.yml88
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_sse.yml88
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/main.yml20
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/missing.yml26
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/public_access.yml114
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/simple.yml65
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/tags.yml256
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/templates/policy-updated.json12
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/templates/policy.json12
-rwxr-xr-xcollections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/runme.sh12
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2/defaults/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2/tasks/common.yml119
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2/vars/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml5
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml5
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml11
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml10
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows-cleanup.yml4
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows.yml10
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/tasks/main.yml55
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/requirements.yml4
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/sanity/ignore-2.10.txt4
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/sanity/ignore-2.11.txt4
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/sanity/ignore-2.9.txt17
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/compat/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/compat/builtins.py33
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/compat/mock.py122
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/compat/unittest.py38
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/mock/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/mock/loader.py116
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/mock/path.py8
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/mock/procenv.py90
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/mock/vault_helper.py39
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/mock/yaml_helper.py124
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/conftest.py72
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/core/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/core/ansible_aws_module/test_fail_json_aws.py321
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/core/test_is_boto3_error_code.py271
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/core/test_is_boto3_error_message.py164
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/core/test_scrub_none_parameters.py56
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/ec2/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/ec2/test_aws.py103
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/ec2/test_compare_policies.py341
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/test_ec2.py191
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/test_elbv2.py43
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/test_iam.py296
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/inventory/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/inventory/test_aws_ec2.py189
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/lookup/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/lookup/fixtures/avi.json104
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/lookup/test_aws_secret.py218
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/lookup/test_aws_ssm.py166
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/conftest.py31
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/a.pem31
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/b.pem47
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.0.cert121
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.1.cert69
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.2.cert113
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.3.cert124
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.4.cert86
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-4.cert121
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/simple-chain-a.cert18
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/simple-chain-b.cert18
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/thezip.zipbin0 -> 162 bytes
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/.gitkeep0
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.CreateStack_1.json17
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DeleteStack_1.json16
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_1.json38
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_2.json80
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_3.json80
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_4.json80
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_5.json80
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_6.json100
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_7.json119
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_1.json40
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_2.json39
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_3.json39
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_4.json39
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_5.json39
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_6.json39
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_7.json45
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.CreateStack_1.json17
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DeleteStack_1.json16
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_1.json39
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_2.json83
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_3.json83
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_4.json83
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_5.json83
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_6.json104
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_7.json124
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_1.json40
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_2.json39
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_3.json39
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_4.json39
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_5.json39
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_6.json39
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_7.json45
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/delete_nonexistent_stack/cloudformation.DescribeStackEvents_1.json22
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/delete_nonexistent_stack/cloudformation.DescribeStackEvents_2.json22
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/delete_nonexistent_stack/cloudformation.DescribeStacks_1.json22
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/get_nonexistent_stack/cloudformation.DescribeStacks_1.json22
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/invalid_template_json/cloudformation.CreateStack_1.json22
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.CreateStack_1.json17
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_1.json38
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_2.json101
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_3.json121
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_4.json180
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_5.json180
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_1.json42
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_2.json41
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_3.json52
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_4.json51
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_5.json50
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.CreateStack_1.json17
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DeleteStack_1.json16
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStackEvents_1.json38
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStackEvents_2.json121
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStacks_1.json42
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStacks_2.json42
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.CreateStack_1.json17
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DeleteStack_1.json16
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStackEvents_1.json38
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStackEvents_2.json121
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStackEvents_3.json180
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStacks_1.json42
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStacks_2.json52
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStacks_3.json51
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_aws_s3.py38
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_cloudformation.py213
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_group.py83
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/utils.py50
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/requirements.txt2
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/utils/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/amazon/aws/tests/unit/utils/amazon_placebo_fixtures.py213
-rwxr-xr-xcollections-debian-merged/ansible_collections/amazon/aws/tests/utils/shippable/aws.sh19
-rwxr-xr-xcollections-debian-merged/ansible_collections/amazon/aws/tests/utils/shippable/check_matrix.py120
-rwxr-xr-xcollections-debian-merged/ansible_collections/amazon/aws/tests/utils/shippable/sanity.sh14
-rwxr-xr-xcollections-debian-merged/ansible_collections/amazon/aws/tests/utils/shippable/shippable.sh172
-rwxr-xr-xcollections-debian-merged/ansible_collections/amazon/aws/tests/utils/shippable/timing.py16
-rwxr-xr-xcollections-debian-merged/ansible_collections/amazon/aws/tests/utils/shippable/timing.sh5
-rwxr-xr-xcollections-debian-merged/ansible_collections/amazon/aws/tests/utils/shippable/units.sh11
333 files changed, 23801 insertions, 0 deletions
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/.gitignore b/collections-debian-merged/ansible_collections/amazon/aws/tests/.gitignore
new file mode 100644
index 00000000..e17f1f38
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/.gitignore
@@ -0,0 +1,3 @@
+output/
+integration/cloud-config-aws.ini
+integration/cloud-config-aws.yml
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/requirements.txt b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/requirements.txt
new file mode 100644
index 00000000..4f1c4feb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/requirements.txt
@@ -0,0 +1,3 @@
+# netaddr is needed for ansible.netcommon.ipv6
+netaddr
+virtualenv
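The netaddr pin exists for exactly the reason the comment states: the ansible.netcommon.ipv6 filter is backed by the netaddr library on the controller, so any task templating that filter fails when netaddr is missing. A minimal, illustrative sketch of such a task (the variable names are made up, not taken from these tests):

- name: Keep only the IPv6 entries from a mixed address list (illustrative)
  ansible.builtin.set_fact:
    ipv6_only: "{{ candidate_addresses | ansible.netcommon.ipv6 }}"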
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/aliases b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/aliases
new file mode 100644
index 00000000..157ce0c9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group3
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/main.yml
new file mode 100644
index 00000000..2fe745f0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/main.yml
@@ -0,0 +1,5 @@
+- hosts: localhost
+  connection: local
+  environment: "{{ ansible_test.environment }}"
+  tasks:
+    - include_tasks: 'tasks/main.yml'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/meta/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/meta/main.yml
new file mode 100644
index 00000000..1810d4be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/tasks/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/tasks/main.yml
new file mode 100644
index 00000000..ba92416e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/tasks/main.yml
@@ -0,0 +1,26 @@
+- set_fact:
+    virtualenv: "{{ remote_tmp_dir }}/virtualenv"
+    virtualenv_command: "{{ ansible_python_interpreter }} -m virtualenv"
+
+- set_fact:
+    virtualenv_interpreter: "{{ virtualenv }}/bin/python"
+
+- pip:
+    name: virtualenv
+
+- pip:
+    name:
+      - 'botocore>=1.13.0'
+      - boto3
+      - coverage<5
+    virtualenv: "{{ virtualenv }}"
+    virtualenv_command: "{{ virtualenv_command }}"
+    virtualenv_site_packages: no
+
+- include_tasks: tests.yml
+  vars:
+    ansible_python_interpreter: "{{ virtualenv_interpreter }}"
+
+- file:
+    path: "{{ virtualenv }}"
+    state: absent
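These tasks build a throw-away virtualenv with pinned botocore/boto3 and point ansible_python_interpreter at it only while tests.yml runs, so the SDK versions under test do not depend on whatever the controller happens to ship. If the pin ever needed to be verified explicitly, a hedged sketch of such a check (not part of this target) could look like:

- name: Confirm the virtualenv provides the pinned botocore (illustrative)
  command: "{{ virtualenv_interpreter }} -c 'import botocore; print(botocore.__version__)'"
  register: botocore_version
  changed_when: false

- assert:
    that:
      - botocore_version.stdout is version('1.13.0', '>=')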
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/tasks/tests.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/tasks/tests.yml
new file mode 100644
index 00000000..8e8935f3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/tasks/tests.yml
@@ -0,0 +1,183 @@
+---
+- module_defaults:
+    group/aws:
+      aws_access_key: '{{ aws_access_key | default(omit) }}'
+      aws_secret_key: '{{ aws_secret_key | default(omit) }}'
+      security_token: '{{ security_token | default(omit) }}'
+      region: '{{ aws_region | default(omit) }}'
+
+  block:
+    - name: 'List available AZs in current Region'
+      aws_az_info:
+      register: region_azs
+
+    - name: check task return attributes
+      vars:
+        first_az: '{{ region_azs.availability_zones[0] }}'
+      assert:
+        that:
+          - region_azs is successful
+          - '"availability_zones" in region_azs'
+          - '"group_name" in first_az'
+          - '"messages" in first_az'
+          - '"network_border_group" in first_az'
+          - '"opt_in_status" in first_az'
+          - '"region_name" in first_az'
+          - '"state" in first_az'
+          - '"zone_id" in first_az'
+          - '"zone_name" in first_az'
+          - '"zone_type" in first_az'
+
+    - name: 'List available AZs in current Region - check_mode'
+      aws_az_info:
+      check_mode: yes
+      register: check_azs
+
+    - name: check task return attributes
+      vars:
+        first_az: '{{ check_azs.availability_zones[0] }}'
+      assert:
+        that:
+          - check_azs is successful
+          - '"availability_zones" in check_azs'
+          - '"group_name" in first_az'
+          - '"messages" in first_az'
+          - '"network_border_group" in first_az'
+          - '"opt_in_status" in first_az'
+          - '"region_name" in first_az'
+          - '"state" in first_az'
+          - '"zone_id" in first_az'
+          - '"zone_name" in first_az'
+          - '"zone_type" in first_az'
+
+
+    # Be specific - aws_region isn't guaranteed to be any specific value
+    - name: 'List Available AZs in us-east-1'
+      aws_az_info:
+        region: 'us-east-1'
+      register: us_east_1
+
+    - name: 'Check that an AZ from us-east-1 has valid looking attributes'
+      vars:
+        first_az: '{{ us_east_1.availability_zones[0] }}'
+      assert:
+        that:
+          - us_east_1 is successful
+          - '"availability_zones" in us_east_1'
+          - '"group_name" in first_az'
+          - '"messages" in first_az'
+          - '"network_border_group" in first_az'
+          - '"opt_in_status" in first_az'
+          - '"region_name" in first_az'
+          - '"state" in first_az'
+          - '"zone_id" in first_az'
+          - '"zone_name" in first_az'
+          - '"zone_type" in first_az'
+          - first_az.group_name.startswith('us-east-1')
+          - first_az.network_border_group.startswith('us-east-1')
+          - first_az.region_name == 'us-east-1'
+          - first_az.zone_id.startswith('use1-az')
+          - not first_az.zone_id == "use1-az"
+          - first_az.zone_name.startswith('us-east-1')
+          - not first_az.zone_name == 'us-east-1'
+          - first_az.zone_type == 'availability-zone'
+
+    - name: 'Filter Available AZs in us-west-2 using - ("zone-name")'
+      aws_az_info:
+        region: 'us-west-2'
+        filters:
+          zone-name: 'us-west-2c'
+      register: us_west_2
+
+    - name: 'Check that an AZ from us-west-2 has attributes we expect'
+      vars:
+        first_az: '{{ us_west_2.availability_zones[0] }}'
+      assert:
+        that:
+          - us_west_2 is successful
+          - '"availability_zones" in us_west_2'
+          - us_west_2.availability_zones | length == 1
+          - '"group_name" in first_az'
+          - '"messages" in first_az'
+          - '"network_border_group" in first_az'
+          - '"opt_in_status" in first_az'
+          - '"region_name" in first_az'
+          - '"state" in first_az'
+          - '"zone_id" in first_az'
+          - '"zone_name" in first_az'
+          - '"zone_type" in first_az'
+          - first_az.group_name == 'us-west-2'
+          - first_az.network_border_group == 'us-west-2'
+          - first_az.region_name == 'us-west-2'
+          # AZs are mapped to the 'real' AZs on a per-account basis
+          - first_az.zone_id.startswith('usw2-az')
+          - not first_az.zone_id == 'usw2-az'
+          - first_az.zone_name == 'us-west-2c'
+          - first_az.zone_type == 'availability-zone'
+
+    - name: 'Filter Available AZs in eu-central-1 using _ ("zone_name")'
+      aws_az_info:
+        region: 'eu-central-1'
+        filters:
+          zone_name: 'eu-central-1b'
+      register: eu_central_1
+
+    - name: 'Check that eu-central-1b has the attributes we expect'
+      vars:
+        first_az: '{{ eu_central_1.availability_zones[0] }}'
+      assert:
+        that:
+          - eu_central_1 is successful
+          - '"availability_zones" in eu_central_1'
+          - eu_central_1.availability_zones | length == 1
+          - '"group_name" in first_az'
+          - '"messages" in first_az'
+          - '"network_border_group" in first_az'
+          - '"opt_in_status" in first_az'
+          - '"region_name" in first_az'
+          - '"state" in first_az'
+          - '"zone_id" in first_az'
+          - '"zone_name" in first_az'
+          - '"zone_type" in first_az'
+          - first_az.group_name == 'eu-central-1'
+          - first_az.network_border_group == 'eu-central-1'
+          - first_az.region_name == 'eu-central-1'
+          # AZs are mapped to the 'real' AZs on a per-account basis
+          - first_az.zone_id.startswith('euc1-az')
+          - not first_az.zone_id == "euc1-az"
+          - first_az.zone_name == 'eu-central-1b'
+          - first_az.zone_type == 'availability-zone'
+
+    - name: 'Filter Available AZs in eu-west-2 using _ and - ("zone_name" and "zone-name") : _ wins '
+      aws_az_info:
+        region: 'eu-west-2'
+        filters:
+          zone-name: 'eu-west-2a'
+          zone_name: 'eu-west-2c'
+      register: eu_west_2
+
+    - name: 'Check that we get the AZ specified by zone_name rather than zone-name'
+      vars:
+        first_az: '{{ eu_west_2.availability_zones[0] }}'
+      assert:
+        that:
+          - eu_west_2 is successful
+          - '"availability_zones" in eu_west_2'
+          - eu_west_2.availability_zones | length == 1
+          - '"group_name" in first_az'
+          - '"messages" in first_az'
+          - '"network_border_group" in first_az'
+          - '"opt_in_status" in first_az'
+          - '"region_name" in first_az'
+          - '"state" in first_az'
+          - '"zone_id" in first_az'
+          - '"zone_name" in first_az'
+          - '"zone_type" in first_az'
+          - first_az.group_name == 'eu-west-2'
+          - first_az.network_border_group == 'eu-west-2'
+          - first_az.region_name == 'eu-west-2'
+          # AZs are mapped to the 'real' AZs on a per-account basis
+          - first_az.zone_id.startswith('euw2-az')
+          - not first_az.zone_id == "euw2-az"
+          - first_az.zone_name == 'eu-west-2c'
+          - first_az.zone_type == 'availability-zone'
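The assertions above pin down two things: the return shape (availability_zones is a list of dicts carrying zone_name, zone_id, group_name, and so on) and the filter behaviour, where both the AWS-style zone-name key and the snake_case zone_name key are accepted and the snake_case spelling wins when both are given. Outside the test suite that return shape is typically consumed along the lines of the following sketch (variable names are illustrative):

- name: Collect the zone names available in the current region (illustrative)
  aws_az_info:
  register: az_facts

- name: Reduce the result to a plain list of zone names
  set_fact:
    az_names: "{{ az_facts.availability_zones | map(attribute='zone_name') | list }}"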
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/aliases b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/aliases
new file mode 100644
index 00000000..6e3860be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group2
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/tasks/main.yaml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/tasks/main.yaml
new file mode 100644
index 00000000..c40d0f11
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/tasks/main.yaml
@@ -0,0 +1,18 @@
+- module_defaults:
+ group/aws:
+ region: "{{ aws_region }}"
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ block:
+ - name: retrieve caller facts
+ aws_caller_info:
+ register: result
+
+ - name: assert correct keys are returned
+ assert:
+ that:
+ - result.account is not none
+ - result.arn is not none
+ - result.user_id is not none
+ - result.account_alias is not none
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_s3/aliases b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_s3/aliases
new file mode 100644
index 00000000..72a9fb4f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_s3/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group4
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_s3/defaults/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_s3/defaults/main.yml
new file mode 100644
index 00000000..eb7dd2d3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_s3/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+# defaults file for s3
+bucket_name: '{{resource_prefix}}'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_s3/files/hello.txt b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_s3/files/hello.txt
new file mode 100644
index 00000000..8ab686ea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_s3/files/hello.txt
@@ -0,0 +1 @@
+Hello, World!
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_s3/files/test.png b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_s3/files/test.png
new file mode 100644
index 00000000..1dc64bab
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_s3/files/test.png
Binary files differ
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_s3/meta/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_s3/meta/main.yml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_s3/meta/main.yml
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_s3/tasks/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_s3/tasks/main.yml
new file mode 100644
index 00000000..5d811ce1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_s3/tasks/main.yml
@@ -0,0 +1,663 @@
+---
+# Integration tests for aws_s3
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+
+ block:
+ - name: Create temporary directory
+ tempfile:
+ state: directory
+ register: tmpdir
+
+ - name: Create content
+ set_fact:
+ content: "{{ lookup('password', '/dev/null chars=ascii_letters,digits,hexdigits,punctuation') }}"
+
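+  # The empty group/aws dict in the task-level module_defaults below overrides
+  # the credentials configured at the top of this play, so the task exercises
+  # the unauthenticated failure path.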
+ - name: test create bucket without permissions
+ module_defaults: { group/aws: {} }
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: create
+ register: result
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - result is failed
+ - "result.msg != 'MODULE FAILURE'"
+
+ - name: test create bucket
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: create
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: trying to create a bucket name that already exists
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: create
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: Create local upload.txt
+ copy:
+ content: "{{ content }}"
+ dest: "{{ tmpdir.path }}/upload.txt"
+
+ - name: stat the file
+ stat:
+ path: "{{ tmpdir.path }}/upload.txt"
+ get_checksum: yes
+ register: upload_file
+
+ - name: test putting an object in the bucket
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: put
+ src: "{{ tmpdir.path }}/upload.txt"
+ object: delete.txt
+ retries: 3
+ delay: 3
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - result.msg == "PUT operation complete"
+
+ - name: test using aws_s3 with async
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: put
+ src: "{{ tmpdir.path }}/upload.txt"
+ object: delete.txt
+ register: test_async
+ async: 30
+ poll: 0
+
+ - name: ensure it completed
+ async_status:
+ jid: "{{ test_async.ansible_job_id }}"
+ register: status
+ until: status is finished
+ retries: 10
+
+ - name: test put with overwrite=different and unmodified object
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: put
+ src: "{{ tmpdir.path }}/upload.txt"
+ object: delete.txt
+ overwrite: different
+ retries: 3
+ delay: 3
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: check that roles file lookups work as expected
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: put
+ src: hello.txt
+ object: delete.txt
+ retries: 3
+ delay: 3
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - result.msg == "PUT operation complete"
+
+ - name: test put with overwrite=never
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: put
+ src: "{{ tmpdir.path }}/upload.txt"
+ object: delete.txt
+ overwrite: never
+ retries: 3
+ delay: 3
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: test put with overwrite=different and modified object
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: put
+ src: "{{ tmpdir.path }}/upload.txt"
+ object: delete.txt
+ overwrite: different
+ retries: 3
+ delay: 3
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: test put with overwrite=always
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: put
+ src: "{{ tmpdir.path }}/upload.txt"
+ object: delete.txt
+ overwrite: always
+ retries: 3
+ delay: 3
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: test get object
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: get
+ dest: "{{ tmpdir.path }}/download.txt"
+ object: delete.txt
+ retries: 3
+ delay: 3
+ register: result
+ until: "result.msg == 'GET operation complete'"
+
+ - name: stat the file so we can compare the checksums
+ stat:
+ path: "{{ tmpdir.path }}/download.txt"
+ get_checksum: yes
+ register: download_file
+
+ - assert:
+ that:
+ - upload_file.stat.checksum == download_file.stat.checksum
+
+ - name: test get with overwrite=different and identical files
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: get
+ dest: "{{ tmpdir.path }}/download.txt"
+ object: delete.txt
+ overwrite: different
+ retries: 3
+ delay: 3
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: modify destination
+ copy:
+ dest: "{{ tmpdir.path }}/download.txt"
+ src: hello.txt
+
+ - name: test get with overwrite=never
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: get
+ dest: "{{ tmpdir.path }}/download.txt"
+ object: delete.txt
+ overwrite: never
+ retries: 3
+ delay: 3
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: test get with overwrite=different and modified file
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: get
+ dest: "{{ tmpdir.path }}/download.txt"
+ object: delete.txt
+ overwrite: different
+ retries: 3
+ delay: 3
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: test get with overwrite=always
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: get
+ dest: "{{ tmpdir.path }}/download.txt"
+ object: delete.txt
+ overwrite: always
+ retries: 3
+ delay: 3
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: test geturl of the object
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: geturl
+ object: delete.txt
+ retries: 3
+ delay: 3
+ register: result
+ until: result is changed
+
+ - assert:
+ that:
+ - "'Download url:' in result.msg"
+ - result is changed
+
+ - name: test getstr of the object
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: getstr
+ object: delete.txt
+ retries: 3
+ delay: 3
+ register: result
+
+ - assert:
+ that:
+ - result.msg == "GET operation complete"
+ - result.contents == content
+
+ - name: test list to get all objects in the bucket
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: list
+ retries: 3
+ delay: 3
+ register: result
+
+ - assert:
+ that:
+ - "'delete.txt' in result.s3_keys"
+ - result.msg == "LIST operation complete"
+
+ - name: test delobj to just delete an object in the bucket
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: delobj
+ object: delete.txt
+ retries: 3
+ delay: 3
+ register: result
+
+ - assert:
+ that:
+ - "'Object deleted from bucket' in result.msg"
+ - result is changed
+
+ - name: test putting an encrypted object in the bucket
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: put
+ src: "{{ tmpdir.path }}/upload.txt"
+ encrypt: yes
+ object: delete_encrypt.txt
+ retries: 3
+ delay: 3
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - result.msg == "PUT operation complete"
+
+ - name: test get encrypted object
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: get
+ dest: "{{ tmpdir.path }}/download_encrypted.txt"
+ object: delete_encrypt.txt
+ retries: 3
+ delay: 3
+ register: result
+ until: "result.msg == 'GET operation complete'"
+
+ - name: stat the file so we can compare the checksums
+ stat:
+ path: "{{ tmpdir.path }}/download_encrypted.txt"
+ get_checksum: yes
+ register: download_file
+
+ - assert:
+ that:
+ - upload_file.stat.checksum == download_file.stat.checksum
+
+ - name: delete encrypted file
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: delobj
+ object: delete_encrypt.txt
+ retries: 3
+ delay: 3
+
+ - name: test putting an aws:kms encrypted object in the bucket
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: put
+ src: "{{ tmpdir.path }}/upload.txt"
+ encrypt: yes
+ encryption_mode: aws:kms
+ object: delete_encrypt_kms.txt
+ retries: 3
+ delay: 3
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - result.msg == "PUT operation complete"
+
+ - name: test get KMS encrypted object
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: get
+ dest: "{{ tmpdir.path }}/download_kms.txt"
+ object: delete_encrypt_kms.txt
+ retries: 3
+ delay: 3
+ register: result
+ until: "result.msg == 'GET operation complete'"
+
+ - name: get the stat of the file so we can compare the checksums
+ stat:
+ path: "{{ tmpdir.path }}/download_kms.txt"
+ get_checksum: yes
+ register: download_file
+
+ - assert:
+ that:
+ - upload_file.stat.checksum == download_file.stat.checksum
+
+ # FIXME - could use a test that checks uploaded file is *actually* aws:kms encrypted
+
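+  # A possible way to address the FIXME above (a hedged sketch, not wired into
+  # the suite): query the object's metadata with the AWS CLI and assert on the
+  # ServerSideEncryption field. This assumes the aws CLI is available and
+  # credentialled on the controller.
+  #
+  # - name: inspect server-side encryption of the uploaded object
+  #   command: >-
+  #     aws s3api head-object
+  #     --bucket {{ bucket_name }}
+  #     --key delete_encrypt_kms.txt
+  #   register: head_object
+  #
+  # - assert:
+  #     that:
+  #       - (head_object.stdout | from_json).ServerSideEncryption == 'aws:kms'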
+ - name: delete KMS encrypted file
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: delobj
+ object: delete_encrypt_kms.txt
+ retries: 3
+ delay: 3
+
+ # FIXME: could use a test that checks non standard KMS key
+ # but that would require ability to create and remove such keys.
+ # PRs exist for that, but propose deferring until after merge.
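+  # If key management does become available to the test account, a non-default
+  # key test might look roughly like the sketch below (module and return-value
+  # names are assumptions, not verified against this collection version):
+  #
+  # - community.aws.aws_kms:
+  #     alias: "{{ resource_prefix }}-s3"
+  #     state: present
+  #   register: kms_key
+  #
+  # - aws_s3:
+  #     bucket: "{{ bucket_name }}"
+  #     mode: put
+  #     src: "{{ tmpdir.path }}/upload.txt"
+  #     encrypt: yes
+  #     encryption_mode: aws:kms
+  #     encryption_kms_key_id: "{{ kms_key.key_id }}"
+  #     object: delete_encrypt_kms_custom.txt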
+
+ - name: test creation of empty path
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: create
+ object: foo/bar/baz/
+ retries: 3
+ delay: 3
+ register: result
+
+ - assert:
+ that:
+ - "'Virtual directory foo/bar/baz/ created' in result.msg"
+ - result is changed
+
+ - name: test deletion of empty path
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: delobj
+ object: foo/bar/baz/
+ retries: 3
+ delay: 3
+
+ - name: test delete bucket
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: delete
+ register: result
+ retries: 3
+ delay: 3
+ until: result is changed
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: test create a bucket with a dot in the name
+ aws_s3:
+ bucket: "{{ bucket_name + '.bucket' }}"
+ mode: create
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: test delete a bucket with a dot in the name
+ aws_s3:
+ bucket: "{{ bucket_name + '.bucket' }}"
+ mode: delete
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: test delete a nonexistent bucket
+ aws_s3:
+ bucket: "{{ bucket_name + '.bucket' }}"
+ mode: delete
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: make tempfile 4 GB for OSX
+ command:
+ _raw_params: "dd if=/dev/zero of={{ tmpdir.path }}/largefile bs=1m count=4096"
+ when: ansible_distribution == 'MacOSX'
+
+ - name: make tempfile 4 GB for Linux
+ command:
+ _raw_params: "dd if=/dev/zero of={{ tmpdir.path }}/largefile bs=1M count=4096"
+ when: ansible_system == 'Linux'
+
+ - name: test multipart download - platform specific
+ block:
+ - name: make a bucket to upload the file
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: create
+
+ - name: upload the file to the bucket
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: put
+ src: "{{ tmpdir.path }}/largefile"
+ object: multipart.txt
+
+ - name: download file once
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: get
+ dest: "{{ tmpdir.path }}/download.txt"
+ object: multipart.txt
+ overwrite: different
+ retries: 3
+ delay: 3
+ until: "result.msg == 'GET operation complete'"
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: download file again
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: get
+ dest: "{{ tmpdir.path }}/download.txt"
+ object: multipart.txt
+ overwrite: different
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+ when: ansible_system == 'Linux' or ansible_distribution == 'MacOSX'
+
+ - name: create an object from static content
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ object: put-content.txt
+ mode: put
+ content: >-
+ test content
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: ensure idempotency on static content
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ object: put-content.txt
+ mode: put
+ overwrite: different
+ content: >-
+ test content
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: fetch test content
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: getstr
+ object: put-content.txt
+ register: result
+
+ - assert:
+ that:
+ - result.contents == "test content"
+
+ - set_fact:
+ put_template_text: test template
+
+ - name: create an object from a template
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ object: put-template.txt
+ mode: put
+ content: "{{ lookup('template', 'templates/put-template.txt.j2') }}"
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: fetch template content
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: getstr
+ object: put-template.txt
+ register: result
+
+ - assert:
+ that:
+ - result.contents == "{{ lookup('template', 'templates/put-template.txt.j2') }}"
+
+ # at present, there is no lookup that can process binary data, so we use slurp instead
+ - slurp:
+ src: "{{ role_path }}/files/test.png"
+ register: put_binary
+
+ - name: create an object from binary data
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ object: put-binary.bin
+ mode: put
+ content_base64: "{{ put_binary.content }}"
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: fetch binary content
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: get
+ dest: "{{ tmpdir.path }}/download_binary.bin"
+ object: put-binary.bin
+ register: result
+
+ - name: stat the files so we can compare the checksums
+ stat:
+ path: "{{ item }}"
+ get_checksum: yes
+ loop:
+ - "{{ role_path }}/files/test.png"
+ - "{{ tmpdir.path }}/download_binary.bin"
+ register: binary_files
+
+ - assert:
+ that:
+ - binary_files.results[0].stat.checksum == binary_files.results[1].stat.checksum
+
+ always:
+ - name: remove uploaded files
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: delobj
+ object: "{{ item }}"
+ loop:
+ - hello.txt
+ - delete.txt
+ - delete_encrypt.txt
+ - delete_encrypt_kms.txt
+ - put-content.txt
+ - put-template.txt
+ - put-binary.bin
+ ignore_errors: yes
+
+ - name: delete temporary files
+ file:
+ state: absent
+ path: "{{ tmpdir.path }}"
+ ignore_errors: yes
+
+ - name: delete the bucket
+ aws_s3:
+ bucket: "{{ bucket_name }}"
+ mode: delete
+ ignore_errors: yes
+
+ - name: delete the dot bucket
+ aws_s3:
+ bucket: "{{ bucket_name + '.bucket' }}"
+ mode: delete
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_s3/templates/put-template.txt.j2 b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_s3/templates/put-template.txt.j2
new file mode 100644
index 00000000..2a75e9f2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/aws_s3/templates/put-template.txt.j2
@@ -0,0 +1,2 @@
+template:
+{{ put_template_text }}
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/aliases b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/aliases
new file mode 100644
index 00000000..55555be7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+shippable/aws/group2
+cloudformation_info
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/defaults/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/defaults/main.yml
new file mode 100644
index 00000000..aaf0ca7e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/defaults/main.yml
@@ -0,0 +1,8 @@
+stack_name: "{{ resource_prefix }}"
+
+vpc_name: '{{ resource_prefix }}-vpc'
+vpc_seed: '{{ resource_prefix }}'
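+# The CIDRs below are derived from the resource prefix with a seeded random
+# filter, so every CI job gets a stable, job-unique 10.x.0.0/16 network and
+# concurrent runs are unlikely to collide.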
+vpc_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.0.0/16'
+subnet_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.32.0/24'
+
+ec2_ami_name: 'amzn2-ami-hvm-2.*-x86_64-gp2'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/files/cf_template.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/files/cf_template.json
new file mode 100644
index 00000000..ff4c5693
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/files/cf_template.json
@@ -0,0 +1,37 @@
+{
+ "AWSTemplateFormatVersion" : "2010-09-09",
+
+ "Description" : "Create an Amazon EC2 instance.",
+
+ "Parameters" : {
+ "InstanceType" : {
+ "Description" : "EC2 instance type",
+ "Type" : "String",
+ "Default" : "t3.nano",
+ "AllowedValues" : [ "t3.micro", "t3.nano"]
+ },
+ "ImageId" : {
+ "Type" : "String"
+ },
+ "SubnetId" : {
+ "Type" : "String"
+ }
+ },
+
+ "Resources" : {
+ "EC2Instance" : {
+ "Type" : "AWS::EC2::Instance",
+ "Properties" : {
+ "InstanceType" : { "Ref" : "InstanceType" },
+ "ImageId" : { "Ref" : "ImageId" },
+ "SubnetId": { "Ref" : "SubnetId" }
+ }
+ }
+ },
+
+ "Outputs" : {
+ "InstanceId" : {
+ "Value" : { "Ref" : "EC2Instance" }
+ }
+ }
+}
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/tasks/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/tasks/main.yml
new file mode 100644
index 00000000..9b89722b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/tasks/main.yml
@@ -0,0 +1,463 @@
+---
+
+- module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key | default(omit) }}'
+ aws_secret_key: '{{ aws_secret_key | default(omit) }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region | default(omit) }}'
+
+ block:
+
+ # ==== Env setup ==========================================================
+ - name: list available AZs
+ aws_az_info:
+ register: region_azs
+
+ - name: pick an AZ for testing
+ set_fact:
+ availability_zone: "{{ region_azs.availability_zones[0].zone_name }}"
+
+ - name: Create a test VPC
+ ec2_vpc_net:
+ name: "{{ vpc_name }}"
+ cidr_block: "{{ vpc_cidr }}"
+ tags:
+ Name: Cloudformation testing
+ register: testing_vpc
+
+ - name: Create a test subnet
+ ec2_vpc_subnet:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: "{{ subnet_cidr }}"
+ az: "{{ availability_zone }}"
+ register: testing_subnet
+
+ - name: Find AMI to use
+ ec2_ami_info:
+ owners: 'amazon'
+ filters:
+ name: '{{ ec2_ami_name }}'
+ register: ec2_amis
+
+ - name: Set fact with latest AMI
+ vars:
+ latest_ami: '{{ ec2_amis.images | sort(attribute="creation_date") | last }}'
+ set_fact:
+ ec2_ami_image: '{{ latest_ami.image_id }}'
+
+ # ==== Cloudformation tests ===============================================
+
+ # 1. Basic stack creation (check mode, actual run and idempotency)
+ # 2. Tags
+ # 3. cloudformation_info tests (basic + all_facts)
+ # 4. termination_protection
+ # 5. create_changeset + changeset_name
+
+ # There is still scope to add tests for -
+ # 1. capabilities
+ # 2. stack_policy
+ # 3. on_create_failure (covered in unit tests)
+ # 4. Passing in a role
+ # 5. nested stacks?
+
+
+ - name: create a cloudformation stack (check mode)
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ template_body: "{{ lookup('file','cf_template.json') }}"
+ template_parameters:
+ InstanceType: "t3.nano"
+ ImageId: "{{ ec2_ami_image }}"
+ SubnetId: "{{ testing_subnet.subnet.id }}"
+ tags:
+ Stack: "{{ stack_name }}"
+ test: "{{ resource_prefix }}"
+ register: cf_stack
+ check_mode: yes
+
+ - name: check task return attributes
+ assert:
+ that:
+ - cf_stack.changed
+ - "'msg' in cf_stack and 'New stack would be created' in cf_stack.msg"
+
+ - name: create a cloudformation stack
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ template_body: "{{ lookup('file','cf_template.json') }}"
+ template_parameters:
+ InstanceType: "t3.nano"
+ ImageId: "{{ ec2_ami_image }}"
+ SubnetId: "{{ testing_subnet.subnet.id }}"
+ tags:
+ Stack: "{{ stack_name }}"
+ test: "{{ resource_prefix }}"
+ register: cf_stack
+
+ - name: check task return attributes
+ assert:
+ that:
+ - cf_stack.changed
+ - "'events' in cf_stack"
+ - "'output' in cf_stack and 'Stack CREATE complete' in cf_stack.output"
+ - "'stack_outputs' in cf_stack and 'InstanceId' in cf_stack.stack_outputs"
+ - "'stack_resources' in cf_stack"
+
+ - name: create a cloudformation stack (check mode) (idempotent)
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ template_body: "{{ lookup('file','cf_template.json') }}"
+ template_parameters:
+ InstanceType: "t3.nano"
+ ImageId: "{{ ec2_ami_image }}"
+ SubnetId: "{{ testing_subnet.subnet.id }}"
+ tags:
+ Stack: "{{ stack_name }}"
+ test: "{{ resource_prefix }}"
+ register: cf_stack
+ check_mode: yes
+
+ - name: check task return attributes
+ assert:
+ that:
+ - not cf_stack.changed
+
+ - name: create a cloudformation stack (idempotent)
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ template_body: "{{ lookup('file','cf_template.json') }}"
+ template_parameters:
+ InstanceType: "t3.nano"
+ ImageId: "{{ ec2_ami_image }}"
+ SubnetId: "{{ testing_subnet.subnet.id }}"
+ tags:
+ Stack: "{{ stack_name }}"
+ test: "{{ resource_prefix }}"
+ register: cf_stack
+
+ - name: check task return attributes
+ assert:
+ that:
+ - not cf_stack.changed
+ - "'output' in cf_stack and 'Stack is already up-to-date.' in cf_stack.output"
+ - "'stack_outputs' in cf_stack and 'InstanceId' in cf_stack.stack_outputs"
+ - "'stack_resources' in cf_stack"
+
+ - name: get stack details
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: stack_info
+
+ - name: assert stack info
+ assert:
+ that:
+ - "'cloudformation' in stack_info"
+ - "stack_info.cloudformation | length == 1"
+ - "stack_name in stack_info.cloudformation"
+ - "'stack_description' in stack_info.cloudformation[stack_name]"
+ - "'stack_outputs' in stack_info.cloudformation[stack_name]"
+ - "'stack_parameters' in stack_info.cloudformation[stack_name]"
+ - "'stack_tags' in stack_info.cloudformation[stack_name]"
+ - "stack_info.cloudformation[stack_name].stack_tags.Stack == stack_name"
+
+ - name: get stack details (checkmode)
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: stack_info
+ check_mode: yes
+
+ - name: assert stack info
+ assert:
+ that:
+ - "'cloudformation' in stack_info"
+ - "stack_info.cloudformation | length == 1"
+ - "stack_name in stack_info.cloudformation"
+ - "'stack_description' in stack_info.cloudformation[stack_name]"
+ - "'stack_outputs' in stack_info.cloudformation[stack_name]"
+ - "'stack_parameters' in stack_info.cloudformation[stack_name]"
+ - "'stack_tags' in stack_info.cloudformation[stack_name]"
+ - "stack_info.cloudformation[stack_name].stack_tags.Stack == stack_name"
+
+ - name: get stack details (all_facts)
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ all_facts: yes
+ register: stack_info
+
+ - name: assert stack info
+ assert:
+ that:
+ - "'stack_events' in stack_info.cloudformation[stack_name]"
+ - "'stack_policy' in stack_info.cloudformation[stack_name]"
+ - "'stack_resource_list' in stack_info.cloudformation[stack_name]"
+ - "'stack_resources' in stack_info.cloudformation[stack_name]"
+ - "'stack_template' in stack_info.cloudformation[stack_name]"
+
+ - name: get stack details (all_facts) (checkmode)
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ all_facts: yes
+ register: stack_info
+ check_mode: yes
+
+ - name: assert stack info
+ assert:
+ that:
+ - "'stack_events' in stack_info.cloudformation[stack_name]"
+ - "'stack_policy' in stack_info.cloudformation[stack_name]"
+ - "'stack_resource_list' in stack_info.cloudformation[stack_name]"
+ - "'stack_resources' in stack_info.cloudformation[stack_name]"
+ - "'stack_template' in stack_info.cloudformation[stack_name]"
+
+ # ==== Cloudformation tests (create changeset) ============================
+
+ # try to create a changeset by changing instance type
+ - name: create a changeset
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ create_changeset: yes
+ changeset_name: "test-changeset"
+ template_body: "{{ lookup('file','cf_template.json') }}"
+ template_parameters:
+ InstanceType: "t3.micro"
+ ImageId: "{{ ec2_ami_image }}"
+ SubnetId: "{{ testing_subnet.subnet.id }}"
+ tags:
+ Stack: "{{ stack_name }}"
+ test: "{{ resource_prefix }}"
+ register: create_changeset_result
+
+ - name: assert changeset created
+ assert:
+ that:
+ - "create_changeset_result.changed"
+ - "'change_set_id' in create_changeset_result"
+ - "'Stack CREATE_CHANGESET complete' in create_changeset_result.output"
+
+ - name: get stack details with changesets
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ stack_change_sets: True
+ register: stack_info
+
+ - name: assert changesets in info
+ assert:
+ that:
+ - "'stack_change_sets' in stack_info.cloudformation[stack_name]"
+
+ - name: get stack details with changesets (checkmode)
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ stack_change_sets: True
+ register: stack_info
+ check_mode: yes
+
+ - name: assert changesets in info
+ assert:
+ that:
+ - "'stack_change_sets' in stack_info.cloudformation[stack_name]"
+
+ # try to create an empty changeset by passing in unchanged template
+ - name: create a changeset from an unchanged template
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ create_changeset: yes
+ template_body: "{{ lookup('file','cf_template.json') }}"
+ template_parameters:
+ InstanceType: "t3.nano"
+ ImageId: "{{ ec2_ami_image }}"
+ SubnetId: "{{ testing_subnet.subnet.id }}"
+ tags:
+ Stack: "{{ stack_name }}"
+ test: "{{ resource_prefix }}"
+ register: create_changeset_result
+
+ - name: assert the changeset contained no changes
+ assert:
+ that:
+ - "not create_changeset_result.changed"
+ - "'The created Change Set did not contain any changes to this stack and was deleted.' in create_changeset_result.output"
+
+ # ==== Cloudformation tests (termination_protection) ======================
+
+ - name: set termination protection to true
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ termination_protection: yes
+ template_body: "{{ lookup('file','cf_template.json') }}"
+ template_parameters:
+ InstanceType: "t3.nano"
+ ImageId: "{{ ec2_ami_image }}"
+ SubnetId: "{{ testing_subnet.subnet.id }}"
+ tags:
+ Stack: "{{ stack_name }}"
+ test: "{{ resource_prefix }}"
+ register: cf_stack
+
+# This fails - #65592
+# - name: check task return attributes
+# assert:
+# that:
+# - cf_stack.changed
+
+ - name: get stack details
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: stack_info
+
+ - name: assert stack info
+ assert:
+ that:
+ - "stack_info.cloudformation[stack_name].stack_description.enable_termination_protection"
+
+ - name: get stack details (checkmode)
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: stack_info
+ check_mode: yes
+
+ - name: assert stack info
+ assert:
+ that:
+ - "stack_info.cloudformation[stack_name].stack_description.enable_termination_protection"
+
+ - name: set termination protection to false
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ termination_protection: no
+ template_body: "{{ lookup('file','cf_template.json') }}"
+ template_parameters:
+ InstanceType: "t3.nano"
+ ImageId: "{{ ec2_ami_image }}"
+ SubnetId: "{{ testing_subnet.subnet.id }}"
+ tags:
+ Stack: "{{ stack_name }}"
+ test: "{{ resource_prefix }}"
+ register: cf_stack
+
+# This fails - #65592
+# - name: check task return attributes
+# assert:
+# that:
+# - cf_stack.changed
+
+ - name: get stack details
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: stack_info
+
+ - name: assert stack info
+ assert:
+ that:
+ - "not stack_info.cloudformation[stack_name].stack_description.enable_termination_protection"
+
+ - name: get stack details (checkmode)
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: stack_info
+ check_mode: yes
+
+ - name: assert stack info
+ assert:
+ that:
+ - "not stack_info.cloudformation[stack_name].stack_description.enable_termination_protection"
+
+ # ==== Cloudformation tests (delete stack tests) ==========================
+
+ - name: delete cloudformation stack (check mode)
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ state: absent
+ check_mode: yes
+ register: cf_stack
+
+ - name: check task return attributes
+ assert:
+ that:
+ - cf_stack.changed
+ - "'msg' in cf_stack and 'Stack would be deleted' in cf_stack.msg"
+
+ - name: delete cloudformation stack
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ state: absent
+ register: cf_stack
+
+ - name: check task return attributes
+ assert:
+ that:
+ - cf_stack.changed
+ - "'output' in cf_stack and 'Stack Deleted' in cf_stack.output"
+
+ - name: delete cloudformation stack (check mode) (idempotent)
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ state: absent
+ check_mode: yes
+ register: cf_stack
+
+ - name: check task return attributes
+ assert:
+ that:
+ - not cf_stack.changed
+ - "'msg' in cf_stack"
+ - >-
+ "Stack doesn't exist" in cf_stack.msg
+
+ - name: delete cloudformation stack (idempotent)
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ state: absent
+ register: cf_stack
+
+ - name: check task return attributes
+ assert:
+ that:
+ - not cf_stack.changed
+ - "'output' in cf_stack and 'Stack not found.' in cf_stack.output"
+
+ - name: get stack details
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: stack_info
+
+ - name: assert stack info
+ assert:
+ that:
+ - "not stack_info.cloudformation"
+
+ - name: get stack details (checkmode)
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: stack_info
+ check_mode: yes
+
+ - name: assert stack info
+ assert:
+ that:
+ - "not stack_info.cloudformation"
+
+ # ==== Cleanup ============================================================
+
+ always:
+
+ - name: delete stack
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ state: absent
+ ignore_errors: yes
+
+ - name: Delete test subnet
+ ec2_vpc_subnet:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: "{{ subnet_cidr }}"
+ state: absent
+ ignore_errors: yes
+
+ - name: Delete test VPC
+ ec2_vpc_net:
+ name: "{{ vpc_name }}"
+ cidr_block: "{{ vpc_cidr }}"
+ state: absent
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2/aliases b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2/aliases
new file mode 100644
index 00000000..72a9fb4f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group4
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2/defaults/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2/defaults/main.yml
new file mode 100644
index 00000000..cc65a725
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/16'
+subnet_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.1.0/24'
+ec2_ami_name: 'amzn2-ami-hvm-2.*-x86_64-gp2'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2/meta/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2/meta/main.yml
new file mode 100644
index 00000000..1f64f116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2/tasks/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2/tasks/main.yml
new file mode 100644
index 00000000..624ddbb7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2/tasks/main.yml
@@ -0,0 +1,208 @@
+---
+- module_defaults:
+ group/aws:
+ aws_region: '{{ aws_region }}'
+ aws_access_key: '{{ aws_access_key | default(omit) }}'
+ aws_secret_key: '{{ aws_secret_key | default(omit) }}'
+ security_token: '{{ security_token | default(omit) }}'
+ collections:
+ - community.aws
+
+ block:
+
+ # SETUP: vpc, ec2 key pair, subnet, security group, ec2 instance
+ - name: list available AZs
+ aws_az_info:
+ register: region_azs
+
+ - name: pick an AZ for testing
+ set_fact:
+ availability_zone: "{{ region_azs.availability_zones[0].zone_name }}"
+
+ - name: create a VPC to work in
+ ec2_vpc_net:
+ cidr_block: '{{ vpc_cidr }}'
+ state: present
+ name: '{{ resource_prefix }}_setup'
+ resource_tags:
+ Name: '{{ resource_prefix }}_setup'
+ register: setup_vpc
+
+ - name: create a key pair to use for creating an ec2 instance
+ ec2_key:
+ name: '{{ resource_prefix }}_setup'
+ state: present
+ register: setup_key
+
+ - name: create a subnet to use for creating an ec2 instance
+ ec2_vpc_subnet:
+ az: '{{ availability_zone }}'
+ tags: '{{ resource_prefix }}_setup'
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ cidr: '{{ subnet_cidr }}'
+ state: present
+ resource_tags:
+ Name: '{{ resource_prefix }}_setup'
+ register: setup_subnet
+
+ - name: create a security group to use for creating an ec2 instance
+ ec2_group:
+ name: '{{ resource_prefix }}_setup'
+ description: 'created by Ansible integration tests'
+ state: present
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ register: setup_sg
+
+ - name: Find AMI to use
+ ec2_ami_info:
+ owners: 'amazon'
+ filters:
+ name: '{{ ec2_ami_name }}'
+ register: ec2_amis
+
+ - name: Set fact with latest AMI
+ vars:
+ latest_ami: '{{ ec2_amis.images | sort(attribute="creation_date") | last }}'
+ set_fact:
+ ec2_ami_image: '{{ latest_ami.image_id }}'
+
+ # ============================================================
+
+ - name: test first instance is started
+ ec2:
+ instance_type: t2.micro
+ key_name: '{{ setup_key.key.name }}'
+ state: present
+ image: '{{ ec2_ami_image }}'
+ wait: yes
+ instance_tags:
+ ResourcePrefix: '{{ resource_prefix }}-integration_tests'
+ group_id: '{{ setup_sg.group_id }}'
+ vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
+ register: test_instance_1
+
+ - name: test second instance is started
+ ec2:
+ instance_type: t2.micro
+ key_name: '{{ setup_key.key.name }}'
+ state: present
+ image: '{{ ec2_ami_image }}'
+ wait: yes
+ instance_tags:
+ ResourcePrefix: '{{ resource_prefix }}-another_tag'
+ group_id: '{{ setup_sg.group_id }}'
+ vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
+ register: test_instance_2
+
+ - name: assert instances started
+ assert:
+ that:
+ - "test_instance_1.instances[0].state == 'running'"
+ - "test_instance_2.instances[0].state == 'running'"
+
+ - name: test first instance is terminated
+ ec2:
+ instance_ids: "{{ test_instance_1.instance_ids }}"
+ state: absent
+ wait: yes
+ register: result
+
+ - name: assert instance terminated
+ assert:
+ that:
+ - "result.instances[0].state == 'terminated'"
+
+ - name: test terminated instance is ignored when stopping
+ ec2:
+ instance_tags:
+ ResourcePrefix: '{{ resource_prefix }}-integration_tests'
+ state: stopped
+ wait: yes
+ register: result
+
+ - name: assert resource not changed
+ assert:
+ that:
+ - "result.changed == False"
+
+ - name: test second instance not terminated
+ ec2_instance_info:
+ instance_ids: "{{ test_instance_2.instance_ids }}"
+ register: result
+
+ - name: assert second instance still running
+ assert:
+ that:
+ - (result.instances|length) == 1
+ - "result.instances[0].state.name == 'running'"
+
+ - name: test second instance is stopped
+ ec2:
+ instance_ids: "{{ test_instance_2.instance_ids }}"
+ state: stopped
+ wait: yes
+ register: result
+
+ - name: assert second instance is stopped
+ assert:
+ that:
+ - "result.instances[0].state == 'stopped'"
+
+ # ========================================================
+
+ always:
+
+ # ============================================================
+
+
+ # TEAR DOWN: ec2 instance, ec2 key pair, security group, vpc
+ - name: Announce teardown start
+ debug:
+ msg: "***** TESTING COMPLETE. COMMENCE TEARDOWN *****"
+
+ - name: get list of test instances
+ ec2_instance_info:
+ filters:
+ "tag:ResourcePrefix": "{{ resource_prefix }}-*"
+ register: test_instances
+
+ - name: delete test instances
+ ec2:
+ instance_ids: "{{ test_instances.instances|map(attribute='instance_id') }}"
+ state: absent
+ wait: yes
+ ignore_errors: yes
+
+ - name: remove setup keypair
+ ec2_key:
+ name: '{{resource_prefix}}_setup'
+ state: absent
+ ignore_errors: yes
+
+ - name: remove setup security group
+ ec2_group:
+ name: '{{ resource_prefix }}_setup'
+ description: 'created by Ansible integration tests'
+ state: absent
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ ignore_errors: yes
+
+ - name: remove setup subnet
+ ec2_vpc_subnet:
+ az: '{{ availability_zone }}'
+ tags: '{{resource_prefix}}_setup'
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ cidr: '{{ subnet_cidr }}'
+ state: absent
+ resource_tags:
+ Name: '{{ resource_prefix }}_setup'
+ ignore_errors: yes
+
+ - name: remove setup VPC
+ ec2_vpc_net:
+ cidr_block: '{{ vpc_cidr }}'
+ state: absent
+ name: '{{ resource_prefix }}_setup'
+ resource_tags:
+ Name: '{{ resource_prefix }}_setup'
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/aliases b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/aliases
new file mode 100644
index 00000000..d74e5f69
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+shippable/aws/group3
+ec2_ami_info
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/defaults/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/defaults/main.yml
new file mode 100644
index 00000000..65bcbbc0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/defaults/main.yml
@@ -0,0 +1,11 @@
+---
+# defaults file for test_ec2_ami
+ec2_ami_name: '{{resource_prefix}}'
+ec2_ami_description: 'Created by ansible integration tests'
+# image for Amazon Linux AMI 2017.03.1 (HVM), SSD Volume Type
+ec2_ami_image:
+ us-east-1: ami-4fffc834
+ us-east-2: ami-ea87a78f
+
+vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/16'
+subnet_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.1.0/24'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/meta/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/meta/main.yml
new file mode 100644
index 00000000..1f64f116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/tasks/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/tasks/main.yml
new file mode 100644
index 00000000..38084ce2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/tasks/main.yml
@@ -0,0 +1,490 @@
+---
+# Test suite for ec2_ami
+- module_defaults:
+ group/aws:
+ aws_region: '{{ aws_region }}'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ block:
+
+ # ============================================================
+
+ # SETUP: vpc, ec2 key pair, subnet, security group, ec2 instance, snapshot
+ - name: create a VPC to work in
+ ec2_vpc_net:
+ cidr_block: '{{ vpc_cidr }}'
+ state: present
+ name: '{{ ec2_ami_name }}_setup'
+ resource_tags:
+ Name: '{{ ec2_ami_name }}_setup'
+ register: setup_vpc
+
+ - name: create a key pair to use for creating an ec2 instance
+ ec2_key:
+ name: '{{ ec2_ami_name }}_setup'
+ state: present
+ register: setup_key
+
+ - name: create a subnet to use for creating an ec2 instance
+ ec2_vpc_subnet:
+ az: '{{ ec2_region }}a'
+ tags: '{{ ec2_ami_name }}_setup'
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ cidr: '{{ subnet_cidr }}'
+ state: present
+ resource_tags:
+ Name: '{{ ec2_ami_name }}_setup'
+ register: setup_subnet
+
+ - name: create a security group to use for creating an ec2 instance
+ ec2_group:
+ name: '{{ ec2_ami_name }}_setup'
+ description: 'created by Ansible integration tests'
+ state: present
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ register: setup_sg
+
+ - name: provision ec2 instance to create an image
+ ec2:
+ key_name: '{{ setup_key.key.name }}'
+ instance_type: t2.micro
+ state: present
+ image: '{{ ec2_region_images[ec2_region] }}'
+ wait: yes
+ instance_tags:
+ '{{ec2_ami_name}}_instance_setup': 'integration_tests'
+ group_id: '{{ setup_sg.group_id }}'
+ vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
+ register: setup_instance
+
+ - name: take a snapshot of the instance to create an image
+ ec2_snapshot:
+ instance_id: '{{ setup_instance.instance_ids[0] }}'
+ device_name: /dev/xvda
+ state: present
+ register: setup_snapshot
+
+ # ============================================================
+
+ - name: test clean failure if not providing image_id or name with state=present
+ ec2_ami:
+ instance_id: '{{ setup_instance.instance_ids[0] }}'
+ state: present
+ description: '{{ ec2_ami_description }}'
+ tags:
+ Name: '{{ ec2_ami_name }}_ami'
+ wait: yes
+ root_device_name: /dev/xvda
+ register: result
+ ignore_errors: yes
+
+ - name: assert error message is helpful
+ assert:
+ that:
+ - result.failed
+ - "result.msg == 'one of the following is required: name, image_id'"
+
+ # ============================================================
+
+ - name: create an image from the instance
+ ec2_ami:
+ instance_id: '{{ setup_instance.instance_ids[0] }}'
+ state: present
+ name: '{{ ec2_ami_name }}_ami'
+ description: '{{ ec2_ami_description }}'
+ tags:
+ Name: '{{ ec2_ami_name }}_ami'
+ wait: yes
+ root_device_name: /dev/xvda
+ register: result
+
+ - name: set image id fact for deletion later
+ set_fact:
+ ec2_ami_image_id: "{{ result.image_id }}"
+
+ - name: assert that image has been created
+ assert:
+ that:
+ - "result.changed"
+ - "result.image_id.startswith('ami-')"
+ - "'Name' in result.tags and result.tags.Name == ec2_ami_name + '_ami'"
+
+ # ============================================================
+
+ - name: gather facts about the image created
+ ec2_ami_info:
+ image_ids: '{{ ec2_ami_image_id }}'
+ register: ami_facts_result
+ ignore_errors: true
+
+ - name: assert that the right image was found
+ assert:
+ that:
+ - "ami_facts_result.images[0].image_id == ec2_ami_image_id"
+
+ # some ec2_ami_info tests to test if the filtering is working fine.
+ # ============================================================
+
+ - name: gather info about the image
+ ec2_ami_info:
+ image_ids: '{{ ec2_region_images[ec2_region] }}'
+ register: ami_info_result
+ ignore_errors: true
+
+ - name: assert that the right image was found
+ assert:
+ that:
+ - "ami_info_result.images[0].image_id == '{{ ec2_region_images[ec2_region] }}'"
+
+ # ============================================================
+
+ - name: gather info about the image using boolean filter
+ ec2_ami_info:
+ image_ids: '{{ ec2_region_images[ec2_region] }}'
+ filters:
+ is-public: true
+ register: ami_info_result
+ ignore_errors: true
+
+ - name: assert that the right image was found
+ assert:
+ that:
+ - "ami_info_result.images[0].image_id == '{{ ec2_region_images[ec2_region] }}'"
+
+ # ============================================================
+
+ - name: gather info about the image using integer filter
+ ec2_ami_info:
+ image_ids: '{{ ec2_region_images[ec2_region] }}'
+ filters:
+ owner-id: 137112412989
+ register: ami_info_result
+ ignore_errors: true
+
+ - name: assert that the right image was found
+ assert:
+ that:
+ - "ami_info_result.images[0].image_id == '{{ ec2_region_images[ec2_region] }}'"
+
+ # ============================================================
+
+ - name: gather info about the image using string filter
+ ec2_ami_info:
+ image_ids: '{{ ec2_region_images[ec2_region] }}'
+ filters:
+ name: 'amzn-ami-hvm-2017.09.0.20170930-x86_64-gp2'
+ register: ami_info_result
+ ignore_errors: true
+
+ - name: assert that the right image was found
+ assert:
+ that:
+ - "ami_info_result.images[0].image_id == '{{ ec2_region_images[ec2_region] }}'"
+
+ # ec2_ami_info filtering tests end
+ # ============================================================
+
+ - name: delete the image
+ ec2_ami:
+ instance_id: '{{ setup_instance.instance_ids[0] }}'
+ state: absent
+ delete_snapshot: yes
+ name: '{{ ec2_ami_name }}_ami'
+ description: '{{ ec2_ami_description }}'
+ image_id: '{{ result.image_id }}'
+ tags:
+ Name: '{{ ec2_ami_name }}_ami'
+ wait: yes
+ ignore_errors: true
+ register: result
+
+ - name: assert that the image has been deleted
+ assert:
+ that:
+ - "result.changed"
+ - "'image_id' not in result"
+ - "result.snapshots_deleted"
+
+ # ==============================================================
+
+ - name: test removing an ami if no image ID is provided (expected failed=true)
+ ec2_ami:
+ state: absent
+ register: result
+ ignore_errors: yes
+
+ - name: assert that an image ID is required
+ assert:
+ that:
+ - "result.failed"
+ - "result.msg == 'state is absent but all of the following are missing: image_id'"
+
+ # ============================================================
+
+ - name: create an image from the snapshot
+ ec2_ami:
+ name: '{{ ec2_ami_name }}_ami'
+ description: '{{ ec2_ami_description }}'
+ state: present
+ launch_permissions:
+ user_ids: []
+ tags:
+ Name: '{{ ec2_ami_name }}_ami'
+ root_device_name: /dev/xvda
+ device_mapping:
+ - device_name: /dev/xvda
+ volume_type: gp2
+ size: 8
+ delete_on_termination: true
+ snapshot_id: '{{ setup_snapshot.snapshot_id }}'
+ register: result
+ ignore_errors: true
+
+ - name: set image id fact for deletion later
+ set_fact:
+ ec2_ami_image_id: "{{ result.image_id }}"
+ ec2_ami_snapshot: "{{ result.block_device_mapping['/dev/xvda'].snapshot_id }}"
+
+ - name: assert a new ami has been created
+ assert:
+ that:
+ - "result.changed"
+ - "result.image_id.startswith('ami-')"
+
+ # ============================================================
+
+ - name: test default launch permissions idempotence
+ ec2_ami:
+ description: '{{ ec2_ami_description }}'
+ state: present
+ name: '{{ ec2_ami_name }}_ami'
+ tags:
+ Name: '{{ ec2_ami_name }}_ami'
+ root_device_name: /dev/xvda
+ image_id: '{{ result.image_id }}'
+ launch_permissions:
+ user_ids: []
+ device_mapping:
+ - device_name: /dev/xvda
+ volume_type: gp2
+ size: 8
+ delete_on_termination: true
+ snapshot_id: '{{ setup_snapshot.snapshot_id }}'
+ register: result
+
+ - name: assert a new ami has not been created
+ assert:
+ that:
+ - "not result.changed"
+ - "result.image_id.startswith('ami-')"
+
+ # ============================================================
+
+ - name: add a tag to the AMI
+ ec2_ami:
+ state: present
+ description: '{{ ec2_ami_description }}'
+ image_id: '{{ result.image_id }}'
+ name: '{{ ec2_ami_name }}_ami'
+ tags:
+ New: Tag
+ register: result
+
+ - name: assert a tag was added
+ assert:
+ that:
+ - "'Name' in result.tags and result.tags.Name == ec2_ami_name + '_ami'"
+ - "'New' in result.tags and result.tags.New == 'Tag'"
+
+ - name: use purge_tags to remove a tag from the AMI
+ ec2_ami:
+ state: present
+ description: '{{ ec2_ami_description }}'
+ image_id: '{{ result.image_id }}'
+ name: '{{ ec2_ami_name }}_ami'
+ tags:
+ New: Tag
+ purge_tags: yes
+ register: result
+
+ - name: assert a tag was removed
+ assert:
+ that:
+ - "'Name' not in result.tags"
+ - "'New' in result.tags and result.tags.New == 'Tag'"
+
+ # ============================================================
+
+ - name: update AMI launch permissions
+ ec2_ami:
+ state: present
+ image_id: '{{ result.image_id }}'
+ description: '{{ ec2_ami_description }}'
+ tags:
+ Name: '{{ ec2_ami_name }}_ami'
+ launch_permissions:
+ group_names: ['all']
+ register: result
+
+ - name: assert launch permissions were updated
+ assert:
+ that:
+ - "result.changed"
+
+ # ============================================================
+
+ - name: modify the AMI description
+ ec2_ami:
+ state: present
+ image_id: '{{ result.image_id }}'
+ name: '{{ ec2_ami_name }}_ami'
+ description: '{{ ec2_ami_description }}CHANGED'
+ tags:
+ Name: '{{ ec2_ami_name }}_ami'
+ launch_permissions:
+ group_names: ['all']
+ register: result
+
+ - name: assert the description changed
+ assert:
+ that:
+ - "result.changed"
+
+ # ============================================================
+
+ - name: remove public launch permissions
+ ec2_ami:
+ state: present
+ image_id: '{{ result.image_id }}'
+ name: '{{ ec2_ami_name }}_ami'
+ tags:
+ Name: '{{ ec2_ami_name }}_ami'
+ launch_permissions:
+ group_names: []
+ register: result
+
+ - name: assert launch permissions were updated
+ assert:
+ that:
+ - "result.changed"
+
+ # ============================================================
+
+ - name: delete ami without deleting the snapshot (default is not to delete)
+ ec2_ami:
+ instance_id: '{{ setup_instance.instance_ids[0] }}'
+ state: absent
+ name: '{{ ec2_ami_name }}_ami'
+ image_id: '{{ ec2_ami_image_id }}'
+ tags:
+ Name: '{{ ec2_ami_name }}_ami'
+ wait: yes
+ ignore_errors: true
+ register: result
+
+ - name: assert that the image has been deleted
+ assert:
+ that:
+ - "result.changed"
+ - "'image_id' not in result"
+
+ - name: ensure the snapshot still exists
+ ec2_snapshot_info:
+ snapshot_ids:
+ - '{{ ec2_ami_snapshot }}'
+ register: snapshot_result
+
+ - name: assert the snapshot wasn't deleted
+ assert:
+ that:
+ - "snapshot_result.snapshots[0].snapshot_id == ec2_ami_snapshot"
+
+ - name: delete ami for a second time
+ ec2_ami:
+ instance_id: '{{ setup_instance.instance_ids[0] }}'
+ state: absent
+ name: '{{ ec2_ami_name }}_ami'
+ image_id: '{{ ec2_ami_image_id }}'
+ tags:
+ Name: '{{ ec2_ami_name }}_ami'
+ wait: yes
+ register: result
+
+ - name: assert that image does not exist
+ assert:
+ that:
+ - not result.changed
+ - not result.failed
+
+
+ # ============================================================
+
+ always:
+
+ # ============================================================
+
+ # TEAR DOWN: snapshot, ec2 instance, ec2 key pair, security group, vpc
+ - name: Announce teardown start
+ debug:
+ msg: "***** TESTING COMPLETE. COMMENCE TEARDOWN *****"
+
+ - name: delete ami
+ ec2_ami:
+ state: absent
+ image_id: "{{ ec2_ami_image_id }}"
+ name: '{{ ec2_ami_name }}_ami'
+ wait: yes
+ ignore_errors: yes
+
+ - name: remove setup snapshot of ec2 instance
+ ec2_snapshot:
+ state: absent
+ snapshot_id: '{{ setup_snapshot.snapshot_id }}'
+ ignore_errors: yes
+
+ - name: remove setup ec2 instance
+ ec2:
+ instance_type: t2.micro
+ instance_ids: '{{ setup_instance.instance_ids }}'
+ state: absent
+ wait: yes
+ instance_tags:
+ '{{ec2_ami_name}}_instance_setup': 'integration_tests'
+ group_id: '{{ setup_sg.group_id }}'
+ vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
+ ignore_errors: yes
+
+ - name: remove setup keypair
+ ec2_key:
+ name: '{{ec2_ami_name}}_setup'
+ state: absent
+ ignore_errors: yes
+
+ - name: remove setup security group
+ ec2_group:
+ name: '{{ ec2_ami_name }}_setup'
+ description: 'created by Ansible integration tests'
+ state: absent
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ ignore_errors: yes
+
+ - name: remove setup subnet
+ ec2_vpc_subnet:
+ az: '{{ ec2_region }}a'
+ tags: '{{ec2_ami_name}}_setup'
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ cidr: '{{ subnet_cidr }}'
+ state: absent
+ resource_tags:
+ Name: '{{ ec2_ami_name }}_setup'
+ ignore_errors: yes
+
+ - name: remove setup VPC
+ ec2_vpc_net:
+ cidr_block: '{{ vpc_cidr }}'
+ state: absent
+ name: '{{ ec2_ami_name }}_setup'
+ resource_tags:
+ Name: '{{ ec2_ami_name }}_setup'
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/vars/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/vars/main.yml
new file mode 100644
index 00000000..dac1fda2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/vars/main.yml
@@ -0,0 +1,20 @@
+---
+# vars file for test_ec2_ami
+
+# based on Amazon Linux AMI 2017.09.0 (HVM), SSD Volume Type
+ec2_region_images:
+ us-east-1: ami-8c1be5f6
+ us-east-2: ami-c5062ba0
+ us-west-1: ami-02eada62
+ us-west-2: ami-e689729e
+ ca-central-1: ami-fd55ec99
+ eu-west-1: ami-acd005d5
+ eu-central-1: ami-c7ee5ca8
+ eu-west-2: ami-1a7f6d7e
+ ap-southeast-1: ami-0797ea64
+ ap-southeast-2: ami-8536d6e7
+ ap-northeast-2: ami-9bec36f5
+ ap-northeast-1: ami-2a69be4c
+ ap-south-1: ami-4fc58420
+ sa-east-1: ami-f1344b9d
+ cn-north-1: ami-fba67596
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_elb_lb/aliases b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_elb_lb/aliases
new file mode 100644
index 00000000..6e3860be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_elb_lb/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group2
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_elb_lb/defaults/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_elb_lb/defaults/main.yml
new file mode 100644
index 00000000..76164523
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_elb_lb/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+# defaults file for test_ec2_elb_lb
+tag_prefix: '{{resource_prefix}}'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_elb_lb/meta/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_elb_lb/meta/main.yml
new file mode 100644
index 00000000..1f64f116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_elb_lb/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_elb_lb/tasks/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_elb_lb/tasks/main.yml
new file mode 100644
index 00000000..4d62df7e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_elb_lb/tasks/main.yml
@@ -0,0 +1,334 @@
+---
+# __Test Info__
+# Create a self signed cert and upload it to AWS
+# http://www.akadia.com/services/ssh_test_certificate.html
+# http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/ssl-server-cert.html
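+#
+# A minimal sketch (not executed by this test) of how such a certificate could
+# be generated locally with openssl before uploading it; the subject and file
+# names below are illustrative assumptions only:
+#
+#- name: generate a self signed certificate for ELB testing (example only)
+#  command: >
+#    openssl req -x509 -newkey rsa:2048 -nodes -days 365
+#    -subj "/CN=ansible-test.example.com"
+#    -keyout /tmp/ansible-test-elb.key
+#    -out /tmp/ansible-test-elb.crt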
+
+# __Test Outline__
+#
+# __ec2_elb_lb__
+# create test elb with listeners and certificate
+# change AZ's
+# change listeners
+# remove listeners
+# remove elb
+
+# __ec2-common__
+# test with no parameters (name missing)
+# test with only name (state missing)
+# test invalid region parameter
+# test with no authentication parameters
+# test credentials from environment
+
+- module_defaults:
+ group/aws:
+ region: "{{ aws_region }}"
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ block:
+
+ # ============================================================
+ # create test elb with listeners, certificate, and health check
+
+ - name: Create ELB
+ ec2_elb_lb:
+ name: "{{ tag_prefix }}"
+ state: present
+ zones:
+ - "{{ aws_region }}a"
+ - "{{ aws_region }}b"
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ - protocol: http
+ load_balancer_port: 8080
+ instance_port: 8080
+ health_check:
+ ping_protocol: http
+ ping_port: 80
+ ping_path: "/index.html"
+ response_timeout: 5
+ interval: 30
+ unhealthy_threshold: 2
+ healthy_threshold: 10
+ register: info
+
+ - assert:
+ that:
+ - 'info.changed'
+ - 'info.elb.status == "created"'
+ - '"{{ aws_region }}a" in info.elb.zones'
+ - '"{{ aws_region }}b" in info.elb.zones'
+ - 'info.elb.health_check.healthy_threshold == 10'
+ - 'info.elb.health_check.interval == 30'
+ - 'info.elb.health_check.target == "HTTP:80/index.html"'
+ - 'info.elb.health_check.timeout == 5'
+ - 'info.elb.health_check.unhealthy_threshold == 2'
+ - '[80, 80, "HTTP", "HTTP"] in info.elb.listeners'
+ - '[8080, 8080, "HTTP", "HTTP"] in info.elb.listeners'
+
+ # ============================================================
+
+ # check ports, would be cool, but we are at the mercy of AWS
+ # to start things in a timely manner
+
+ #- name: check to make sure 80 is listening
+ # wait_for: host={{ info.elb.dns_name }} port=80 timeout=600
+ # register: result
+
+ #- name: assert can connect to port 80
+ # assert:
+ #   that:
+ #     - 'result.state == "started"'
+
+ #- name: check to make sure 443 is listening
+ # wait_for: host={{ info.elb.dns_name }} port=443 timeout=600
+ # register: result
+
+ #- name: assert can connect to port 443
+ # assert:
+ #   that:
+ #     - 'result.state == "started"'
+
+ # ============================================================
+
+ # Change AZ's
+
+ - name: Change AZ's
+ ec2_elb_lb:
+ name: "{{ tag_prefix }}"
+ state: present
+ zones:
+ - "{{ aws_region }}c"
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ purge_zones: yes
+ health_check:
+ ping_protocol: http
+ ping_port: 80
+ ping_path: "/index.html"
+ response_timeout: 5
+ interval: 30
+ unhealthy_threshold: 2
+ healthy_threshold: 10
+ register: info
+
+
+
+ - assert:
+ that:
+ - 'info.elb.status == "ok"'
+ - 'info.changed'
+ - 'info.elb.zones[0] == "{{ aws_region }}c"'
+
+ # ============================================================
+
+ # Update AZ's
+
+ - name: Update AZ's
+ ec2_elb_lb:
+ name: "{{ tag_prefix }}"
+ state: present
+ zones:
+ - "{{ aws_region }}a"
+ - "{{ aws_region }}b"
+ - "{{ aws_region }}c"
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ purge_zones: yes
+ register: info
+
+ - assert:
+ that:
+ - 'info.changed'
+ - 'info.elb.status == "ok"'
+ - '"{{ aws_region }}a" in info.elb.zones'
+ - '"{{ aws_region }}b" in info.elb.zones'
+ - '"{{ aws_region }}c" in info.elb.zones'
+
+
+ # ============================================================
+
+ # Purge Listeners
+
+ - name: Purge Listeners
+ ec2_elb_lb:
+ name: "{{ tag_prefix }}"
+ state: present
+ zones:
+ - "{{ aws_region }}a"
+ - "{{ aws_region }}b"
+ - "{{ aws_region }}c"
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 81
+ purge_listeners: yes
+ register: info
+
+ - assert:
+ that:
+ - 'info.elb.status == "ok"'
+ - 'info.changed'
+ - '[80, 81, "HTTP", "HTTP"] in info.elb.listeners'
+ - 'info.elb.listeners|length == 1'
+
+
+
+ # ============================================================
+
+ # add Listeners
+
+ - name: Add Listeners
+ ec2_elb_lb:
+ name: "{{ tag_prefix }}"
+ state: present
+ zones:
+ - "{{ aws_region }}a"
+ - "{{ aws_region }}b"
+ - "{{ aws_region }}c"
+ listeners:
+ - protocol: http
+ load_balancer_port: 8081
+ instance_port: 8081
+ purge_listeners: no
+ register: info
+
+ - assert:
+ that:
+ - 'info.elb.status == "ok"'
+ - 'info.changed'
+ - '[80, 81, "HTTP", "HTTP"] in info.elb.listeners'
+ - '[8081, 8081, "HTTP", "HTTP"] in info.elb.listeners'
+ - 'info.elb.listeners|length == 2'
+
+
+ # ============================================================
+
+ - name: test with no name
+ ec2_elb_lb:
+ state: present
+ register: result
+ ignore_errors: true
+
+ - name: assert failure when called with no parameters
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg == "missing required arguments: name"'
+
+
+ # ============================================================
+ - name: test with only name (state missing)
+ ec2_elb_lb:
+ name: "{{ tag_prefix }}"
+ register: result
+ ignore_errors: true
+
+ - name: assert failure when called with only name
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg == "missing required arguments: state"'
+
+
+ # ============================================================
+ - name: test invalid region parameter
+ ec2_elb_lb:
+ name: "{{ tag_prefix }}"
+ state: present
+ region: 'asdf querty 1234'
+ zones:
+ - "{{ aws_region }}a"
+ - "{{ aws_region }}b"
+ - "{{ aws_region }}c"
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ register: result
+ ignore_errors: true
+
+ - name: assert invalid region parameter
+ assert:
+ that:
+ - 'result.failed'
+ - '"Region asdf querty 1234 does not seem to be available" in result.msg'
+
+
+ # ============================================================
+ - name: test no authentication parameters
+ ec2_elb_lb:
+ name: "{{ tag_prefix }}"
+ state: present
+ aws_access_key: '{{ omit }}'
+ aws_secret_key: '{{ omit }}'
+ security_token: '{{ omit }}'
+ zones:
+ - "{{ aws_region }}a"
+ - "{{ aws_region }}b"
+ - "{{ aws_region }}c"
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ register: result
+ ignore_errors: true
+
+ - name: assert failure with no authentication parameters
+ assert:
+ that:
+ - 'result.failed'
+ - '"No handler was ready to authenticate" in result.msg'
+
+
+ # ============================================================
+ - name: test credentials from environment
+ ec2_elb_lb:
+ name: "{{ tag_prefix }}"
+ state: present
+ aws_access_key: "{{ omit }}"
+ aws_secret_key: "{{ omit }}"
+ security_token: "{{ omit }}"
+ zones:
+ - "{{ aws_region }}a"
+ - "{{ aws_region }}b"
+ - "{{ aws_region }}c"
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 81
+ environment:
+ EC2_ACCESS_KEY: bogus_access_key
+ EC2_SECRET_KEY: bogus_secret_key
+ register: result
+ ignore_errors: true
+
+ - name: assert credentials from environment
+ assert:
+ that:
+ - 'result.failed'
+ - '"InvalidClientTokenId" in result.exception'
+
+
+ always:
+
+ # ============================================================
+ - name: remove the test load balancer completely
+ ec2_elb_lb:
+ name: "{{ tag_prefix }}"
+ state: absent
+ register: result
+
+ - name: assert the load balancer was removed
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.elb.name == "{{tag_prefix}}"'
+ - 'result.elb.status == "deleted"'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_elb_lb/vars/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_elb_lb/vars/main.yml
new file mode 100644
index 00000000..79194af1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_elb_lb/vars/main.yml
@@ -0,0 +1,2 @@
+---
+# vars file for test_ec2_elb_lb
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/aliases b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/aliases
new file mode 100644
index 00000000..96a59fec
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+shippable/aws/group1
+ec2_eni_info
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/defaults/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/defaults/main.yml
new file mode 100644
index 00000000..cb3895af
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/defaults/main.yml
@@ -0,0 +1,14 @@
+---
+vpc_seed_a: '{{ resource_prefix }}'
+vpc_seed_b: '{{ resource_prefix }}-ec2_eni'
+vpc_prefix: '10.{{ 256 | random(seed=vpc_seed_a) }}.{{ 256 | random(seed=vpc_seed_b ) }}'
+vpc_cidr: '{{ vpc_prefix}}.128/26'
+ip_1: "{{ vpc_prefix }}.132"
+ip_2: "{{ vpc_prefix }}.133"
+ip_3: "{{ vpc_prefix }}.134"
+ip_4: "{{ vpc_prefix }}.135"
+ip_5: "{{ vpc_prefix }}.136"
+
+ec2_ips:
+- "{{ vpc_prefix }}.137"
+- "{{ vpc_prefix }}.138"
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/main.yaml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/main.yaml
new file mode 100644
index 00000000..3a099661
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/main.yaml
@@ -0,0 +1,166 @@
+---
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+
+ collections:
+ - community.aws
+
+ block:
+ - name: Get available AZs
+ aws_az_info:
+ filters:
+ region-name: "{{ aws_region }}"
+ register: az_info
+
+ - name: Pick an AZ
+ set_fact:
+ availability_zone: "{{ az_info['availability_zones'][0]['zone_name'] }}"
+
+ # ============================================================
+ - name: create a VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ tags:
+ Name: "{{ resource_prefix }}-vpc"
+ Description: "Created by ansible-test"
+ register: vpc_result
+
+ - name: create a subnet
+ ec2_vpc_subnet:
+ cidr: "{{ vpc_cidr }}"
+ az: "{{ availability_zone }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags:
+ Name: "{{ resource_prefix }}-vpc"
+ Description: "Created by ansible-test"
+ state: present
+ register: vpc_subnet_result
+
+ - name: create a security group
+ ec2_group:
+ name: "{{ resource_prefix }}-sg"
+ description: "Created by {{ resource_prefix }}"
+ rules: []
+ state: present
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ register: vpc_sg_result
+
+ - name: Get a list of images
+ ec2_ami_info:
+ filters:
+ owner-alias: amazon
+ name: "amzn2-ami-minimal-hvm-*"
+ description: "Amazon Linux 2 AMI *"
+ register: images_info
+
+ - name: Set facts to simplify use of extra resources
+ set_fact:
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ vpc_subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ vpc_sg_id: "{{ vpc_sg_result.group_id }}"
+ image_id: "{{ images_info.images | sort(attribute='creation_date') | reverse | first | json_query('image_id') }}"
+
+ # ============================================================
+
+ - name: Create 2 instances to test attaching and detaching network interfaces
+ ec2_instance:
+ name: "{{ resource_prefix }}-eni-instance-{{ item }}"
+ image_id: "{{ image_id }}"
+ vpc_subnet_id: "{{ vpc_subnet_id }}"
+ instance_type: t2.micro
+ wait: false
+ security_group: "{{ vpc_sg_id }}"
+ network:
+ private_ip_address: '{{ ec2_ips[item] }}'
+ register: ec2_instances
+ loop:
+ - 0
+ - 1
+
+ # We only need these instances to be running
+ - name: set variables for the instance IDs
+ set_fact:
+ instance_id_1: "{{ ec2_instances.results[0].instance_ids[0] }}"
+ instance_id_2: "{{ ec2_instances.results[1].instance_ids[0] }}"
+
+ # ============================================================
+ - name: test basic creation of network interfaces
+ include_tasks: ./test_eni_basic_creation.yaml
+
+ - name: test assigning and unassigning secondary IP addresses
+ include_tasks: ./test_ipaddress_assign.yaml
+
+ - name: test attaching and detaching network interfaces
+ include_tasks: ./test_attachment.yaml
+
+ - name: test modifying source_dest_check
+ include_tasks: ./test_modifying_source_dest_check.yaml
+
+ - name: test modifying tags
+ include_tasks: ./test_modifying_tags.yaml
+
+ # Note: will delete *both* EC2 instances
+ - name: test modifying delete_on_termination
+ include_tasks: ./test_modifying_delete_on_termination.yaml
+
+ - name: test deleting ENIs
+ include_tasks: ./test_deletion.yaml
+
+ always:
+
+ # ============================================================
+ - name: remove the network interfaces
+ ec2_eni:
+ eni_id: "{{ item }}"
+ force_detach: True
+ state: absent
+ ignore_errors: true
+ retries: 5
+ loop:
+ - "{{ eni_id_1 | default(omit) }}"
+ - "{{ eni_id_2 | default(omit) }}"
+
+ - name: terminate the instances
+ ec2_instance:
+ state: absent
+ instance_ids:
+ - "{{ instance_id_1 }}"
+ - "{{ instance_id_2 }}"
+ wait: True
+ ignore_errors: true
+ retries: 5
+ when: instance_id_1 is defined and instance_id_2 is defined
+
+ - name: remove the security group
+ ec2_group:
+ name: "{{ resource_prefix }}-sg"
+ description: "{{ resource_prefix }}"
+ rules: []
+ state: absent
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ ignore_errors: true
+ retries: 5
+
+ - name: remove the subnet
+ ec2_vpc_subnet:
+ cidr: "{{ vpc_cidr }}"
+ az: "{{ availability_zone }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ state: absent
+ ignore_errors: true
+ retries: 5
+ when: vpc_subnet_result is defined
+
+ - name: remove the VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ cidr_block: "{{ vpc_cidr }}"
+ state: absent
+ ignore_errors: true
+ retries: 5
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_attachment.yaml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_attachment.yaml
new file mode 100644
index 00000000..bb0e1336
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_attachment.yaml
@@ -0,0 +1,204 @@
+ # ============================================================
+- name: attach the network interface to instance 1
+ ec2_eni:
+ instance_id: "{{ instance_id_1 }}"
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ state: present
+ attached: True
+ register: result
+- ec2_eni_info:
+ eni_id: '{{ eni_id_1 }}'
+ register: eni_info
+
+- assert:
+ that:
+ - result.changed
+ - result.interface.attachment is defined
+ - result.interface.attachment is mapping
+ - result.interface.attachment.instance_id == instance_id_1
+ - _interface_0.attachment is defined
+ - _interface_0.attachment is mapping
+ - '"attach_time" in _interface_0.attachment'
+ - _interface_0.attachment.attach_time is string
+ - '"attachment_id" in _interface_0.attachment'
+ - _interface_0.attachment.attachment_id.startswith("eni-attach-")
+ - '"delete_on_termination" in _interface_0.attachment'
+ - _interface_0.attachment.delete_on_termination == False
+ - '"device_index" in _interface_0.attachment'
+ - _interface_0.attachment.device_index == 1
+ - '"instance_id" in _interface_0.attachment'
+ - _interface_0.attachment.instance_id == instance_id_1
+ - '"instance_owner_id" in _interface_0.attachment'
+ - _interface_0.attachment.instance_owner_id is string
+ - '"status" in _interface_0.attachment'
+ - _interface_0.attachment.status == "attached"
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+- name: verify the eni is attached
+ ec2_eni:
+ instance_id: "{{ instance_id_1 }}"
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ state: present
+ attached: True
+ register: result
+- ec2_eni_info:
+ eni_id: '{{ eni_id_1 }}'
+ register: eni_info
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface.attachment is defined
+ - result.interface.attachment.instance_id == instance_id_1
+ - _interface_0.attachment is defined
+ - _interface_0.attachment is mapping
+ - '"attach_time" in _interface_0.attachment'
+ - _interface_0.attachment.attach_time is string
+ - '"attachment_id" in _interface_0.attachment'
+ - _interface_0.attachment.attachment_id.startswith("eni-attach-")
+ - '"delete_on_termination" in _interface_0.attachment'
+ - _interface_0.attachment.delete_on_termination == False
+ - '"device_index" in _interface_0.attachment'
+ - _interface_0.attachment.device_index == 1
+ - '"instance_id" in _interface_0.attachment'
+ - _interface_0.attachment.instance_id == instance_id_1
+ - '"instance_owner_id" in _interface_0.attachment'
+ - _interface_0.attachment.instance_owner_id is string
+ - '"status" in _interface_0.attachment'
+ - _interface_0.attachment.status == "attached"
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+ # ============================================================
+- name: test attaching the network interface to a different instance
+ ec2_eni:
+ instance_id: "{{ instance_id_2 }}"
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ state: present
+ attached: True
+ register: result
+- ec2_eni_info:
+ eni_id: '{{ eni_id_1 }}'
+ register: eni_info
+
+- assert:
+ that:
+ - result.changed
+ - result.interface.attachment is defined
+ - result.interface.attachment.instance_id == instance_id_2
+ - _interface_0.attachment is defined
+ - '"instance_id" in _interface_0.attachment'
+ - _interface_0.attachment.instance_id == instance_id_2
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+ # ============================================================
+- name: detach the network interface
+ ec2_eni:
+ instance_id: "{{ instance_id_2 }}"
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ state: present
+ attached: False
+ register: result
+- ec2_eni_info:
+ eni_id: '{{ eni_id_1 }}'
+ register: eni_info
+
+- assert:
+ that:
+ - result.changed
+ - result.interface.attachment is undefined
+ - _interface_0.attachment is undefined
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+- name: verify the network interface was detached
+ ec2_eni:
+ instance_id: "{{ instance_id_2 }}"
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ state: present
+ attached: False
+ register: result
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface.attachment is undefined
+
+ # ============================================================
+- name: reattach the network interface to test deleting it
+ ec2_eni:
+ instance_id: "{{ instance_id_2 }}"
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ state: present
+ attached: True
+ register: result
+
+- assert:
+ that:
+ - result.changed
+ - result.interface.attachment is defined
+ - result.interface.attachment.instance_id == instance_id_2
+
+- name: test that deleting the network interface fails while it is attached (without force_detach)
+ ec2_eni:
+ eni_id: "{{ eni_id_1 }}"
+ state: absent
+ register: result
+ ignore_errors: True
+
+- assert:
+ that:
+ - result.failed
+ - '"currently in use" in result.msg'
+
+# ============================================================
+- name: delete an attached network interface with force_detach
+ ec2_eni:
+ force_detach: True
+ eni_id: "{{ eni_id_1 }}"
+ state: absent
+ register: result
+ ignore_errors: True
+
+- assert:
+ that:
+ - result.changed
+ - result.interface.attachment is undefined
+
+- name: test removing a network interface that does not exist
+ ec2_eni:
+ force_detach: True
+ eni_id: "{{ eni_id_1 }}"
+ state: absent
+ register: result
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface.attachment is undefined
+
+# ============================================================
+- name: recreate the network interface
+ ec2_eni:
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ register: result
+
+- set_fact:
+ eni_id_1: "{{ result.interface.id }}"
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_deletion.yaml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_deletion.yaml
new file mode 100644
index 00000000..aecb625e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_deletion.yaml
@@ -0,0 +1,92 @@
+---
+# ============================================================
+- name: test deleting the unattached network interface by using the ID
+ ec2_eni:
+ eni_id: "{{ eni_id_1 }}"
+ name: "{{ resource_prefix }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: absent
+ register: result
+- ec2_eni_info:
+ eni_id: "{{ eni_id_1 }}"
+ register: eni_info
+
+- assert:
+ that:
+ - result.changed
+ - result.interface is undefined
+ - '"network_interfaces" in eni_info'
+ - eni_id_1 not in ( eni_info | community.general.json_query("network_interfaces[].id") | list )
+
+- name: test removing the network interface by ID is idempotent
+ ec2_eni:
+ eni_id: "{{ eni_id_1 }}"
+ name: "{{ resource_prefix }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: absent
+ register: result
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface is undefined
+
+# ============================================================
+- name: add a name tag to the other network interface before deleting it
+ ec2_eni:
+ eni_id: "{{ eni_id_2 }}"
+ name: "{{ resource_prefix }}"
+ state: present
+
+- name: test deleting the unattached network interface by using the name
+ ec2_eni:
+ name: "{{ resource_prefix }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: absent
+ register: result
+- ec2_eni_info:
+ eni_id: "{{ eni_id_2 }}"
+ register: eni_info
+
+- assert:
+ that:
+ - result.changed
+ - result.interface is undefined
+ - '"network_interfaces" in eni_info'
+ - eni_id_2 not in ( eni_info | community.general.json_query("network_interfaces[].id") | list )
+
+- name: test removing the network interface by name is idempotent
+ ec2_eni:
+ name: "{{ resource_prefix }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: absent
+ register: result
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface is undefined
+
+- name: verify that the network interface ID does not exist (retry-delete by ID)
+ ec2_eni:
+ eni_id: "{{ eni_id_2 }}"
+ state: absent
+ register: result
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface is undefined
+
+# ============================================================
+
+- name: Fetch ENI info without filter
+ ec2_eni_info:
+ register: eni_info
+
+- name: Assert that ec2_eni_info doesn't contain the two interfaces we just deleted
+ assert:
+ that:
+ - '"network_interfaces" in eni_info'
+ - eni_id_1 not in ( eni_info | community.general.json_query("network_interfaces[].id") | list )
+ - eni_id_2 not in ( eni_info | community.general.json_query("network_interfaces[].id") | list )
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_eni_basic_creation.yaml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_eni_basic_creation.yaml
new file mode 100644
index 00000000..b18af2dc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_eni_basic_creation.yaml
@@ -0,0 +1,219 @@
+---
+# ============================================================
+- name: create a network interface
+ ec2_eni:
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ register: result
+
+- assert:
+ that:
+ - result.changed
+ - result.interface.private_ip_addresses | length == 1
+
+- set_fact:
+ eni_id_1: "{{ result.interface.id }}"
+
+- name: Fetch ENI info (by ID)
+ ec2_eni_info:
+ eni_id: '{{ eni_id_1 }}'
+ register: eni_info
+
+- name: Assert that ec2_eni_info returns all the values we expect
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+ assert:
+ that:
+ - '"network_interfaces" in eni_info'
+ - eni_info.network_interfaces | length == 1
+ - '"association" not in _interface_0'
+ - '"attachment" not in _interface_0'
+ - '"availability_zone" in _interface_0'
+ - _interface_0.availability_zone.startswith(aws_region)
+ - '"description" in _interface_0'
+ - _interface_0.description == ""
+ - '"groups" in _interface_0'
+ - _interface_0.groups is iterable
+ - _interface_0.groups | length == 1
+ - '"id" in _interface_0'
+ - _interface_0.id.startswith("eni-")
+ - _interface_0.id == eni_id_1
+ - '"interface_type" in _interface_0'
+ - _interface_0.owner_id is string
+ - '"ipv6_addresses" in _interface_0'
+ - _interface_0.ipv6_addresses is iterable
+ - _interface_0.ipv6_addresses | length == 0
+ - '"mac_address" in _interface_0'
+ - _interface_0.owner_id is string
+ - _interface_0.mac_address | length == 17
+ - '"network_interface_id" in _interface_0'
+ - _interface_0.network_interface_id.startswith("eni-")
+ - _interface_0.network_interface_id == eni_id_1
+ - '"owner_id" in _interface_0'
+ - _interface_0.owner_id is string
+ - '"private_dns_name" in _interface_0'
+ - _interface_0.private_dns_name is string
+ - _interface_0.private_dns_name.endswith("ec2.internal")
+ - '"private_ip_address" in _interface_0'
+ - _interface_0.private_ip_address | ipaddr()
+ - _interface_0.private_ip_address == ip_1
+ - '"private_ip_addresses" in _interface_0'
+ - _interface_0.private_ip_addresses | length == 1
+ - ip_1 in ( eni_info | community.general.json_query("network_interfaces[].private_ip_addresses[].private_ip_address") | list )
+ - '"requester_id" in _interface_0'
+ - _interface_0.requester_id is string
+ - '"requester_managed" in _interface_0'
+ - _interface_0.requester_managed == False
+ - '"source_dest_check" in _interface_0'
+ - _interface_0.source_dest_check == True
+ - '"status" in _interface_0'
+ - _interface_0.status == "available"
+ - '"subnet_id" in _interface_0'
+ - _interface_0.subnet_id == vpc_subnet_id
+ - '"tag_set" in _interface_0'
+ - _interface_0.tag_set is mapping
+ - '"vpc_id" in _interface_0'
+ - _interface_0.vpc_id == vpc_id
+
+- name: test idempotence by using the same private_ip_address
+ ec2_eni:
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ register: result
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface.id == eni_id_1
+ - result.interface.private_ip_addresses | length == 1
+
+# ============================================================
+
+- name: create a second network interface to test IP reassignment
+ ec2_eni:
+ device_index: 1
+ private_ip_address: "{{ ip_5 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ register: result
+
+- assert:
+ that:
+ - result.changed
+ - result.interface.id != eni_id_1
+
+- name: save the second network interface ID for cleanup
+ set_fact:
+ eni_id_2: "{{ result.interface.id }}"
+
+- name: Fetch ENI info (using filter)
+ ec2_eni_info:
+ filters:
+ network-interface-id: '{{ eni_id_2 }}'
+ register: eni_info
+
+- name: Assert that ec2_eni_info returns all the values we expect
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+ assert:
+ that:
+ - '"network_interfaces" in eni_info'
+ - eni_info.network_interfaces | length == 1
+ - '"association" not in _interface_0'
+ - '"attachment" not in _interface_0'
+ - '"availability_zone" in _interface_0'
+ - _interface_0.availability_zone.startswith(aws_region)
+ - '"description" in _interface_0'
+ - _interface_0.description == ""
+ - '"groups" in _interface_0'
+ - _interface_0.groups is iterable
+ - _interface_0.groups | length == 1
+ - '"id" in _interface_0'
+ - _interface_0.id.startswith("eni-")
+ - _interface_0.id == eni_id_2
+ - '"interface_type" in _interface_0'
+ - _interface_0.owner_id is string
+ - '"ipv6_addresses" in _interface_0'
+ - _interface_0.ipv6_addresses is iterable
+ - _interface_0.ipv6_addresses | length == 0
+ - '"mac_address" in _interface_0'
+ - _interface_0.owner_id is string
+ - _interface_0.mac_address | length == 17
+ - '"network_interface_id" in _interface_0'
+ - _interface_0.network_interface_id.startswith("eni-")
+ - _interface_0.network_interface_id == eni_id_2
+ - '"owner_id" in _interface_0'
+ - _interface_0.owner_id is string
+ - '"private_dns_name" in _interface_0'
+ - _interface_0.private_dns_name is string
+ - _interface_0.private_dns_name.endswith("ec2.internal")
+ - '"private_ip_address" in _interface_0'
+ - _interface_0.private_ip_address | ipaddr()
+ - _interface_0.private_ip_address == ip_5
+ - '"private_ip_addresses" in _interface_0'
+ - _interface_0.private_ip_addresses | length == 1
+ - ip_5 in ( eni_info | community.general.json_query("network_interfaces[].private_ip_addresses[].private_ip_address") | list )
+ - '"requester_id" in _interface_0'
+ - _interface_0.requester_id is string
+ - '"requester_managed" in _interface_0'
+ - _interface_0.requester_managed == False
+ - '"source_dest_check" in _interface_0'
+ - _interface_0.source_dest_check == True
+ - '"status" in _interface_0'
+ - _interface_0.status == "available"
+ - '"subnet_id" in _interface_0'
+ - _interface_0.subnet_id == vpc_subnet_id
+ - '"tag_set" in _interface_0'
+ - _interface_0.tag_set is mapping
+ - '"vpc_id" in _interface_0'
+ - _interface_0.vpc_id == vpc_id
+
+- name: Fetch ENI info without filter
+ ec2_eni_info:
+ register: eni_info
+
+- name: Assert that ec2_eni_info contains at least the two interfaces we expect
+ assert:
+ that:
+ - '"network_interfaces" in eni_info'
+ - eni_info.network_interfaces | length >= 2
+ - eni_id_1 in ( eni_info | community.general.json_query("network_interfaces[].id") | list )
+ - eni_id_2 in ( eni_info | community.general.json_query("network_interfaces[].id") | list )
+
+# ============================================================
+# Run some VPC filter based tests of ec2_eni_info
+
+- name: Fetch ENI info with VPC filters - Available
+ ec2_eni_info:
+ filters:
+ vpc-id: '{{ vpc_id }}'
+ status: 'available'
+ register: eni_info
+
+- name: Assert that ec2_eni_info returns exactly the two available interfaces we expect
+ assert:
+ that:
+ - '"network_interfaces" in eni_info'
+ - eni_info.network_interfaces | length == 2
+ - eni_id_1 in ( eni_info | community.general.json_query("network_interfaces[].id") | list )
+ - eni_id_2 in ( eni_info | community.general.json_query("network_interfaces[].id") | list )
+
+- name: Fetch ENI info with VPC filters - VPC
+ ec2_eni_info:
+ filters:
+ vpc-id: '{{ vpc_id }}'
+ register: eni_info
+
+- name: Assert that ec2_eni_info returns all four interfaces in the VPC (our two ENIs plus the instance ENIs)
+ assert:
+ that:
+ - '"network_interfaces" in eni_info'
+ - eni_info.network_interfaces | length == 4
+ - eni_id_1 in ( eni_info | community.general.json_query("network_interfaces[].id") | list )
+ - eni_id_2 in ( eni_info | community.general.json_query("network_interfaces[].id") | list )
+ - ec2_ips[0] in ( eni_info | community.general.json_query("network_interfaces[].private_ip_addresses[].private_ip_address") | list )
+ - ec2_ips[1] in ( eni_info | community.general.json_query("network_interfaces[].private_ip_addresses[].private_ip_address") | list )
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_ipaddress_assign.yaml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_ipaddress_assign.yaml
new file mode 100644
index 00000000..a0a3696e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_ipaddress_assign.yaml
@@ -0,0 +1,267 @@
+---
+# ============================================================
+- name: add two implicit secondary IPs
+ ec2_eni:
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ secondary_private_ip_address_count: 2
+ register: result
+- ec2_eni_info:
+ eni_id: '{{ eni_id_1 }}'
+ register: eni_info
+
+- assert:
+ that:
+ - result.changed
+ - result.interface.id == eni_id_1
+ - result.interface.private_ip_addresses | length == 3
+ - _interface_0.private_ip_addresses | length == 3
+ - ip_1 in ( eni_info | community.general.json_query("network_interfaces[].private_ip_addresses[].private_ip_address") | list )
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+- name: test idempotence with two implicit secondary IPs
+ ec2_eni:
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ secondary_private_ip_address_count: 2
+ register: result
+- ec2_eni_info:
+ eni_id: '{{ eni_id_1 }}'
+ register: eni_info
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface.id == eni_id_1
+ - result.interface.private_ip_addresses | length == 3
+ - _interface_0.private_ip_addresses | length == 3
+ - ip_1 in ( eni_info | community.general.json_query("network_interfaces[].private_ip_addresses[].private_ip_address") | list )
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+# ============================================================
+- name: ensure secondary addresses are only removed if purge is set to true
+ ec2_eni:
+ purge_secondary_private_ip_addresses: false
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ secondary_private_ip_addresses: []
+ register: result
+- ec2_eni_info:
+ eni_id: '{{ eni_id_1 }}'
+ register: eni_info
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface.id == eni_id_1
+ - result.interface.private_ip_addresses | length == 3
+ - _interface_0.private_ip_addresses | length == 3
+ - ip_1 in ( eni_info | community.general.json_query("network_interfaces[].private_ip_addresses[].private_ip_address") | list )
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+# ============================================================
+
+# Using secondary_private_ip_address_count leads to unpredictable IP assignment
+# For the following test, first find an IP that has not been used yet
+
+- name: save the list of private IPs in use
+ set_fact:
+ current_private_ips: "{{ result.interface | json_query('private_ip_addresses[*].private_ip_address') | list }}"
+
+- name: set new_secondary_ip to an IP that has not been used
+ set_fact:
+ new_secondary_ip: "{{ [ip_2, ip_3, ip_4] | difference(current_private_ips) | first }}"
+
+- name: add an explicit secondary address without purging the ones added implicitly
+ ec2_eni:
+ purge_secondary_private_ip_addresses: false
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ secondary_private_ip_addresses:
+ - "{{ new_secondary_ip }}"
+ register: result
+- ec2_eni_info:
+ eni_id: '{{ eni_id_1 }}'
+ register: eni_info
+
+- assert:
+ that:
+ - result.changed
+ - result.interface.id == eni_id_1
+ - result.interface.private_ip_addresses | length == 4
+ - _interface_0.private_ip_addresses | length == 4
+ # Only ip_1 and the explicitly requested IP are guaranteed to be present
+ - ip_1 in _private_ips
+ - new_secondary_ip in _private_ips
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+ _private_ips: '{{ eni_info | community.general.json_query("network_interfaces[].private_ip_addresses[].private_ip_address") | list }}'
+
+# ============================================================
+- name: remove secondary address
+ ec2_eni:
+ purge_secondary_private_ip_addresses: true
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ secondary_private_ip_addresses: []
+ register: result
+- ec2_eni_info:
+ eni_id: '{{ eni_id_1 }}'
+ register: eni_info
+
+- assert:
+ that:
+ - result.changed
+ - result.interface.id == eni_id_1
+ - result.interface.private_ip_addresses | length == 1
+ - _interface_0.private_ip_addresses | length == 1
+ - ip_1 in ( eni_info | community.general.json_query("network_interfaces[].private_ip_addresses[].private_ip_address") | list )
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+- name: test idempotent behavior purging secondary addresses
+ ec2_eni:
+ purge_secondary_private_ip_addresses: true
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ secondary_private_ip_addresses: []
+ register: result
+- ec2_eni_info:
+ eni_id: '{{ eni_id_1 }}'
+ register: eni_info
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface.id == eni_id_1
+ - result.interface.private_ip_addresses | length == 1
+ - _interface_0.private_ip_addresses | length == 1
+ - ip_1 in ( eni_info | community.general.json_query("network_interfaces[].private_ip_addresses[].private_ip_address") | list )
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+# ============================================================
+
+- name: Assign secondary IP address to second ENI
+ ec2_eni:
+ device_index: 1
+ private_ip_address: "{{ ip_5 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ secondary_private_ip_addresses:
+ - "{{ ip_4 }}"
+ register: result
+- ec2_eni_info:
+ eni_id: '{{ eni_id_2 }}'
+ register: eni_info
+
+- assert:
+ that:
+ - result.changed
+ - result.interface.id == eni_id_2
+ - result.interface.private_ip_addresses | length == 2
+ - _interface_0.private_ip_addresses | length == 2
+ - ip_5 in ( eni_info | community.general.json_query("network_interfaces[].private_ip_addresses[].private_ip_address") | list )
+ - ip_4 in ( eni_info | community.general.json_query("network_interfaces[].private_ip_addresses[].private_ip_address") | list )
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+- name: test that reassignment of an IP already in use fails when not explicitly allowed (default for allow_reassignment == False)
+ ec2_eni:
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ secondary_private_ip_addresses:
+ - "{{ ip_2 }}"
+ - "{{ ip_3 }}"
+ - "{{ ip_4 }}"
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result.failed
+ - '"move is not allowed" in result.msg'
+
+# ============================================================
+- name: allow reassignment to add the list of secondary addresses
+ ec2_eni:
+ allow_reassignment: true
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ secondary_private_ip_addresses:
+ - "{{ ip_2 }}"
+ - "{{ ip_3 }}"
+ - "{{ ip_4 }}"
+ register: result
+
+- assert:
+ that:
+ - result.changed
+ - result.interface.id == eni_id_1
+ - result.interface.private_ip_addresses | length == 4
+
+- name: test reassignment is idempotent
+ ec2_eni:
+ allow_reassignment: true
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ secondary_private_ip_addresses:
+ - "{{ ip_2 }}"
+ - "{{ ip_3 }}"
+ - "{{ ip_4 }}"
+ register: result
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface.id == eni_id_1
+
+# ============================================================
+
+- name: purge all the secondary addresses
+ ec2_eni:
+ purge_secondary_private_ip_addresses: true
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ secondary_private_ip_addresses: []
+ register: result
+- ec2_eni_info:
+ eni_id: '{{ eni_id_1 }}'
+ register: eni_info
+ until: _interface_0.private_ip_addresses | length == 1
+ retries: 5
+ delay: 2
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+- assert:
+ that:
+ - result.changed
+ - _interface_0.private_ip_addresses | length == 1
+ - ip_1 in ( eni_info | community.general.json_query("network_interfaces[].private_ip_addresses[].private_ip_address") | list )
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_delete_on_termination.yaml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_delete_on_termination.yaml
new file mode 100644
index 00000000..8e8bd059
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_delete_on_termination.yaml
@@ -0,0 +1,166 @@
+# ============================================================
+
+- name: ensure delete_on_termination defaults to False
+ ec2_eni:
+ instance_id: "{{ instance_id_2 }}"
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ state: present
+ attached: True
+ register: result
+- ec2_eni_info:
+ eni_id: "{{ eni_id_1 }}"
+ register: eni_info
+
+- assert:
+ that:
+ - result is successful
+ - result.interface.attachment.delete_on_termination == false
+ - _interface_0.attachment.delete_on_termination == False
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+# ============================================================
+
+- name: enable delete_on_termination
+ ec2_eni:
+ instance_id: "{{ instance_id_2 }}"
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ state: present
+ attached: True
+ delete_on_termination: True
+ register: result
+- ec2_eni_info:
+ eni_id: "{{ eni_id_1 }}"
+ register: eni_info
+
+- assert:
+ that:
+ - result.changed
+ - result.interface.attachment.delete_on_termination == true
+ - _interface_0.attachment.delete_on_termination == True
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+- name: test idempotent behavior enabling delete_on_termination
+ ec2_eni:
+ instance_id: "{{ instance_id_2 }}"
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ state: present
+ attached: True
+ delete_on_termination: True
+ register: result
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface.attachment.delete_on_termination == true
+
+# ============================================================
+
+- name: disable delete_on_termination
+ ec2_eni:
+ instance_id: "{{ instance_id_2 }}"
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ state: present
+ attached: True
+ delete_on_termination: False
+ register: result
+- ec2_eni_info:
+ eni_id: "{{ eni_id_1 }}"
+ register: eni_info
+
+- assert:
+ that:
+ - result.changed
+ - result.interface.attachment.delete_on_termination == false
+ - _interface_0.attachment.delete_on_termination == False
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+# ============================================================
+
+- name: terminate the instance to make sure the attached ENI remains
+ ec2_instance:
+ state: absent
+ instance_ids:
+ - "{{ instance_id_2 }}"
+ wait: True
+
+- name: verify the eni still exists
+ ec2_eni:
+ eni_id: "{{ eni_id_1 }}"
+ state: present
+ register: result
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface.id == eni_id_1
+ - result.interface.attachment is undefined
+
+# ============================================================
+
+- name: ensure the network interface is attached
+ ec2_eni:
+ instance_id: "{{ instance_id_1 }}"
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ attached: True
+ register: result
+
+- name: ensure delete_on_termination is true
+ ec2_eni:
+ instance_id: "{{ instance_id_1 }}"
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ attached: True
+ delete_on_termination: True
+ register: result
+
+- name: test terminating the instance after setting delete_on_termination to true
+ ec2_instance:
+ state: absent
+ instance_ids:
+ - "{{ instance_id_1 }}"
+ wait: True
+
+- name: verify the eni was also removed
+ ec2_eni:
+ eni_id: "{{ eni_id_1 }}"
+ state: absent
+ register: result
+- ec2_eni_info:
+ register: eni_info
+
+- assert:
+ that:
+ - not result.changed
+ - '"network_interfaces" in eni_info'
+ - eni_info.network_interfaces | length >= 1
+ - eni_id_1 not in ( eni_info | community.general.json_query("network_interfaces[].id") | list )
+ - eni_id_2 in ( eni_info | community.general.json_query("network_interfaces[].id") | list )
+
+# ============================================================
+
+- name: recreate the network interface
+ ec2_eni:
+ device_index: 1
+ private_ip_address: "{{ ip_1 }}"
+ subnet_id: "{{ vpc_subnet_id }}"
+ state: present
+ register: result
+
+- set_fact:
+ eni_id_1: "{{ result.interface.id }}"
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_source_dest_check.yaml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_source_dest_check.yaml
new file mode 100644
index 00000000..3ba6c257
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_source_dest_check.yaml
@@ -0,0 +1,74 @@
+ # ============================================================
+- name: test source_dest_check defaults to true
+ ec2_eni:
+ eni_id: "{{ eni_id_1 }}"
+ source_dest_check: true
+ state: present
+ register: result
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface.source_dest_check == true
+
+ # ============================================================
+- name: disable source_dest_check
+ ec2_eni:
+ eni_id: "{{ eni_id_1 }}"
+ source_dest_check: false
+ state: present
+ register: result
+
+- name: Check source_dest_check state
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+ ec2_eni_info:
+ eni_id: "{{ eni_id_1 }}"
+ register: eni_info
+ until: _interface_0.source_dest_check == False
+ retries: 5
+ delay: 2
+
+- assert:
+ that:
+ - result.changed
+ - _interface_0.source_dest_check == False
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+- name: test idempotence disabling source_dest_check
+ ec2_eni:
+ eni_id: "{{ eni_id_1 }}"
+ source_dest_check: false
+ state: present
+ register: result
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface.source_dest_check == false
+
+ # ============================================================
+- name: enable source_dest_check
+ ec2_eni:
+ eni_id: "{{ eni_id_1 }}"
+ source_dest_check: true
+ state: present
+ register: result
+
+- name: Check source_dest_check state
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+ ec2_eni_info:
+ eni_id: "{{ eni_id_1 }}"
+ register: eni_info
+ until: _interface_0.source_dest_check == True
+ retries: 5
+ delay: 2
+
+- assert:
+ that:
+ - result.changed
+ - _interface_0.source_dest_check == True
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_tags.yaml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_tags.yaml
new file mode 100644
index 00000000..8164f869
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_tags.yaml
@@ -0,0 +1,213 @@
+ # ============================================================
+- name: verify there are no tags associated with the network interface
+ ec2_eni:
+ eni_id: "{{ eni_id_1 }}"
+ state: present
+ tags: {}
+ register: result
+
+- assert:
+ that:
+ - not result.changed
+ - not result.interface.tags
+ - result.interface.name is undefined
+
+ # ============================================================
+- name: add tags to the network interface
+ ec2_eni:
+ eni_id: "{{ eni_id_1 }}"
+ state: present
+ name: "{{ resource_prefix }}"
+ tags:
+ CreatedBy: "{{ resource_prefix }}"
+ register: result
+- ec2_eni_info:
+ eni_id: "{{ eni_id_1 }}"
+ register: eni_info
+
+- assert:
+ that:
+ - result.changed
+ - result.interface.id == eni_id_1
+ - result.interface.tags | length == 2
+ - result.interface.tags.CreatedBy == resource_prefix
+ - result.interface.tags.Name == resource_prefix
+ - result.interface.name == resource_prefix
+ - _interface_0.tags | length == 2
+ - _interface_0.tags.CreatedBy == resource_prefix
+ - _interface_0.tags.Name == resource_prefix
+ - _interface_0.tag_set | length == 2
+ - _interface_0.tag_set.CreatedBy == resource_prefix
+ - _interface_0.tag_set.Name == resource_prefix
+ - _interface_0.name == resource_prefix
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+ # ============================================================
+- name: test idempotence by using the Name tag and the subnet
+ ec2_eni:
+ name: "{{ resource_prefix }}"
+ state: present
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ register: result
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface.id == eni_id_1
+
+ # ============================================================
+- name: test tags are not purged if tags are null even if name is provided
+ ec2_eni:
+ name: "{{ resource_prefix }}"
+ state: present
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ register: result
+- ec2_eni_info:
+ eni_id: "{{ eni_id_1 }}"
+ register: eni_info
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface.id == eni_id_1
+ - result.interface.tags | length == 2
+ - result.interface.tags.CreatedBy == resource_prefix
+ - result.interface.tags.Name == resource_prefix
+ - result.interface.name == resource_prefix
+ - _interface_0.tag_set | length == 2
+ - _interface_0.tag_set.CreatedBy == resource_prefix
+ - _interface_0.tag_set.Name == resource_prefix
+ - _interface_0.name == resource_prefix
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+ # ============================================================
+- name: test setting purge tags to false
+ ec2_eni:
+ eni_id: "{{ eni_id_1 }}"
+ state: present
+ purge_tags: false
+ tags: {}
+ register: result
+- ec2_eni_info:
+ eni_id: "{{ eni_id_1 }}"
+ register: eni_info
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface.tags | length == 2
+ - result.interface.tags.CreatedBy == resource_prefix
+ - result.interface.tags.Name == resource_prefix
+ - result.interface.name == resource_prefix
+ - _interface_0.tag_set | length == 2
+ - _interface_0.tag_set.CreatedBy == resource_prefix
+ - _interface_0.tag_set.Name == resource_prefix
+ - _interface_0.name == resource_prefix
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+ # ============================================================
+- name: test adding a new tag without removing any others
+ ec2_eni:
+ eni_id: "{{ eni_id_1 }}"
+ state: present
+ purge_tags: false
+ tags:
+ environment: test
+ register: result
+- ec2_eni_info:
+ eni_id: "{{ eni_id_1 }}"
+ register: eni_info
+
+- assert:
+ that:
+ - result.changed
+ - result.interface.tags | length == 3
+ - result.interface.tags.environment == 'test'
+ - result.interface.tags.CreatedBy == resource_prefix
+ - result.interface.tags.Name == resource_prefix
+ - result.interface.name == resource_prefix
+ - _interface_0.tag_set | length == 3
+ - _interface_0.tag_set.environment == 'test'
+ - _interface_0.tag_set.CreatedBy == resource_prefix
+ - _interface_0.tag_set.Name == resource_prefix
+ - _interface_0.name == resource_prefix
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+ # ============================================================
+- name: test purging tags and adding a new one
+ ec2_eni:
+ name: "{{ resource_prefix }}"
+ state: present
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ tags:
+ Description: "{{ resource_prefix }}"
+ register: result
+- ec2_eni_info:
+ eni_id: "{{ eni_id_1 }}"
+ register: eni_info
+
+- assert:
+ that:
+ - result.changed
+ - result.interface.id == eni_id_1
+ - result.interface.tags | length == 2
+ - result.interface.tags.Description == resource_prefix
+ - result.interface.tags.Name == resource_prefix
+ - result.interface.name == resource_prefix
+ - _interface_0.tag_set | length == 2
+ - _interface_0.tag_set.Description == resource_prefix
+ - _interface_0.tag_set.Name == resource_prefix
+ - _interface_0.name == resource_prefix
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+- name: test purging tags and adding a new one is idempotent
+ ec2_eni:
+ name: "{{ resource_prefix }}"
+ state: present
+ subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ tags:
+ Description: "{{ resource_prefix }}"
+ register: result
+- ec2_eni_info:
+ eni_id: "{{ eni_id_1 }}"
+ register: eni_info
+
+- assert:
+ that:
+ - not result.changed
+ - result.interface.id == eni_id_1
+ - result.interface.tags | length == 2
+ - result.interface.tags.Description == resource_prefix
+ - result.interface.tags.Name == resource_prefix
+ - result.interface.name == resource_prefix
+ - _interface_0.tag_set | length == 2
+ - _interface_0.tag_set.Description == resource_prefix
+ - _interface_0.tag_set.Name == resource_prefix
+ - _interface_0.name == resource_prefix
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
+
+ # ============================================================
+- name: test purging all tags
+ ec2_eni:
+ eni_id: "{{ eni_id_1 }}"
+ state: present
+ tags: {}
+ register: result
+- ec2_eni_info:
+ eni_id: "{{ eni_id_1 }}"
+ register: eni_info
+
+- assert:
+ that:
+ - result.changed
+ - not result.interface.tags
+ - result.interface.name is undefined
+ - _interface_0.tag_set | length == 0
+ vars:
+ _interface_0: '{{ eni_info.network_interfaces[0] }}'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/aliases b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/aliases
new file mode 100644
index 00000000..ee717e99
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+shippable/aws/group2
+ec2_group_info
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/defaults/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/defaults/main.yml
new file mode 100644
index 00000000..f17a67a5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/defaults/main.yml
@@ -0,0 +1,7 @@
+---
+# defaults file for test_ec2_group
+ec2_group_name: '{{resource_prefix}}'
+ec2_group_description: 'Created by ansible integration tests'
+
+vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/16'
+subnet_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.1.0/24'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/meta/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/meta/main.yml
new file mode 100644
index 00000000..1f64f116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/credential_tests.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/credential_tests.yml
new file mode 100644
index 00000000..1957eaae
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/credential_tests.yml
@@ -0,0 +1,161 @@
+---
+# A Note about ec2 environment variable name preference:
+# - EC2_URL -> AWS_URL
+# - EC2_ACCESS_KEY -> AWS_ACCESS_KEY_ID -> AWS_ACCESS_KEY
+# - EC2_SECRET_KEY -> AWS_SECRET_ACCESS_KEY -> AWS_SECRET_KEY
+# - EC2_REGION -> AWS_REGION
+#
+
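+# A brief sketch (hypothetical values, not exercised by the tasks below): each line of
+# the note above lists alternative names for the same setting, so a task can export
+# either spelling through its environment, for example:
+#
+#   environment:
+#     EC2_REGION: us-east-1         # same setting as AWS_REGION
+#     EC2_ACCESS_KEY: bogus_key     # same setting as AWS_ACCESS_KEY_ID / AWS_ACCESS_KEY
+#     EC2_SECRET_KEY: bogus_secret  # same setting as AWS_SECRET_ACCESS_KEY / AWS_SECRET_KEY
+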
+# - include: ../../setup_ec2/tasks/common.yml module_name: ec2_group
+
+- block:
+ # ============================================================
+ - name: test failure with no parameters
+ ec2_group:
+ register: result
+ ignore_errors: true
+
+ - name: assert failure with no parameters
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg == "one of the following is required: name, group_id"'
+
+ # ============================================================
+ - name: test failure with only name
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ register: result
+ ignore_errors: true
+
+ - name: assert failure with only name
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg == "Must provide description when state is present."'
+
+ # ============================================================
+ - name: test failure with only description
+ ec2_group:
+ description: '{{ec2_group_description}}'
+ register: result
+ ignore_errors: true
+
+ - name: assert failure with only description
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg == "one of the following is required: name, group_id"'
+
+ # ============================================================
+ - name: test failure with empty description (AWS API requires non-empty string desc)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: ''
+ region: '{{ec2_region}}'
+ register: result
+ ignore_errors: true
+
+ - name: assert failure with empty description
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg == "Must provide description when state is present."'
+
+ # ============================================================
+ - name: test valid region parameter
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ region: '{{ec2_region}}'
+ register: result
+ ignore_errors: true
+
+ - name: assert valid region parameter
+ assert:
+ that:
+ - 'result.failed'
+ - '"Unable to locate credentials" in result.msg'
+
+ # ============================================================
+ - name: test environment variable EC2_REGION
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ environment:
+ EC2_REGION: '{{ec2_region}}'
+ register: result
+ ignore_errors: true
+
+ - name: assert environment variable EC2_REGION
+ assert:
+ that:
+ - 'result.failed'
+ - '"Unable to locate credentials" in result.msg'
+
+ # ============================================================
+ - name: test invalid ec2_url parameter
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ environment:
+ EC2_URL: bogus.example.com
+ register: result
+ ignore_errors: true
+
+ - name: assert invalid ec2_url parameter
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg.startswith("The ec2_group module requires a region")'
+
+ # ============================================================
+ - name: test valid ec2_url parameter
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ environment:
+ EC2_URL: '{{ec2_url}}'
+ register: result
+ ignore_errors: true
+
+ - name: assert valid ec2_url parameter
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg.startswith("The ec2_group module requires a region")'
+
+ # ============================================================
+ - name: test credentials from environment
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ environment:
+ EC2_REGION: '{{ec2_region}}'
+ EC2_ACCESS_KEY: bogus_access_key
+ EC2_SECRET_KEY: bogus_secret_key
+ register: result
+ ignore_errors: true
+
+    - name: assert credentials from environment
+ assert:
+ that:
+ - 'result.failed'
+ - '"validate the provided access credentials" in result.msg'
+
+ # ============================================================
+ - name: test credential parameters
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ ec2_region: '{{ec2_region}}'
+ ec2_access_key: 'bogus_access_key'
+ ec2_secret_key: 'bogus_secret_key'
+ register: result
+ ignore_errors: true
+
+ - name: assert credential parameters
+ assert:
+ that:
+ - 'result.failed'
+ - '"validate the provided access credentials" in result.msg'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/data_validation.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/data_validation.yml
new file mode 100644
index 00000000..c461287d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/data_validation.yml
@@ -0,0 +1,33 @@
+---
+- block:
+ - name: Create a group with only the default rule
+ ec2_group:
+ name: '{{ec2_group_name}}-input-tests'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ec2_group_description}}'
+
+ - name: Run through some common weird port specs
+ ec2_group:
+ name: '{{ec2_group_name}}-input-tests'
+ description: '{{ec2_group_description}}'
+ rules:
+ - "{{ item }}"
+ with_items:
+ - proto: tcp
+ from_port: "8182"
+ to_port: 8182
+ cidr_ipv6: "fc00:ff9b::/96"
+ rule_desc: Mixed string and non-string ports
+ - proto: tcp
+ ports:
+ - "9000"
+ - 9001
+ - 9002-9005
+ cidr_ip: "10.2.3.0/24"
+ always:
+ - name: tidy up input testing group
+ ec2_group:
+ name: '{{ec2_group_name}}-input-tests'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ state: absent
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/diff_mode.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/diff_mode.yml
new file mode 100644
index 00000000..e687bad2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/diff_mode.yml
@@ -0,0 +1,167 @@
+---
+ # ============================================================
+
+ - name: create a group with a rule (CHECK MODE + DIFF)
+ ec2_group:
+ name: '{{ ec2_group_name }}'
+ description: '{{ ec2_group_description }}'
+ state: present
+ rules:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ rules_egress:
+ - proto: all
+ cidr_ip: 0.0.0.0/0
+ register: check_mode_result
+ check_mode: true
+ diff: true
+
+ - assert:
+ that:
+ - check_mode_result.changed
+
+ - name: create a group with a rule (DIFF)
+ ec2_group:
+ name: '{{ ec2_group_name }}'
+ description: '{{ ec2_group_description }}'
+ state: present
+ rules:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ rules_egress:
+ - proto: all
+ cidr_ip: 0.0.0.0/0
+ register: result
+ diff: true
+
+ - assert:
+ that:
+ - result.changed
+ - result.diff.0.after.ip_permissions == check_mode_result.diff.0.after.ip_permissions
+ - result.diff.0.after.ip_permissions_egress == check_mode_result.diff.0.after.ip_permissions_egress
+
+ - name: add rules to make sorting occur (CHECK MODE + DIFF)
+ ec2_group:
+ name: '{{ ec2_group_name }}'
+ description: '{{ ec2_group_description }}'
+ state: present
+ rules:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 172.16.0.0/12
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 10.0.0.0/8
+ rules_egress:
+ - proto: all
+ cidr_ip: 0.0.0.0/0
+ register: check_mode_result
+ check_mode: true
+ diff: true
+
+ - assert:
+ that:
+ - check_mode_result.changed
+
+ - name: add rules in a different order to test sorting consistency (DIFF)
+ ec2_group:
+ name: '{{ ec2_group_name }}'
+ description: '{{ ec2_group_description }}'
+ state: present
+ rules:
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 172.16.0.0/12
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 10.0.0.0/8
+ rules_egress:
+ - proto: all
+ cidr_ip: 0.0.0.0/0
+ register: result
+ diff: true
+
+ - assert:
+ that:
+ - result.changed
+ - result.diff.0.after.ip_permissions == check_mode_result.diff.0.after.ip_permissions
+ - result.diff.0.after.ip_permissions_egress == check_mode_result.diff.0.after.ip_permissions_egress
+
+ - name: purge rules (CHECK MODE + DIFF)
+ ec2_group:
+ name: '{{ ec2_group_name }}'
+ description: '{{ ec2_group_description }}'
+ state: present
+ rules:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ rules_egress: []
+ register: check_mode_result
+ check_mode: true
+ diff: true
+
+ - assert:
+ that:
+ - check_mode_result.changed
+
+ - name: purge rules (DIFF)
+ ec2_group:
+ name: '{{ ec2_group_name }}'
+ description: '{{ ec2_group_description }}'
+ state: present
+ rules:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ rules_egress: []
+ register: result
+ diff: true
+
+ - assert:
+ that:
+ - result.changed
+ - result.diff.0.after.ip_permissions == check_mode_result.diff.0.after.ip_permissions
+ - result.diff.0.after.ip_permissions_egress == check_mode_result.diff.0.after.ip_permissions_egress
+
+ - name: delete the security group (CHECK MODE + DIFF)
+ ec2_group:
+ name: '{{ ec2_group_name }}'
+ state: absent
+ register: check_mode_result
+ diff: true
+ check_mode: true
+
+ - assert:
+ that:
+ - check_mode_result.changed
+
+ - name: delete the security group (DIFF)
+ ec2_group:
+ name: '{{ ec2_group_name }}'
+ state: absent
+ register: result
+ diff: true
+
+ - assert:
+ that:
+ - result.changed
+ - not result.diff.0.after and not check_mode_result.diff.0.after
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/ec2_classic.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/ec2_classic.yml
new file mode 100644
index 00000000..4ea8553e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/ec2_classic.yml
@@ -0,0 +1,86 @@
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ - name: Get available AZs
+ aws_az_info:
+ filters:
+ region-name: "{{ aws_region }}"
+ register: az_facts
+
+ - name: Create a classic ELB with classic networking
+ ec2_elb_lb:
+ name: "{{ resource_prefix }}-elb"
+ state: present
+ zones:
+ - "{{ az_facts['availability_zones'][0]['zone_name'] }}"
+ - "{{ az_facts['availability_zones'][1]['zone_name'] }}"
+ listeners:
+ - protocol: http # options are http, https, ssl, tcp
+ load_balancer_port: 80
+ instance_port: 80
+ proxy_protocol: True
+ register: classic_elb
+
+ - name: Assert the elb was created
+ assert:
+ that:
+ - classic_elb.changed
+
+ - name: Create a security group with a classic elb-sg rule
+ ec2_group:
+ name: "{{ resource_prefix }}-sg-a"
+ description: "EC2 classic test security group"
+ rules:
+ - proto: tcp
+ ports: 80
+ group_id: amazon-elb/amazon-elb-sg
+ state: present
+ register: classic_sg
+
+ - name: Assert the SG was created
+ assert:
+ that:
+ - classic_sg.changed
+          - classic_sg.ip_permissions | length == 1
+
+ - set_fact:
+ elb_sg_id: "{{ classic_sg.ip_permissions[0].user_id_group_pairs[0].user_id }}/{{ classic_sg.ip_permissions[0].user_id_group_pairs[0].group_id }}/{{ classic_sg.ip_permissions[0].user_id_group_pairs[0].group_name }}"
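+    # With hypothetical values the fact above renders as
+    # "amazon-elb/sg-0123456789abcdef0/amazon-elb-sg" (owner / group id / group name),
+    # the form that is passed back to ec2_group as the rule's group_id below.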
+
+ - name: Update the security group
+ ec2_group:
+ name: "{{ resource_prefix }}-sg-a"
+ description: "EC2 classic test security group"
+ rules:
+ - proto: tcp
+ ports: 8080
+ group_id: "{{ elb_sg_id }}"
+ - proto: tcp
+ ports:
+ - 80
+ cidr_ip: 0.0.0.0/0
+ state: present
+ register: updated_classic_sg
+
+
+ - name: Assert the SG was updated
+ assert:
+ that:
+ - updated_classic_sg.changed
+          - updated_classic_sg.ip_permissions | length == 2
+          - classic_sg.ip_permissions[0] not in updated_classic_sg.ip_permissions
+
+ # ===========================================
+ always:
+ - name: Terminate classic ELB
+ ec2_elb_lb:
+        name: "{{ resource_prefix }}-elb"
+ state: absent
+
+ - name: Delete security group
+ ec2_group:
+ name: "{{ resource_prefix }}-sg-a"
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/egress_tests.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/egress_tests.yml
new file mode 100644
index 00000000..5635f443
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/egress_tests.yml
@@ -0,0 +1,177 @@
+---
+- block:
+ - name: Create a group with only the default rule
+ ec2_group:
+ name: '{{ec2_group_name}}-egress-tests'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ec2_group_description}}'
+ state: present
+ register: result
+
+ - name: assert default rule is in place (expected changed=true)
+ assert:
+ that:
+ - result is changed
+ - result.ip_permissions|length == 0
+ - result.ip_permissions_egress|length == 1
+ - result.ip_permissions_egress[0].ip_ranges[0].cidr_ip == '0.0.0.0/0'
+
+ - name: Create a group with only the default rule
+ ec2_group:
+ name: '{{ec2_group_name}}-egress-tests'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ec2_group_description}}'
+ purge_rules_egress: false
+ state: present
+ register: result
+
+ - name: assert default rule is not purged (expected changed=false)
+ assert:
+ that:
+ - result is not changed
+ - result.ip_permissions|length == 0
+ - result.ip_permissions_egress|length == 1
+ - result.ip_permissions_egress[0].ip_ranges[0].cidr_ip == '0.0.0.0/0'
+
+ - name: Pass empty egress rules without purging, should leave default rule in place
+ ec2_group:
+ name: '{{ec2_group_name}}-egress-tests'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ purge_rules_egress: false
+ rules_egress: []
+ state: present
+ register: result
+
+ - name: assert default rule is not purged (expected changed=false)
+ assert:
+ that:
+ - result is not changed
+ - result.ip_permissions|length == 0
+ - result.ip_permissions_egress|length == 1
+ - result.ip_permissions_egress[0].ip_ranges[0].cidr_ip == '0.0.0.0/0'
+
+ - name: Purge rules, including the default
+ ec2_group:
+ name: '{{ec2_group_name}}-egress-tests'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ purge_rules_egress: true
+ rules_egress: []
+ state: present
+ register: result
+
+    - name: assert rules are purged, including the default (expected changed=true)
+ assert:
+ that:
+ - result is changed
+ - result.ip_permissions|length == 0
+ - result.ip_permissions_egress|length == 0
+
+ - name: Add a custom egress rule
+ ec2_group:
+ name: '{{ec2_group_name}}-egress-tests'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ rules_egress:
+ - proto: tcp
+ ports:
+ - 1212
+ cidr_ip: 10.2.1.2/32
+ state: present
+ register: result
+
+ - name: assert first rule is here
+ assert:
+ that:
+ - result.ip_permissions_egress|length == 1
+
+ - name: Add a second custom egress rule
+ ec2_group:
+ name: '{{ec2_group_name}}-egress-tests'
+ description: '{{ec2_group_description}}'
+ purge_rules_egress: false
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ rules_egress:
+ - proto: tcp
+ ports:
+ - 2323
+ cidr_ip: 10.3.2.3/32
+ state: present
+ register: result
+
+ - name: assert the first rule is not purged
+ assert:
+ that:
+ - result.ip_permissions_egress|length == 2
+
+ - name: Purge the second rule (CHECK MODE) (DIFF MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}-egress-tests'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ rules_egress:
+ - proto: tcp
+ ports:
+ - 1212
+ cidr_ip: 10.2.1.2/32
+ state: present
+ register: result
+ check_mode: True
+ diff: True
+
+ - name: assert first rule will be left
+ assert:
+ that:
+ - result.changed
+ - result.diff.0.after.ip_permissions_egress|length == 1
+ - result.diff.0.after.ip_permissions_egress[0].ip_ranges[0].cidr_ip == '10.2.1.2/32'
+
+ - name: Purge the second rule
+ ec2_group:
+ name: '{{ec2_group_name}}-egress-tests'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ rules_egress:
+ - proto: tcp
+ ports:
+ - 1212
+ cidr_ip: 10.2.1.2/32
+ state: present
+ register: result
+
+ - name: assert first rule is here
+ assert:
+ that:
+ - result.ip_permissions_egress|length == 1
+ - result.ip_permissions_egress[0].ip_ranges[0].cidr_ip == '10.2.1.2/32'
+
+ - name: add a rule for all TCP ports
+ ec2_group:
+ name: '{{ec2_group_name}}-egress-tests'
+ description: '{{ec2_group_description}}'
+ rules_egress:
+ - proto: tcp
+ ports: 0-65535
+ cidr_ip: 0.0.0.0/0
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ register: result
+
+ - name: Re-add the default rule
+ ec2_group:
+ name: '{{ec2_group_name}}-egress-tests'
+ description: '{{ec2_group_description}}'
+ rules_egress:
+ - proto: -1
+ cidr_ip: 0.0.0.0/0
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ register: result
+ always:
+ - name: tidy up egress rule test security group
+ ec2_group:
+ name: '{{ec2_group_name}}-egress-tests'
+ state: absent
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/group_info.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/group_info.yml
new file mode 100644
index 00000000..86c8a546
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/group_info.yml
@@ -0,0 +1,96 @@
+---
+
+# file for testing the ec2_group_info module
+
+- block:
+ # ======================== Setup =====================================
+ - name: Create a group for testing group info retrieval below
+ ec2_group:
+ name: '{{ ec2_group_name }}-info-1'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ ec2_group_description }}'
+ rules:
+ - proto: tcp
+ ports:
+ - 90
+ cidr_ip: 10.2.2.2/32
+ tags:
+ test: '{{ resource_prefix }}_ec2_group_info_module'
+ register: group_info_test_setup
+
+ - name: Create another group for testing group info retrieval below
+ ec2_group:
+ name: '{{ ec2_group_name }}-info-2'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ ec2_group_description }}'
+ rules:
+ - proto: tcp
+ ports:
+ - 91
+ cidr_ip: 10.2.2.2/32
+
+ # ========================= ec2_group_info tests ====================
+
+ - name: Retrieve security group info based on SG name
+ ec2_group_info:
+ filters:
+ group-name: '{{ ec2_group_name }}-info-2'
+ register: result_1
+
+ - name: Assert results found
+ assert:
+ that:
+ - result_1.security_groups is defined
+          - (result_1.security_groups|first).group_name == ec2_group_name ~ '-info-2'
+
+ - name: Retrieve security group info based on SG VPC
+ ec2_group_info:
+ filters:
+ vpc-id: '{{ vpc_result.vpc.id }}'
+ register: result_2
+
+ - name: Assert results found
+ assert:
+ that:
+ - result_2.security_groups is defined
+ - (result_2.security_groups|first).vpc_id == vpc_result.vpc.id
+ - (result_2.security_groups|length) > 2
+
+ - name: Retrieve security group info based on SG tags
+ ec2_group_info:
+ filters:
+ "tag:test": "{{ resource_prefix }}_ec2_group_info_module"
+ register: result_3
+
+ - name: Assert results found
+ assert:
+ that:
+ - result_3.security_groups is defined
+ - (result_3.security_groups|first).group_id == group_info_test_setup.group_id
+
+ - name: Retrieve security group info based on SG ID
+ ec2_group_info:
+ filters:
+ group-id: '{{ group_info_test_setup.group_id }}'
+ register: result_4
+
+ - name: Assert correct result found
+ assert:
+ that:
+ - result_4.security_groups is defined
+ - (result_4.security_groups|first).group_id == group_info_test_setup.group_id
+ - (result_4.security_groups|length) == 1
+
+ always:
+ # ========================= Cleanup =================================
+ - name: tidy up test security group 1
+ ec2_group:
+ name: '{{ ec2_group_name }}-info-1'
+ state: absent
+ ignore_errors: yes
+
+ - name: tidy up test security group 2
+ ec2_group:
+ name: '{{ ec2_group_name }}-info-2'
+ state: absent
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/ipv6_default_tests.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/ipv6_default_tests.yml
new file mode 100644
index 00000000..2dea42a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/ipv6_default_tests.yml
@@ -0,0 +1,90 @@
+---
+# ============================================================
+- name: test state=present for ipv6 (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6: "64:ff9b::/96"
+ check_mode: true
+ register: result
+
+- name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+
+# ============================================================
+- name: test state=present for ipv6 (expected changed=true)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6: "64:ff9b::/96"
+ register: result
+
+- name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.group_id.startswith("sg-")'
+
+# ============================================================
+- name: test rules_egress state=present for ipv6 (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6: "64:ff9b::/96"
+ rules_egress:
+ - proto: "tcp"
+ from_port: 8181
+ to_port: 8181
+ cidr_ipv6: "64:ff9b::/96"
+ check_mode: true
+ register: result
+
+- name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+
+# ============================================================
+- name: test rules_egress state=present for ipv6 (expected changed=true)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6: "64:ff9b::/96"
+ rules_egress:
+ - proto: "tcp"
+ from_port: 8181
+ to_port: 8181
+ cidr_ipv6: "64:ff9b::/96"
+ register: result
+
+- name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.group_id.startswith("sg-")'
+- name: delete it
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/main.yml
new file mode 100644
index 00000000..cbd39cb3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/main.yml
@@ -0,0 +1,1460 @@
+---
+# Runs a set of tests without the AWS connection credentials configured
+- include: ./credential_tests.yml
+- set_fact:
+ aws_security_token: '{{ security_token | default("") }}'
+ no_log: True
+
+# ============================================================
+# EC2 Classic tests can only be run on a pre-2013 AWS account with supported-platforms=EC2
+# Ansible CI does NOT have classic EC2 support; these tests are provided as-is for the
+# community and can be run if you have access to a classic account. To check if your account
+# has support for EC2 Classic you can use the `amazon.aws.aws_account_attribute` plugin.
+
+- name: determine if this is an EC2 Classic account
+ set_fact:
+ has_ec2_classic: "{{ lookup('amazon.aws.aws_account_attribute',
+ attribute='has-ec2-classic',
+ region=aws_region,
+ aws_access_key=aws_access_key,
+ aws_secret_key=aws_secret_key,
+ aws_security_token=aws_security_token,
+ wantlist=True) }}"
+
+# ============================================================
+- name: Run EC2 Classic tests if account type is EC2
+ include: ./ec2_classic.yml
+ when: has_ec2_classic
+
+# ============================================================
+# Other tests depend on attribute='default-vpc', i.e. no vpc_id is set. This is
+# incompatible with EC2 classic accounts, so these tests can only be run in a
+# VPC-type account. See "Q. I really want a default VPC for my existing EC2
+# account. Is that possible?" in https://aws.amazon.com/vpc/faqs/#Default_VPCs
+- name: Run all other tests if account type is VPC
+ module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit)}}"
+ region: "{{ aws_region }}"
+ block:
+ - name: determine if there is a default VPC
+ set_fact:
+ defaultvpc: "{{ lookup('amazon.aws.aws_account_attribute',
+ attribute='default-vpc',
+ region=aws_region,
+ aws_access_key=aws_access_key,
+ aws_secret_key=aws_secret_key,
+ aws_security_token=aws_security_token) }}"
+ register: default_vpc
+
+ - name: create a VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ tags:
+ Name: "{{ resource_prefix }}-vpc"
+ Description: "Created by ansible-test"
+ register: vpc_result
+ #TODO(ryansb): Update CI for VPC peering permissions
+ #- include: ./multi_account.yml
+ - include: ./diff_mode.yml
+ - include: ./numeric_protos.yml
+ - include: ./rule_group_create.yml
+ - include: ./egress_tests.yml
+ - include: ./data_validation.yml
+ - include: ./multi_nested_target.yml
+ - include: ./group_info.yml
+
+ # ============================================================
+ - name: test state=absent (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: absent
+ check_mode: true
+ register: result
+
+ - name: assert no changes would be made
+ assert:
+ that:
+ - not result.changed
+
+ # ===========================================================
+ - name: test state=absent
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: absent
+ register: result
+
+ # ============================================================
+ - name: test state=present (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ check_mode: true
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+
+ # ============================================================
+ - name: test state=present (expected changed=true)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.group_id.startswith("sg-")'
+
+ # ============================================================
+ - name: test state=present different description (expected changed=false) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}CHANGED'
+ state: present
+ check_mode: true
+ register: result
+
+ - name: assert state=present (expected changed=false)
+ assert:
+ that:
+ - 'not result.changed'
+
+ # ============================================================
+ - name: test state=present different description (expected changed=false)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}CHANGED'
+ state: present
+ ignore_errors: true
+ register: result
+
+ - name: assert state=present (expected changed=false)
+ assert:
+ that:
+ - 'not result.changed'
+ - 'result.group_id.startswith("sg-")'
+
+ # ============================================================
+ - name: test state=present (expected changed=false)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ register: result
+
+ - name: assert state=present (expected changed=false)
+ assert:
+ that:
+ - 'not result.changed'
+ - 'result.group_id.startswith("sg-")'
+
+ # ============================================================
+    - name: test IPv6 with the default VPC
+ include: ./ipv6_default_tests.yml
+ when: default_vpc
+
+ - name: test IPv6 with a specified VPC
+ block:
+
+ # ============================================================
+ - name: test state=present (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ ec2_group_name }}-2'
+ description: '{{ ec2_group_description }}-2'
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ check_mode: true
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+
+ # ============================================================
+ - name: test state=present (expected changed=true)
+ ec2_group:
+ name: '{{ ec2_group_name }}-2'
+ description: '{{ ec2_group_description }}-2'
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.group_id.startswith("sg-")'
+
+ # ============================================================
+ - name: test state=present for ipv6 (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ ec2_group_name }}-2'
+ description: '{{ ec2_group_description }}-2'
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6: "64:ff9b::/96"
+ check_mode: true
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+
+ # ============================================================
+ - name: test state=present for ipv6 (expected changed=true)
+ ec2_group:
+ name: '{{ ec2_group_name }}-2'
+ description: '{{ ec2_group_description }}-2'
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6: "64:ff9b::/96"
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.group_id.startswith("sg-")'
+
+ # ============================================================
+ - name: test state=present for ipv6 (expected changed=false) (CHECK MODE)
+ ec2_group:
+ name: '{{ ec2_group_name }}-2'
+ description: '{{ ec2_group_description }}-2'
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6: "64:ff9b::/96"
+ check_mode: true
+ register: result
+
+ - name: assert nothing changed
+ assert:
+ that:
+ - 'not result.changed'
+
+ # ============================================================
+ - name: test state=present for ipv6 (expected changed=false)
+ ec2_group:
+ name: '{{ ec2_group_name }}-2'
+ description: '{{ ec2_group_description }}-2'
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6: "64:ff9b::/96"
+ register: result
+
+ - name: assert nothing changed
+ assert:
+ that:
+ - 'not result.changed'
+
+ # ============================================================
+ - name: test rules_egress state=present for ipv6 (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ ec2_group_name }}-2'
+ description: '{{ ec2_group_description }}-2'
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6: "64:ff9b::/96"
+ rules_egress:
+ - proto: "tcp"
+ from_port: 8181
+ to_port: 8181
+ cidr_ipv6: "64:ff9b::/96"
+ check_mode: true
+ diff: true
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.diff.0.before.ip_permissions == result.diff.0.after.ip_permissions'
+ - 'result.diff.0.before.ip_permissions_egress != result.diff.0.after.ip_permissions_egress'
+
+ # ============================================================
+ - name: test rules_egress state=present for ipv6 (expected changed=true)
+ ec2_group:
+ name: '{{ ec2_group_name }}-2'
+ description: '{{ ec2_group_description }}-2'
+ state: present
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6: "64:ff9b::/96"
+ rules_egress:
+ - proto: "tcp"
+ from_port: 8181
+ to_port: 8181
+ cidr_ipv6: "64:ff9b::/96"
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.group_id.startswith("sg-")'
+
+ # ============================================================
+ - name: test state=absent (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ ec2_group_name }}-2'
+ description: '{{ ec2_group_description }}-2'
+ state: absent
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ check_mode: true
+ diff: true
+ register: result
+
+ - name: assert group was removed
+ assert:
+ that:
+ - 'result.changed'
+ - 'not result.diff.0.after'
+
+ # ============================================================
+ - name: test state=absent (expected changed=true)
+ ec2_group:
+ name: '{{ ec2_group_name }}-2'
+ description: '{{ ec2_group_description }}-2'
+ state: absent
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ register: result
+
+ - name: assert group was removed
+ assert:
+ that:
+ - 'result.changed'
+
+ # ============================================================
+ - name: test state=present for ipv4 (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ip: "10.1.1.1/32"
+ check_mode: true
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+
+ # ============================================================
+ - name: test state=present for ipv4 (expected changed=true)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ip: "10.1.1.1/32"
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.group_id.startswith("sg-")'
+ - 'result.ip_permissions|length == 1'
+ - 'result.ip_permissions_egress|length == 1'
+
+ # ============================================================
+ - name: add same rule to the existing group (expected changed=false) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ip: "10.1.1.1/32"
+ check_mode: true
+ diff: true
+ register: check_result
+
+ - assert:
+ that:
+ - not check_result.changed
+ - check_result.diff.0.before.ip_permissions.0 == check_result.diff.0.after.ip_permissions.0
+
+ # ============================================================
+ - name: add same rule to the existing group (expected changed=false)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ip: "10.1.1.1/32"
+ register: result
+
+ - name: assert state=present (expected changed=false)
+ assert:
+ that:
+ - 'not result.changed'
+ - 'result.group_id.startswith("sg-")'
+
+ - name: assert state=present (expected changed=false)
+ assert:
+ that:
+ - 'not check_result.changed'
+
+ # ============================================================
+ - name: add a rule that auto creates another security group (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ purge_rules: no
+ rules:
+ - proto: "tcp"
+ group_name: "{{ resource_prefix }} - Another security group"
+ group_desc: Another security group
+ ports: 7171
+ check_mode: true
+ register: result
+
+ - name: check that there are now two rules
+ assert:
+ that:
+ - result.changed
+
+ # ============================================================
+ - name: add a rule that auto creates another security group
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ purge_rules: no
+ rules:
+ - proto: "tcp"
+ group_name: "{{ resource_prefix }} - Another security group"
+ group_desc: Another security group
+ ports: 7171
+ register: result
+
+ - name: check that there are now two rules
+ assert:
+ that:
+ - result.changed
+ - result.warning is not defined
+ - result.ip_permissions|length == 2
+ - result.ip_permissions[0].user_id_group_pairs or
+ result.ip_permissions[1].user_id_group_pairs
+ - 'result.ip_permissions_egress[0].ip_protocol == "-1"'
+
+ # ============================================================
+ - name: test ip rules convert port numbers from string to int (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: "8183"
+ to_port: "8183"
+ cidr_ip: "10.1.1.1/32"
+ rules_egress:
+ - proto: "tcp"
+ from_port: "8184"
+ to_port: "8184"
+ cidr_ip: "10.1.1.1/32"
+ check_mode: true
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+
+ # ============================================================
+ - name: test ip rules convert port numbers from string to int (expected changed=true)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: "8183"
+ to_port: "8183"
+ cidr_ip: "10.1.1.1/32"
+ rules_egress:
+ - proto: "tcp"
+ from_port: "8184"
+ to_port: "8184"
+ cidr_ip: "10.1.1.1/32"
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.group_id.startswith("sg-")'
+ - 'result.ip_permissions|length == 1'
+ - 'result.ip_permissions_egress[0].ip_protocol == "tcp"'
+
+
+ # ============================================================
+ - name: test group rules convert port numbers from string to int (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: "8185"
+ to_port: "8185"
+ group_id: "{{result.group_id}}"
+ rules_egress:
+ - proto: "tcp"
+ from_port: "8186"
+ to_port: "8186"
+ group_id: "{{result.group_id}}"
+ check_mode: true
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+
+ # ============================================================
+ - name: test group rules convert port numbers from string to int (expected changed=true)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: "8185"
+ to_port: "8185"
+ group_id: "{{result.group_id}}"
+ rules_egress:
+ - proto: "tcp"
+ from_port: "8186"
+ to_port: "8186"
+ group_id: "{{result.group_id}}"
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.group_id.startswith("sg-")'
+ - result.warning is not defined
+
+ # ============================================================
+ - name: test adding a range of ports and ports given as strings (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ # set purge_rules to false so we don't get a false positive from previously added rules
+ purge_rules: false
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8183-8190
+ - '8192'
+ cidr_ip: 10.1.1.1/32
+ check_mode: true
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+
+ # ============================================================
+ - name: test adding a range of ports and ports given as strings (expected changed=true)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ # set purge_rules to false so we don't get a false positive from previously added rules
+ purge_rules: false
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8183-8190
+ - '8192'
+ cidr_ip: 10.1.1.1/32
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.group_id.startswith("sg-")'
+
+ # ============================================================
+    - name: test adding a rule with an IPv4 CIDR with host bits set (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ # set purge_rules to false so we don't get a false positive from previously added rules
+ purge_rules: false
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8195
+ cidr_ip: 10.0.0.1/8
+ check_mode: true
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+
+ # ============================================================
+    - name: test adding a rule with an IPv4 CIDR with host bits set (expected changed=true)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ # set purge_rules to false so we don't get a false positive from previously added rules
+ purge_rules: false
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8195
+ cidr_ip: 10.0.0.1/8
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.group_id.startswith("sg-")'
+
+ # ============================================================
+    - name: test adding the same rule with an IPv4 CIDR with host bits set (expected changed=false) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ # set purge_rules to false so we don't get a false positive from previously added rules
+ purge_rules: false
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8195
+ cidr_ip: 10.0.0.1/8
+ check_mode: true
+ register: check_result
+
+ # ============================================================
+    - name: test adding the same rule with an IPv4 CIDR with host bits set (expected changed=false and a warning)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ # set purge_rules to false so we don't get a false positive from previously added rules
+ purge_rules: false
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8195
+ cidr_ip: 10.0.0.1/8
+ register: result
+
+    - name: assert state=present in check mode (expected changed=false)
+ assert:
+ that:
+ - 'not check_result.changed'
+
+ - name: assert state=present (expected changed=false and a warning)
+ assert:
+ that:
+ # No way to assert for warnings?
+ - 'not result.changed'
+ - 'result.group_id.startswith("sg-")'
+
+ # ============================================================
+ - name: test using the default VPC
+ block:
+
+        - name: test adding a rule with an IPv6 CIDR with host bits set (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ # set purge_rules to false so we don't get a false positive from previously added rules
+ purge_rules: false
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8196
+ cidr_ipv6: '2001:db00::1/24'
+ check_mode: true
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+
+ # ============================================================
+        - name: test adding a rule with an IPv6 CIDR with host bits set (expected changed=true)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ # set purge_rules to false so we don't get a false positive from previously added rules
+ purge_rules: false
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8196
+ cidr_ipv6: '2001:db00::1/24'
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.group_id.startswith("sg-")'
+
+ # ============================================================
+
+        - name: test adding a rule again with an IPv6 CIDR with host bits set (expected changed=false and a warning)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ state: present
+ # set purge_rules to false so we don't get a false positive from previously added rules
+ purge_rules: false
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8196
+ cidr_ipv6: '2001:db00::1/24'
+ register: result
+
+ - name: assert state=present (expected changed=false and a warning)
+ assert:
+ that:
+ # No way to assert for warnings?
+ - 'not result.changed'
+ - 'result.group_id.startswith("sg-")'
+
+ when: default_vpc
+
+ # ============================================================
+ - name: test state=absent (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ state: absent
+ check_mode: true
+ register: result
+
+ - name: assert state=absent (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+
+ # ============================================================
+ - name: test state=absent (expected changed=true)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ state: absent
+ register: result
+
+ - name: assert state=absent (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'not result.group_id'
+
+ # ============================================================
+ - name: create security group in the VPC (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ip: "10.1.1.1/32"
+ check_mode: true
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+
+ # ============================================================
+ - name: create security group in the VPC
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ip: "10.1.1.1/32"
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.vpc_id == vpc_result.vpc.id'
+ - 'result.group_id.startswith("sg-")'
+
+ # ============================================================
+ - name: test adding tags (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ip: "10.1.1.1/32"
+ tags:
+ tag1: test1
+ tag2: test2
+ check_mode: true
+ diff: true
+ register: result
+
+ - name: assert that tags were added (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'not result.diff.0.before.tags'
+ - 'result.diff.0.after.tags.tag1 == "test1"'
+ - 'result.diff.0.after.tags.tag2 == "test2"'
+
+ # ============================================================
+ - name: test adding tags (expected changed=true)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ip: "10.1.1.1/32"
+ tags:
+ tag1: test1
+ tag2: test2
+ register: result
+
+ - name: assert that tags were added (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.tags == {"tag1": "test1", "tag2": "test2"}'
+
+ # ============================================================
+ - name: test that tags are present (expected changed=False) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ state: present
+ purge_rules_egress: false
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ip: "10.1.1.1/32"
+ tags:
+ tag1: test1
+ tag2: test2
+ check_mode: true
+ register: result
+
+ - name: assert that tags were not changed (expected changed=False)
+ assert:
+ that:
+ - 'not result.changed'
+
+ # ============================================================
+ - name: test that tags are present (expected changed=False)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ state: present
+ purge_rules_egress: false
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ip: "10.1.1.1/32"
+ tags:
+ tag1: test1
+ tag2: test2
+ register: result
+
+ - name: assert that tags were not changed (expected changed=False)
+ assert:
+ that:
+ - 'not result.changed'
+ - 'result.tags == {"tag1": "test1", "tag2": "test2"}'
+
+ # ============================================================
+ - name: test purging tags (expected changed=True) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ip: "10.1.1.1/32"
+ tags:
+ tag1: test1
+ check_mode: true
+ register: result
+
+ - name: assert that tag2 was removed (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+
+ # ============================================================
+ - name: test purging tags (expected changed=True)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ip: "10.1.1.1/32"
+ tags:
+ tag1: test1
+ register: result
+
+ - name: assert that tag2 was removed (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.tags == {"tag1": "test1"}'
+
+ # ============================================================
+
+    - name: test that tags are left as-is if not specified (expected changed=False)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ip: "10.1.1.1/32"
+ register: result
+
+ - name: assert that the tags stayed the same (expected changed=false)
+ assert:
+ that:
+ - 'not result.changed'
+ - 'result.tags == {"tag1": "test1"}'
+
+ # ============================================================
+
+ - name: test purging all tags (expected changed=True)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ip: "10.1.1.1/32"
+ tags: {}
+ register: result
+
+ - name: assert that tag1 was removed (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'not result.tags'
+
+ # ============================================================
+ - name: test adding a rule and egress rule descriptions (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ # purge the other rules so assertions work for the subsequent tests for rule descriptions
+ purge_rules_egress: true
+ purge_rules: true
+ state: present
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8281
+ cidr_ipv6: 1001:d00::/24
+ rule_desc: ipv6 rule desc 1
+ rules_egress:
+ - proto: "tcp"
+ ports:
+ - 8282
+ cidr_ip: 10.2.2.2/32
+ rule_desc: egress rule desc 1
+ check_mode: true
+ register: result
+
+ - name: assert that rule descriptions are created (expected changed=true)
+      # Only assert this if rule description is defined, as the botocore version may be < 1.7.2.
+ # It's still helpful to have these tests run on older versions since it verifies backwards
+ # compatibility with this feature.
+ assert:
+ that:
+ - 'result.changed'
+ when: result.ip_permissions_egress[0].ip_ranges[0].description is defined
+
+    - name: if an older version of botocore is installed the task should still report a change due to purged rules (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ when: result.ip_permissions_egress[0].ip_ranges[0].description is undefined
+
+ # =========================================================================================
+ - name: add rules without descriptions ready for adding descriptions to existing rules
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ # purge the other rules so assertions work for the subsequent tests for rule descriptions
+ purge_rules_egress: true
+ purge_rules: true
+ state: present
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8281
+ cidr_ipv6: 1001:d00::/24
+ rules_egress:
+ - proto: "tcp"
+ ports:
+ - 8282
+ cidr_ip: 10.2.2.2/32
+ register: result
+
+ # ============================================================
+ - name: test adding a rule and egress rule descriptions (expected changed=true)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ # purge the other rules so assertions work for the subsequent tests for rule descriptions
+ purge_rules_egress: true
+ purge_rules: true
+ state: present
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8281
+ cidr_ipv6: 1001:d00::/24
+ rule_desc: ipv6 rule desc 1
+ rules_egress:
+ - proto: "tcp"
+ ports:
+ - 8282
+ cidr_ip: 10.2.2.2/32
+ rule_desc: egress rule desc 1
+ register: result
+
+ - name: assert that rule descriptions are created (expected changed=true)
+      # Only assert this if rule description is defined, as the botocore version may be < 1.7.2.
+ # It's still helpful to have these tests run on older versions since it verifies backwards
+ # compatibility with this feature.
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.ip_permissions[0].ipv6_ranges[0].description == "ipv6 rule desc 1"'
+ - 'result.ip_permissions_egress[0].ip_ranges[0].description == "egress rule desc 1"'
+ when: result.ip_permissions_egress[0].ip_ranges[0].description is defined
+
+    - name: if an older version of botocore is installed the task should still report a change due to purged rules (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ when: result.ip_permissions_egress[0].ip_ranges[0].description is undefined
+
+ # ============================================================
+ - name: test modifying rule and egress rule descriptions (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ purge_rules_egress: false
+ purge_rules: false
+ state: present
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8281
+ cidr_ipv6: 1001:d00::/24
+ rule_desc: ipv6 rule desc 2
+ rules_egress:
+ - proto: "tcp"
+ ports:
+ - 8282
+ cidr_ip: 10.2.2.2/32
+ rule_desc: egress rule desc 2
+ check_mode: true
+ register: result
+
+ - name: assert that rule descriptions were modified (expected changed=true)
+      # Only assert this if rule description is defined, as the botocore version may be < 1.7.2.
+ # It's still helpful to have these tests run on older versions since it verifies backwards
+ # compatibility with this feature.
+ assert:
+ that:
+ - 'result.ip_permissions | length > 0'
+ - 'result.changed'
+ when: result.ip_permissions_egress[0].ip_ranges[0].description is defined
+
+ - name: if an older version of botocore is installed everything should stay the same (expected changed=false)
+ assert:
+ that:
+ - 'not result.changed'
+ when: result.ip_permissions_egress[0].ip_ranges[0].description is undefined and result.ip_permissions_egress[1].ip_ranges[0].description is undefined
+
+ # ============================================================
+ - name: test modifying rule and egress rule descriptions (expected changed=true)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ purge_rules_egress: false
+ purge_rules: false
+ state: present
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8281
+ cidr_ipv6: 1001:d00::/24
+ rule_desc: ipv6 rule desc 2
+ rules_egress:
+ - proto: "tcp"
+ ports:
+ - 8282
+ cidr_ip: 10.2.2.2/32
+ rule_desc: egress rule desc 2
+ register: result
+
+ - name: assert that rule descriptions were modified (expected changed=true)
+        # Only assert this when the rule description is defined, as the installed botocore version may be < 1.7.2.
+        # It's still helpful to run these tests on older versions since they verify backwards
+        # compatibility with this feature.
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.ip_permissions[0].ipv6_ranges[0].description == "ipv6 rule desc 2"'
+ - 'result.ip_permissions_egress[0].ip_ranges[0].description == "egress rule desc 2"'
+ when: result.ip_permissions_egress[0].ip_ranges[0].description is defined
+
+ - name: if an older version of botocore is installed everything should stay the same (expected changed=false)
+ assert:
+ that:
+ - 'not result.changed'
+ when: result.ip_permissions_egress[0].ip_ranges[0].description is undefined
+
+ # ============================================================
+
+ - name: test creating rule in default vpc with egress rule (expected changed=true)
+ ec2_group:
+ name: '{{ec2_group_name}}-default-vpc'
+ description: '{{ec2_group_description}} default VPC'
+ purge_rules_egress: true
+ state: present
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8281
+ cidr_ip: 10.1.1.1/24
+ rule_desc: ipv4 rule desc
+ rules_egress:
+ - proto: "tcp"
+ ports:
+ - 8282
+ cidr_ip: 10.2.2.2/32
+ rule_desc: egress rule desc 2
+ register: result
+
+      - name: assert that the default VPC group was created with a single egress rule (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.ip_permissions_egress|length == 1'
+
+ # ============================================================
+      - name: test keeping the same rule descriptions (expected changed=false) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ purge_rules_egress: false
+ purge_rules: false
+ state: present
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8281
+ cidr_ipv6: 1001:d00::/24
+ rule_desc: ipv6 rule desc 2
+ rules_egress:
+ - proto: "tcp"
+ ports:
+ - 8282
+ cidr_ip: 10.2.2.2/32
+ rule_desc: egress rule desc 2
+ check_mode: true
+ register: result
+
+ - name: assert that rule descriptions stayed the same (expected changed=false)
+        # Only assert this when the rule description is defined, as the installed botocore version may be < 1.7.2.
+        # It's still helpful to run these tests on older versions since they verify backwards
+        # compatibility with this feature.
+ assert:
+ that:
+ - 'not result.changed'
+ when: result.ip_permissions_egress[0].ip_ranges[0].description is defined
+
+ - name: if an older version of botocore is installed everything should stay the same (expected changed=false)
+ assert:
+ that:
+ - 'not result.changed'
+ when: result.ip_permissions_egress[0].ip_ranges[0].description is undefined
+
+ # ============================================================
+      - name: test keeping the same rule descriptions (expected changed=false)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ purge_rules_egress: false
+ purge_rules: false
+ state: present
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8281
+ cidr_ipv6: 1001:d00::/24
+ rule_desc: ipv6 rule desc 2
+ rules_egress:
+ - proto: "tcp"
+ ports:
+ - 8282
+ cidr_ip: 10.2.2.2/32
+ rule_desc: egress rule desc 2
+ register: result
+
+ - name: assert that rule descriptions stayed the same (expected changed=false)
+        # Only assert this when the rule description is defined, as the installed botocore version may be < 1.7.2.
+        # It's still helpful to run these tests on older versions since they verify backwards
+        # compatibility with this feature.
+ assert:
+ that:
+ - 'not result.changed'
+ - 'result.ip_permissions[0].ipv6_ranges[0].description == "ipv6 rule desc 2"'
+ - 'result.ip_permissions_egress[0].ip_ranges[0].description == "egress rule desc 2"'
+ when: result.ip_permissions_egress[0].ip_ranges[0].description is defined
+
+ - name: if an older version of botocore is installed everything should stay the same (expected changed=false)
+ assert:
+ that:
+ - 'not result.changed'
+ when: result.ip_permissions_egress[0].ip_ranges[0].description is undefined
+
+ # ============================================================
+ - name: test removing rule descriptions (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ purge_rules_egress: false
+ purge_rules: false
+ state: present
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8281
+ cidr_ipv6: 1001:d00::/24
+ rule_desc:
+ rules_egress:
+ - proto: "tcp"
+ ports:
+ - 8282
+ cidr_ip: 10.2.2.2/32
+ rule_desc:
+ check_mode: true
+ register: result
+
+ - name: assert that rule descriptions were removed (expected changed=true)
+        # Only assert this when the rule description is defined, as the installed botocore version may be < 1.7.2.
+        # It's still helpful to run these tests on older versions since they verify backwards
+        # compatibility with this feature.
+ assert:
+ that:
+ - 'result.changed'
+ when: result.ip_permissions_egress[0].ip_ranges[0].description is defined
+
+ - name: if an older version of botocore is installed everything should stay the same (expected changed=false)
+ assert:
+ that:
+ - 'not result.changed'
+ when: result.ip_permissions_egress[0].ip_ranges[0].description is undefined
+
+ # ============================================================
+ - name: test removing rule descriptions (expected changed=true)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ description: '{{ec2_group_description}}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ purge_rules_egress: false
+ purge_rules: false
+ state: present
+ rules:
+ - proto: "tcp"
+ ports:
+ - 8281
+ cidr_ipv6: 1001:d00::/24
+ rule_desc:
+ rules_egress:
+ - proto: "tcp"
+ ports:
+ - 8282
+ cidr_ip: 10.2.2.2/32
+ rule_desc:
+ register: result
+ ignore_errors: true
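+        # Removing rule descriptions can fail on botocore releases without rule-description support,
+        # so allow the failure here and assert on the outcome below.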
+
+ - name: assert that rule descriptions were removed (expected changed=true with newer botocore)
+        # Only assert this when the rule description is defined, as the installed botocore version may be < 1.7.2.
+        # It's still helpful to run these tests on older versions since they verify backwards
+        # compatibility with this feature.
+ assert:
+ that:
+ - 'result.ip_permissions[0].ipv6_ranges[0].description is undefined'
+ - 'result.ip_permissions_egress[0].ip_ranges[0].description is undefined'
+ when: result is changed
+
+ - name: if an older version of botocore is installed everything should stay the same (expected changed=false)
+ assert:
+ that:
+ - 'not result.changed'
+ when: result.failed
+
+ # ============================================================
+
+ - name: test state=absent (expected changed=true)
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ state: absent
+ register: result
+
+ - name: assert state=absent (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'not result.group_id'
+ when: not has_ec2_classic
+
+ always:
+ # ============================================================
+ - name: tidy up security group
+ ec2_group:
+ name: '{{ec2_group_name}}'
+ state: absent
+ ignore_errors: yes
+
+ - name: tidy up security group for IPv6 EC2-Classic tests
+ ec2_group:
+ name: '{{ ec2_group_name }}-2'
+ state: absent
+ ignore_errors: yes
+
+ - name: tidy up default VPC security group
+ ec2_group:
+ name: '{{ec2_group_name}}-default-vpc'
+ state: absent
+ ignore_errors: yes
+
+ - name: tidy up automatically created SG
+ ec2_group:
+ name: "{{ resource_prefix }} - Another security group"
+ state: absent
+ ignore_errors: yes
+
+ - name: tidy up VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ state: absent
+ cidr_block: "{{ vpc_cidr }}"
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/multi_account.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/multi_account.yml
new file mode 100644
index 00000000..675dfd93
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/multi_account.yml
@@ -0,0 +1,124 @@
+- block:
+ - aws_caller_info:
+ register: caller_facts
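+    # caller_facts.account is the AWS account ID, used below for the peering connection
+    # and for the account-qualified group references.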
+ - name: create a VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc-2"
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ tags:
+ Description: "Created by ansible-test"
+ register: vpc_result_2
+ - name: Peer the secondary-VPC to the main VPC
+ ec2_vpc_peer:
+ vpc_id: '{{ vpc_result_2.vpc.id }}'
+ peer_vpc_id: '{{ vpc_result.vpc.id }}'
+ peer_owner_id: '{{ caller_facts.account }}'
+ peer_region: '{{ aws_region }}'
+ register: peer_origin
+ - name: Accept the secondary-VPC peering connection in the main VPC
+ ec2_vpc_peer:
+ peer_vpc_id: '{{ vpc_result_2.vpc.id }}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ state: accept
+ peering_id: '{{ peer_origin.peering_id }}'
+ peer_owner_id: '{{ caller_facts.account }}'
+ peer_region: '{{ aws_region }}'
+ - name: Create group in second VPC
+ ec2_group:
+ name: '{{ ec2_group_name }}-external'
+ description: '{{ ec2_group_description }}'
+ vpc_id: '{{ vpc_result_2.vpc.id }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ cidr_ip: 0.0.0.0/0
+ ports:
+ - 80
+ rule_desc: 'http whoo'
+ register: external
+ - name: Create group in internal VPC
+ ec2_group:
+ name: '{{ ec2_group_name }}-internal'
+ description: '{{ ec2_group_description }}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ state: present
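+      # The rule target below uses the '<account_id>/<group_id>/<group_name>' form to reference a group in the peered VPC.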
+ rules:
+ - proto: "tcp"
+ group_id: '{{ caller_facts.account }}/{{ external.group_id }}/{{ ec2_group_name }}-external'
+ ports:
+ - 80
+ - name: Re-make same rule, expecting changed=false in internal VPC
+ ec2_group:
+ name: '{{ ec2_group_name }}-internal'
+ description: '{{ ec2_group_description }}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ group_id: '{{ caller_facts.account }}/{{ external.group_id }}/{{ ec2_group_name }}-external'
+ ports:
+ - 80
+ register: out
+ - assert:
+ that:
+ - out is not changed
+  - name: Try again with a malformed group_id in the internal VPC (expected failure)
+ ec2_group:
+ name: '{{ ec2_group_name }}-internal'
+ description: '{{ ec2_group_description }}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ state: present
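+      # The account ID and group ID are deliberately swapped in the target below, so the module should reject the rule.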
+ rules:
+ - proto: "tcp"
+ group_id: '{{ external.group_id }}/{{ caller_facts.account }}/{{ ec2_group_name }}-external'
+ ports:
+ - 80
+ register: out
+ ignore_errors: true
+ - assert:
+ that:
+ - out is failed
+ always:
+ - pause: seconds=5
+ - name: Delete secondary-VPC side of peer
+ ec2_vpc_peer:
+ vpc_id: '{{ vpc_result_2.vpc.id }}'
+ peer_vpc_id: '{{ vpc_result.vpc.id }}'
+ peering_id: '{{ peer_origin.peering_id }}'
+ state: absent
+ peer_owner_id: '{{ caller_facts.account }}'
+ peer_region: '{{ aws_region }}'
+ ignore_errors: yes
+ - name: Delete main-VPC side of peer
+ ec2_vpc_peer:
+ peer_vpc_id: '{{ vpc_result_2.vpc.id }}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ state: absent
+ peering_id: '{{ peer_origin.peering_id }}'
+ peer_owner_id: '{{ caller_facts.account }}'
+ peer_region: '{{ aws_region }}'
+ ignore_errors: yes
+ - name: Clean up group in second VPC
+ ec2_group:
+ name: '{{ ec2_group_name }}-external'
+ description: '{{ ec2_group_description }}'
+ state: absent
+ vpc_id: '{{ vpc_result_2.vpc.id }}'
+ ignore_errors: yes
+  - name: Clean up group in the main VPC
+ ec2_group:
+ name: '{{ ec2_group_name }}-internal'
+ description: '{{ ec2_group_description }}'
+ state: absent
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ ignore_errors: yes
+ - name: tidy up VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc-2"
+ state: absent
+ cidr_block: "{{ vpc_cidr }}"
+ ignore_errors: yes
+ register: removed
+ retries: 10
+ until: removed is not failed
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/multi_nested_target.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/multi_nested_target.yml
new file mode 100644
index 00000000..87f48468
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/multi_nested_target.yml
@@ -0,0 +1,213 @@
+---
+ # ============================================================
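+  # cidr_ip and cidr_ipv6 entries may be given as nested lists; the module is expected to
+  # flatten them into individual rule targets.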
+
+ - name: test state=present for multiple ipv6 and ipv4 targets (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ ec2_group_name }}'
+ description: '{{ ec2_group_description }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6:
+ - "64:ff9b::/96"
+ - ["2620::/32"]
+ - proto: "tcp"
+ ports: 5665
+ cidr_ip:
+ - 172.16.1.0/24
+ - 172.16.17.0/24
+ - ["10.0.0.0/24", "10.20.0.0/24"]
+ check_mode: true
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+
+ - name: test state=present for multiple ipv6 and ipv4 targets (expected changed=true)
+ ec2_group:
+ name: '{{ ec2_group_name }}'
+ description: '{{ ec2_group_description }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6:
+ - "64:ff9b::/96"
+ - ["2620::/32"]
+ - proto: "tcp"
+ ports: 5665
+ cidr_ip:
+ - 172.16.1.0/24
+ - 172.16.17.0/24
+ - ["10.0.0.0/24", "10.20.0.0/24"]
+ register: result
+
+ - name: assert state=present (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.ip_permissions | length == 2'
+ - 'result.ip_permissions[0].ip_ranges | length == 4 or result.ip_permissions[1].ip_ranges | length == 4'
+ - 'result.ip_permissions[0].ipv6_ranges | length == 2 or result.ip_permissions[1].ipv6_ranges | length == 2'
+
+ - name: test state=present for multiple ipv6 and ipv4 targets (expected changed=false) (CHECK MODE)
+ ec2_group:
+ name: '{{ ec2_group_name }}'
+ description: '{{ ec2_group_description }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6:
+ - "64:ff9b::/96"
+ - ["2620::/32"]
+ - proto: "tcp"
+ ports: 5665
+ cidr_ip:
+ - 172.16.1.0/24
+ - 172.16.17.0/24
+ - ["10.0.0.0/24", "10.20.0.0/24"]
+ check_mode: true
+ register: result
+
+  - name: assert state=present (expected changed=false)
+ assert:
+ that:
+ - 'not result.changed'
+
+ - name: test state=present for multiple ipv6 and ipv4 targets (expected changed=false)
+ ec2_group:
+ name: '{{ ec2_group_name }}'
+ description: '{{ ec2_group_description }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6:
+ - "64:ff9b::/96"
+ - ["2620::/32"]
+ - proto: "tcp"
+ ports: 5665
+ cidr_ip:
+ - 172.16.1.0/24
+ - 172.16.17.0/24
+ - ["10.0.0.0/24", "10.20.0.0/24"]
+ register: result
+
+  - name: assert state=present (expected changed=false)
+ assert:
+ that:
+ - 'not result.changed'
+
+ - name: test state=present purging a nested ipv4 target (expected changed=true) (CHECK MODE)
+ ec2_group:
+ name: '{{ ec2_group_name }}'
+ description: '{{ ec2_group_description }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6:
+ - "64:ff9b::/96"
+ - ["2620::/32"]
+ - proto: "tcp"
+ ports: 5665
+ cidr_ip:
+ - 172.16.1.0/24
+ - 172.16.17.0/24
+ - ["10.0.0.0/24"]
+ check_mode: true
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+
+ - name: test state=present purging a nested ipv4 target (expected changed=true)
+ ec2_group:
+ name: '{{ ec2_group_name }}'
+ description: '{{ ec2_group_description }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6:
+ - "64:ff9b::/96"
+ - ["2620::/32"]
+ - proto: "tcp"
+ ports: 5665
+ cidr_ip:
+ - 172.16.1.0/24
+ - 172.16.17.0/24
+ - ["10.0.0.0/24"]
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - 'result.ip_permissions[0].ip_ranges | length == 3 or result.ip_permissions[1].ip_ranges | length == 3'
+ - 'result.ip_permissions[0].ipv6_ranges | length == 2 or result.ip_permissions[1].ipv6_ranges | length == 2'
+
+ - name: test state=present with both associated ipv6 targets nested (expected changed=false)
+ ec2_group:
+ name: '{{ ec2_group_name }}'
+ description: '{{ ec2_group_description }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6:
+ - ["2620::/32", "64:ff9b::/96"]
+ - proto: "tcp"
+ ports: 5665
+ cidr_ip:
+ - 172.16.1.0/24
+ - 172.16.17.0/24
+ - ["10.0.0.0/24"]
+ register: result
+
+ - assert:
+ that:
+ - not result.changed
+
+ - name: test state=present add another nested ipv6 target (expected changed=true)
+ ec2_group:
+ name: '{{ ec2_group_name }}'
+ description: '{{ ec2_group_description }}'
+ state: present
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ cidr_ipv6:
+ - ["2620::/32", "64:ff9b::/96"]
+ - ["2001:DB8:A0B:12F0::1/64"]
+ - proto: "tcp"
+ ports: 5665
+ cidr_ip:
+ - 172.16.1.0/24
+ - 172.16.17.0/24
+ - ["10.0.0.0/24"]
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - result.warning is not defined
+ - 'result.ip_permissions[0].ip_ranges | length == 3 or result.ip_permissions[1].ip_ranges | length == 3'
+ - 'result.ip_permissions[0].ipv6_ranges | length == 3 or result.ip_permissions[1].ipv6_ranges | length == 3'
+
+ - name: delete it
+ ec2_group:
+ name: '{{ ec2_group_name }}'
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/numeric_protos.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/numeric_protos.yml
new file mode 100644
index 00000000..6cca9fc4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/numeric_protos.yml
@@ -0,0 +1,60 @@
+---
+- block:
+ - name: set up temporary group name for tests
+ set_fact:
+ group_tmp_name: '{{ec2_group_name}}-numbered-protos'
+
+ - name: Create a group with numbered protocol (GRE)
+ ec2_group:
+ name: '{{ group_tmp_name }}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ ec2_group_description }}'
+ rules:
+ - proto: 47
+ to_port: -1
+ from_port: -1
+ cidr_ip: 0.0.0.0/0
+ state: present
+ register: result
+
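+    # Re-creating the same rule with the protocol number quoted as a string should be treated as identical (no change).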
+ - name: Create a group with a quoted proto
+ ec2_group:
+ name: '{{ group_tmp_name }}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ ec2_group_description }}'
+ rules:
+ - proto: '47'
+ to_port: -1
+ from_port: -1
+ cidr_ip: 0.0.0.0/0
+ state: present
+ register: result
+ - assert:
+ that:
+ - result is not changed
+ - name: Add a tag with a numeric value
+ ec2_group:
+ name: '{{ group_tmp_name }}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ ec2_group_description }}'
+ tags:
+ foo: 1
+ - name: Read a tag with a numeric value
+ ec2_group:
+ name: '{{ group_tmp_name }}'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ ec2_group_description }}'
+ tags:
+ foo: 1
+ register: result
+ - assert:
+ that:
+ - result is not changed
+
+ always:
+    - name: tidy up numbered protocol test security group
+ ec2_group:
+ name: '{{group_tmp_name}}'
+ state: absent
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/rule_group_create.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/rule_group_create.yml
new file mode 100644
index 00000000..ab14d32d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_group/tasks/rule_group_create.yml
@@ -0,0 +1,126 @@
+---
+- block:
+    - name: Create a group with a self-referencing rule
+ ec2_group:
+ name: '{{ec2_group_name}}-auto-create-1'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ec2_group_description}}'
+ rules:
+ - proto: "tcp"
+ from_port: 8000
+ to_port: 8100
+ group_name: '{{ec2_group_name}}-auto-create-1'
+ state: present
+ register: result
+
+    - name: Create a second group to use as a rule target
+ ec2_group:
+ name: '{{ec2_group_name}}-auto-create-2'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ec2_group_description}}'
+ state: present
+
+ - name: Create a series of rules with a recently created group as target
+ ec2_group:
+ name: '{{ec2_group_name}}-auto-create-1'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ec2_group_description}}'
+ purge_rules: false
+ rules:
+ - proto: "tcp"
+ from_port: "{{ item }}"
+ to_port: "{{ item }}"
+ group_name: '{{ec2_group_name}}-auto-create-2'
+ state: present
+ register: result
+ with_items:
+ - 20
+ - 40
+ - 60
+ - 80
+
+ - assert:
+ that:
+ - result.warning is not defined
+
+    - name: Try to create a rule targeting a new group without a description (expected failure)
+ ec2_group:
+ name: '{{ec2_group_name}}-auto-create-1'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ec2_group_description}}'
+ rules:
+ - proto: "tcp"
+ from_port: 8182
+ to_port: 8182
+ group_name: '{{ec2_group_name}}-auto-create-3'
+ state: present
+ register: result
+ ignore_errors: true
+
+ - name: assert you can't create a new group from a rule target with no description
+ assert:
+ that:
+ - result is failed
+
+ - name: Create a group with a target of a separate group
+ ec2_group:
+ name: '{{ec2_group_name}}-auto-create-1'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ec2_group_description}}'
+ rules:
+ - proto: tcp
+ ports:
+ - 22
+ - 80
+ group_name: '{{ec2_group_name}}-auto-create-3'
+ group_desc: '{{ec2_group_description}}'
+ state: present
+ register: result
+
+ - assert:
+ that:
+ - result.warning is not defined
+
+ - name: Create a 4th group
+ ec2_group:
+ name: '{{ec2_group_name}}-auto-create-4'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ec2_group_description}}'
+ state: present
+ rules:
+ - proto: tcp
+ ports:
+ - 22
+ cidr_ip: 0.0.0.0/0
+
+ - name: use recently created group in a rule
+ ec2_group:
+ name: '{{ec2_group_name}}-auto-create-5'
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ description: '{{ec2_group_description}}'
+ rules:
+ - proto: tcp
+ ports:
+ - 443
+ group_name: '{{ec2_group_name}}-auto-create-4'
+      state: present
+      register: result
+
+ - assert:
+ that:
+ - result.warning is not defined
+
+ always:
+ - name: tidy up egress rule test security group
+ ec2_group:
+ name: '{{ec2_group_name}}-auto-create-{{ item }}'
+ state: absent
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ ignore_errors: yes
+ with_items: [5, 4, 3, 2, 1]
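+      # Second pass in the opposite order, in case cross-references between the groups blocked deletion on the first attempt.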
+ - name: tidy up egress rule test security group
+ ec2_group:
+ name: '{{ec2_group_name}}-auto-create-{{ item }}'
+ state: absent
+ vpc_id: '{{ vpc_result.vpc.id }}'
+ ignore_errors: yes
+ with_items: [1, 2, 3, 4, 5]
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/aliases b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/aliases
new file mode 100644
index 00000000..6e3860be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group2
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/defaults/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/defaults/main.yml
new file mode 100644
index 00000000..df0082d9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+# defaults file for test_ec2_key
+ec2_key_name: '{{resource_prefix}}'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/meta/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/meta/main.yml
new file mode 100644
index 00000000..45f0cedf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/meta/main.yml
@@ -0,0 +1,4 @@
+dependencies:
+ - prepare_tests
+ - setup_sshkey
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/tasks/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/tasks/main.yml
new file mode 100644
index 00000000..69e7edcb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/tasks/main.yml
@@ -0,0 +1,137 @@
+---
+# TODO - name: test 'validate_certs' parameter
+# TODO - name: test creating key pair with another_key_material with force=yes
+# ============================================================
+
+- module_defaults:
+ group/aws:
+ region: '{{ aws_region }}'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ block:
+
+ # ============================================================
+ - name: test with no parameters
+ ec2_key:
+ register: result
+ ignore_errors: true
+
+ - name: assert failure when called with no parameters
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg == "missing required arguments: name"'
+
+ # ============================================================
+ - name: test removing a non-existent key pair
+ ec2_key:
+ name: '{{ ec2_key_name }}'
+ state: absent
+ register: result
+
+ - name: assert removing a non-existent key pair
+ assert:
+ that:
+ - 'not result.changed'
+
+ # ============================================================
+ - name: test creating a new key pair
+ ec2_key:
+ name: '{{ ec2_key_name }}'
+ state: present
+ register: result
+
+ - name: assert creating a new key pair
+ assert:
+ that:
+ - 'result.changed'
+ - '"key" in result'
+ - '"name" in result.key'
+ - '"fingerprint" in result.key'
+ - '"private_key" in result.key'
+ - 'result.key.name == "{{ec2_key_name}}"'
+
+ # ============================================================
+ - name: test removing an existent key
+ ec2_key:
+ name: '{{ ec2_key_name }}'
+ state: absent
+ register: result
+
+ - name: assert removing an existent key
+ assert:
+ that:
+ - 'result.changed'
+ - '"key" in result'
+ - 'result.key == None'
+
+ # ============================================================
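+    # Importing key_material should not return a private key, unlike the freshly generated key pair above.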
+ - name: test state=present with key_material
+ ec2_key:
+ name: '{{ ec2_key_name }}'
+ key_material: '{{ key_material }}'
+ state: present
+ register: result
+
+ - name: assert state=present with key_material
+ assert:
+ that:
+ - 'result.changed == True'
+ - '"key" in result'
+ - '"name" in result.key'
+ - '"fingerprint" in result.key'
+ - '"private_key" not in result.key'
+ - 'result.key.name == "{{ec2_key_name}}"'
+ - 'result.key.fingerprint == "{{fingerprint}}"'
+
+ # ============================================================
+
+ - name: test force=no with another_key_material (expect changed=false)
+ ec2_key:
+ name: '{{ ec2_key_name }}'
+ key_material: '{{ another_key_material }}'
+ force: no
+ register: result
+
+ - name: assert force=no with another_key_material (expect changed=false)
+ assert:
+ that:
+ - 'not result.changed'
+ - 'result.key.fingerprint == "{{ fingerprint }}"'
+
+ # ============================================================
+
+ - name: test updating a key pair using another_key_material (expect changed=True)
+ ec2_key:
+ name: '{{ ec2_key_name }}'
+ key_material: '{{ another_key_material }}'
+ register: result
+
+ - name: assert updating a key pair using another_key_material (expect changed=True)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.key.fingerprint != "{{ fingerprint }}"'
+
+ # ============================================================
+ - name: test state=absent (expect changed=true)
+ ec2_key:
+ name: '{{ ec2_key_name }}'
+ state: absent
+ register: result
+
+ - name: assert state=absent with key_material (expect changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - '"key" in result'
+ - 'result.key == None'
+
+ always:
+
+ # ============================================================
+ - name: Always delete the key we might create
+ ec2_key:
+ name: '{{ ec2_key_name }}'
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/aliases b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/aliases
new file mode 100644
index 00000000..412ce2c0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/aliases
@@ -0,0 +1,3 @@
+non_local
+cloud/aws
+shippable/aws/group4
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/setup.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/setup.yml
new file mode 100644
index 00000000..df73cf79
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/setup.yml
@@ -0,0 +1,141 @@
+---
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+
+ hosts: localhost
+
+ collections:
+ - community.aws
+
+ vars:
+ vpc_name: '{{ resource_prefix }}-vpc'
+ vpc_seed: '{{ resource_prefix }}'
+ vpc_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.0.0/16'
+ subnet_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.32.0/24'
+ ec2_ami_name: 'amzn2-ami-hvm-2.*-x86_64-gp2'
+ sshkey_file: '{{ resource_prefix }}_key'
+
+ tasks:
+
+ - name: Create an ssh key
+ shell: echo 'y' | ssh-keygen -P '' -f ../{{ sshkey_file }}
+
+ - name: Get available AZs
+ aws_az_info:
+ filters:
+ region-name: "{{ aws_region }}"
+ register: az_info
+
+ - name: Pick an AZ
+ set_fact:
+ availability_zone: "{{ az_info['availability_zones'][0]['zone_name'] }}"
+
+ # ============================================================
+ - name: create a VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ tags:
+ Name: "{{ resource_prefix }}-vpc"
+ Description: "Created by ansible-test"
+ register: vpc_result
+
+ - set_fact:
+ vpc_id: "{{ vpc_result.vpc.id }}"
+
+ - name: create an internet gateway
+ ec2_vpc_igw:
+ vpc_id: "{{ vpc_id }}"
+ state: present
+ tags:
+ "Name": "{{ resource_prefix }}"
+ register: igw_result
+
+ - name: create a subnet
+ ec2_vpc_subnet:
+ cidr: "{{ vpc_cidr }}"
+ az: "{{ availability_zone }}"
+ vpc_id: "{{ vpc_id }}"
+ tags:
+ Name: "{{ resource_prefix }}-vpc"
+ Description: "Created by ansible-test"
+ state: present
+ register: vpc_subnet_result
+
+ - name: create a public route table
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc_id }}"
+ tags:
+ "Name": "{{ resource_prefix }}"
+ subnets:
+ - "{{ vpc_subnet_result.subnet.id }}"
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: "{{ igw_result.gateway_id }}"
+ register: public_route_table
+
+ - name: create a security group
+ ec2_group:
+ name: "{{ resource_prefix }}-sg"
+ description: "Created by {{ resource_prefix }}"
+ rules:
+ - proto: tcp
+ ports: 22
+ cidr_ip: 0.0.0.0/0
+ - proto: icmp
+ from_port: -1
+ to_port: -1
+ state: present
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ register: vpc_sg_result
+
+ - name: Create a key
+ ec2_key:
+ name: '{{ resource_prefix }}'
+ key_material: "{{ lookup('file', '../' ~ sshkey_file ~ '.pub') }}"
+ state: present
+ register: ec2_key_result
+
+ - name: Get a list of images
+ ec2_ami_info:
+ filters:
+ owner-alias: amazon
+ name: "amzn2-ami-minimal-hvm-*"
+ description: "Amazon Linux 2 AMI *"
+ register: images_info
+
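+    # Pick the most recently created AMI: sort by creation_date, newest first, then take its image_id.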
+ - name: Set facts to simplify use of extra resources
+ set_fact:
+ vpc_subnet_id: "{{ vpc_subnet_result.subnet.id }}"
+ vpc_sg_id: "{{ vpc_sg_result.group_id }}"
+ vpc_igw_id: "{{ igw_result.gateway_id }}"
+ vpc_route_table_id: "{{ public_route_table.route_table.id }}"
+ image_id: "{{ images_info.images | sort(attribute='creation_date') | reverse | first | json_query('image_id') }}"
+ ec2_key_name: "{{ ec2_key_result.key.name }}"
+
+ - name: Create an instance to test with
+ ec2_instance:
+ name: "{{ resource_prefix }}-ec2-metadata-facts"
+ image_id: "{{ image_id }}"
+ vpc_subnet_id: "{{ vpc_subnet_id }}"
+ security_group: "{{ vpc_sg_id }}"
+ instance_type: t2.micro
+ key_name: "{{ ec2_key_name }}"
+ network:
+ assign_public_ip: true
+ wait: true
+ wait_timeout: 300
+ register: ec2_instance
+
+ - set_fact:
+ ec2_instance_id: "{{ ec2_instance.instances[0].instance_id }}"
+
+ - name: Create inventory file
+ template:
+ src: ../templates/inventory.j2
+ dest: ../inventory
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/teardown.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/teardown.yml
new file mode 100644
index 00000000..cda4043c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/teardown.yml
@@ -0,0 +1,70 @@
+---
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+
+ hosts: localhost
+
+ collections:
+ - community.aws
+
+ tasks:
+ # ============================================================
+
+ - name: terminate the instance
+ ec2_instance:
+ state: absent
+ instance_ids:
+ - "{{ ec2_instance_id }}"
+ wait: True
+ ignore_errors: true
+ retries: 5
+
+ - name: remove ssh key
+ ec2_key:
+ name: "{{ ec2_key_name }}"
+ state: absent
+ ignore_errors: true
+
+ - name: remove the security group
+ ec2_group:
+ group_id: "{{ vpc_sg_id }}"
+ state: absent
+ ignore_errors: true
+ retries: 5
+
+ - name: remove the public route table
+ ec2_vpc_route_table:
+ vpc_id: "{{ vpc_id }}"
+ route_table_id: "{{ vpc_route_table_id }}"
+ lookup: id
+ state: absent
+ ignore_errors: true
+ retries: 5
+
+ - name: remove the subnet
+ ec2_vpc_subnet:
+ cidr: "{{ vpc_cidr }}"
+ az: "{{ availability_zone }}"
+ vpc_id: "{{ vpc_id }}"
+ state: absent
+ ignore_errors: true
+ retries: 5
+
+ - name: remove the internet gateway
+ ec2_vpc_igw:
+ vpc_id: "{{ vpc_id }}"
+ state: absent
+ ignore_errors: true
+ retries: 5
+
+ - name: remove the VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ cidr_block: "{{ vpc_cidr }}"
+ state: absent
+ ignore_errors: true
+ retries: 5
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/test_metadata.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/test_metadata.yml
new file mode 100644
index 00000000..fd49844b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/test_metadata.yml
@@ -0,0 +1,16 @@
+---
+- hosts: testhost
+ tasks:
+
+ - name: Wait for EC2 to be available
+ wait_for_connection:
+
+ - amazon.aws.ec2_metadata_facts:
+
+ - name: Assert initial metadata for the instance
+ assert:
+ that:
+ - ansible_ec2_ami_id == image_id
+ - ansible_ec2_placement_availability_zone == "{{ availability_zone }}"
+ - ansible_ec2_security_groups == "{{ resource_prefix }}-sg"
+ - ansible_ec2_user_data == "None"
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/runme.sh b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/runme.sh
new file mode 100755
index 00000000..6f2bc466
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/runme.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+
+set -eux
+export ANSIBLE_HOST_KEY_CHECKING=False
+export ANSIBLE_SSH_ARGS='-o UserKnownHostsFile=/dev/null'
+
+CMD_ARGS=("$@")
+
+# Destroy Environment
+cleanup() {
+ ansible-playbook playbooks/teardown.yml -i inventory -c local "${CMD_ARGS[@]}"
+}
+trap "cleanup" EXIT
+
+# create test resources and inventory
+ansible-playbook playbooks/setup.yml -c local "$@"
+
+# test ec2_instance_metadata
+ansible-playbook playbooks/test_metadata.yml -i inventory \
+ -e local_tmp=/tmp/ansible-local \
+ -e remote_tmp=/tmp/ansible-remote \
+ "$@"
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/templates/inventory.j2 b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/templates/inventory.j2
new file mode 100644
index 00000000..186edb8f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/templates/inventory.j2
@@ -0,0 +1,20 @@
+[testhost]
+"{{ ec2_instance.instances[0].public_ip_address }}"
+
+[testhost:vars]
+ansible_user=ec2-user
+ansible_ssh_private_key_file="{{ sshkey_file }}"
+ansible_python_interpreter=/usr/bin/env python
+
+[all:vars]
+# Template vars that will be used in tests and teardown
+vpc_id="{{ vpc_id }}"
+vpc_subnet_id="{{ vpc_subnet_id }}"
+vpc_sg_id="{{ vpc_sg_id }}"
+vpc_cidr="{{ vpc_cidr }}"
+vpc_igw="{{ vpc_igw_id }}"
+vpc_route_table_id="{{ vpc_route_table_id }}"
+ec2_key_name="{{ ec2_key_name }}"
+availability_zone="{{ availability_zone }}"
+image_id="{{ image_id }}"
+ec2_instance_id="{{ ec2_instance_id }}"
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/aliases b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/aliases
new file mode 100644
index 00000000..1dcb36b2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+shippable/aws/group4
+ec2_snapshot_info
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/defaults/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/defaults/main.yml
new file mode 100644
index 00000000..dc1f0f70
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+# defaults file for ec2_snapshot
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/tasks/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/tasks/main.yml
new file mode 100644
index 00000000..448d2f81
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/tasks/main.yml
@@ -0,0 +1,277 @@
+---
+# Tests for EC2 Snapshot
+#
+# Tests ec2_snapshot:
+# - Snapshot creation
+# - Create with last_snapshot_min_age
+# - Snapshot deletion
+#
+# Tests ec2_snapshot_info:
+# - Listing snapshots for filter: tag
+#
+# Possible Bugs:
+# - check_mode not supported
+#
+- name: Integration testing for ec2_snapshot
+ module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+
+ collections:
+ - community.aws
+
+
+ block:
+ - ec2_ami_info:
+ owners: amazon
+ filters:
+ architecture: x86_64
+ virtualization-type: hvm
+ root-device-type: ebs
+ name: "amzn-ami-hvm*"
+ register: amis
+
+ - name: Setup an instance for testing
+ ec2_instance:
+ name: '{{ resource_prefix }}'
+ instance_type: t2.nano
+ image_id: "{{ (amis.images | sort(attribute='creation_date') | last).image_id }}"
+ wait: yes
+ volumes:
+ - device_name: /dev/xvda
+ ebs:
+ volume_size: 8
+ delete_on_termination: true
+ register: instance
+
+ - set_fact:
+ volume_id: '{{ instance.instances[0].block_device_mappings[0].ebs.volume_id }}'
+ instance_id: '{{ instance.instances[0].instance_id }}'
+ device_name: '{{ instance.instances[0].block_device_mappings[0].device_name }}'
+
+# JR: Check mode not supported
+# - name: Take snapshot (check mode)
+# ec2_snapshot:
+# instance_id: '{{ instance_id }}'
+# check_mode: true
+# snapshot_tags:
+# Test: '{{ resource_prefix }}'
+# register: result
+# - assert:
+# that:
+# - result is changed
+
+ - name: Take snapshot of volume
+ ec2_snapshot:
+ volume_id: '{{ volume_id }}'
+ register: result
+
+    # The Name tag is created automatically as the instance name, i.e. the resource_prefix
+ - name: Get info about snapshots
+ ec2_snapshot_info:
+ filters:
+ "tag:Name": '{{ resource_prefix }}'
+ register: info_result
+
+ - assert:
+ that:
+ - result is changed
+ - info_result is not changed
+ - info_result.snapshots| length == 1
+ - info_result.snapshots[0].snapshot_id == result.snapshot_id
+ - info_result.snapshots[0].volume_id == result.volume_id
+ - info_result.snapshots[0].volume_size == result.volume_size
+ - info_result.snapshots[0].tags == result.tags
+
+ - name: Get info about snapshots (check_mode)
+ ec2_snapshot_info:
+ filters:
+ "tag:Name": '{{ resource_prefix }}'
+ register: info_check
+ check_mode: yes
+
+ - assert:
+ that:
+ - info_check is not changed
+ - info_check.snapshots| length == 1
+ - info_check.snapshots[0].snapshot_id == result.snapshot_id
+ - info_check.snapshots[0].volume_id == result.volume_id
+ - info_check.snapshots[0].volume_size == result.volume_size
+ - info_check.snapshots[0].tags == result.tags
+
+# JR: Check mode not supported
+# - name: Take snapshot if most recent >1hr (False) (check mode)
+# ec2_snapshot:
+# volume_id: '{{ volume_id }}'
+# snapshot_tags:
+# Name: '{{ resource_prefix }}'
+# last_snapshot_min_age: 60
+# check_mode: true
+# register: result
+# - assert:
+# that:
+# - result is not changed
+
+ - name: Take snapshot if most recent >1hr (False)
+ ec2_snapshot:
+ volume_id: '{{ volume_id }}'
+ last_snapshot_min_age: 60
+ register: result
+
+ - name: Get info about snapshots
+ ec2_snapshot_info:
+ filters:
+ "tag:Name": '{{ resource_prefix }}'
+ register: info_result
+
+ - assert:
+ that:
+ - result is not changed
+ - info_result.snapshots| length == 1
+
+ - name: Pause so we can do a last_snapshot_min_age test
+ pause:
+ minutes: 1
+
+# JR: Check mode not supported
+# - name: Take snapshot if most recent >1min (True) (check mode)
+# ec2_snapshot:
+# volume_id: '{{ volume_id }}'
+# snapshot_tags:
+# Name: '{{ resource_prefix }}'
+# last_snapshot_min_age: 1
+# check_mode: true
+# register: result
+# - assert:
+# that:
+# - result is changed
+
+ - name: Take snapshot if most recent >1min (True)
+ ec2_snapshot:
+ volume_id: '{{ volume_id }}'
+ last_snapshot_min_age: 1
+ register: result
+
+ - name: Get info about snapshots
+ ec2_snapshot_info:
+ filters:
+ "tag:Name": '{{ resource_prefix }}'
+ register: info_result
+
+ - assert:
+ that:
+ - result is changed
+ - info_result.snapshots| length == 2
+ - '"{{ result.snapshot_id }}" in "{{ info_result| community.general.json_query("snapshots[].snapshot_id") }}"'
+
+# JR: Check mode not supported
+# - name: Take snapshot with a tag (check mode)
+# ec2_snapshot:
+# volume_id: '{{ volume_id }}'
+# snapshot_tags:
+# MyTag: '{{ resource_prefix }}'
+# register: result
+# - assert:
+# that:
+# - result is changed
+
+ # Wait at least 15 seconds between concurrent volume snapshots.
+ - name: Prevent SnapshotCreationPerVolumeRateExceeded errors
+ pause:
+ seconds: 15
+
+ - name: Take snapshot and tag it
+ ec2_snapshot:
+ volume_id: '{{ volume_id }}'
+ snapshot_tags:
+ MyTag: '{{ resource_prefix }}'
+ register: tagged_result
+
+ - name: Get info about snapshots by tag
+ ec2_snapshot_info:
+ filters:
+ "tag:MyTag": '{{ resource_prefix }}'
+ register: tag_info_result
+
+ - set_fact:
+ tagged_snapshot_id: '{{ tag_info_result.snapshots[0].snapshot_id }}'
+
+ - assert:
+ that:
+ - tagged_result is changed
+ - tagged_result.tags| length == 2
+ - tag_info_result.snapshots| length == 1
+ - tagged_result.tags.MyTag == "{{ resource_prefix }}"
+ - '"{{ tagged_result.snapshot_id }}" == "{{ tagged_snapshot_id }}"'
+
+ - name: Get info about all snapshots for this test
+ ec2_snapshot_info:
+ filters:
+ "tag:Name": '{{ resource_prefix }}'
+ register: info_result
+
+ - assert:
+ that:
+ - info_result.snapshots| length == 3
+
+ - name: Delete the tagged snapshot
+ ec2_snapshot:
+ state: absent
+ snapshot_id: '{{ tagged_snapshot_id }}'
+
+ - name: Get info about all snapshots for this test
+ ec2_snapshot_info:
+ filters:
+ "tag:Name": '{{ resource_prefix }}'
+ register: info_result
+
+ - assert:
+ that:
+ - info_result.snapshots| length == 2
+ - '"{{ tagged_snapshot_id }}" not in "{{ info_result| community.general.json_query("snapshots[].snapshot_id") }}"'
+
+ - name: Delete snapshots
+ ec2_snapshot:
+ state: absent
+ snapshot_id: '{{ item.snapshot_id }}'
+ with_items: '{{ info_result.snapshots }}'
+
+ - name: Get info about all snapshots for this test
+ ec2_snapshot_info:
+ filters:
+ "tag:Name": '{{ resource_prefix }}'
+ register: info_result
+
+ - assert:
+ that:
+ - info_result.snapshots| length == 0
+
+ always:
+
+ - name: Snapshots to delete
+ ec2_snapshot_info:
+ filters:
+ "tag:Name": '{{ resource_prefix }}'
+ register: tagged_snapshots
+
+ - name: Delete tagged snapshots
+ ec2_snapshot:
+ state: absent
+ snapshot_id: '{{ item.snapshot_id }}'
+ with_items: '{{ tagged_snapshots.snapshots }}'
+ ignore_errors: true
+
+ - name: Delete instance
+ ec2_instance:
+ instance_ids: '{{ instance_id }}'
+ state: absent
+ ignore_errors: true
+
+ - name: Delete volume
+ ec2_vol:
+ id: '{{ volume_id }}'
+ state: absent
+ ignore_errors: true
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/aliases b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/aliases
new file mode 100644
index 00000000..be56eee8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+shippable/aws/group2
+ec2_tag_info
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/defaults/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/defaults/main.yml
new file mode 100644
index 00000000..6aa39c73
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+# defaults file for test_ec2_tag
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/meta/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/meta/main.yml
new file mode 100644
index 00000000..1f64f116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/tasks/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/tasks/main.yml
new file mode 100644
index 00000000..bf36afcb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/tasks/main.yml
@@ -0,0 +1,143 @@
+---
+# tasks file for test_ec2_tag
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ - name: Create an EC2 volume so we have something to tag
+ ec2_vol:
+ name: "{{ resource_prefix }} ec2_tag volume"
+ volume_size: 1
+ state: present
+ zone: "{{ aws_region }}a"
+ register: volume
+
+ - name: List the tags on the volume (ec2_tag)
+ ec2_tag:
+ resource: "{{ volume.volume_id }}"
+ state: list
+ register: result
+ - name: List the tags on the volume (ec2_tag_info)
+ ec2_tag_info:
+ resource: "{{ volume.volume_id }}"
+ register: result_info
+
+ - assert:
+ that:
+ - result.tags | length == 1
+ - result.tags.Name == '{{ resource_prefix }} ec2_tag volume'
+ - result_info.tags | length == 1
+ - result_info.tags.Name == '{{ resource_prefix }} ec2_tag volume'
+
+ - name: Set some new tags on the volume
+ ec2_tag:
+ resource: "{{ volume.volume_id }}"
+ state: present
+ tags:
+ foo: foo
+ bar: baz
+ baz: also baz
+ register: result
+ - name: List the new tags on the volume
+ ec2_tag_info:
+ resource: "{{ volume.volume_id }}"
+ register: result_info
+
+ - assert:
+ that:
+ - result is changed
+ - result.tags | length == 4
+ - result.added_tags | length == 3
+ - result.tags.Name == '{{ resource_prefix }} ec2_tag volume'
+ - result.tags.foo == 'foo'
+ - result.tags.bar == 'baz'
+ - result.tags.baz == 'also baz'
+ - result_info.tags | length == 4
+ - result_info.tags.Name == '{{ resource_prefix }} ec2_tag volume'
+ - result_info.tags.foo == 'foo'
+ - result_info.tags.bar == 'baz'
+ - result_info.tags.baz == 'also baz'
+
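+    # With state=absent, a tag key listed without a value is removed regardless of its current value.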
+ - name: Remove a tag by name
+ ec2_tag:
+ resource: "{{ volume.volume_id }}"
+ state: absent
+ tags:
+ baz:
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - result.removed_tags | length == 1
+ - "'baz' in result.removed_tags"
+
+    - name: Don't remove a tag when the value doesn't match (expected changed=false)
+ ec2_tag:
+ resource: "{{ volume.volume_id }}"
+ state: absent
+ tags:
+ foo: baz
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: Remove a tag
+ ec2_tag:
+ resource: "{{ volume.volume_id }}"
+ state: absent
+ tags:
+ foo: foo
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - result.tags | length == 2
+ - "'added_tags' not in result"
+ - result.removed_tags | length == 1
+ - result.tags.Name == '{{ resource_prefix }} ec2_tag volume'
+ - result.tags.bar == 'baz'
+
+ - name: Set an exclusive tag
+ ec2_tag:
+ resource: "{{ volume.volume_id }}"
+ purge_tags: true
+ tags:
+ baz: quux
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - result.tags | length == 1
+ - result.added_tags | length == 1
+ - result.removed_tags | length == 2
+ - result.tags.baz == 'quux'
+
+ - name: Remove all tags
+ ec2_tag:
+ resource: "{{ volume.volume_id }}"
+ purge_tags: true
+ tags: {}
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - result.tags | length == 0
+
+ always:
+ - name: Remove the volume
+ ec2_vol:
+ id: "{{ volume.volume_id }}"
+ state: absent
+ register: result
+ until: result is not failed
+ ignore_errors: yes
+ retries: 10
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/vars/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/vars/main.yml
new file mode 100644
index 00000000..c2d0654a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/vars/main.yml
@@ -0,0 +1,2 @@
+---
+# vars file for test_ec2_tag
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/aliases b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/aliases
new file mode 100644
index 00000000..b2f0c65b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+shippable/aws/group3
+ec2_vol_info
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/defaults/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/defaults/main.yml
new file mode 100644
index 00000000..eb2594bc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/defaults/main.yml
@@ -0,0 +1,5 @@
+vpc_name: '{{ resource_prefix }}-vpc'
+vpc_seed: '{{ resource_prefix }}'
+vpc_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.0.0/16'
+subnet_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.32.0/24'
+ec2_ami_name: 'amzn2-ami-hvm-2.*-x86_64-gp2' \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/main.yml
new file mode 100644
index 00000000..70e711b3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/main.yml
@@ -0,0 +1,5 @@
+- hosts: localhost
+ connection: local
+ environment: "{{ ansible_test.environment }}"
+ tasks:
+ - include_tasks: 'tasks/main.yml' \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/meta/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/meta/main.yml
new file mode 100644
index 00000000..bc4ebde8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_remote_tmp_dir \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/tasks/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/tasks/main.yml
new file mode 100644
index 00000000..2cbb44e2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/tasks/main.yml
@@ -0,0 +1,27 @@
+- set_fact:
+ virtualenv: "{{ remote_tmp_dir }}/virtualenv"
+ virtualenv_command: "{{ ansible_python_interpreter }} -m virtualenv"
+
+- set_fact:
+ virtualenv_interpreter: "{{ virtualenv }}/bin/python"
+
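+# The tests below run from a dedicated virtualenv so they use known boto3/botocore releases
+# rather than whatever is installed system-wide.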
+- pip:
+ name: virtualenv
+
+- pip:
+ name:
+ - 'boto3>=1.16.33'
+ - 'botocore>=1.13.0'
+ - 'coverage<5'
+ - 'boto>=2.49.0'
+ virtualenv: "{{ virtualenv }}"
+ virtualenv_command: "{{ virtualenv_command }}"
+ virtualenv_site_packages: no
+
+- include_tasks: tests.yml
+ vars:
+ ansible_python_interpreter: "{{ virtualenv_interpreter }}"
+
+- file:
+ path: "{{ virtualenv }}"
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/tasks/tests.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/tasks/tests.yml
new file mode 100644
index 00000000..42cd8de3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/tasks/tests.yml
@@ -0,0 +1,546 @@
+---
+
+- module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key | default(omit) }}'
+ aws_secret_key: '{{ aws_secret_key | default(omit) }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region | default(omit) }}'
+
+ collections:
+ - amazon.aws
+
+ block:
+ - name: list available AZs
+ aws_az_info:
+ register: region_azs
+
+ - name: pick an AZ for testing
+ set_fact:
+ availability_zone: "{{ region_azs.availability_zones[0].zone_name }}"
+
+ - name: Create a test VPC
+ ec2_vpc_net:
+ name: "{{ vpc_name }}"
+ cidr_block: "{{ vpc_cidr }}"
+ tags:
+ Name: ec2_vol testing
+ ResourcePrefix: "{{ resource_prefix }}"
+ register: testing_vpc
+
+ - name: Create a test subnet
+ ec2_vpc_subnet:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: "{{ subnet_cidr }}"
+ tags:
+ Name: ec2_vol testing
+ ResourcePrefix: "{{ resource_prefix }}"
+ az: '{{ availability_zone }}'
+ register: testing_subnet
+
+ - name: Find AMI to use
+ ec2_ami_info:
+ owners: 'amazon'
+ filters:
+ name: '{{ ec2_ami_name }}'
+ register: ec2_amis
+
+ - name: Set fact with latest AMI
+ vars:
+ latest_ami: '{{ ec2_amis.images | sort(attribute="creation_date") | last }}'
+ set_fact:
+ ec2_ami_image: '{{ latest_ami.image_id }}'
+
+ # ==== ec2_vol tests ===============================================
+
+ - name: create a volume (validate module defaults)
+ ec2_vol:
+ volume_size: 1
+ zone: "{{ availability_zone }}"
+ tags:
+ ResourcePrefix: "{{ resource_prefix }}"
+ register: volume1
+
+ - name: check task return attributes
+ assert:
+ that:
+ - volume1.changed
+ - "'volume' in volume1"
+ - "'volume_id' in volume1"
+ - "'volume_type' in volume1"
+ - "'device' in volume1"
+ - volume1.volume.status == 'available'
+ - volume1.volume_type == 'standard'
+ - "'attachment_set' in volume1.volume"
+ - "'instance_id' in volume1.volume.attachment_set"
+ - not volume1.volume.attachment_set.instance_id
+ - not volume1.volume.encrypted
+ - volume1.volume.tags.ResourcePrefix == "{{ resource_prefix }}"
+
+ # no idempotency check needed here
+
+ - name: create another volume (override module defaults)
+ ec2_vol:
+ encrypted: yes
+ volume_size: 4
+ volume_type: io1
+ iops: 101
+ name: "{{ resource_prefix }}"
+ tags:
+ ResourcePrefix: "{{ resource_prefix }}"
+ zone: "{{ availability_zone }}"
+ register: volume2
+
+ - name: check task return attributes
+ assert:
+ that:
+ - volume2.changed
+ - "'volume' in volume2"
+ - "'volume_id' in volume2"
+ - "'volume_type' in volume2"
+ - "'device' in volume2"
+ - volume2.volume.status == 'available'
+ - volume2.volume_type == 'io1'
+ - volume2.volume.iops == 101
+ - volume2.volume.size == 4
+ - volume2.volume.encrypted
+ - volume2.volume.tags.ResourcePrefix == "{{ resource_prefix }}"
+
+ - name: create another volume (override module defaults) (idempotent)
+ ec2_vol:
+ encrypted: yes
+ volume_size: 4
+ volume_type: io1
+ iops: 101
+ name: "{{ resource_prefix }}"
+ tags:
+ ResourcePrefix: "{{ resource_prefix }}"
+ zone: "{{ availability_zone }}"
+ register: volume2_idem
+
+ - name: check task return attributes
+ assert:
+ that:
+ - not volume2_idem.changed
+
+ - name: create snapshot from volume
+ ec2_snapshot:
+ volume_id: "{{ volume1.volume_id }}"
+ description: "Resource Prefix - {{ resource_prefix }}"
+ snapshot_tags:
+ ResourcePrefix: "{{ resource_prefix }}"
+ register: vol1_snapshot
+
+ - name: check task return attributes
+ assert:
+ that:
+ - vol1_snapshot.changed
+
+ - name: create a volume from a snapshot
+ ec2_vol:
+ snapshot: "{{ vol1_snapshot.snapshot_id }}"
+ encrypted: yes
+ volume_type: gp2
+ volume_size: 1
+ zone: "{{ availability_zone }}"
+ tags:
+ ResourcePrefix: "{{ resource_prefix }}"
+ register: volume3
+
+ - name: check task return attributes
+ assert:
+ that:
+ - volume3.changed
+ - "volume3.volume.snapshot_id == vol1_snapshot.snapshot_id"
+
+ - name: create an ec2 instance
+ ec2_instance:
+ name: "{{ resource_prefix }}"
+ vpc_subnet_id: "{{ testing_subnet.subnet.id }}"
+ instance_type: t3.nano
+ image_id: "{{ ec2_ami_image }}"
+ tags:
+ ResourcePrefix: "{{ resource_prefix }}"
+ register: test_instance
+
+ - name: check task return attributes
+ assert:
+ that:
+ - test_instance.changed
+
+ - name: attach existing volume to an instance
+ ec2_vol:
+ id: "{{ volume1.volume_id }}"
+ instance: "{{ test_instance.instance_ids[0] }}"
+ device_name: /dev/sdg
+ delete_on_termination: no
+ register: vol_attach_result
+
+ - name: check task return attributes
+ assert:
+ that:
+ - vol_attach_result.changed
+ - "'device' in vol_attach_result and vol_attach_result.device == '/dev/sdg'"
+ - "'volume' in vol_attach_result"
+ - vol_attach_result.volume.attachment_set.status in ['attached', 'attaching']
+ - vol_attach_result.volume.attachment_set.instance_id == test_instance.instance_ids[0]
+ - vol_attach_result.volume.attachment_set.device == '/dev/sdg'
+
+# Failing
+# - "vol_attach_result.volume.attachment_set.deleteOnTermination"
+
+ - name: attach existing volume to an instance (idempotent)
+ ec2_vol:
+ id: "{{ volume1.volume_id }}"
+ instance: "{{ test_instance.instance_ids[0] }}"
+ device_name: /dev/sdg
+ delete_on_termination: no
+ register: vol_attach_result
+
+ - name: check task return attributes
+ assert:
+ that:
+ - "not vol_attach_result.changed"
+ - "vol_attach_result.volume.attachment_set.status == 'attached'"
+
+ - name: attach a new volume to an instance
+ ec2_vol:
+ instance: "{{ test_instance.instance_ids[0] }}"
+ device_name: /dev/sdh
+ volume_size: 1
+ volume_type: gp2
+ tags:
+ ResourcePrefix: "{{ resource_prefix }}"
+ register: new_vol_attach_result
+
+ - name: check task return attributes
+ assert:
+ that:
+ - new_vol_attach_result.changed
+ - "'device' in new_vol_attach_result and new_vol_attach_result.device == '/dev/sdh'"
+ - "'volume' in new_vol_attach_result"
+ - new_vol_attach_result.volume.attachment_set.status in ['attached', 'attaching']
+ - new_vol_attach_result.volume.attachment_set.instance_id == test_instance.instance_ids[0]
+ - new_vol_attach_result.volume.attachment_set.device == '/dev/sdh'
+
+ - name: attach a new volume to an instance (idempotent)
+ ec2_vol:
+ instance: "{{ test_instance.instance_ids[0] }}"
+ device_name: /dev/sdh
+ volume_size: 1
+ volume_type: gp2
+ tags:
+ ResourcePrefix: "{{ resource_prefix }}"
+ register: new_vol_attach_result_idem
+ ignore_errors: true
+
+ - name: check task return attributes
+ assert:
+ that:
+ - "not new_vol_attach_result_idem.changed"
+ - "'Volume mapping for /dev/sdh already exists' in new_vol_attach_result_idem.msg"
+
+ - name: create a volume from a snapshot and attach to the instance
+ ec2_vol:
+ instance: "{{ test_instance.instance_ids[0] }}"
+ device_name: /dev/sdi
+ snapshot: "{{ vol1_snapshot.snapshot_id }}"
+ tags:
+ ResourcePrefix: "{{ resource_prefix }}"
+ register: attach_new_vol_from_snapshot_result
+
+ - name: check task return attributes
+ assert:
+ that:
+ - attach_new_vol_from_snapshot_result.changed
+ - "'device' in attach_new_vol_from_snapshot_result and attach_new_vol_from_snapshot_result.device == '/dev/sdi'"
+ - "'volume' in attach_new_vol_from_snapshot_result"
+ - attach_new_vol_from_snapshot_result.volume.attachment_set.status in ['attached', 'attaching']
+ - attach_new_vol_from_snapshot_result.volume.attachment_set.instance_id == test_instance.instance_ids[0]
+
+ - name: list volumes attached to instance
+ ec2_vol:
+ instance: "{{ test_instance.instance_ids[0] }}"
+ state: list
+ register: inst_vols
+
+ - name: check task return attributes
+ assert:
+ that:
+ - not inst_vols.changed
+ - "'volumes' in inst_vols"
+ - inst_vols.volumes | length == 4
+
+ - name: get info on ebs volumes
+ ec2_vol_info:
+ register: ec2_vol_info
+
+ - name: check task return attributes
+ assert:
+ that:
+ - not ec2_vol_info.failed
+
+ - name: get info on ebs volumes
+ ec2_vol_info:
+ filters:
+ attachment.instance-id: "{{ test_instance.instance_ids[0] }}"
+ register: ec2_vol_info
+
+ - name: check task return attributes
+ assert:
+ that:
+ - ec2_vol_info.volumes | length == 4
+
+ - name: detach volume from the instance
+ ec2_vol:
+ id: "{{ new_vol_attach_result.volume_id }}"
+ instance: ""
+ register: new_vol_attach_result
+
+ - name: check task return attributes
+ assert:
+ that:
+ - new_vol_attach_result.changed
+ - new_vol_attach_result.volume.status == 'available'
+
+ - name: detach volume from the instance (idempotent)
+ ec2_vol:
+ id: "{{ new_vol_attach_result.volume_id }}"
+ instance: ""
+ register: new_vol_attach_result_idem
+
+ - name: check task return attributes
+ assert:
+ that:
+ - not new_vol_attach_result_idem.changed
+
+ - name: must not change because the modify_volume parameter is missing
+ ec2_vol:
+ id: "{{ new_vol_attach_result.volume_id }}"
+ zone: "{{ availability_zone }}"
+ volume_type: gp3
+ register: changed_gp3_volume
+
+ - name: volume must not be changed
+ assert:
+ that:
+ - not changed_gp3_volume.changed
+
+ - name: change existing volume to gp3
+ ec2_vol:
+ id: "{{ new_vol_attach_result.volume_id }}"
+ zone: "{{ availability_zone }}"
+ volume_type: gp3
+ modify_volume: yes
+ register: changed_gp3_volume
+
+ - name: check that volume_type has changed
+ assert:
+ that:
+ - changed_gp3_volume.volume_type == 'gp3'
+ - changed_gp3_volume.changed
+
+ - name: volume must be from type gp3 (idempotent)
+ ec2_vol:
+ id: "{{ new_vol_attach_result.volume_id }}"
+ zone: "{{ availability_zone }}"
+ volume_type: gp3
+ modify_volume: yes
+ register: changed_gp3_volume
+ retries: 3
+ delay: 3
+ until: not changed_gp3_volume.failed
+ # retry because the EBS modification is too slow
+
+ - name: must not change (idempotent)
+ assert:
+ that:
+ - changed_gp3_volume.volume_type == 'gp3'
+ - not changed_gp3_volume.changed
+
+ - name: re-read volume information to validate new volume_type
+ ec2_vol_info:
+ filters:
+ volume-id: "{{ changed_gp3_volume.volume_id }}"
+ register: verify_gp3_change
+
+ - name: volume type must be gp3
+ assert:
+ that:
+ - v.type == 'gp3'
+ vars:
+ v: "{{ verify_gp3_change.volumes[0] }}"
+
+ - name: delete volume
+ ec2_vol:
+ id: "{{ volume2.volume_id }}"
+ state: absent
+ register: delete_volume_result
+
+ - name: check task return attributes
+ assert:
+ that:
+ - "delete_volume_result.changed"
+
+ - name: delete volume (idempotent)
+ ec2_vol:
+ id: "{{ volume2.volume_id }}"
+ state: absent
+ register: delete_volume_result_idem
+
+ - name: check task return attributes
+ assert:
+ that:
+ - not delete_volume_result_idem.changed
+ - '"Volume {{ volume2.volume_id }} does not exist" in delete_volume_result_idem.msg'
+
+ # Originally from ec2_vol_info
+
+ - name: Create test volume with Delete on Termination
+ ec2_vol:
+ instance: "{{ test_instance.instance_ids[0] }}"
+ volume_size: 4
+ name: "{{ resource_prefix }}_delete_on_terminate"
+ device_name: /dev/sdj
+ iops: 100
+ tags:
+ Tag Name with Space-and-dash: Tag Value with Space-and-dash
+ delete_on_termination: yes
+ register: dot_volume
+
+ - name: Gather volume info without any filters
+ ec2_vol_info:
+ register: volume_info_wo_filters
+ check_mode: no
+
+ - name: Check if info is returned without filters
+ assert:
+ that:
+ - "volume_info_wo_filters.volumes is defined"
+
+ - name: Gather volume info
+ ec2_vol_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}_delete_on_terminate"
+ register: volume_info
+ check_mode: no
+
+ - name: Format check
+ assert:
+ that:
+ - "volume_info.volumes|length == 1"
+ - "v.attachment_set.attach_time is defined"
+ - "v.attachment_set.device is defined and v.attachment_set.device == dot_volume.device"
+ - "v.attachment_set.instance_id is defined and v.attachment_set.instance_id == test_instance.instance_ids[0]"
+ - "v.attachment_set.status is defined and v.attachment_set.status == 'attached'"
+ - "v.create_time is defined"
+ - "v.encrypted is defined and v.encrypted == false"
+ - "v.id is defined and v.id == dot_volume.volume_id"
+ - "v.iops is defined and v.iops == 100"
+ - "v.region is defined and v.region == aws_region"
+ - "v.size is defined and v.size == 4"
+ - "v.snapshot_id is defined and v.snapshot_id == ''"
+ - "v.status is defined and v.status == 'in-use'"
+ - "v.tags.Name is defined and v.tags.Name == resource_prefix + '_delete_on_terminate'"
+ - "v.tags['Tag Name with Space-and-dash'] == 'Tag Value with Space-and-dash'"
+ - "v.type is defined and v.type == 'io1'"
+ - "v.zone is defined and v.zone == test_instance.instances[0].placement.availability_zone"
+ vars:
+ v: "{{ volume_info.volumes[0] }}"
+
+ - name: New format check
+ assert:
+ that:
+ - "v.attachment_set.delete_on_termination is defined"
+ vars:
+ v: "{{ volume_info.volumes[0] }}"
+ when: ansible_version.full is version('2.7', '>=')
+
+ - name: test create a new gp3 volume
+ ec2_vol:
+ volume_size: 1
+ volume_type: gp3
+ zone: "{{ availability_zone }}"
+ throughput: 130
+ tags:
+ ResourcePrefix: "{{ resource_prefix }}"
+ register: gp3_volume
+
+ - name: check that volume_type is gp3
+ assert:
+ that:
+ - gp3_volume.volume_type == 'gp3'
+ - gp3_volume.changed
+ - gp3_volume.volume.throughput == 130
+
+ - name: increase throughput
+ ec2_vol:
+ volume_size: 1
+ volume_type: gp3
+ zone: "{{ availability_zone }}"
+ throughput: 131
+ tags:
+ ResourcePrefix: "{{ resource_prefix }}"
+ register: gp3_volume
+
+ - name: check that throughput has changed
+ assert:
+ that:
+ - gp3_volume.volume_type == 'gp3'
+ - gp3_volume.changed
+ - gp3_volume.volume.throughput == 131
+
+
+ # ==== Cleanup ============================================================
+
+ always:
+ - name: Describe the instance before we delete it
+ ec2_instance_info:
+ instance_ids:
+ - "{{ test_instance.instance_ids[0] }}"
+ ignore_errors: yes
+ register: pre_delete
+
+ - debug:
+ var: pre_delete
+
+ - name: delete test instance
+ ec2_instance:
+ instance_ids:
+ - "{{ test_instance.instance_ids[0] }}"
+ state: terminated
+ ignore_errors: yes
+
+ - name: delete volumes
+ ec2_vol:
+ id: "{{ item.volume_id }}"
+ state: absent
+ ignore_errors: yes
+ with_items:
+ - "{{ volume1 }}"
+ - "{{ volume2 }}"
+ - "{{ volume3 }}"
+ - "{{ new_vol_attach_result }}"
+ - "{{ attach_new_vol_from_snapshot_result }}"
+ - "{{ dot_volume }}"
+ - "{{ gp3_volume }}"
+
+ - name: delete snapshot
+ ec2_snapshot:
+ snapshot_id: "{{ vol1_snapshot.snapshot_id }}"
+ state: absent
+ ignore_errors: yes
+
+ - name: delete test subnet
+ ec2_vpc_subnet:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: "{{ subnet_cidr }}"
+ state: absent
+ ignore_errors: yes
+
+ - name: delete test VPC
+ ec2_vpc_net:
+ name: "{{ vpc_name }}"
+ cidr_block: "{{ vpc_cidr }}"
+ state: absent
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/aliases b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/aliases
new file mode 100644
index 00000000..a112c3d1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group1
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/defaults/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/defaults/main.yml
new file mode 100644
index 00000000..26403c17
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+# defaults file for the ec2_vpc_dhcp_option tests
+vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/24'
+# default option sets get an AWS domain_name, which is different in us-east-1
+aws_domain_name: "{{ (aws_region == 'us-east-1') | ternary('ec2.internal', aws_region + '.compute.internal') }}"
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/tasks/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/tasks/main.yml
new file mode 100644
index 00000000..4e4e6787
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/tasks/main.yml
@@ -0,0 +1,931 @@
+---
+# ============================================================
+# Known issues:
+#
+# `check_mode` throws a traceback when providing options
+# there is no way to associate the `default` option set in the module
+# ec2_vpc_dhcp_option_info needs to use camel_dict_to_snake_dict(..., ignore_list=['Tags']) (see the illustrative note below)
+# Purging tags does nothing, but reports changed
+# The module doesn't store/return tags in the new_options dictionary
+# Adding tags is silently ignored and no change is made
+# The option set is always reassociated (changed=True) when vpc_id is provided without options
+#
+# ============================================================
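+#
+# Illustrative note on the camel_dict_to_snake_dict issue above (a hedged sketch inferred from
+# the tag assertions later in this file, not authoritative module documentation): because the
+# info module snake_cases its whole response, user-supplied tag *keys* come back mangled too, e.g.
+#   tags passed to ec2_vpc_dhcp_option:  {CreatedBy: ansible-test, Collection: amazon.aws}
+#   tags returned by the info module:    {created_by: ansible-test, collection: amazon.aws}
+#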
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default('') }}"
+ region: "{{ aws_region }}"
+
+ collections:
+ - community.general
+
+ block:
+
+ # A DHCP option set can be attached to multiple VPCs; we don't want to use any
+ # that don't belong to this test run
+ - name: find all DHCP option sets that already exist before running tests
+ ec2_vpc_dhcp_option_info:
+ register: result
+
+ - set_fact:
+ preexisting_option_sets: "{{ result | community.general.json_query('dhcp_options[*].dhcp_options_id') | list }}"
+
+ - name: create a VPC with a default DHCP option set to test inheritance and delete_old
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}"
+ cidr_block: "{{ vpc_cidr }}"
+ state: present
+ register: vpc
+
+ - name: ensure a DHCP option set is attached to the VPC
+ assert:
+ that:
+ - vpc.vpc.dhcp_options_id is defined
+
+ - set_fact:
+ vpc_id: "{{ vpc.vpc.id }}"
+ default_options_id: "{{ vpc.vpc.dhcp_options_id }}"
+
+## ============================================
+ - name: Option Sets can be attached to multiple VPCs, create a new one if the test VPC is reusing a pre-existing one
+ when: vpc.vpc.dhcp_options_id in preexisting_option_sets
+ block:
+ - name: Create the new option set
+ ec2_vpc_dhcp_option:
+ state: present
+ domain_name: "{{ aws_domain_name }}"
+ dns_servers:
+ - AmazonProvidedDNS
+ delete_old: True
+ tags:
+ Name: "{{ resource_prefix }}"
+ register: new_dhcp_options
+
+ - assert:
+ that:
+ - new_dhcp_options.dhcp_options_id not in preexisting_option_sets
+
+ - name: Attach the new option set to the VPC
+ ec2_vpc_dhcp_option:
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ dhcp_options_id: "{{ new_dhcp_options.dhcp_options_id }}"
+
+ - name: find the VPC's associated option set
+ ec2_vpc_net_info:
+ vpc_ids: "{{ vpc_id }}"
+ register: vpc_info
+
+ - set_fact:
+ original_dhcp_options_id: "{{ vpc_info.vpcs[0].dhcp_options_id }}"
+
+ - name: get information about the DHCP option
+ ec2_vpc_dhcp_option_info:
+ dhcp_options_ids: ["{{ original_dhcp_options_id }}"]
+ register: original_dhcp_options_info
+
+ - set_fact:
+ original_config: "{{ original_dhcp_options_info.dhcp_options[0].dhcp_configurations | items2dict(key_name='key', value_name='values') }}"
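+ # A minimal sketch (an assumption drawn from the assertions just below, not module docs) of
+ # what items2dict does here: dhcp_configurations is a list of {key, values} entries, e.g.
+ #   [{key: domain-name, values: [{value: ec2.internal}]},
+ #    {key: domain-name-servers, values: [{value: AmazonProvidedDNS}]}]
+ # and items2dict(key_name='key', value_name='values') re-keys it into a dict:
+ #   {domain-name: [{value: ec2.internal}], domain-name-servers: [{value: AmazonProvidedDNS}]}
+ # which is why the checks below index original_config['domain-name'][0]['value'].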
+
+ - assert:
+ that:
+ - original_dhcp_options_info.dhcp_options | length == 1
+ - original_config.keys() | list | sort == ['domain-name', 'domain-name-servers']
+ - original_config['domain-name'][0]['value'] == "{{ aws_domain_name }}"
+ - original_config['domain-name-servers'][0]['value'] == 'AmazonProvidedDNS'
+ - original_dhcp_options_id not in preexisting_option_sets
+
+## ============================================
+
+ # FIXME: the VPC is always reassociated to the lowest alphanumeric dhcp_options_id when vpc_id is
+ # provided without options, so this task returns an unpredictable dhcp_options_id and we can't
+ # assert anything about the option's values
+ - name: test a DHCP option exists (check mode)
+ ec2_vpc_dhcp_option:
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ register: found_dhcp_options
+ check_mode: true
+
+ - assert:
+ that:
+ # FIXME: options have to be provided to match the option associated with the VPC
+ - not found_dhcp_options.changed
+ - not found_dhcp_options.new_options
+
+ # FIXME: always reassociated when vpc_id is provided without options, so here we provide the default options
+ - name: test a DHCP option exists
+ ec2_vpc_dhcp_option:
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ domain_name: "{{ aws_domain_name }}"
+ dns_servers:
+ - AmazonProvidedDNS
+ tags:
+ Name: "{{ resource_prefix }}"
+ register: found_dhcp_options
+
+ - assert:
+ that:
+ - found_dhcp_options is not changed
+ - found_dhcp_options.dhcp_options_id is defined
+ - not found_dhcp_options.changed or dhcp_options is defined and dhcp_options.dhcp_options_id == found_dhcp_options.dhcp_options_id
+
+ # Create a DHCP option set that inherits from the default set and does not delete the old set
+
+ # FIXME: check mode causes a traceback
+ #- name: create a DHCP option set that inherits from the default set (check mode)
+ # ec2_vpc_dhcp_option:
+ # state: present
+ # vpc_id: "{{ vpc_id }}"
+ # inherit_existing: True
+ # ntp_servers:
+ # - 10.0.0.2
+ # - 10.0.1.2
+ # netbios_name_servers:
+ # - 10.0.0.1
+ # - 10.0.1.1
+ # netbios_node_type: 2
+ # delete_old: False
+ # register: dhcp_options
+ # check_mode: true
+
+ #- assert:
+ # that:
+ # - dhcp_options.changed
+
+ - name: create a DHCP option set that inherits from the default set
+ ec2_vpc_dhcp_option:
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ inherit_existing: True
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ netbios_node_type: 2
+ delete_old: False
+ register: dhcp_options
+
+ - assert:
+ that:
+ - dhcp_options.changed
+ - dhcp_options.new_options
+ - dhcp_options.new_options.keys() | list | sort == ['domain-name', 'domain-name-servers', 'netbios-name-servers', 'netbios-node-type', 'ntp-servers']
+ - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2']
+ - dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1']
+ # FIXME: module/aws randomly returns as a string or list
+ - dhcp_options.new_options['netbios-node-type'] in ['2', ['2']]
+ # 'found' is presumably False here because the options differ and no id is provided
+ - dhcp_options.new_options['domain-name'] in ["{{ aws_domain_name }}", ["{{ aws_domain_name }}"]]
+ - dhcp_options.new_options['domain-name-servers'] in ['AmazonProvidedDNS', ['AmazonProvidedDNS']]
+ - original_dhcp_options_id != dhcp_options.dhcp_options_id
+
+ - set_fact:
+ new_dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}"
+
+ - name: get information about the new DHCP option
+ ec2_vpc_dhcp_option_info:
+ dhcp_options_ids: ["{{ new_dhcp_options_id }}"]
+ register: new_dhcp_options
+
+ - set_fact:
+ new_config: "{{ new_dhcp_options.dhcp_options[0].dhcp_configurations | items2dict(key_name='key', value_name='values') }}"
+
+ - assert:
+ that:
+ - new_config.keys() | list | sort == ['domain-name', 'domain-name-servers', 'netbios-name-servers', 'netbios-node-type', 'ntp-servers']
+ - new_config['domain-name'][0]['value'] == "{{ aws_domain_name }}"
+ - new_config['domain-name-servers'][0]['value'] == 'AmazonProvidedDNS'
+ - new_config['ntp-servers'] | community.general.json_query('[*].value') | list | sort == ['10.0.0.2', '10.0.1.2']
+ - new_config['netbios-name-servers'] | community.general.json_query('[*].value') | list | sort == ['10.0.0.1', '10.0.1.1']
+ - new_config['netbios-node-type'][0]['value'] in ['2', ['2']]
+
+ # FIXME: no way to associate `default` in the module
+ - name: Re-associate the default DHCP options set so that the new one can be deleted
+ ec2_vpc_dhcp_option:
+ vpc_id: '{{ vpc_id }}'
+ dhcp_options_id: '{{ default_options_id }}'
+ state: present
+ register: result
+
+ - assert:
+ that:
+ - result.changed
+ - result is success
+ - result.dhcp_options_id == '{{ default_options_id }}'
+
+ - name: delete it for the next test
+ ec2_vpc_dhcp_option:
+ dhcp_options_id: "{{ new_dhcp_options_id }}"
+ state: absent
+
+ # Create a DHCP option set that does not inherit from the old set and doesn't delete the old set
+
+ # FIXME: check mode causes a traceback
+ #- name: create a DHCP option set that does not inherit from the default set (check mode)
+ # ec2_vpc_dhcp_option:
+ # state: present
+ # vpc_id: "{{ vpc_id }}"
+ # inherit_existing: False
+ # ntp_servers:
+ # - 10.0.0.2
+ # - 10.0.1.2
+ # netbios_name_servers:
+ # - 10.0.0.1
+ # - 10.0.1.1
+ # netbios_node_type: 2
+ # delete_old: False
+ # register: dhcp_options
+ # check_mode: true
+
+ #- assert:
+ # that:
+ # - dhcp_options.changed
+
+ - name: create a DHCP option set that does not inherit from the default set
+ ec2_vpc_dhcp_option:
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ inherit_existing: False
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ netbios_node_type: 2
+ delete_old: False
+ register: dhcp_options
+
+ - assert:
+ that:
+ - dhcp_options.changed
+ - dhcp_options.new_options
+ # FIXME extra keys are returned unpredictably
+ - dhcp_options.new_options.keys() | list | sort is superset(['netbios-name-servers', 'netbios-node-type', 'ntp-servers'])
+ - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2']
+ - dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1']
+ # 'found' should be False here, so the value stays listified (when found, the module does a [0] assignment)
+# - dhcp_options.new_options['netbios-node-type'] == ['2']
+ - dhcp_options.new_options['netbios-node-type'] in ['2', ['2']]
+ - original_dhcp_options_id != dhcp_options.dhcp_options_id
+
+ - set_fact:
+ new_dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}"
+
+ - name: get information about the new DHCP option
+ ec2_vpc_dhcp_option_info:
+ dhcp_options_ids: ["{{ new_dhcp_options_id }}"]
+ register: new_dhcp_options
+
+ - set_fact:
+ new_config: "{{ new_dhcp_options.dhcp_options[0].dhcp_configurations | items2dict(key_name='key', value_name='values') }}"
+
+ - assert:
+ that:
+ - new_config.keys() | list | sort == ['netbios-name-servers', 'netbios-node-type', 'ntp-servers']
+ - new_config['ntp-servers'] | community.general.json_query('[*].value') | list | sort == ['10.0.0.2', '10.0.1.2']
+ - new_config['netbios-name-servers'] | community.general.json_query('[*].value') | list | sort == ['10.0.0.1', '10.0.1.1']
+ - new_config['netbios-node-type'][0]['value'] == '2'
+
+ - name: disassociate the new DHCP option set so it can be deleted
+ ec2_vpc_dhcp_option:
+ dhcp_options_id: "{{ original_dhcp_options_id }}"
+ vpc_id: "{{ vpc_id }}"
+ state: present
+
+ - name: delete it for the next test
+ ec2_vpc_dhcp_option:
+ dhcp_options_id: "{{ new_dhcp_options_id }}"
+ state: absent
+
+ # Create a DHCP option set that inherits from the default set, overwrites a default, and deletes the old set
+
+ # FIXME: check mode traceback
+ #- name: create a DHCP option set that inherits from the default set and deletes the original set (check mode)
+ # ec2_vpc_dhcp_option:
+ # state: present
+ # vpc_id: "{{ vpc_id }}"
+ # inherit_existing: True
+ # domain_name: us-west-2.compute.internal
+ # ntp_servers:
+ # - 10.0.0.2
+ # - 10.0.1.2
+ # netbios_name_servers:
+ # - 10.0.0.1
+ # - 10.0.1.1
+ # netbios_node_type: 2
+ # delete_old: True
+ # register: dhcp_options
+ # check_mode: true
+
+ #- assert:
+ # that:
+ # - dhcp_options.changed
+
+ # FIXME: doesn't delete the original set
+ - name: create a DHCP option set that inherits from the default set and deletes the original set
+ ec2_vpc_dhcp_option:
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ inherit_existing: True
+ domain_name: '{{ aws_domain_name }}'
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ netbios_node_type: 1
+ delete_old: True
+ register: dhcp_options
+
+ - assert:
+ that:
+ - dhcp_options.changed
+ - dhcp_options.new_options
+ - dhcp_options.new_options.keys() | list | sort is superset(['domain-name', 'netbios-name-servers', 'netbios-node-type', 'ntp-servers'])
+ - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2']
+ - dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1']
+ - dhcp_options.new_options['netbios-node-type'] in ['1', ['1']]
+ - dhcp_options.new_options['domain-name'] in ["{{ aws_domain_name }}", ["{{ aws_domain_name }}"]]
+ - original_dhcp_options_id != dhcp_options.dhcp_options_id
+
+ - set_fact:
+ new_dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}"
+
+ - name: get information about the new DHCP option
+ ec2_vpc_dhcp_option_info:
+ dhcp_options_ids: ["{{ new_dhcp_options_id }}"]
+ register: new_dhcp_options
+
+ - set_fact:
+ new_config: "{{ new_dhcp_options.dhcp_options[0].dhcp_configurations | items2dict(key_name='key', value_name='values') }}"
+
+ - assert:
+ that:
+ - new_config.keys() | list | sort is superset(['domain-name', 'netbios-name-servers', 'netbios-node-type', 'ntp-servers'])
+ - new_config['domain-name'][0]['value'] in ["{{ aws_domain_name }}", ["{{ aws_domain_name }}"]]
+ - new_config['ntp-servers'] | community.general.json_query('[*].value') | list | sort == ['10.0.0.2', '10.0.1.2']
+ - new_config['netbios-name-servers'] | community.general.json_query('[*].value') | list | sort == ['10.0.0.1', '10.0.1.1']
+ - new_config['netbios-node-type'][0]['value'] == '1'
+
+ - name: verify the original set was deleted
+ ec2_vpc_dhcp_option_info:
+ dhcp_options_ids: ["{{ original_dhcp_options_id }}"]
+ register: dhcp_options
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - dhcp_options.failed
+ - '"does not exist" in dhcp_options.error.message'
+ ignore_errors: yes # FIXME - remove line and the following retry tasks
+
+ - name: try to delete the original again
+ ec2_vpc_dhcp_option:
+ dhcp_options_id: "{{ original_dhcp_options_id }}"
+ state: absent
+
+ - name: verify the original set was deleted
+ ec2_vpc_dhcp_option_info:
+ dhcp_options_ids: ["{{ original_dhcp_options_id }}"]
+ register: dhcp_options
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - dhcp_options.failed
+ - '"does not exist" in dhcp_options.error.message'
+
+ - set_fact:
+ original_dhcp_options_id: "{{ new_dhcp_options_id }}"
+
+ # Create a DHCP option set that does not inherit from the old set and deletes the old set
+
+ # FIXME: check mode causes a traceback
+ #- name: create a DHCP option set that does not inherit from the default set and deletes the original set (check mode)
+ # ec2_vpc_dhcp_option:
+ # state: present
+ # vpc_id: "{{ vpc_id }}"
+ # inherit_existing: False
+ # domain_name: "{{ (aws_region == 'us-east-1') | ternary('ec2.internal', aws_region + '.compute.internal') }}"
+ # dns_servers:
+ # - AmazonProvidedDNS
+ # delete_old: True
+ # register: dhcp_options
+ # check_mode: true
+
+ #- assert:
+ # that:
+ # - dhcp_options.changed
+
+ - name: create a DHCP option set that does not inherit from the default set and deletes the original set
+ ec2_vpc_dhcp_option:
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ inherit_existing: False
+ domain_name: "{{ aws_domain_name }}"
+ dns_servers:
+ - AmazonProvidedDNS
+ delete_old: True
+ register: dhcp_options
+
+ - assert:
+ that:
+ - dhcp_options.new_options
+ - dhcp_options.new_options.keys() | list | sort is superset(['domain-name', 'domain-name-servers'])
+ - dhcp_options.new_options['domain-name'] in ["{{ aws_domain_name }}", ["{{ aws_domain_name }}"]]
+ - dhcp_options.new_options['domain-name-servers'] in ['AmazonProvidedDNS', ['AmazonProvidedDNS']]
+ - original_dhcp_options_id != dhcp_options.dhcp_options_id
+
+ - set_fact:
+ new_dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}"
+
+ - name: get information about the new DHCP option
+ ec2_vpc_dhcp_option_info:
+ dhcp_options_ids: ["{{ new_dhcp_options_id }}"]
+ register: new_dhcp_options
+
+ - set_fact:
+ new_config: "{{ new_dhcp_options.dhcp_options[0].dhcp_configurations | items2dict(key_name='key', value_name='values') }}"
+
+ - assert:
+ that:
+ - new_config.keys() | list | sort == ['domain-name', 'domain-name-servers']
+ - new_config['domain-name'][0]['value'] == "{{ aws_domain_name }}"
+ - new_config['domain-name-servers'][0]['value'] == 'AmazonProvidedDNS'
+
+ - name: verify the original set was deleted
+ ec2_vpc_dhcp_option_info:
+ dhcp_options_ids: ["{{ original_dhcp_options_id }}"]
+ register: dhcp_options
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - dhcp_options.failed
+ - '"does not exist" in dhcp_options.error.message'
+
+ - set_fact:
+ original_dhcp_options_id: "{{ new_dhcp_options_id }}"
+
+ # Create a DHCP option set with tags
+
+ # FIXME: check mode causes a traceback
+ #- name: create a DHCP option set with tags (check mode)
+ # ec2_vpc_dhcp_option:
+ # state: present
+ # vpc_id: "{{ vpc_id }}"
+ # inherit_existing: False
+ # delete_old: True
+ # ntp_servers:
+ # - 10.0.0.2
+ # - 10.0.1.2
+ # netbios_name_servers:
+ # - 10.0.0.1
+ # - 10.0.1.1
+ # tags:
+ # CreatedBy: ansible-test
+ # Collection: amazon.aws
+ # register: dhcp_options
+ # check_mode: true
+
+ #- assert:
+ # that:
+ # - dhcp_options.changed
+
+ - name: create a DHCP option set with tags
+ ec2_vpc_dhcp_option:
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ inherit_existing: False
+ delete_old: True
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ tags:
+ CreatedBy: ansible-test
+ Collection: amazon.aws
+ register: dhcp_options
+
+ - assert:
+ that:
+ - dhcp_options.changed
+ - dhcp_options.new_options.keys() | list | sort is superset(['ntp-servers', 'netbios-name-servers'])
+ - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2']
+ - dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1']
+ - original_dhcp_options_id != dhcp_options.dhcp_options_id
+ # FIXME: tags are not returned by the module
+# - dhcp_options.tags.keys() | length == 2
+# - dhcp_options.tags['CreatedBy'] is 'ansible-test'
+# - dhcp_options.tags['Collection'] is 'amazon.aws'
+
+ - set_fact:
+ new_dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}"
+
+ # FIXME: ec2_vpc_dhcp_option_info needs to use camel_dict_to_snake_dict(..., ignore_list=['Tags'])
+ - name: check if the expected tags are associated
+ ec2_vpc_dhcp_option_info:
+ dhcp_options_ids: ["{{ new_dhcp_options_id }}"]
+ register: dhcp_options_info
+
+ - assert:
+ that:
+ - dhcp_options_info.dhcp_options[0].tags is defined
+ - dhcp_options_info.dhcp_options[0].tags | length == 2
+ - dhcp_options_info.dhcp_options[0].tags['collection'] == 'amazon.aws'
+ - dhcp_options_info.dhcp_options[0].tags['created_by'] == 'ansible-test'
+
+ - name: test no changes with the same tags (check mode)
+ ec2_vpc_dhcp_option:
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ inherit_existing: False
+ delete_old: True
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ tags:
+ CreatedBy: ansible-test
+ Collection: amazon.aws
+ register: dhcp_options
+ check_mode: true
+
+ - assert:
+ that:
+ - not dhcp_options.changed
+ - dhcp_options.new_options.keys() | list | sort == ['netbios-name-servers', 'ntp-servers']
+ - dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1']
+ - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2']
+
+ - name: test no changes with the same tags
+ ec2_vpc_dhcp_option:
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ inherit_existing: False
+ delete_old: True
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ tags:
+ CreatedBy: ansible-test
+ Collection: amazon.aws
+ register: dhcp_options
+
+ - name: check if the expected tags are associated
+ ec2_vpc_dhcp_option_info:
+ dhcp_options_ids: ["{{ dhcp_options.dhcp_options_id }}"]
+ register: dhcp_options_info
+
+ - assert:
+ that:
+ - not dhcp_options.changed
+ - dhcp_options.new_options.keys() | list | sort == ['netbios-name-servers', 'ntp-servers']
+ - dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1']
+ - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2']
+ - new_dhcp_options_id == dhcp_options.dhcp_options_id
+ - dhcp_options_info.dhcp_options[0].tags is defined
+ - dhcp_options_info.dhcp_options[0].tags.keys() | length == 2
+ - dhcp_options_info.dhcp_options[0].tags['collection'] == 'amazon.aws'
+ - dhcp_options_info.dhcp_options[0].tags['created_by'] == 'ansible-test'
+
+ - name: test no changes without specifying tags (check mode)
+ ec2_vpc_dhcp_option:
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ inherit_existing: False
+ delete_old: True
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ register: dhcp_options
+ check_mode: true
+
+ - assert:
+ that:
+ - not dhcp_options.changed
+ - dhcp_options.new_options.keys() | list | sort == ['netbios-name-servers', 'ntp-servers']
+ - dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1']
+ - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2']
+
+ - name: test no changes without specifying tags
+ ec2_vpc_dhcp_option:
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ inherit_existing: False
+ delete_old: True
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ register: dhcp_options
+
+ - name: check if the expected tags are associated
+ ec2_vpc_dhcp_option_info:
+ dhcp_options_ids: ["{{ dhcp_options.dhcp_options_id }}"]
+ register: dhcp_options_info
+
+ - assert:
+ that:
+ - not dhcp_options.changed
+ - dhcp_options.new_options.keys() | list | sort == ['netbios-name-servers', 'ntp-servers']
+ - dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1']
+ - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2']
+ - new_dhcp_options_id == dhcp_options.dhcp_options_id
+ - dhcp_options_info.dhcp_options[0].tags is defined
+ - dhcp_options_info.dhcp_options[0].tags.keys() | length == 2
+ - dhcp_options_info.dhcp_options[0].tags['collection'] == 'amazon.aws'
+ - dhcp_options_info.dhcp_options[0].tags['created_by'] == 'ansible-test'
+
+ # FIXME: the additional tag is silently ignored and no change is made
+ - name: add a tag without using dhcp_options_id
+ ec2_vpc_dhcp_option:
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ inherit_existing: False
+ delete_old: True
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ tags:
+ CreatedBy: ansible-test
+ Collection: amazon.aws
+ another: tag
+ register: dhcp_options
+
+ - name: check if the expected tags are associated
+ ec2_vpc_dhcp_option_info:
+ dhcp_options_ids: ["{{ dhcp_options.dhcp_options_id }}"]
+ register: dhcp_options_info
+
+ - assert:
+ that:
+ #- dhcp_options.changed
+ - dhcp_options.new_options.keys() | list | sort == ['netbios-name-servers', 'ntp-servers']
+ - dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1']
+ - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2']
+ - new_dhcp_options_id == dhcp_options.dhcp_options_id
+ - dhcp_options_info.dhcp_options[0].tags is defined
+ - dhcp_options_info.dhcp_options[0].tags.keys() | length == 2
+ #- dhcp_options_info.dhcp_options[0].tags.keys() | length == 3
+ - dhcp_options_info.dhcp_options[0].tags['collection'] == 'amazon.aws'
+ - dhcp_options_info.dhcp_options[0].tags['created_by'] == 'ansible-test'
+
+ # FIXME: another check_mode traceback
+ #- name: add and remove tags (check mode)
+ # ec2_vpc_dhcp_option:
+ # dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}"
+ # state: present
+ # vpc_id: "{{ vpc_id }}"
+ # inherit_existing: False
+ # delete_old: True
+ # ntp_servers:
+ # - 10.0.0.2
+ # - 10.0.1.2
+ # netbios_name_servers:
+ # - 10.0.0.1
+ # - 10.0.1.1
+ # tags:
+ # AnsibleTest: integration
+ # Collection: amazon.aws
+ # register: dhcp_options
+ # check_mode: true
+
+ #- assert:
+ # that:
+ # - dhcp_options.changed
+
+ - name: add and remove tags
+ ec2_vpc_dhcp_option:
+ dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}"
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ inherit_existing: False
+ delete_old: True
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ tags:
+ AnsibleTest: integration
+ Collection: amazon.aws
+ register: dhcp_options
+
+ - name: check if the expected tags are associated
+ ec2_vpc_dhcp_option_info:
+ dhcp_options_ids: ["{{ dhcp_options.dhcp_options_id }}"]
+ register: dhcp_options_info
+
+ - assert:
+ that:
+ - dhcp_options.changed
+ - not dhcp_options.new_options
+ - new_dhcp_options_id == dhcp_options.dhcp_options_id
+ - dhcp_options_info.dhcp_options[0].tags is defined
+ - dhcp_options_info.dhcp_options[0].tags.keys() | length == 2
+ - dhcp_options_info.dhcp_options[0].tags['collection'] == 'amazon.aws'
+ - dhcp_options_info.dhcp_options[0].tags['ansible_test'] == 'integration'
+
+ - name: add tags with different cases
+ ec2_vpc_dhcp_option:
+ dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}"
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ inherit_existing: False
+ delete_old: True
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ tags:
+ "lowercase spaced": 'hello cruel world'
+ "Title Case": 'Hello Cruel World'
+ CamelCase: 'SimpleCamelCase'
+ snake_case: 'simple_snake_case'
+ register: dhcp_options
+
+ - name: check if the expected tags are associated
+ ec2_vpc_dhcp_option_info:
+ dhcp_options_ids: ["{{ dhcp_options.dhcp_options_id }}"]
+ register: dhcp_options_info
+
+ - assert:
+ that:
+ - dhcp_options.changed
+ - not dhcp_options.new_options
+ - new_dhcp_options_id == dhcp_options.dhcp_options_id
+ - dhcp_options_info.dhcp_options[0].tags is defined
+ - dhcp_options_info.dhcp_options[0].tags.keys() | length == 4
+ - dhcp_options_info.dhcp_options[0].tags['lowercase spaced'] == 'hello cruel world'
+# FIXME: these tags are returned incorrectly now
+# - dhcp_options_info.dhcp_options[0].tags['Title Case'] == 'Hello Cruel World'
+# - dhcp_options_info.dhcp_options[0].tags['CamelCase'] == 'SimpleCamelCase'
+ - dhcp_options_info.dhcp_options[0].tags['snake_case'] == 'simple_snake_case'
+
+ # FIXME does nothing, but reports changed
+ - name: test purging all tags
+ ec2_vpc_dhcp_option:
+ dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}"
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ inherit_existing: False
+ delete_old: True
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ tags: {}
+ register: dhcp_options
+
+ - name: check if the expected tags are associated
+ ec2_vpc_dhcp_option_info:
+ dhcp_options_ids: ["{{ dhcp_options.dhcp_options_id }}"]
+ register: dhcp_options_info
+
+ - assert:
+ that:
+ - dhcp_options.changed
+ - new_dhcp_options_id == dhcp_options.dhcp_options_id
+ #- not dhcp_options_info.dhcp_options[0].tags
+
+ - name: test no changes removing all tags
+ ec2_vpc_dhcp_option:
+ dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}"
+ state: present
+ vpc_id: "{{ vpc_id }}"
+ inherit_existing: False
+ delete_old: True
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ tags: {}
+ register: dhcp_options
+
+ - name: check if the expected tags are associated
+ ec2_vpc_dhcp_option_info:
+ dhcp_options_ids: ["{{ dhcp_options.dhcp_options_id }}"]
+ register: dhcp_options_info
+
+ - assert:
+ that:
+ #- not dhcp_options.changed
+ - new_dhcp_options_id == dhcp_options.dhcp_options_id
+ #- not dhcp_options_info.dhcp_options[0].tags
+
+ # FIXME: check mode returns changed as False
+ - name: remove the DHCP option set (check mode)
+ ec2_vpc_dhcp_option:
+ state: absent
+ vpc_id: "{{ vpc_id }}"
+ dhcp_options_id: "{{ new_dhcp_options_id }}"
+ register: dhcp_options
+ check_mode: true
+
+ #- assert:
+ # that:
+ # - dhcp_options.changed
+
+ # FIXME: does nothing - the module should associate "default" with the VPC provided
+ - name: remove the DHCP option set
+ ec2_vpc_dhcp_option:
+ state: absent
+ vpc_id: "{{ vpc_id }}"
+ dhcp_options_id: "{{ new_dhcp_options_id }}"
+ register: dhcp_options
+
+ #- assert:
+ # that:
+ # - dhcp_options.changed
+
+ - name: remove the DHCP option set again (check mode)
+ ec2_vpc_dhcp_option:
+ state: absent
+ vpc_id: "{{ vpc_id }}"
+ dhcp_options_id: "{{ new_dhcp_options_id }}"
+ register: dhcp_options
+ check_mode: true
+
+ - assert:
+ that:
+ - not dhcp_options.changed
+
+ - name: remove the DHCP option set again
+ ec2_vpc_dhcp_option:
+ state: absent
+ vpc_id: "{{ vpc_id }}"
+ dhcp_options_id: "{{ new_dhcp_options_id }}"
+ register: dhcp_options
+
+ - assert:
+ that:
+ - not dhcp_options.changed
+
+ always:
+
+ - name: Re-associate the default DHCP options set so that the new one(s) can be deleted
+ ec2_vpc_dhcp_option:
+ vpc_id: '{{ vpc_id }}'
+ dhcp_options_id: '{{ default_options_id }}'
+ state: present
+ register: result
+ when: vpc_id is defined
+ ignore_errors: yes
+
+ - name: Query all option sets created by the test
+ ec2_vpc_dhcp_option_info:
+ filters:
+ "tag:Name": "*'{{ resource_prefix }}*"
+ register: option_sets
+
+ - name: clean up DHCP option sets
+ ec2_vpc_dhcp_option:
+ state: absent
+ dhcp_options_id: "{{ original_dhcp_options_id }}"
+ vpc_id: "{{ vpc_id }}"
+ when: original_dhcp_options_id is defined
+ ignore_errors: yes
+
+ - name: clean up DHCP option sets
+ ec2_vpc_dhcp_option:
+ state: absent
+ dhcp_options_id: "{{ new_dhcp_options_id }}"
+ vpc_id: "{{ vpc_id }}"
+ when: new_dhcp_options_id is defined
+ ignore_errors: yes
+
+ - name: Delete the VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}"
+ cidr_block: "{{ vpc_cidr }}"
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/aliases b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/aliases
new file mode 100644
index 00000000..fb765ef7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/aliases
@@ -0,0 +1,3 @@
+ec2_vpc_net_info
+cloud/aws
+shippable/aws/group1
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/defaults/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/defaults/main.yml
new file mode 100644
index 00000000..3289b278
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+# defaults file for ec2_vpc_net
+vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/24'
+vpc_cidr_a: '10.{{ 256 | random(seed=resource_prefix) }}.1.0/24'
+vpc_cidr_b: '10.{{ 256 | random(seed=resource_prefix) }}.2.0/24'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/meta/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/meta/main.yml
new file mode 100644
index 00000000..1f64f116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/tasks/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/tasks/main.yml
new file mode 100644
index 00000000..728667ac
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/tasks/main.yml
@@ -0,0 +1,1318 @@
+---
+- name: Setup AWS Environment
+ module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+
+ block:
+
+ # ============================================================
+
+ - name: Get the current caller identity facts
+ aws_caller_info:
+ register: caller_facts
+
+ - name: run the module without parameters
+ ec2_vpc_net:
+ ignore_errors: yes
+ register: result
+
+ - name: assert failure
+ assert:
+ that:
+ - result is failed
+ - result.msg.startswith("missing required arguments")
+
+ # ============================================================
+
+ - name: attempt to create a VPC without providing connection information
+ module_defaults: { group/aws: {} }
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ resource_prefix }}"
+ region: us-east-1
+ ignore_errors: yes
+ register: result
+
+ - name: assert connection failure
+ assert:
+ that:
+ - result is failed
+ - '"Unable to locate credentials" in result.msg'
+
+ # ============================================================
+
+ - name: Fetch existing VPC info
+ ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}"
+ register: vpc_info
+ - name: Check no-one is using the Prefix before we start
+ assert:
+ that:
+ - vpc_info.vpcs | length == 0
+
+ - name: test check mode creating a VPC
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ resource_prefix }}"
+ check_mode: true
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}"
+ register: vpc_info
+
+ - name: check for a change
+ assert:
+ that:
+ - result is changed
+ - vpc_info.vpcs | length == 0
+
+ # ============================================================
+
+ - name: create a VPC
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ resource_prefix }}"
+ ipv6_cidr: True
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}"
+ register: vpc_info
+
+ - name: assert the VPC was created successfully
+ assert:
+ that:
+ - result is successful
+ - result is changed
+ - vpc_info.vpcs | length == 1
+
+ - name: assert the output
+ assert:
+ that:
+ - '"cidr_block" in result.vpc'
+ - result.vpc.cidr_block == vpc_cidr
+ - result.vpc.cidr_block_association_set | length == 1
+ - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[0].cidr_block == vpc_cidr
+ - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - '"classic_link_enabled" in result.vpc'
+ - result.vpc.dhcp_options_id.startswith("dopt-")
+ - result.vpc.id.startswith("vpc-")
+ - '"instance_tenancy" in result.vpc'
+ - result.vpc.ipv6_cidr_block_association_set | length == 1
+ - result.vpc.ipv6_cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block | ansible.netcommon.ipv6
+ - result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block_state.state in ["associated", "associating"]
+ - '"is_default" in result.vpc'
+ - '"state" in result.vpc'
+ - result.vpc.tags.keys() | length == 1
+ - result.vpc.tags.Name == resource_prefix
+
+ - name: set the first VPC's details as facts for comparison and cleanup
+ set_fact:
+ vpc_1_result: "{{ result }}"
+ vpc_1: "{{ result.vpc.id }}"
+ vpc_1_ipv6_cidr: "{{ result.vpc.ipv6_cidr_block_association_set.0.ipv6_cidr_block }}"
+ default_dhcp_options_id: "{{ result.vpc.dhcp_options_id }}"
+
+ - name: create a VPC (idempotent)
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ resource_prefix }}"
+ ipv6_cidr: True
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}"
+ register: vpc_info
+
+ - name: assert nothing changed
+ assert:
+ that:
+ - result is successful
+ - result is not changed
+ - vpc_info.vpcs | length == 1
+ - '"cidr_block" in result.vpc'
+ - result.vpc.cidr_block == vpc_cidr
+ - result.vpc.cidr_block_association_set | length == 1
+ - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[0].cidr_block == vpc_cidr
+ - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - '"classic_link_enabled" in result.vpc'
+ - result.vpc.dhcp_options_id.startswith("dopt-")
+ - result.vpc.id.startswith("vpc-")
+ - '"instance_tenancy" in result.vpc'
+ - result.vpc.ipv6_cidr_block_association_set | length == 1
+ - result.vpc.ipv6_cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block | ansible.netcommon.ipv6
+ - result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block_state.state in ["associated", "associating"]
+ - '"is_default" in result.vpc'
+ - '"state" in result.vpc'
+ - result.vpc.tags.keys() | length == 1
+ - result.vpc.tags.Name == resource_prefix
+ - result.vpc.id == vpc_1
+
+ # ============================================================
+
+ - name: VPC info (no filters)
+ ec2_vpc_net_info:
+ register: vpc_info
+ retries: 3
+ delay: 3
+ until: '"InvalidVpcID.NotFound" not in ( vpc_info.msg | default("") )'
+
+ - name: Test that our new VPC shows up in the results
+ assert:
+ that:
+ - vpc_1 in ( vpc_info | community.general.json_query("vpcs[].vpc_id") | list )
+
+ - name: VPC info (Simple tag filter)
+ ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}"
+ register: vpc_info
+
+ - name: Test vpc_info results
+ assert:
+ that:
+ - vpc_info.vpcs[0].cidr_block == vpc_cidr
+ - vpc_info.vpcs[0].cidr_block_association_set | length == 1
+ - vpc_info.vpcs[0].cidr_block_association_set[0].association_id == result.vpc.cidr_block_association_set[0].association_id
+ - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block == result.vpc.cidr_block_association_set[0].cidr_block
+ - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - '"classic_link_dns_supported" in vpc_info.vpcs[0]'
+ - '"classic_link_enabled" in vpc_info.vpcs[0]'
+ - vpc_info.vpcs[0].dhcp_options_id == result.vpc.dhcp_options_id
+ - ( vpc_info.vpcs[0].enable_dns_hostnames | bool ) == True
+ - ( vpc_info.vpcs[0].enable_dns_support | bool ) == True
+ - vpc_info.vpcs[0].id == result.vpc.id
+ - '"instance_tenancy" in vpc_info.vpcs[0]'
+ - vpc_info.vpcs[0].ipv6_cidr_block_association_set | length == 1
+ - vpc_info.vpcs[0].ipv6_cidr_block_association_set[0].association_id == result.vpc.ipv6_cidr_block_association_set[0].association_id
+ - vpc_info.vpcs[0].ipv6_cidr_block_association_set[0].ipv6_cidr_block == result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block
+ - vpc_info.vpcs[0].ipv6_cidr_block_association_set[0].ipv6_cidr_block_state.state in ["associated", "associating"]
+ - '"is_default" in vpc_info.vpcs[0]'
+ - vpc_info.vpcs[0].owner_id == caller_facts.account
+ - '"state" in vpc_info.vpcs[0]'
+ - vpc_info.vpcs[0].vpc_id == result.vpc.id
+
+ # ============================================================
+
+ - name: Try to add IPv6 CIDR when one already exists
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ resource_prefix }}"
+ ipv6_cidr: True
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}"
+ register: vpc_info
+
+ - name: Assert no changes made
+ assert:
+ that:
+ - '"Only one IPv6 CIDR is permitted per VPC, {{ result.vpc.id }} already has CIDR {{ vpc_1_ipv6_cidr }}" in result.warnings'
+ - result is not changed
+ - vpc_info.vpcs | length == 1
+
+ # ============================================================
+
+ - name: test check mode creating an identical VPC (multi_ok)
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ resource_prefix }}"
+ ipv6_cidr: True
+ multi_ok: yes
+ check_mode: true
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}"
+ register: vpc_info
+
+ - name: assert a change would be made
+ assert:
+ that:
+ - result is changed
+ - name: assert a change was not actually made
+ assert:
+ that:
+ - vpc_info.vpcs | length == 1
+
+ # ============================================================
+
+ - name: create a VPC with a dedicated tenancy using the same CIDR and name
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ resource_prefix }}"
+ ipv6_cidr: True
+ tenancy: dedicated
+ multi_ok: yes
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}"
+ register: vpc_info
+
+ - name: assert a new VPC was created
+ assert:
+ that:
+ - result is successful
+ - result is changed
+ - result.vpc.instance_tenancy == "dedicated"
+ - result.vpc.id != vpc_1
+ - vpc_info.vpcs | length == 2
+
+ - name: set the second VPC's details as facts for comparison and cleanup
+ set_fact:
+ vpc_2_result: "{{ result }}"
+ vpc_2: "{{ result.vpc.id }}"
+
+ # ============================================================
+
+ - name: VPC info (Simple VPC-ID filter)
+ ec2_vpc_net_info:
+ filters:
+ "vpc-id": "{{ vpc_2 }}"
+ register: vpc_info
+
+ - name: Test vpc_info results
+ assert:
+ that:
+ - vpc_info.vpcs[0].cidr_block == vpc_cidr
+ - vpc_info.vpcs[0].cidr_block_association_set | length == 1
+ - vpc_info.vpcs[0].cidr_block_association_set[0].association_id == result.vpc.cidr_block_association_set[0].association_id
+ - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block == result.vpc.cidr_block_association_set[0].cidr_block
+ - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - '"classic_link_dns_supported" in vpc_info.vpcs[0]'
+ - '"classic_link_enabled" in vpc_info.vpcs[0]'
+ - vpc_info.vpcs[0].dhcp_options_id == result.vpc.dhcp_options_id
+ - ( vpc_info.vpcs[0].enable_dns_hostnames | bool ) == True
+ - ( vpc_info.vpcs[0].enable_dns_support | bool ) == True
+ - vpc_info.vpcs[0].id == vpc_2
+ - '"instance_tenancy" in vpc_info.vpcs[0]'
+ - vpc_info.vpcs[0].ipv6_cidr_block_association_set | length == 1
+ - vpc_info.vpcs[0].ipv6_cidr_block_association_set[0].association_id == result.vpc.ipv6_cidr_block_association_set[0].association_id
+ - vpc_info.vpcs[0].ipv6_cidr_block_association_set[0].ipv6_cidr_block == result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block
+ - vpc_info.vpcs[0].ipv6_cidr_block_association_set[0].ipv6_cidr_block_state.state in ["associated", "associating"]
+ - '"is_default" in vpc_info.vpcs[0]'
+ - vpc_info.vpcs[0].owner_id == caller_facts.account
+ - '"state" in vpc_info.vpcs[0]'
+ - vpc_info.vpcs[0].vpc_id == vpc_2
+
+ # ============================================================
+
+ # This will only fail if there are already *2* VPCs; otherwise ec2_vpc_net
+ # assumes you want to update your existing VPC...
+ - name: attempt to create another VPC with the same CIDR and name without multi_ok
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ resource_prefix }}"
+ ipv6_cidr: True
+ tenancy: dedicated
+ multi_ok: no
+ register: new_result
+ ignore_errors: yes
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}"
+ register: vpc_info
+
+ - name: assert failure
+ assert:
+ that:
+ - new_result is failed
+ - '"If you would like to create the VPC anyway please pass True to the multi_ok param" in new_result.msg'
+ - vpc_info.vpcs | length == 2
+
+ # ============================================================
+
+ # FIXME: right now if there are multiple matching VPCs they cannot be removed,
+ # as there is no vpc_id option for idempotence. A workaround is to retag the VPC.
+ - name: remove Name tag on new VPC
+ ec2_tag:
+ state: absent
+ resource: "{{ vpc_2 }}"
+ tags:
+ Name: "{{ resource_prefix }}"
+
+ - name: add a unique name tag
+ ec2_tag:
+ state: present
+ resource: "{{ vpc_2 }}"
+ tags:
+ Name: "{{ resource_prefix }}-changed"
+
+ - name: delete one of the VPCs
+ ec2_vpc_net:
+ state: absent
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ resource_prefix }}-changed"
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}"
+ register: vpc_info
+
+ - name: assert success
+ assert:
+ that:
+ - result is changed
+ - not result.vpc
+ - vpc_info.vpcs | length == 1
+
+ # ============================================================
+
+ - name: attempt to delete a VPC that doesn't exist
+ ec2_vpc_net:
+ state: absent
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ resource_prefix }}-changed"
+ register: result
+
+ - name: assert no changes were made
+ assert:
+ that:
+ - result is not changed
+ - not result.vpc
+
+ # ============================================================
+
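+ # Create a non-default DHCP options set so the tests below can switch the VPC's
+ # dhcp_opts_id away from (and, in cleanup, back to) the account default set.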
+ - name: create a DHCP option set to use in the following tests
+ ec2_vpc_dhcp_option:
+ dns_servers:
+ - 4.4.4.4
+ - 8.8.8.8
+ tags:
+ Name: "{{ resource_prefix }}"
+ register: new_dhcp
+ - name: assert the DHCP option set was successfully created
+ assert:
+ that:
+ - new_dhcp is changed
+
+ - name: modify the DHCP options set for a VPC (check_mode)
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ resource_prefix }}"
+ dhcp_opts_id: "{{ new_dhcp.dhcp_options_id }}"
+ register: result
+ check_mode: True
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}"
+ register: vpc_info
+
+ - name: assert check mode reported a change but the DHCP options were not updated
+ assert:
+ that:
+ - result is changed
+ - result.vpc.id == vpc_1
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].dhcp_options_id == default_dhcp_options_id
+
+ - name: modify the DHCP options set for a VPC
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ resource_prefix }}"
+ dhcp_opts_id: "{{ new_dhcp.dhcp_options_id }}"
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}"
+ register: vpc_info
+
+ - name: assert the DHCP option set changed
+ assert:
+ that:
+ - result is changed
+ - result.vpc.id == vpc_1
+ - default_dhcp_options_id != result.vpc.dhcp_options_id
+ - result.vpc.dhcp_options_id == new_dhcp.dhcp_options_id
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].dhcp_options_id == new_dhcp.dhcp_options_id
+
+ - name: modify the DHCP options set for a VPC (retry)
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ resource_prefix }}"
+ dhcp_opts_id: "{{ new_dhcp.dhcp_options_id }}"
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}"
+ register: vpc_info
+
+ - name: assert the DHCP option set was not changed
+ assert:
+ that:
+ - result is not changed
+ - result.vpc.id == vpc_1
+ - result.vpc.dhcp_options_id == new_dhcp.dhcp_options_id
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].dhcp_options_id == new_dhcp.dhcp_options_id
+
+ # ============================================================
+
+ # XXX #62677
+ #- name: disable dns_hostnames (check mode)
+ # ec2_vpc_net:
+ # state: present
+ # cidr_block: "{{ vpc_cidr }}"
+ # name: "{{ resource_prefix }}"
+ # dns_hostnames: False
+ # register: result
+ # check_mode: True
+ #- ec2_vpc_net_info:
+ # filters:
+ # "tag:Name": "{{ resource_prefix }}"
+ # register: vpc_info
+
+ #- name: assert changed was set but not made
+ # assert:
+ # that:
+ # - result is successful
+ # - result is changed
+ # - vpc_info.vpcs | length == 1
+ # - vpc_info.vpcs[0].enable_dns_hostnames | bool == True
+ # - vpc_info.vpcs[0].enable_dns_support | bool == True
+
+ - name: disable dns_hostnames
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ resource_prefix }}"
+ dns_hostnames: False
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}"
+ register: vpc_info
+
+ - name: assert a change was made
+ assert:
+ that:
+ - result is successful
+ - result is changed
+ - result.vpc.id == vpc_1
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].enable_dns_hostnames | bool == False
+ - vpc_info.vpcs[0].enable_dns_support | bool == True
+
+ - name: disable dns_hostnames (retry)
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ resource_prefix }}"
+ dns_hostnames: False
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}"
+ register: vpc_info
+
+ - name: assert a change was not made
+ assert:
+ that:
+ - result is successful
+ - result is not changed
+ - result.vpc.id == vpc_1
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].enable_dns_hostnames | bool == False
+ - vpc_info.vpcs[0].enable_dns_support | bool == True
+
+ # XXX #62677
+ #- name: disable dns_support (check mode)
+ # ec2_vpc_net:
+ # state: present
+ # cidr_block: "{{ vpc_cidr }}"
+ # name: "{{ resource_prefix }}"
+ # dns_hostnames: False
+ # dns_support: False
+ # check_mode: True
+ # register: result
+ #- ec2_vpc_net_info:
+ # filters:
+ # "tag:Name": "{{ resource_prefix }}"
+ # register: vpc_info
+
+ #- name: assert changed was set but not made
+ # assert:
+ # that:
+ # - result is successful
+ # - result is changed
+ # - result.vpc.id == vpc_1
+ # - vpc_info.vpcs | length == 1
+ # - vpc_info.vpcs[0].enable_dns_hostnames | bool == False
+ # - vpc_info.vpcs[0].enable_dns_support | bool == True
+
+ - name: disable dns_support
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ resource_prefix }}"
+ dns_hostnames: False
+ dns_support: False
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}"
+ register: vpc_info
+
+ - name: assert a change was made
+ assert:
+ that:
+ - result is successful
+ - result is changed
+ - result.vpc.id == vpc_1
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].enable_dns_hostnames | bool == False
+ - vpc_info.vpcs[0].enable_dns_support | bool == False
+
+ - name: disable dns_support (retry)
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ resource_prefix }}"
+ dns_hostnames: False
+ dns_support: False
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}"
+ register: vpc_info
+
+ - name: assert a change was not made
+ assert:
+ that:
+ - result is successful
+ - result is not changed
+ - result.vpc.id == vpc_1
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].enable_dns_hostnames | bool == False
+ - vpc_info.vpcs[0].enable_dns_support | bool == False
+
+ # XXX #62677
+ #- name: re-enable dns_support (check mode)
+ # ec2_vpc_net:
+ # state: present
+ # cidr_block: "{{ vpc_cidr }}"
+ # name: "{{ resource_prefix }}"
+ # register: result
+ # check_mode: True
+ #- ec2_vpc_net_info:
+ # filters:
+ # "tag:Name": "{{ resource_prefix }}"
+ # register: vpc_info
+
+ #- name: assert a change was made
+ # assert:
+ # that:
+ # - result is successful
+ # - result is changed
+ # - result.vpc.id == vpc_1
+ # - vpc_info.vpcs | length == 1
+ # - vpc_info.vpcs[0].enable_dns_hostnames | bool == True
+ # - vpc_info.vpcs[0].enable_dns_support | bool == True
+
+ - name: re-enable dns_support
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ resource_prefix }}"
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}"
+ register: vpc_info
+
+ - name: assert a change was made
+ assert:
+ that:
+ - result is successful
+ - result is changed
+ - result.vpc.id == vpc_1
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].enable_dns_hostnames | bool == True
+ - vpc_info.vpcs[0].enable_dns_support | bool == True
+
+ - name: re-enable dns_support (retry)
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ resource_prefix }}"
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}"
+ register: vpc_info
+
+ - name: assert a change was not made
+ assert:
+ that:
+ - result is successful
+ - result is not changed
+ - result.vpc.id == vpc_1
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].enable_dns_hostnames | bool == True
+ - vpc_info.vpcs[0].enable_dns_support | bool == True
+
+ # ============================================================
+
+ - name: modify tags (check mode)
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ resource_prefix }}"
+ tags:
+ Ansible: Test
+ check_mode: true
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}"
+ register: vpc_info
+
+ - name: assert the VPC has Name but not Ansible tag
+ assert:
+ that:
+ - result is successful
+ - result is changed
+ - result.vpc.id == vpc_1
+ - result.vpc.tags | length == 1
+ - result.vpc.tags.Name == resource_prefix
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].tags | length == 1
+ - vpc_info.vpcs[0].tags.Name == resource_prefix
+
+ - name: modify tags
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ resource_prefix }}"
+ tags:
+ Ansible: Test
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}"
+ register: vpc_info
+ until:
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].tags | length == 2
+ retries: 5
+ delay: 5
+
+ - name: assert the VPC has Name and Ansible tags
+ assert:
+ that:
+ - result is successful
+ - result is changed
+ - result.vpc.id == vpc_1
+ - result.vpc.tags | length == 2
+ - result.vpc.tags.Ansible == "Test"
+ - result.vpc.tags.Name == resource_prefix
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].tags | length == 2
+ - vpc_info.vpcs[0].tags.Ansible == "Test"
+ - vpc_info.vpcs[0].tags.Name == resource_prefix
+
+ - name: modify tags (no change)
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ resource_prefix }}"
+ dns_support: True
+ dns_hostnames: True
+ tags:
+ Ansible: Test
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}"
+ register: vpc_info
+
+ - name: assert the VPC has Name and Ansible tags
+ assert:
+ that:
+ - result is successful
+ - result is not changed
+ - result.vpc.id == vpc_1
+ - result.vpc.tags|length == 2
+ - result.vpc.tags.Ansible == "Test"
+ - result.vpc.tags.Name == resource_prefix
+ - vpc_info.vpcs | length == 1
+ - vpc_info.vpcs[0].tags|length == 2
+ - vpc_info.vpcs[0].tags.Ansible == "Test"
+ - vpc_info.vpcs[0].tags.Name == resource_prefix
+
+ # ============================================================
+
+ # #62678
+ #- name: modify CIDR (check mode)
+ # ec2_vpc_net:
+ # state: present
+ # cidr_block:
+ # - "{{ vpc_cidr }}"
+ # - "{{ vpc_cidr_a }}"
+ # name: "{{ resource_prefix }}"
+ # check_mode: true
+ # register: result
+ #- ec2_vpc_net_info:
+ # filters:
+ # "tag:Name": "{{ resource_prefix }}"
+ # register: vpc_info
+
+ #- name: Check the CIDRs weren't changed
+ # assert:
+ # that:
+ # - result is successful
+ # - result is changed
+ # - result.vpc.id == vpc_1
+ # - vpc_info.vpcs | length == 1
+ # - vpc_info.vpcs[0].cidr_block == vpc_cidr
+ # - vpc_cidr in (result.vpc | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ # - vpc_cidr_a not in (result.vpc | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ # - vpc_cidr_b not in (result.vpc | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ # - vpc_info.vpcs[0].cidr_block_association_set | length == 1
+ # - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ # - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ # - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ # - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ # - vpc_cidr in (vpc_info.vpcs[0] | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ # - vpc_cidr_a not in (vpc_info.vpcs[0] | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ # - vpc_cidr_b not in (result.vpc | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+
+ - name: modify CIDR
+ ec2_vpc_net:
+ state: present
+ cidr_block:
+ - "{{ vpc_cidr }}"
+ - "{{ vpc_cidr_a }}"
+ name: "{{ resource_prefix }}"
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}"
+ register: vpc_info
+
+ - name: assert the CIDRs changed
+ assert:
+ that:
+ - result is successful
+ - result is changed
+ - result.vpc.id == vpc_1
+ - vpc_info.vpcs | length == 1
+ - result.vpc.cidr_block == vpc_cidr
+ - vpc_info.vpcs[0].cidr_block == vpc_cidr
+ - result.vpc.cidr_block_association_set | length == 2
+ - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - result.vpc.cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ - vpc_cidr in (result.vpc | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_cidr_a in (result.vpc | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_cidr_b not in (result.vpc | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_info.vpcs[0].cidr_block_association_set | length == 2
+ - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ - vpc_cidr in (vpc_info.vpcs[0] | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_cidr_a in (vpc_info.vpcs[0] | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_cidr_b not in (vpc_info.vpcs[0] | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+
+ - name: modify CIDR (no change)
+ ec2_vpc_net:
+ state: present
+ cidr_block:
+ - "{{ vpc_cidr }}"
+ - "{{ vpc_cidr_a }}"
+ name: "{{ resource_prefix }}"
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}"
+ register: vpc_info
+
+ - name: assert the CIDRs didn't change
+ assert:
+ that:
+ - result is successful
+ - result is not changed
+ - result.vpc.id == vpc_1
+ - vpc_info.vpcs | length == 1
+ - result.vpc.cidr_block == vpc_cidr
+ - vpc_info.vpcs[0].cidr_block == vpc_cidr
+ - result.vpc.cidr_block_association_set | length == 2
+ - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - result.vpc.cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ - vpc_cidr in (result.vpc | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_cidr_a in (result.vpc | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_cidr_b not in (result.vpc | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_info.vpcs[0].cidr_block_association_set | length == 2
+ - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ - vpc_cidr in (vpc_info.vpcs[0] | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_cidr_a in (vpc_info.vpcs[0] | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_cidr_b not in (vpc_info.vpcs[0] | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+
+ # #62678
+ #- name: modify CIDR - no purge (check mode)
+ # ec2_vpc_net:
+ # state: present
+ # cidr_block:
+ # - "{{ vpc_cidr }}"
+ # - "{{ vpc_cidr_b }}"
+ # name: "{{ resource_prefix }}"
+ # check_mode: true
+ # register: result
+ #- ec2_vpc_net_info:
+ # filters:
+ # "tag:Name": "{{ resource_prefix }}"
+ # register: vpc_info
+
+ #- name: Check the CIDRs weren't changed
+ # assert:
+ # that:
+ # - result is successful
+ # - result is changed
+ # - vpc_info.vpcs | length == 1
+ # - vpc_info.vpcs[0].cidr_block == vpc_cidr
+ # - vpc_cidr in (result.vpc | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ # - vpc_cidr_a in (result.vpc | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ # - vpc_cidr_b not in (result.vpc | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ # - vpc_info.vpcs[0].cidr_block_association_set | length == 2
+ # - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ # - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ # - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ # - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ # - vpc_cidr in (vpc_info.vpcs[0] | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ # - vpc_cidr_a in (vpc_info.vpcs[0] | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ # - vpc_cidr_b not in (vpc_info.vpcs[0] | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+
+ - name: modify CIDR - no purge
+ ec2_vpc_net:
+ state: present
+ cidr_block:
+ - "{{ vpc_cidr }}"
+ - "{{ vpc_cidr_b }}"
+ name: "{{ resource_prefix }}"
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}"
+ register: vpc_info
+
+ - name: assert the CIDRs changed
+ assert:
+ that:
+ - result is successful
+ - result is changed
+ - result.vpc.id == vpc_1
+ - vpc_info.vpcs | length == 1
+ - result.vpc.cidr_block == vpc_cidr
+ - vpc_info.vpcs[0].cidr_block == vpc_cidr
+ - result.vpc.cidr_block_association_set | length == 3
+ - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - result.vpc.cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ - result.vpc.cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
+ - vpc_cidr in (result.vpc | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_cidr_a in (result.vpc | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_cidr_b in (result.vpc | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_info.vpcs[0].cidr_block_association_set | length == 3
+ - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ - vpc_info.vpcs[0].cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
+ - vpc_cidr in (vpc_info.vpcs[0] | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_cidr_a in (vpc_info.vpcs[0] | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_cidr_b in (vpc_info.vpcs[0] | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+
+ - name: modify CIDR - no purge (no change)
+ ec2_vpc_net:
+ state: present
+ cidr_block:
+ - "{{ vpc_cidr }}"
+ - "{{ vpc_cidr_b }}"
+ name: "{{ resource_prefix }}"
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}"
+ register: vpc_info
+
+ - name: assert the CIDRs didn't change
+ assert:
+ that:
+ - result is successful
+ - result is not changed
+ - vpc_info.vpcs | length == 1
+ - result.vpc.cidr_block == vpc_cidr
+ - vpc_info.vpcs[0].cidr_block == vpc_cidr
+ - result.vpc.cidr_block_association_set | length == 3
+ - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - result.vpc.cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ - result.vpc.cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
+ - vpc_cidr in (result.vpc | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_cidr_a in (result.vpc | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_cidr_b in (result.vpc | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_info.vpcs[0].cidr_block_association_set | length == 3
+ - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ - vpc_info.vpcs[0].cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
+ - vpc_cidr in (vpc_info.vpcs[0] | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_cidr_a in (vpc_info.vpcs[0] | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_cidr_b in (vpc_info.vpcs[0] | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+
+ - name: modify CIDR - no purge (no change - list all - check mode)
+ ec2_vpc_net:
+ state: present
+ cidr_block:
+ - "{{ vpc_cidr }}"
+ - "{{ vpc_cidr_a }}"
+ - "{{ vpc_cidr_b }}"
+ name: "{{ resource_prefix }}"
+ check_mode: true
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}"
+ register: vpc_info
+
+ - name: assert the CIDRs didn't change
+ assert:
+ that:
+ - result is successful
+ - result is not changed
+ - vpc_info.vpcs | length == 1
+ - result.vpc.cidr_block == vpc_cidr
+ - vpc_info.vpcs[0].cidr_block == vpc_cidr
+ - result.vpc.cidr_block_association_set | length == 3
+ - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - result.vpc.cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ - result.vpc.cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
+ - vpc_cidr in (result.vpc | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_cidr_a in (result.vpc | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_cidr_b in (result.vpc | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_info.vpcs[0].cidr_block_association_set | length == 3
+ - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ - vpc_info.vpcs[0].cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
+ - vpc_cidr in (vpc_info.vpcs[0] | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_cidr_a in (vpc_info.vpcs[0] | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_cidr_b in (vpc_info.vpcs[0] | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+
+ - name: modify CIDR - no purge (no change - list all)
+ ec2_vpc_net:
+ state: present
+ cidr_block:
+ - "{{ vpc_cidr }}"
+ - "{{ vpc_cidr_a }}"
+ - "{{ vpc_cidr_b }}"
+ name: "{{ resource_prefix }}"
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}"
+ register: vpc_info
+
+ - name: assert the CIDRs didn't change
+ assert:
+ that:
+ - result is successful
+ - result is not changed
+ - vpc_info.vpcs | length == 1
+ - result.vpc.cidr_block == vpc_cidr
+ - vpc_info.vpcs[0].cidr_block == vpc_cidr
+ - result.vpc.cidr_block_association_set | length == 3
+ - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - result.vpc.cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ - result.vpc.cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
+ - vpc_cidr in (result.vpc | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_cidr_a in (result.vpc | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_cidr_b in (result.vpc | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_info.vpcs[0].cidr_block_association_set | length == 3
+ - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ - vpc_info.vpcs[0].cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
+ - vpc_cidr in (vpc_info.vpcs[0] | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_cidr_a in (vpc_info.vpcs[0] | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_cidr_b in (vpc_info.vpcs[0] | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+
+ - name: modify CIDR - no purge (no change - different order - check mode)
+ ec2_vpc_net:
+ state: present
+ cidr_block:
+ - "{{ vpc_cidr }}"
+ - "{{ vpc_cidr_b }}"
+ - "{{ vpc_cidr_a }}"
+ name: "{{ resource_prefix }}"
+ check_mode: true
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}"
+ register: vpc_info
+
+ - name: assert the CIDRs didn't change
+ assert:
+ that:
+ - result is successful
+ - result is not changed
+ - vpc_info.vpcs | length == 1
+ - result.vpc.cidr_block == vpc_cidr
+ - vpc_info.vpcs[0].cidr_block == vpc_cidr
+ - result.vpc.cidr_block_association_set | length == 3
+ - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - result.vpc.cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ - result.vpc.cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
+ - vpc_cidr in (result.vpc | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_cidr_a in (result.vpc | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_cidr_b in (result.vpc | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_info.vpcs[0].cidr_block_association_set | length == 3
+ - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ - vpc_info.vpcs[0].cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
+ - vpc_cidr in (vpc_info.vpcs[0] | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_cidr_a in (vpc_info.vpcs[0] | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_cidr_b in (vpc_info.vpcs[0] | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+
+ - name: modify CIDR - no purge (no change - different order)
+ ec2_vpc_net:
+ state: present
+ cidr_block:
+ - "{{ vpc_cidr }}"
+ - "{{ vpc_cidr_b }}"
+ - "{{ vpc_cidr_a }}"
+ name: "{{ resource_prefix }}"
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}"
+ register: vpc_info
+
+ - name: assert the CIDRs didn't change
+ assert:
+ that:
+ - result is successful
+ - result is not changed
+ - vpc_info.vpcs | length == 1
+ - result.vpc.cidr_block == vpc_cidr
+ - vpc_info.vpcs[0].cidr_block == vpc_cidr
+ - result.vpc.cidr_block_association_set | length == 3
+ - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
+ - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - result.vpc.cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ - result.vpc.cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
+ - vpc_cidr in (result.vpc | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_cidr_a in (result.vpc | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_cidr_b in (result.vpc | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_info.vpcs[0].cidr_block_association_set | length == 3
+ - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
+ - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ - vpc_info.vpcs[0].cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
+ - vpc_cidr in (vpc_info.vpcs[0] | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_cidr_a in (vpc_info.vpcs[0] | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ - vpc_cidr_b in (vpc_info.vpcs[0] | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+
+ # #62678
+ #- name: modify CIDR - purge (check mode)
+ # ec2_vpc_net:
+ # state: present
+ # cidr_block:
+ # - "{{ vpc_cidr }}"
+ # - "{{ vpc_cidr_b }}"
+ # name: "{{ resource_prefix }}"
+ # purge_cidrs: yes
+ # check_mode: true
+ # register: result
+ #- ec2_vpc_net_info:
+ # filters:
+ # "tag:Name": "{{ resource_prefix }}"
+ # register: vpc_info
+
+ #- name: Check the CIDRs weren't changed
+ # assert:
+ # that:
+ # - result is successful
+ # - result is changed
+ # - vpc_info.vpcs | length == 1
+ # - vpc_info.vpcs[0].cidr_block == vpc_cidr
+ # - vpc_info.vpcs[0].cidr_block_association_set | length == 3
+ # - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
+ # - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
+ # - vpc_info.vpcs[0].cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
+ # - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
+ # - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
+ # - vpc_info.vpcs[0].cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
+ # - vpc_cidr in (vpc_info.vpcs[0] | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ # - vpc_cidr_a in (vpc_info.vpcs[0] | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+ # - vpc_cidr_b in (vpc_info.vpcs[0] | community.general.json_query("cidr_block_association_set[*].cidr_block") | list)
+
+ - name: modify CIDR - purge
+ ec2_vpc_net:
+ state: present
+ cidr_block:
+ - "{{ vpc_cidr }}"
+ - "{{ vpc_cidr_b }}"
+ name: "{{ resource_prefix }}"
+ purge_cidrs: yes
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}"
+ register: vpc_info
+
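+ # The JMESPath query used in the asserts below keeps only CIDR blocks whose
+ # association is still in the "associated" state, so the block disassociated by
+ # purge_cidrs is excluded from the count.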
+ - name: assert the CIDRs changed
+ vars:
+ cidr_query: 'cidr_block_association_set[?cidr_block_state.state == `associated`].cidr_block'
+ assert:
+ that:
+ - result is successful
+ - result is changed
+ - result.vpc.id == vpc_1
+ - vpc_info.vpcs | length == 1
+ - result.vpc.cidr_block == vpc_cidr
+ - vpc_info.vpcs[0].cidr_block == vpc_cidr
+ - result.vpc | community.general.json_query(cidr_query) | list | length == 2
+ - vpc_cidr in (result.vpc | community.general.json_query(cidr_query) | list)
+ - vpc_cidr_a not in (result.vpc | community.general.json_query(cidr_query) | list)
+ - vpc_cidr_b in (result.vpc | community.general.json_query(cidr_query) | list)
+ - vpc_info.vpcs[0] | community.general.json_query(cidr_query) | list | length == 2
+ - vpc_cidr in (vpc_info.vpcs[0] | community.general.json_query(cidr_query) | list)
+ - vpc_cidr_a not in (vpc_info.vpcs[0] | community.general.json_query(cidr_query) | list)
+ - vpc_cidr_b in (vpc_info.vpcs[0] | community.general.json_query(cidr_query) | list)
+
+ - name: modify CIDR - purge (no change)
+ ec2_vpc_net:
+ state: present
+ cidr_block:
+ - "{{ vpc_cidr }}"
+ - "{{ vpc_cidr_b }}"
+ name: "{{ resource_prefix }}"
+ purge_cidrs: yes
+ register: result
+ - ec2_vpc_net_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}"
+ register: vpc_info
+
+ - name: assert the CIDRs didn't change
+ vars:
+ cidr_query: 'cidr_block_association_set[?cidr_block_state.state == `associated`].cidr_block'
+ assert:
+ that:
+ - result is successful
+ - result is not changed
+ - result.vpc.id == vpc_1
+ - vpc_info.vpcs | length == 1
+ - result.vpc.cidr_block == vpc_cidr
+ - vpc_info.vpcs[0].cidr_block == vpc_cidr
+ - result.vpc | community.general.json_query(cidr_query) | list | length == 2
+ - vpc_cidr in (result.vpc | community.general.json_query(cidr_query) | list)
+ - vpc_cidr_a not in (result.vpc | community.general.json_query(cidr_query) | list)
+ - vpc_cidr_b in (result.vpc | community.general.json_query(cidr_query) | list)
+ - vpc_info.vpcs[0] | community.general.json_query(cidr_query) | list | length == 2
+ - vpc_cidr in (vpc_info.vpcs[0] | community.general.json_query(cidr_query) | list)
+ - vpc_cidr_a not in (vpc_info.vpcs[0] | community.general.json_query(cidr_query) | list)
+ - vpc_cidr_b in (vpc_info.vpcs[0] | community.general.json_query(cidr_query) | list)
+
+ # ============================================================
+
+ - name: test check mode to delete a VPC
+ ec2_vpc_net:
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ resource_prefix }}"
+ state: absent
+ check_mode: true
+ register: result
+
+ - name: assert that a change would have been made
+ assert:
+ that:
+ - result is changed
+
+ # ============================================================
+
+ always:
+
+ - name: replace the DHCP options set so the new one can be deleted
+ ec2_vpc_net:
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ resource_prefix }}"
+ state: present
+ multi_ok: no
+ dhcp_opts_id: "{{ default_dhcp_options_id }}"
+ ignore_errors: true
+
+ - name: remove the DHCP option set
+ ec2_vpc_dhcp_option:
+ dhcp_options_id: "{{ new_dhcp.dhcp_options_id }}"
+ state: absent
+ ignore_errors: true
+
+ - name: Describe VPCs before deleting them (for debugging)
+ ec2_vpc_net_info:
+ ignore_errors: true
+
+ - name: remove the VPC
+ ec2_vpc_net:
+ cidr_block: "{{ vpc_cidr }}"
+ name: "{{ resource_prefix }}"
+ state: absent
+ ignore_errors: true
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/aliases b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/aliases
new file mode 100644
index 00000000..4654de3a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+shippable/aws/group2
+ec2_vpc_subnet_info \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/defaults/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/defaults/main.yml
new file mode 100644
index 00000000..20e440c9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/defaults/main.yml
@@ -0,0 +1,7 @@
+---
+# defaults file for ec2_vpc_subnet
+ec2_vpc_subnet_name: '{{resource_prefix}}'
+ec2_vpc_subnet_description: 'Created by ansible integration tests'
+vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/16'
+subnet_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.1.0/24'
+subnet_cidr_b: '10.{{ 256 | random(seed=resource_prefix) }}.2.0/24'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/meta/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/meta/main.yml
new file mode 100644
index 00000000..1f64f116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/tasks/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/tasks/main.yml
new file mode 100644
index 00000000..6dc24d61
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/tasks/main.yml
@@ -0,0 +1,692 @@
+---
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+
+ - name: list available AZs
+ aws_az_info:
+ register: region_azs
+
+ - name: pick an AZ for testing
+ set_fact:
+ subnet_az: "{{ region_azs.availability_zones[0].zone_name }}"
+
+ # ============================================================
+ - name: create a VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ state: present
+ cidr_block: "{{ vpc_cidr }}"
+ ipv6_cidr: True
+ tags:
+ Name: "{{ resource_prefix }}-vpc"
+ Description: "Created by ansible-test"
+ register: vpc_result
+
+ - set_fact:
+ vpc_ipv6_cidr: "{{ vpc_result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block }}"
+
+ - set_fact:
+ subnet_ipv6_cidr: "{{ vpc_ipv6_cidr | regex_replace('::/.*', '::/64') }}"
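+ # Amazon-assigned VPC IPv6 blocks are a /56; trimming the prefix length to /64
+ # yields the first /64 of that range for use as the subnet's IPv6 CIDR.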
+
+ # ============================================================
+ - name: check subnet does not exist
+ ec2_vpc_subnet_info:
+ filters:
+ "tag:Name": '{{ec2_vpc_subnet_name}}'
+ register: vpc_subnet_info
+
+ - name: Assert that no matching subnets were found
+ assert:
+ that:
+ - (vpc_subnet_info.subnets|length) == 0
+
+ - name: create subnet (expected changed=true) (CHECK MODE)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ az: "{{ subnet_az }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags:
+ Name: '{{ec2_vpc_subnet_name}}'
+ Description: '{{ec2_vpc_subnet_description}}'
+ state: present
+ check_mode: true
+ register: vpc_subnet_create
+
+ - name: assert creation would happen
+ assert:
+ that:
+ - vpc_subnet_create is changed
+
+ - name: create subnet (expected changed=true)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ az: "{{ subnet_az }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags:
+ Name: '{{ec2_vpc_subnet_name}}'
+ Description: '{{ec2_vpc_subnet_description}}'
+ state: present
+ register: vpc_subnet_create
+
+ - name: assert creation happened (expected changed=true)
+ assert:
+ that:
+ - vpc_subnet_create is changed
+ - 'vpc_subnet_create.subnet.id.startswith("subnet-")'
+ - '"Name" in vpc_subnet_create.subnet.tags and vpc_subnet_create.subnet.tags["Name"] == ec2_vpc_subnet_name'
+ - '"Description" in vpc_subnet_create.subnet.tags and vpc_subnet_create.subnet.tags["Description"] == ec2_vpc_subnet_description'
+
+ - name: get info about the subnet
+ ec2_vpc_subnet_info:
+ subnet_ids: '{{ vpc_subnet_create.subnet.id }}'
+ register: vpc_subnet_info
+
+ - name: Assert info result matches create result
+ assert:
+ that:
+ - 'vpc_subnet_info.subnets | length == 1'
+ - '"assign_ipv6_address_on_creation" in subnet_info'
+ - 'subnet_info.assign_ipv6_address_on_creation == False'
+ - '"availability_zone" in subnet_info'
+ - 'subnet_info.availability_zone == subnet_az'
+ - '"available_ip_address_count" in subnet_info'
+ - '"cidr_block" in subnet_info'
+ - 'subnet_info.cidr_block == subnet_cidr'
+ - '"default_for_az" in subnet_info'
+ - '"id" in subnet_info'
+ - 'subnet_info.id == vpc_subnet_create.subnet.id'
+ - '"map_public_ip_on_launch" in subnet_info'
+ - 'subnet_info.map_public_ip_on_launch == False'
+ - '"state" in subnet_info'
+ - '"subnet_id" in subnet_info'
+ - 'subnet_info.subnet_id == vpc_subnet_create.subnet.id'
+ - '"tags" in subnet_info'
+ - 'subnet_info.tags["Description"] == ec2_vpc_subnet_description'
+ - 'subnet_info.tags["Name"] == vpc_subnet_create.subnet.tags["Name"]'
+ - '"vpc_id" in subnet_info'
+ - 'subnet_info.vpc_id == vpc_result.vpc.id'
+ vars:
+ subnet_info: '{{ vpc_subnet_info.subnets[0] }}'
+
+ # ============================================================
+ - name: recreate subnet (expected changed=false) (CHECK MODE)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ az: "{{ subnet_az }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags:
+ Name: '{{ec2_vpc_subnet_name}}'
+ Description: '{{ec2_vpc_subnet_description}}'
+ state: present
+ check_mode: true
+ register: vpc_subnet_recreate
+
+ - name: assert recreation changed nothing (expected changed=false)
+ assert:
+ that:
+ - vpc_subnet_recreate is not changed
+
+ - name: recreate subnet (expected changed=false)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ az: "{{ subnet_az }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags:
+ Name: '{{ec2_vpc_subnet_name}}'
+ Description: '{{ec2_vpc_subnet_description}}'
+ state: present
+ register: vpc_subnet_recreate
+
+ - name: assert recreation changed nothing (expected changed=false)
+ assert:
+ that:
+ - vpc_subnet_recreate is not changed
+ - 'vpc_subnet_recreate.subnet == vpc_subnet_create.subnet'
+
+ # ============================================================
+ - name: update subnet so instances launched in it are assigned a public IP (CHECK MODE)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ az: "{{ subnet_az }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags:
+ Name: '{{ec2_vpc_subnet_name}}'
+ Description: '{{ec2_vpc_subnet_description}}'
+ state: present
+ map_public: true
+ check_mode: true
+ register: vpc_subnet_modify
+
+ - name: assert subnet changed
+ assert:
+ that:
+ - vpc_subnet_modify is changed
+
+ - name: update subnet so instances launched in it are assigned a public IP
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ az: "{{ subnet_az }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags:
+ Name: '{{ec2_vpc_subnet_name}}'
+ Description: '{{ec2_vpc_subnet_description}}'
+ state: present
+ map_public: true
+ register: vpc_subnet_modify
+
+ - name: assert subnet changed
+ assert:
+ that:
+ - vpc_subnet_modify is changed
+ - vpc_subnet_modify.subnet.map_public_ip_on_launch
+
+ # ============================================================
+ - name: add invalid ipv6 block to subnet (expected failed)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ az: "{{ subnet_az }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ ipv6_cidr: 2001:db8::/64
+ tags:
+ Name: '{{ec2_vpc_subnet_name}}'
+ Description: '{{ec2_vpc_subnet_description}}'
+ state: present
+ register: vpc_subnet_ipv6_failed
+ ignore_errors: yes
+
+ - name: assert failure happened (expected failed)
+ assert:
+ that:
+ - vpc_subnet_ipv6_failed is failed
+ - "'Couldn\\'t associate ipv6 cidr' in vpc_subnet_ipv6_failed.msg"
+
+ # ============================================================
+ - name: add a tag (expected changed=true) (CHECK MODE)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ az: "{{ subnet_az }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags:
+ Name: '{{ec2_vpc_subnet_name}}'
+ Description: '{{ec2_vpc_subnet_description}}'
+ AnotherTag: SomeValue
+ state: present
+ check_mode: true
+ register: vpc_subnet_add_a_tag
+
+ - name: assert tag addition happened (expected changed=true)
+ assert:
+ that:
+ - vpc_subnet_add_a_tag is changed
+
+ - name: add a tag (expected changed=true)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ az: "{{ subnet_az }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags:
+ Name: '{{ec2_vpc_subnet_name}}'
+ Description: '{{ec2_vpc_subnet_description}}'
+ AnotherTag: SomeValue
+ state: present
+ register: vpc_subnet_add_a_tag
+
+ - name: assert tag addition happened (expected changed=true)
+ assert:
+ that:
+ - vpc_subnet_add_a_tag is changed
+ - '"Name" in vpc_subnet_add_a_tag.subnet.tags and vpc_subnet_add_a_tag.subnet.tags["Name"] == ec2_vpc_subnet_name'
+ - '"Description" in vpc_subnet_add_a_tag.subnet.tags and vpc_subnet_add_a_tag.subnet.tags["Description"] == ec2_vpc_subnet_description'
+ - '"AnotherTag" in vpc_subnet_add_a_tag.subnet.tags and vpc_subnet_add_a_tag.subnet.tags["AnotherTag"] == "SomeValue"'
+
+ - name: Get info by tag
+ ec2_vpc_subnet_info:
+ filters:
+ "tag:Name": '{{ec2_vpc_subnet_name}}'
+ register: vpc_subnet_info_by_tag
+
+ - name: assert info matches expected output
+ assert:
+ that:
+ - 'vpc_subnet_info_by_tag.subnets[0].id == vpc_subnet_add_a_tag.subnet.id'
+ - (vpc_subnet_info_by_tag.subnets[0].tags|length) == 3
+ - '"Description" in vpc_subnet_info_by_tag.subnets[0].tags and vpc_subnet_info_by_tag.subnets[0].tags["Description"] == ec2_vpc_subnet_description'
+ - '"AnotherTag" in vpc_subnet_info_by_tag.subnets[0].tags and vpc_subnet_info_by_tag.subnets[0].tags["AnotherTag"] == "SomeValue"'
+
+ # ============================================================
+ - name: remove tags with default purge_tags=true (expected changed=true) (CHECK MODE)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ az: "{{ subnet_az }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags:
+ AnotherTag: SomeValue
+ state: present
+ check_mode: true
+ register: vpc_subnet_remove_tags
+
+ - name: assert tag removal happened (expected changed=true)
+ assert:
+ that:
+ - vpc_subnet_remove_tags is changed
+
+ - name: remove tags with default purge_tags=true (expected changed=true)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ az: "{{ subnet_az }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags:
+ AnotherTag: SomeValue
+ state: present
+ register: vpc_subnet_remove_tags
+
+ - name: assert tag removal happened (expected changed=true)
+ assert:
+ that:
+ - vpc_subnet_remove_tags is changed
+ - '"Name" not in vpc_subnet_remove_tags.subnet.tags'
+ - '"Description" not in vpc_subnet_remove_tags.subnet.tags'
+ - '"AnotherTag" in vpc_subnet_remove_tags.subnet.tags and vpc_subnet_remove_tags.subnet.tags["AnotherTag"] == "SomeValue"'
+
+ - name: Check tags by info
+ ec2_vpc_subnet_info:
+ subnet_id: '{{ vpc_subnet_remove_tags.subnet.id }}'
+ register: vpc_subnet_info_removed_tags
+
+ - name: assert info matches expected output
+ assert:
+ that:
+ - '"Name" not in vpc_subnet_info_removed_tags.subnets[0].tags'
+ - '"Description" not in vpc_subnet_info_removed_tags.subnets[0].tags'
+ - '"AnotherTag" in vpc_subnet_info_removed_tags.subnets[0].tags and vpc_subnet_info_removed_tags.subnets[0].tags["AnotherTag"] == "SomeValue"'
+
+
+ # ============================================================
+ - name: change tags with purge_tags=false (expected changed=true) (CHECK MODE)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ az: "{{ subnet_az }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags:
+ Name: '{{ec2_vpc_subnet_name}}'
+ Description: '{{ec2_vpc_subnet_description}}'
+ state: present
+ purge_tags: false
+ check_mode: true
+ register: vpc_subnet_change_tags
+
+ - name: assert tag addition happened (expected changed=true)
+ assert:
+ that:
+ - vpc_subnet_change_tags is changed
+
+ - name: change tags with purge_tags=false (expected changed=true)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ az: "{{ subnet_az }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ tags:
+ Name: '{{ec2_vpc_subnet_name}}'
+ Description: '{{ec2_vpc_subnet_description}}'
+ state: present
+ purge_tags: false
+ register: vpc_subnet_change_tags
+
+ - name: assert tag addition happened (expected changed=true)
+ assert:
+ that:
+ - vpc_subnet_change_tags is changed
+ - '"Name" in vpc_subnet_change_tags.subnet.tags and vpc_subnet_change_tags.subnet.tags["Name"] == ec2_vpc_subnet_name'
+ - '"Description" in vpc_subnet_change_tags.subnet.tags and vpc_subnet_change_tags.subnet.tags["Description"] == ec2_vpc_subnet_description'
+ - '"AnotherTag" in vpc_subnet_change_tags.subnet.tags and vpc_subnet_change_tags.subnet.tags["AnotherTag"] == "SomeValue"'
+
+ # ============================================================
+ - name: test state=absent (expected changed=true) (CHECK MODE)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ state: absent
+ check_mode: true
+ register: result
+
+ - name: assert state=absent (expected changed=true)
+ assert:
+ that:
+ - result is changed
+
+ - name: test state=absent (expected changed=true)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ state: absent
+ register: result
+
+ - name: assert state=absent (expected changed=true)
+ assert:
+ that:
+ - result is changed
+
+ # ============================================================
+ - name: test state=absent (expected changed=false) (CHECK MODE)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ state: absent
+ check_mode: true
+ register: result
+
+ - name: assert state=absent (expected changed=false)
+ assert:
+ that:
+ - result is not changed
+
+ - name: test state=absent (expected changed=false)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ state: absent
+ register: result
+
+ - name: assert state=absent (expected changed=false)
+ assert:
+ that:
+ - result is not changed
+
+ # ============================================================
+ - name: create subnet without AZ (CHECK MODE)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ state: present
+ check_mode: true
+ register: subnet_without_az
+
+ - name: check that subnet without AZ works fine
+ assert:
+ that:
+ - subnet_without_az is changed
+
+ - name: create subnet without AZ
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ state: present
+ register: subnet_without_az
+
+ - name: check that subnet without AZ works fine
+ assert:
+ that:
+ - subnet_without_az is changed
+
+ # ============================================================
+ - name: remove subnet without AZ (CHECK MODE)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ state: absent
+ check_mode: true
+ register: result
+
+ - name: assert state=absent (expected changed=true)
+ assert:
+ that:
+ - result is changed
+
+ - name: remove subnet without AZ
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ state: absent
+ register: result
+
+ - name: assert state=absent (expected changed=true)
+ assert:
+ that:
+ - result is changed
+
+
+ # ============================================================
+ - name: create subnet with IPv6 (expected changed=true) (CHECK MODE)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ ipv6_cidr: "{{ subnet_ipv6_cidr }}"
+ assign_instances_ipv6: true
+ state: present
+ tags:
+ Name: '{{ec2_vpc_subnet_name}}'
+ Description: '{{ec2_vpc_subnet_description}}'
+ check_mode: true
+ register: vpc_subnet_ipv6_create
+
+ - name: assert creation with IPv6 happened (expected changed=true)
+ assert:
+ that:
+ - vpc_subnet_ipv6_create is changed
+
+ - name: create subnet with IPv6 (expected changed=true)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ ipv6_cidr: "{{ subnet_ipv6_cidr }}"
+ assign_instances_ipv6: true
+ state: present
+ tags:
+ Name: '{{ec2_vpc_subnet_name}}'
+ Description: '{{ec2_vpc_subnet_description}}'
+ register: vpc_subnet_ipv6_create
+
+ - name: assert creation with IPv6 happened (expected changed=true)
+ assert:
+ that:
+ - vpc_subnet_ipv6_create is changed
+ - 'vpc_subnet_ipv6_create.subnet.id.startswith("subnet-")'
+ - "vpc_subnet_ipv6_create.subnet.ipv6_cidr_block == subnet_ipv6_cidr"
+ - '"Name" in vpc_subnet_ipv6_create.subnet.tags and vpc_subnet_ipv6_create.subnet.tags["Name"] == ec2_vpc_subnet_name'
+ - '"Description" in vpc_subnet_ipv6_create.subnet.tags and vpc_subnet_ipv6_create.subnet.tags["Description"] == ec2_vpc_subnet_description'
+ - 'vpc_subnet_ipv6_create.subnet.assign_ipv6_address_on_creation'
+
+ # ============================================================
+ - name: recreate subnet (expected changed=false) (CHECK MODE)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ ipv6_cidr: "{{ subnet_ipv6_cidr }}"
+ assign_instances_ipv6: true
+ state: present
+ tags:
+ Name: '{{ec2_vpc_subnet_name}}'
+ Description: '{{ec2_vpc_subnet_description}}'
+ check_mode: true
+ register: vpc_subnet_ipv6_recreate
+
+ - name: assert recreation changed nothing (expected changed=false)
+ assert:
+ that:
+ - vpc_subnet_ipv6_recreate is not changed
+
+ - name: recreate subnet (expected changed=false)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ ipv6_cidr: "{{ subnet_ipv6_cidr }}"
+ assign_instances_ipv6: true
+ state: present
+ tags:
+ Name: '{{ec2_vpc_subnet_name}}'
+ Description: '{{ec2_vpc_subnet_description}}'
+ register: vpc_subnet_ipv6_recreate
+
+ - name: assert recreation changed nothing (expected changed=false)
+ assert:
+ that:
+ - vpc_subnet_ipv6_recreate is not changed
+ - 'vpc_subnet_ipv6_recreate.subnet == vpc_subnet_ipv6_create.subnet'
+
+ # ============================================================
+ - name: change subnet assign_instances_ipv6 attribute (expected changed=true) (CHECK MODE)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ ipv6_cidr: "{{ subnet_ipv6_cidr }}"
+ assign_instances_ipv6: false
+ state: present
+ purge_tags: false
+ check_mode: true
+ register: vpc_change_attribute
+
+ - name: assert assign_instances_ipv6 attribute changed (expected changed=true)
+ assert:
+ that:
+ - vpc_change_attribute is changed
+
+ - name: change subnet assign_instances_ipv6 attribute (expected changed=true)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ ipv6_cidr: "{{ subnet_ipv6_cidr }}"
+ assign_instances_ipv6: false
+ state: present
+ purge_tags: false
+ register: vpc_change_attribute
+
+ - name: assert assign_instances_ipv6 attribute changed (expected changed=true)
+ assert:
+ that:
+ - vpc_change_attribute is changed
+ - 'not vpc_change_attribute.subnet.assign_ipv6_address_on_creation'
+
+ # ============================================================
+ - name: add second subnet with duplicate ipv6 cidr (expected failure)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr_b }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ ipv6_cidr: "{{ subnet_ipv6_cidr }}"
+ state: present
+ purge_tags: false
+ register: vpc_add_duplicate_ipv6
+ ignore_errors: true
+
+ - name: assert graceful failure (expected failed)
+ assert:
+ that:
+ - vpc_add_duplicate_ipv6 is failed
+ - "'The IPv6 CIDR \\'{{ subnet_ipv6_cidr }}\\' conflicts with another subnet' in vpc_add_duplicate_ipv6.msg"
+
+ # ============================================================
+ - name: remove subnet ipv6 cidr (expected changed=true) (CHECK MODE)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ state: present
+ purge_tags: false
+ check_mode: true
+ register: vpc_remove_ipv6_cidr
+
+ - name: assert subnet ipv6 cidr removed (expected changed=true)
+ assert:
+ that:
+ - vpc_remove_ipv6_cidr is changed
+
+ - name: remove subnet ipv6 cidr (expected changed=true)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ state: present
+ purge_tags: false
+ register: vpc_remove_ipv6_cidr
+
+ - name: assert subnet ipv6 cidr removed (expected changed=true)
+ assert:
+ that:
+ - vpc_remove_ipv6_cidr is changed
+ - "vpc_remove_ipv6_cidr.subnet.ipv6_cidr_block == ''"
+ - 'not vpc_remove_ipv6_cidr.subnet.assign_ipv6_address_on_creation'
+
+ # ============================================================
+ - name: test adding a tag that looks like a boolean to the subnet (CHECK MODE)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ state: present
+ purge_tags: false
+ tags:
+ looks_like_boolean: true
+ check_mode: true
+ register: vpc_subnet_info
+
+ - name: assert a tag was added
+ assert:
+ that:
+ - vpc_subnet_info is changed
+
+ - name: test adding a tag that looks like a boolean to the subnet
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ state: present
+ purge_tags: false
+ tags:
+ looks_like_boolean: true
+ register: vpc_subnet_info
+
+ - name: assert a tag was added
+ assert:
+ that:
+ - vpc_subnet_info is changed
+ - 'vpc_subnet_info.subnet.tags.looks_like_boolean == "True"'
+
+ # ============================================================
+ - name: test idempotence adding a tag that looks like a boolean (CHECK MODE)
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ state: present
+ purge_tags: false
+ tags:
+ looks_like_boolean: true
+ check_mode: true
+ register: vpc_subnet_info
+
+ - name: assert tags haven't changed
+ assert:
+ that:
+ - vpc_subnet_info is not changed
+
+ - name: test idempotence adding a tag that looks like a boolean
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ state: present
+ purge_tags: false
+ tags:
+ looks_like_boolean: true
+ register: vpc_subnet_info
+
+ - name: assert tags haven't changed
+ assert:
+ that:
+ - vpc_subnet_info is not changed
+
+ always:
+
+ ################################################
+ # TEARDOWN STARTS HERE
+ ################################################
+
+ - name: tidy up subnet
+ ec2_vpc_subnet:
+ cidr: "{{ subnet_cidr }}"
+ vpc_id: "{{ vpc_result.vpc.id }}"
+ state: absent
+
+ - name: tidy up VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ state: absent
+ cidr_block: "{{ vpc_cidr }}"
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/aliases b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/aliases
new file mode 100644
index 00000000..a112c3d1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group1
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/create_inventory_config.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/create_inventory_config.yml
new file mode 100644
index 00000000..7e4c3106
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/create_inventory_config.yml
@@ -0,0 +1,11 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ vars:
+ template_name: "../templates/{{ template | default('inventory.yml.j2') }}"
+ tasks:
+ - name: write inventory config file
+ copy:
+ dest: ../test.aws_ec2.yml
+ content: "{{ lookup('template', template_name) }}"
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/empty_inventory_config.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/empty_inventory_config.yml
new file mode 100644
index 00000000..f67fff1a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/empty_inventory_config.yml
@@ -0,0 +1,9 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: write inventory config file
+ copy:
+ dest: ../test.aws_ec2.yml
+ content: ""
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/populate_cache.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/populate_cache.yml
new file mode 100644
index 00000000..64e8da4c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/populate_cache.yml
@@ -0,0 +1,63 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ environment: "{{ ansible_test.environment }}"
+ collections:
+ - community.general
+ tasks:
+
+ - module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+
+ # Create VPC, subnet, security group, and find image_id to create instance
+
+ - include_tasks: setup.yml
+# - pause:
+# seconds: 240
+
+ - name: assert group was populated with inventory but is empty
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "not groups.aws_ec2"
+
+ # Create new host, add it to inventory and then terminate it without updating the cache
+
+ - name: create a new host
+ ec2:
+ image: '{{ image_id }}'
+ exact_count: 1
+ count_tag:
+ Name: '{{ resource_prefix }}'
+ instance_tags:
+ Name: '{{ resource_prefix }}'
+ instance_type: t2.micro
+ wait: yes
+ group_id: '{{ sg_id }}'
+ vpc_subnet_id: '{{ subnet_id }}'
+ register: setup_instance
+
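+    # Refreshing the inventory here runs the aws_ec2 plugin against the cache-enabled
+    # config, populating the jsonfile cache that test_inventory_cache.yml checks next.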
+ - meta: refresh_inventory
+
+ always:
+
+ - name: remove setup ec2 instance
+ ec2:
+ instance_type: t2.micro
+ instance_ids: '{{ setup_instance.instance_ids }}'
+ state: absent
+ wait: yes
+ instance_tags:
+ Name: '{{ resource_prefix }}'
+ group_id: '{{ sg_id }}'
+ vpc_subnet_id: '{{ subnet_id }}'
+ ignore_errors: yes
+ when: setup_instance is defined
+
+ - include_tasks: tear_down.yml
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/setup.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/setup.yml
new file mode 100644
index 00000000..8af11b1c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/setup.yml
@@ -0,0 +1,51 @@
+- name: get image ID to create an instance
+ ec2_ami_info:
+ filters:
+ architecture: x86_64
+ owner-id: '125523088429'
+ virtualization-type: hvm
+ root-device-type: ebs
+ name: 'Fedora-Atomic-27*'
+ register: fedora_images
+
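+# random() is seeded with resource_prefix so the CIDRs below are stable for this test
+# run and tear_down.yml can recompute the same values.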
+- set_fact:
+ image_id: '{{ fedora_images.images.0.image_id }}'
+ vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/16'
+ subnet_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/24'
+
+- name: create a VPC to work in
+ ec2_vpc_net:
+ cidr_block: '{{ vpc_cidr }}'
+ state: present
+ name: '{{ resource_prefix }}_setup'
+ resource_tags:
+ Name: '{{ resource_prefix }}_setup'
+ register: setup_vpc
+
+- set_fact:
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+
+- name: create a subnet to use for creating an ec2 instance
+ ec2_vpc_subnet:
+    az: '{{ aws_region }}a'
+    vpc_id: '{{ setup_vpc.vpc.id }}'
+    cidr: '{{ subnet_cidr }}'
+    state: present
+    tags:
+      Name: '{{ resource_prefix }}_setup'
+ register: setup_subnet
+
+- set_fact:
+ subnet_id: '{{ setup_subnet.subnet.id }}'
+
+- name: create a security group to use for creating an ec2 instance
+ ec2_group:
+ name: '{{ resource_prefix }}_setup'
+ description: 'created by Ansible integration tests'
+ state: present
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ register: setup_sg
+
+- set_fact:
+ sg_id: '{{ setup_sg.group_id }}'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/tear_down.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/tear_down.yml
new file mode 100644
index 00000000..c782421d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/tear_down.yml
@@ -0,0 +1,31 @@
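+# Recompute the CIDRs that setup.yml derived from the resource_prefix seed so the
+# same subnet and VPC can be removed here.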
+- set_fact:
+ vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/16'
+ subnet_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/24'
+
+- name: remove setup security group
+ ec2_group:
+ name: '{{ resource_prefix }}_setup'
+ description: 'created by Ansible integration tests'
+ state: absent
+ vpc_id: '{{ vpc_id }}'
+ ignore_errors: yes
+
+- name: remove setup subnet
+ ec2_vpc_subnet:
+    az: '{{ aws_region }}a'
+    vpc_id: '{{ vpc_id }}'
+    cidr: '{{ subnet_cidr }}'
+    state: absent
+    tags:
+      Name: '{{ resource_prefix }}_setup'
+ ignore_errors: yes
+
+- name: remove setup VPC
+ ec2_vpc_net:
+ cidr_block: '{{ vpc_cidr }}'
+ state: absent
+ name: '{{ resource_prefix }}_setup'
+ resource_tags:
+ Name: '{{ resource_prefix }}_setup'
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml
new file mode 100644
index 00000000..cc1b9a5a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml
@@ -0,0 +1,9 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: assert inventory was not populated by aws_ec2 inventory plugin
+ assert:
+ that:
+ - "'aws_ec2' not in groups"
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_inventory_cache.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_inventory_cache.yml
new file mode 100644
index 00000000..d83cb0bf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_inventory_cache.yml
@@ -0,0 +1,18 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: assert cache was used to populate inventory
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "groups.aws_ec2 | length == 1"
+
+ - meta: refresh_inventory
+
+ - name: assert refresh_inventory updated the cache
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "not groups.aws_ec2"
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory.yml
new file mode 100644
index 00000000..80f4f023
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory.yml
@@ -0,0 +1,86 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ environment: "{{ ansible_test.environment }}"
+ tasks:
+
+ - module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+
+ block:
+
+ # Create VPC, subnet, security group, and find image_id to create instance
+
+ - include_tasks: setup.yml
+
+ - name: assert group was populated with inventory but is empty
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "not groups.aws_ec2"
+
+ # Create new host, refresh inventory, remove host, refresh inventory
+
+ - name: create a new host
+ ec2:
+ image: '{{ image_id }}'
+ exact_count: 1
+ count_tag:
+ Name: '{{ resource_prefix }}'
+ instance_tags:
+ Name: '{{ resource_prefix }}'
+ instance_type: t2.micro
+ wait: yes
+ group_id: '{{ sg_id }}'
+ vpc_subnet_id: '{{ subnet_id }}'
+ register: setup_instance
+
+ - meta: refresh_inventory
+
+ - name: assert group was populated with inventory and is no longer empty
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "groups.aws_ec2 | length == 1"
+ - "groups.aws_ec2.0 == '{{ resource_prefix }}'"
+
+ - name: remove setup ec2 instance
+ ec2:
+ instance_type: t2.micro
+ instance_ids: '{{ setup_instance.instance_ids }}'
+ state: absent
+ wait: yes
+ instance_tags:
+ Name: '{{ resource_prefix }}'
+ group_id: '{{ sg_id }}'
+ vpc_subnet_id: '{{ subnet_id }}'
+
+ - meta: refresh_inventory
+
+ - name: assert group was populated with inventory but is empty
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "not groups.aws_ec2"
+
+ always:
+
+ - name: remove setup ec2 instance
+ ec2:
+ instance_type: t2.micro
+ instance_ids: '{{ setup_instance.instance_ids }}'
+ state: absent
+ wait: yes
+ instance_tags:
+ Name: '{{ resource_prefix }}'
+ group_id: '{{ sg_id }}'
+ vpc_subnet_id: '{{ subnet_id }}'
+ ignore_errors: yes
+ when: setup_instance is defined
+
+ - include_tasks: tear_down.yml
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_concatenation.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_concatenation.yml
new file mode 100644
index 00000000..bd4aaed3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_concatenation.yml
@@ -0,0 +1,61 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ environment: "{{ ansible_test.environment }}"
+ tasks:
+
+ - module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+
+ # Create VPC, subnet, security group, and find image_id to create instance
+ - include_tasks: setup.yml
+
+ # Create new host, refresh inventory
+ - name: create a new host
+ ec2:
+ image: '{{ image_id }}'
+ exact_count: 1
+ count_tag:
+ Name: '{{ resource_prefix }}'
+ instance_tags:
+ Name: '{{ resource_prefix }}'
+ OtherTag: value
+ instance_type: t2.micro
+ wait: yes
+ group_id: '{{ sg_id }}'
+ vpc_subnet_id: '{{ subnet_id }}'
+ register: setup_instance
+
+ - meta: refresh_inventory
+
+    - name: register the expected constructed hostname
+ set_fact:
+ expected_hostname: "value_{{ resource_prefix }}"
+
+ - name: "Ensure we've got a hostvars entry for the new host"
+ assert:
+ that:
+ - expected_hostname in hostvars
+
+ always:
+
+ - name: remove setup ec2 instance
+ ec2:
+ instance_type: t2.micro
+ instance_ids: '{{ setup_instance.instance_ids }}'
+ state: absent
+ wait: yes
+ instance_tags:
+ Name: '{{ resource_prefix }}'
+ group_id: "{{ sg_id }}"
+ vpc_subnet_id: "{{ subnet_id }}"
+ ignore_errors: yes
+ when: setup_instance is defined
+
+ - include_tasks: tear_down.yml
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml
new file mode 100644
index 00000000..2085bed9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml
@@ -0,0 +1,74 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ environment: "{{ ansible_test.environment }}"
+ tasks:
+
+ - module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+
+ # Create VPC, subnet, security group, and find image_id to create instance
+
+ - include_tasks: setup.yml
+
+ # Create new host, refresh inventory
+
+ - name: create a new host
+ ec2:
+ image: '{{ image_id }}'
+ exact_count: 1
+ count_tag:
+ Name: '{{ resource_prefix }}'
+ instance_tags:
+ Name: '{{ resource_prefix }}'
+ tag1: value1
+ tag2: value2
+ instance_type: t2.micro
+ wait: yes
+ group_id: '{{ sg_id }}'
+ vpc_subnet_id: '{{ subnet_id }}'
+ register: setup_instance
+
+ - meta: refresh_inventory
+
+ - name: register the keyed sg group name
+ set_fact:
+ sg_group_name: "security_groups_{{ sg_id | replace('-', '_') }}"
+
+    - name: register the name of one of the keyed tag groups
+ set_fact:
+ tag_group_name: "tag_Name_{{ resource_prefix | replace('-', '_') }}"
+
+ - name: assert the keyed groups and groups from constructed config were added to inventory and composite var added to hostvars
+ assert:
+ that:
+          # There are 9 groups: all, ungrouped, aws_ec2, the sg keyed group, 3 tag keyed groups (one per tag), the arch keyed group and the constructed group
+ - "groups | length == 9"
+ - "groups[tag_group_name] | length == 1"
+ - "groups[sg_group_name] | length == 1"
+ - "groups.arch_x86_64 | length == 1"
+ - "groups.tag_with_name_key | length == 1"
+ - vars.hostvars[groups.aws_ec2.0]['test_compose_var_sum'] == 'value1value2'
+
+ always:
+
+ - name: remove setup ec2 instance
+ ec2:
+ instance_type: t2.micro
+ instance_ids: '{{ setup_instance.instance_ids }}'
+ state: absent
+ wait: yes
+ instance_tags:
+ Name: '{{ resource_prefix }}'
+ group_id: "{{ sg_id }}"
+ vpc_subnet_id: "{{ subnet_id }}"
+ ignore_errors: yes
+ when: setup_instance is defined
+
+ - include_tasks: tear_down.yml
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_refresh_inventory.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_refresh_inventory.yml
new file mode 100644
index 00000000..b1c14543
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_refresh_inventory.yml
@@ -0,0 +1,68 @@
+- name: Test updating inventory
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+ - name: assert group was populated with inventory but is empty
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "not groups.aws_ec2"
+
+ - name: create a new host
+ ec2:
+ image: "{{ images[aws_region] }}"
+ exact_count: 1
+ count_tag:
+ Name: '{{ resource_prefix }}'
+ instance_tags:
+ Name: '{{ resource_prefix }}'
+ instance_type: t2.micro
+ wait: yes
+ group_id: '{{ setup_sg.group_id }}'
+ vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
+ register: setup_instance
+
+ - meta: refresh_inventory
+
+ - name: assert group was populated with inventory and is no longer empty
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "groups.aws_ec2 | length == 1"
+ - "groups.aws_ec2.0 == '{{ resource_prefix }}'"
+
+ - name: remove setup ec2 instance
+ ec2:
+ instance_type: t2.micro
+ instance_ids: '{{ setup_instance.instance_ids }}'
+ state: absent
+ wait: yes
+ instance_tags:
+ Name: '{{ resource_prefix }}'
+ group_id: '{{ setup_sg.group_id }}'
+ vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
+
+ - meta: refresh_inventory
+
+ - name: assert group was populated with inventory but is empty
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "not groups.aws_ec2"
+
+ always:
+ - name: remove setup ec2 instance
+ ec2:
+ instance_type: t2.micro
+ instance_ids: '{{ setup_instance.instance_ids }}'
+ state: absent
+ wait: yes
+ instance_tags:
+ Name: '{{ resource_prefix }}'
+ group_id: '{{ setup_sg.group_id }}'
+ vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/runme.sh b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/runme.sh
new file mode 100755
index 00000000..d28e6970
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/runme.sh
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# ensure test config is empty
+ansible-playbook playbooks/empty_inventory_config.yml "$@"
+
+export ANSIBLE_INVENTORY_ENABLED="amazon.aws.aws_ec2"
+
+# test with default inventory file
+#ansible-playbook playbooks/test_invalid_aws_ec2_inventory_config.yml "$@"
+
+export ANSIBLE_INVENTORY=test.aws_ec2.yml
+
+# test empty inventory config
+#ansible-playbook playbooks/test_invalid_aws_ec2_inventory_config.yml "$@"
+
+# generate inventory config and test using it
+#ansible-playbook playbooks/create_inventory_config.yml "$@"
+#ansible-playbook playbooks/test_populating_inventory.yml "$@"
+
+# generate inventory config with caching and test using it
+ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_cache.yml.j2'" "$@"
+ansible-playbook playbooks/populate_cache.yml "$@"
+ansible-playbook playbooks/test_inventory_cache.yml "$@"
+
+# remove inventory cache
+rm -r aws_ec2_cache_dir/
+
+# generate inventory config with constructed features and test using it
+#ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_constructed.yml.j2'" "$@"
+#ansible-playbook playbooks/test_populating_inventory_with_constructed.yml "$@"
+ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_concatenation.yml.j2'" "$@"
+ansible-playbook playbooks/test_populating_inventory_with_concatenation.yml "$@"
+
+# cleanup inventory config
+ansible-playbook playbooks/empty_inventory_config.yml "$@"
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory.yml.j2 b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory.yml.j2
new file mode 100644
index 00000000..baac15be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory.yml.j2
@@ -0,0 +1,14 @@
+plugin: amazon.aws.aws_ec2
+aws_access_key_id: '{{ aws_access_key }}'
+aws_secret_access_key: '{{ aws_secret_key }}'
+{% if security_token | default(false) %}
+aws_security_token: '{{ security_token }}'
+{% endif %}
+regions:
+- '{{ aws_region }}'
+filters:
+ tag:Name:
+ - '{{ resource_prefix }}'
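+# Prefer the Name tag for the inventory hostname, falling back to the instance DNS name.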
+hostnames:
+- tag:Name
+- dns-name
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_cache.yml.j2 b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_cache.yml.j2
new file mode 100644
index 00000000..8fe4e33f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_cache.yml.j2
@@ -0,0 +1,14 @@
+plugin: amazon.aws.aws_ec2
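+# Cache results on disk with the jsonfile cache plugin so test_inventory_cache.yml can
+# verify that a later run is served from the cache until refresh_inventory runs again.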
+cache: True
+cache_plugin: jsonfile
+cache_connection: aws_ec2_cache_dir
+aws_access_key_id: '{{ aws_access_key }}'
+aws_secret_access_key: '{{ aws_secret_key }}'
+{% if security_token | default(false) %}
+aws_security_token: '{{ security_token }}'
+{% endif %}
+regions:
+- '{{ aws_region }}'
+filters:
+ tag:Name:
+ - '{{ resource_prefix }}'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_concatenation.yml.j2 b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_concatenation.yml.j2
new file mode 100644
index 00000000..035b1d7c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_concatenation.yml.j2
@@ -0,0 +1,15 @@
+plugin: amazon.aws.aws_ec2
+aws_access_key_id: '{{ aws_access_key }}'
+aws_secret_access_key: '{{ aws_secret_key }}'
+{% if security_token | default(false) %}
+aws_security_token: '{{ security_token }}'
+{% endif %}
+regions:
+- '{{ aws_region }}'
+filters:
+ tag:Name:
+ - '{{ resource_prefix }}'
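+# Hostnames are built as <tag:OtherTag>_<tag:Name> (e.g. value_<resource_prefix>), which
+# is the name test_populating_inventory_with_concatenation.yml expects to find in hostvars.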
+hostnames:
+ - name: 'tag:Name'
+ separator: '_'
+ prefix: 'tag:OtherTag'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_constructed.yml.j2 b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_constructed.yml.j2
new file mode 100644
index 00000000..c0ebcbfc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_constructed.yml.j2
@@ -0,0 +1,22 @@
+plugin: amazon.aws.aws_ec2
+aws_access_key_id: '{{ aws_access_key }}'
+aws_secret_access_key: '{{ aws_secret_key }}'
+{% if security_token | default(false) %}
+aws_security_token: '{{ security_token }}'
+{% endif %}
+regions:
+- '{{ aws_region }}'
+filters:
+ tag:Name:
+ - '{{ resource_prefix }}'
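+# The keyed_groups below create security_groups_<sg id>, tag_<key>_<value> and
+# arch_<architecture> groups; compose and groups add test_compose_var_sum and
+# tag_with_name_key, matching the asserts in test_populating_inventory_with_constructed.yml.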
+keyed_groups:
+- key: 'security_groups|community.general.json_query("[].group_id")'
+ prefix: security_groups
+- key: tags
+ prefix: tag
+- prefix: arch
+ key: architecture
+compose:
+ test_compose_var_sum: tags.tag1 + tags.tag2
+groups:
+ tag_with_name_key: '''Name'' in (tags | list)'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/test.aws_ec2.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/test.aws_ec2.yml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/test.aws_ec2.yml
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/aliases b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/aliases
new file mode 100644
index 00000000..56927195
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+unsupported
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/create_inventory_config.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/create_inventory_config.yml
new file mode 100644
index 00000000..f0a9030a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/create_inventory_config.yml
@@ -0,0 +1,11 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ vars:
+ template_name: "../templates/{{ template | default('inventory.j2') }}"
+ tasks:
+ - name: write inventory config file
+ copy:
+ dest: ../test.aws_rds.yml
+ content: "{{ lookup('template', template_name) }}"
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/empty_inventory_config.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/empty_inventory_config.yml
new file mode 100644
index 00000000..d7e2cda3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/empty_inventory_config.yml
@@ -0,0 +1,9 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: write inventory config file
+ copy:
+ dest: ../test.aws_rds.yml
+ content: ""
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/populate_cache.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/populate_cache.yml
new file mode 100644
index 00000000..aa757410
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/populate_cache.yml
@@ -0,0 +1,56 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ environment: "{{ ansible_test.environment }}"
+ collections:
+ - community.aws
+ tasks:
+
+ - module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+ - set_fact:
+ instance_id: '{{ resource_prefix }}-mariadb'
+
+ - name: assert group was populated with inventory but is empty
+ assert:
+ that:
+ - "'aws_rds' in groups"
+ - "not groups.aws_rds"
+
+ # Create new host, add it to inventory and then terminate it without updating the cache
+
+ - name: create minimal mariadb instance in default VPC and default subnet group
+ rds_instance:
+ state: present
+ engine: mariadb
+ db_instance_class: db.t2.micro
+ allocated_storage: 20
+ instance_id: '{{ instance_id }}'
+ master_username: 'ansibletestuser'
+ master_user_password: 'password-{{ resource_prefix | regex_findall(".{8}$") | first }}'
+ tags:
+ workload_type: other
+ register: setup_instance
+
+ - meta: refresh_inventory
+
+ - assert:
+ that:
+ - groups.aws_rds
+
+ always:
+
+ - name: remove mariadb instance
+ rds_instance:
+ state: absent
+ engine: mariadb
+ skip_final_snapshot: yes
+ instance_id: '{{ instance_id }}'
+ ignore_errors: yes
+ when: setup_instance is defined
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_invalid_aws_rds_inventory_config.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_invalid_aws_rds_inventory_config.yml
new file mode 100644
index 00000000..49951357
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_invalid_aws_rds_inventory_config.yml
@@ -0,0 +1,9 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: assert inventory was not populated by aws_rds inventory plugin
+ assert:
+ that:
+ - "'aws_rds' not in groups"
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_cache.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_cache.yml
new file mode 100644
index 00000000..7eadbad8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_cache.yml
@@ -0,0 +1,18 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: assert cache was used to populate inventory
+ assert:
+ that:
+ - "'aws_rds' in groups"
+ - "groups.aws_rds | length == 1"
+
+ - meta: refresh_inventory
+
+ - name: assert refresh_inventory updated the cache
+ assert:
+ that:
+ - "'aws_rds' in groups"
+ - "not groups.aws_rds"
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory.yml
new file mode 100644
index 00000000..a34dd503
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory.yml
@@ -0,0 +1,76 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ environment: "{{ ansible_test.environment }}"
+ collections:
+ - community.aws
+ tasks:
+
+ - module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+
+ - set_fact:
+ instance_id: "{{ resource_prefix }}-mariadb"
+
+ - debug: var=groups
+ - name: assert group was populated with inventory but is empty
+ assert:
+ that:
+ - "'aws_rds' in groups"
+ - "not groups.aws_rds"
+
+ # Create new host, refresh inventory, remove host, refresh inventory
+
+ - name: create minimal mariadb instance in default VPC and default subnet group
+ rds_instance:
+ state: present
+ engine: mariadb
+ db_instance_class: db.t2.micro
+ allocated_storage: 20
+ instance_id: '{{ instance_id }}'
+ master_username: 'ansibletestuser'
+ master_user_password: 'password-{{ resource_prefix | regex_findall(".{8}$") | first }}'
+ tags:
+ workload_type: other
+ register: setup_instance
+
+ - meta: refresh_inventory
+
+ - name: assert group was populated with inventory and is no longer empty
+ assert:
+ that:
+ - "'aws_rds' in groups"
+ - "groups.aws_rds | length == 1"
+ - "groups.aws_rds.0 == '{{ instance_id }}'"
+
+ - name: remove mariadb instance
+ rds_instance:
+ state: absent
+ engine: mariadb
+ skip_final_snapshot: yes
+ instance_id: '{{ instance_id }}'
+
+ - meta: refresh_inventory
+
+ - name: assert group was populated with inventory but is empty
+ assert:
+ that:
+ - "'aws_rds' in groups"
+ - "not groups.aws_rds"
+
+ always:
+
+ - name: remove mariadb instance
+ rds_instance:
+ state: absent
+ engine: mariadb
+ skip_final_snapshot: yes
+ instance_id: '{{ instance_id }}'
+ ignore_errors: yes
+ when: setup_instance is defined
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory_with_constructed.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory_with_constructed.yml
new file mode 100644
index 00000000..d0b1ea36
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory_with_constructed.yml
@@ -0,0 +1,64 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ environment: "{{ ansible_test.environment }}"
+ collections:
+ - community.aws
+ tasks:
+
+ - module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ block:
+
+ - set_fact:
+ instance_id: "{{ resource_prefix }}-mariadb"
+
+ - name: create minimal mariadb instance in default VPC and default subnet group
+ rds_instance:
+ state: present
+ engine: mariadb
+ db_instance_class: db.t2.micro
+ allocated_storage: 20
+ instance_id: '{{ resource_prefix }}-mariadb'
+ master_username: 'ansibletestuser'
+ master_user_password: 'password-{{ resource_prefix | regex_findall(".{8}$") | first }}'
+ tags:
+ workload_type: other
+ register: setup_instance
+
+ - meta: refresh_inventory
+ - debug: var=groups
+
+    - name: 'generate the expected group name based on the db parameter groups'
+ vars:
+ parameter_group_name: '{{ setup_instance.db_parameter_groups[0].db_parameter_group_name }}'
+ set_fact:
+ parameter_group_key: 'rds_parameter_group_{{ parameter_group_name | replace(".", "_") }}'
+
+ - name: assert the keyed groups from constructed config were added to inventory
+ assert:
+ that:
+ # There are 6 groups: all, ungrouped, aws_rds, tag keyed group, engine keyed group, parameter group keyed group
+ - "groups | length == 6"
+ - '"all" in groups'
+ - '"ungrouped" in groups'
+ - '"aws_rds" in groups'
+ - '"tag_workload_type_other" in groups'
+ - '"rds_mariadb" in groups'
+ - 'parameter_group_key in groups'
+
+ always:
+
+ - name: remove mariadb instance
+ rds_instance:
+ state: absent
+ engine: mariadb
+ skip_final_snapshot: yes
+ instance_id: '{{ instance_id }}'
+ ignore_errors: yes
+ when: setup_instance is defined
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_refresh_inventory.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_refresh_inventory.yml
new file mode 100644
index 00000000..f14e4861
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_refresh_inventory.yml
@@ -0,0 +1,66 @@
+- name: test updating inventory
+ module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region }}'
+ collections:
+ - community.aws
+ block:
+ - set_fact:
+ instance_id: "{{ resource_prefix }}update"
+
+ - name: assert group was populated with inventory but is empty
+ assert:
+ that:
+ - "'aws_rds' in groups"
+ - "not groups.aws_rds"
+
+ - name: create minimal mariadb instance in default VPC and default subnet group
+ rds_instance:
+ state: present
+ engine: mariadb
+ db_instance_class: db.t2.micro
+ allocated_storage: 20
+      instance_id: '{{ instance_id }}'
+ master_username: 'ansibletestuser'
+ master_user_password: 'password-{{ resource_prefix | regex_findall(".{8}$") | first }}'
+ tags:
+ workload_type: other
+ register: setup_instance
+
+ - meta: refresh_inventory
+
+ - name: assert group was populated with inventory and is no longer empty
+ assert:
+ that:
+ - "'aws_rds' in groups"
+ - "groups.aws_rds | length == 1"
+        - "groups.aws_rds.0 == '{{ instance_id }}'"
+
+ - name: remove mariadb instance
+ rds_instance:
+ state: absent
+ engine: mariadb
+ skip_final_snapshot: yes
+      instance_id: '{{ instance_id }}'
+
+ - meta: refresh_inventory
+
+ - name: assert group was populated with inventory but is empty
+ assert:
+ that:
+ - "'aws_rds' in groups"
+ - "not groups.aws_rds"
+
+ always:
+
+ - name: remove mariadb instance
+ rds_instance:
+ state: absent
+ engine: mariadb
+ skip_final_snapshot: yes
+      instance_id: '{{ instance_id }}'
+ ignore_errors: yes
+ when: setup_instance is defined
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/runme.sh b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/runme.sh
new file mode 100755
index 00000000..7697fbb8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/runme.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# ensure test config is empty
+ansible-playbook playbooks/empty_inventory_config.yml "$@"
+
+export ANSIBLE_INVENTORY_ENABLED="amazon.aws.aws_rds"
+
+# test with default inventory file
+ansible-playbook playbooks/test_invalid_aws_rds_inventory_config.yml "$@"
+
+export ANSIBLE_INVENTORY=test.aws_rds.yml
+
+# test empty inventory config
+ansible-playbook playbooks/test_invalid_aws_rds_inventory_config.yml "$@"
+
+# generate inventory config and test using it
+ansible-playbook playbooks/create_inventory_config.yml "$@"
+ansible-playbook playbooks/test_populating_inventory.yml "$@"
+
+# generate inventory config with caching and test using it
+ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_cache.j2'" "$@"
+ansible-playbook playbooks/populate_cache.yml "$@"
+ansible-playbook playbooks/test_inventory_cache.yml "$@"
+
+# remove inventory cache
+rm -r aws_rds_cache_dir/
+
+# generate inventory config with constructed features and test using it
+ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_constructed.j2'" "$@"
+ansible-playbook playbooks/test_populating_inventory_with_constructed.yml "$@"
+
+# cleanup inventory config
+ansible-playbook playbooks/empty_inventory_config.yml "$@"
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory.j2 b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory.j2
new file mode 100644
index 00000000..61a659ea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory.j2
@@ -0,0 +1,10 @@
+plugin: amazon.aws.aws_rds
+aws_access_key_id: '{{ aws_access_key }}'
+aws_secret_access_key: '{{ aws_secret_key }}'
+{% if security_token | default(false) %}
+aws_security_token: '{{ security_token }}'
+{% endif %}
+regions:
+ - '{{ aws_region }}'
+filters:
+ db-instance-id: "{{ resource_prefix }}-mariadb"
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_cache.j2 b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_cache.j2
new file mode 100644
index 00000000..6e9c40e9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_cache.j2
@@ -0,0 +1,13 @@
+plugin: amazon.aws.aws_rds
+cache: True
+cache_plugin: jsonfile
+cache_connection: aws_rds_cache_dir
+aws_access_key_id: '{{ aws_access_key }}'
+aws_secret_access_key: '{{ aws_secret_key }}'
+{% if security_token | default(false) %}
+aws_security_token: '{{ security_token }}'
+{% endif %}
+regions:
+ - '{{ aws_region }}'
+filters:
+ db-instance-id: "{{ resource_prefix }}-mariadb"
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_constructed.j2 b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_constructed.j2
new file mode 100644
index 00000000..a0636a97
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_constructed.j2
@@ -0,0 +1,17 @@
+plugin: amazon.aws.aws_rds
+aws_access_key_id: '{{ aws_access_key }}'
+aws_secret_access_key: '{{ aws_secret_key }}'
+{% if security_token | default(false) %}
+aws_security_token: '{{ security_token }}'
+{% endif %}
+regions:
+ - '{{ aws_region }}'
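+# These keyed_groups yield rds_parameter_group_<name>, tag_<key>_<value> and rds_<engine>
+# groups, which test_populating_inventory_with_constructed.yml asserts are present.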
+keyed_groups:
+ - key: 'db_parameter_groups|community.general.json_query("[].db_parameter_group_name")'
+ prefix: rds_parameter_group
+ - key: tags
+ prefix: tag
+ - key: engine
+ prefix: rds
+filters:
+ db-instance-id: "{{ resource_prefix }}-mariadb"
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/test.aws_rds.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/test.aws_rds.yml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/test.aws_rds.yml
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/aliases b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/aliases
new file mode 100644
index 00000000..6e3860be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group2
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/inventory b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/inventory
new file mode 100644
index 00000000..5093e858
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/inventory
@@ -0,0 +1,6 @@
+[tests]
+localhost
+
+[all:vars]
+ansible_connection=local
+ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/main.yml
new file mode 100644
index 00000000..139250a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/main.yml
@@ -0,0 +1,8 @@
+- hosts: all
+ gather_facts: no
+ collections:
+ - community.aws
+ - amazon.aws
+ roles:
+ # Test the behaviour of module_utils.core.AnsibleAWSModule.client (boto3)
+ - 'ansibleawsmodule.client'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/meta/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/meta/main.yml
new file mode 100644
index 00000000..1f64f116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/files/amazonroot.pem b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/files/amazonroot.pem
new file mode 100644
index 00000000..a6f3e92a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/files/amazonroot.pem
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsF
+ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6
+b24gUm9vdCBDQSAxMB4XDTE1MDUyNjAwMDAwMFoXDTM4MDExNzAwMDAwMFowOTEL
+MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv
+b3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALJ4gHHKeNXj
+ca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgHFzZM
+9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qw
+IFAGbHrQgLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6
+VOujw5H5SNz/0egwLX0tdHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L
+93FcXmn/6pUCyziKrlA4b9v7LWIbxcceVOF34GfID5yHI9Y/QCB/IIDEgEw+OyQm
+jgSubJrIqg0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
+AYYwHQYDVR0OBBYEFIQYzIU07LwMlJQuCFmcx7IQTgoIMA0GCSqGSIb3DQEBCwUA
+A4IBAQCY8jdaQZChGsV2USggNiMOruYou6r4lK5IpDB/G/wkjUu0yKGX9rbxenDI
+U5PMCCjjmCXPI6T53iHTfIUJrU6adTrCC2qJeHZERxhlbI1Bjjt/msv0tadQ1wUs
+N+gDS63pYaACbvXy8MWy7Vu33PqUXHeeE6V/Uq2V8viTO96LXFvKWlJbYK8U90vv
+o/ufQJVtMVT8QtPHRh8jrdkPSHCa2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU
+5MsI+yMRQ+hDKXJioaldXgjUkK642M4UwtBV8ob2xJNDd2ZhwLnoQdeXeGADbkpy
+rqXRfboQnoZsG4q5WTP468SQvvG5
+-----END CERTIFICATE-----
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/files/isrg-x1.pem b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/files/isrg-x1.pem
new file mode 100644
index 00000000..b85c8037
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/files/isrg-x1.pem
@@ -0,0 +1,31 @@
+-----BEGIN CERTIFICATE-----
+MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
+TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
+cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
+WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
+ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
+h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
+0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
+A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
+T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
+B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
+B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
+KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
+OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
+jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
+qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
+rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
+hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
+ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
+3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
+NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
+ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
+TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
+jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
+oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
+4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
+mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
+emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
+-----END CERTIFICATE-----
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/library/example_module.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/library/example_module.py
new file mode 100644
index 00000000..5e2c8e3e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/library/example_module.py
@@ -0,0 +1,46 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# A bare-minimum Ansible Module based on AnsibleAWSModule used for testing some
+# of the core behaviour around AWS/Boto3 connection details
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+
+
+def main():
+ module = AnsibleAWSModule(
+ argument_spec={},
+ supports_check_mode=True,
+ )
+
+ decorator = AWSRetry.jittered_backoff()
+ client = module.client('ec2', retry_decorator=decorator)
+
+ filters = ansible_dict_to_boto3_filter_list({'name': 'amzn2-ami-hvm-2.0.202006*-x86_64-gp2'})
+
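+    # aws_retry=True routes the call through the AWSRetry.jittered_backoff decorator
+    # configured above so transient AWS API errors are retried.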
+ try:
+ images = client.describe_images(aws_retry=True, ImageIds=[], Filters=filters, Owners=['amazon'], ExecutableUsers=[])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Fail JSON AWS')
+
+ # Return something, just because we can.
+ module.exit_json(
+ changed=False,
+ **camel_dict_to_snake_dict(images))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/meta/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/meta/main.yml
new file mode 100644
index 00000000..77589cc2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/meta/main.yml
@@ -0,0 +1,5 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
+collections:
+ - amazon.aws
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/ca_bundle.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/ca_bundle.yml
new file mode 100644
index 00000000..7ad4e7a3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/ca_bundle.yml
@@ -0,0 +1,202 @@
+---
+- name: 'Create temporary location for CA files'
+ tempfile:
+ state: directory
+ suffix: 'test-CAs'
+ register: ca_tmp
+
+- name: 'Ensure we have Amazons root CA available to us'
+ copy:
+ src: 'amazonroot.pem'
+ dest: '{{ ca_tmp.path }}/amazonroot.pem'
+ mode: 0644
+
+- name: 'Ensure we have another CA (ISRG-X1) bundle available to us'
+ copy:
+ src: 'isrg-x1.pem'
+ dest: '{{ ca_tmp.path }}/isrg-x1.pem'
+ mode: 0644
+
+##################################################################################
+# Test disabling cert validation (make sure we don't error)
+
+- name: 'Test basic operation using default CA bundle (no validation) - parameter'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ validate_certs: False
+ register: default_bundle_result
+
+- assert:
+ that:
+ - default_bundle_result is successful
+
+##################################################################################
+# Tests using Amazon's CA (the one the endpoint certs should be signed with)
+
+- name: 'Test basic operation using Amazons root CA - parameter'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ aws_ca_bundle: '{{ ca_tmp.path }}/amazonroot.pem'
+ register: amazon_ca_result
+
+- assert:
+ that:
+ - amazon_ca_result is successful
+
+- name: 'Test basic operation using Amazons root CA - environment'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ environment:
+ AWS_CA_BUNDLE: '{{ ca_tmp.path }}/amazonroot.pem'
+ register: amazon_ca_result
+
+- assert:
+ that:
+ - amazon_ca_result is successful
+
+- name: 'Test basic operation using Amazons root CA (no validation) - parameter'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ aws_ca_bundle: '{{ ca_tmp.path }}/amazonroot.pem'
+ validate_certs: False
+ register: amazon_ca_result
+
+- assert:
+ that:
+ - amazon_ca_result is successful
+
+- name: 'Test basic operation using Amazons root CA (no validation) - environment'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ validate_certs: False
+ environment:
+ AWS_CA_BUNDLE: '{{ ca_tmp.path }}/amazonroot.pem'
+ register: amazon_ca_result
+
+- assert:
+ that:
+ - amazon_ca_result is successful
+
+##################################################################################
+# Tests using ISRG's CA (one that the endpoint certs *aren't* signed with)
+
+- name: 'Test basic operation using a different CA - parameter'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ aws_ca_bundle: '{{ ca_tmp.path }}/isrg-x1.pem'
+ register: isrg_ca_result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - isrg_ca_result is failed
+ # Caught when we try to do something, and passed to fail_json_aws
+ - '"CERTIFICATE_VERIFY_FAILED" in isrg_ca_result.msg'
+ - '"Fail JSON AWS" in isrg_ca_result.msg'
+
+- name: 'Test basic operation using a different CA - environment'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ environment:
+ AWS_CA_BUNDLE: '{{ ca_tmp.path }}/isrg-x1.pem'
+ register: isrg_ca_result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - isrg_ca_result is failed
+ # Caught when we try to do something, and passed to fail_json_aws
+ - '"CERTIFICATE_VERIFY_FAILED" in isrg_ca_result.msg'
+ - '"Fail JSON AWS" in isrg_ca_result.msg'
+
+- name: 'Test basic operation using a different CA (no validation) - parameter'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ aws_ca_bundle: '{{ ca_tmp.path }}/isrg-x1.pem'
+ validate_certs: False
+ register: isrg_ca_result
+
+- assert:
+ that:
+ - isrg_ca_result is successful
+
+- name: 'Test basic operation using a different CA (no validation) - environment'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ validate_certs: False
+ environment:
+ AWS_CA_BUNDLE: '{{ ca_tmp.path }}/isrg-x1.pem'
+ register: isrg_ca_result
+
+- assert:
+ that:
+ - isrg_ca_result is successful
+
+##################################################################################
+# https://github.com/ansible-collections/amazon.aws/issues/129
+- name: 'Test CA bundle is used when authenticating with a profile - implied validation'
+ example_module:
+ profile: 'test_profile'
+ aws_ca_bundle: '{{ ca_tmp.path }}/isrg-x1.pem'
+ register: isrg_ca_result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - isrg_ca_result is failed
+ # Caught when we try to do something, and passed to fail_json_aws
+ - '"CERTIFICATE_VERIFY_FAILED" in isrg_ca_result.msg'
+ - '"Fail JSON AWS" in isrg_ca_result.msg'
+
+- name: 'Test CA bundle is used when authenticating with a profile - explicit validation'
+ example_module:
+ profile: 'test_profile'
+ aws_ca_bundle: '{{ ca_tmp.path }}/isrg-x1.pem'
+ validate_certs: True
+ register: isrg_ca_result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - isrg_ca_result is failed
+ # Caught when we try to do something, and passed to fail_json_aws
+ - '"CERTIFICATE_VERIFY_FAILED" in isrg_ca_result.msg'
+ - '"Fail JSON AWS" in isrg_ca_result.msg'
+
+- name: 'Test CA bundle is used when authenticating with a profile - explicitly disable validation'
+ example_module:
+ profile: 'test_profile'
+ aws_ca_bundle: '{{ ca_tmp.path }}/isrg-x1.pem'
+ validate_certs: False
+ register: isrg_ca_result
+
+- assert:
+ that:
+ - isrg_ca_result is success
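
The CA-bundle tasks above drive the aws_ca_bundle module parameter and the AWS_CA_BUNDLE environment variable. In boto3 terms this controls which bundle TLS connections are verified against; the hedged sketch below (plain boto3, not the collection code, with placeholder paths) shows the equivalent client-level knob, where verify can be a bundle path or False to disable validation entirely.

    # Illustrative boto3-only sketch of the behaviour exercised above; the
    # bundle path and region are placeholders.
    import boto3
    from botocore.exceptions import BotoCoreError, ClientError


    def describe_amazon_images(region, ca_bundle=None, validate=True):
        # verify=<path> pins a CA bundle, verify=False disables validation.
        verify = ca_bundle if validate else False
        client = boto3.client('ec2', region_name=region, verify=verify)
        try:
            return client.describe_images(
                Owners=['amazon'],
                Filters=[{'Name': 'name',
                          'Values': ['amzn2-ami-hvm-2.0.202006*-x86_64-gp2']}])
        except (BotoCoreError, ClientError) as exc:
            # A mismatched bundle surfaces here as CERTIFICATE_VERIFY_FAILED.
            raise SystemExit('describe_images failed: %s' % exc)
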
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/credentials.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/credentials.yml
new file mode 100644
index 00000000..94925829
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/credentials.yml
@@ -0,0 +1,281 @@
+---
+##################################################################################
+# Tests using standard credential parameters
+
+- name: 'Test basic operation using simple credentials (simple-parameters)'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: credential_result
+
+- assert:
+ that:
+ - credential_result is successful
+
+- name: 'Test basic operation using simple credentials (aws-parameters)'
+ example_module:
+ aws_region: '{{ aws_region }}'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ register: credential_result
+
+- assert:
+ that:
+ - credential_result is successful
+
+- name: 'Test basic operation using simple credentials (ec2-parameters)'
+ example_module:
+ ec2_region: '{{ aws_region }}'
+ ec2_access_key: '{{ aws_access_key }}'
+ ec2_secret_key: '{{ aws_secret_key }}'
+ access_token: '{{ security_token }}'
+ register: credential_result
+
+- assert:
+ that:
+ - credential_result is successful
+
+##################################################################################
+# Tests using standard credentials from environment variables
+
+- name: 'Test basic operation using simple credentials (aws-environment)'
+ example_module:
+ environment:
+ AWS_REGION: '{{ aws_region }}'
+ AWS_ACCESS_KEY_ID: '{{ aws_access_key }}'
+ AWS_SECRET_ACCESS_KEY: '{{ aws_secret_key }}'
+ AWS_SECURITY_TOKEN: '{{ security_token }}'
+ register: credential_result
+
+- assert:
+ that:
+ - credential_result is successful
+
+- name: 'Test basic operation using simple credentials (aws2-environment)'
+ example_module:
+ environment:
+ AWS_DEFAULT_REGION: '{{ aws_region }}'
+ AWS_ACCESS_KEY: '{{ aws_access_key }}'
+ AWS_SECRET_KEY: '{{ aws_secret_key }}'
+ AWS_SESSION_TOKEN: '{{ security_token }}'
+ register: credential_result
+
+- assert:
+ that:
+ - credential_result is successful
+
+- name: 'Test basic operation using simple credentials (ec2-environment)'
+ example_module:
+ environment:
+ EC2_REGION: '{{ aws_region }}'
+ EC2_ACCESS_KEY: '{{ aws_access_key }}'
+ EC2_SECRET_KEY: '{{ aws_secret_key }}'
+ EC2_SECURITY_TOKEN: '{{ security_token }}'
+ register: credential_result
+
+- assert:
+ that:
+ - credential_result is successful
+
+##################################################################################
+# Tests for missing parameters
+
+- name: 'Test with missing region'
+ example_module:
+ region: '{{ omit }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: missing_region
+ ignore_errors: True
+
+- assert:
+ that:
+ - missing_region is failed
+ - '"requires a region" in missing_region.msg'
+
+- name: 'Test with missing access key'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ omit }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: missing_access
+ ignore_errors: True
+
+- assert:
+ that:
+ - missing_access is failed
+ - '"Partial credentials found" in missing_access.msg'
+ - '"aws_access_key_id" in missing_access.msg'
+
+- name: 'Test with missing secret key'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ omit }}'
+ security_token: '{{ security_token }}'
+ register: missing_secret
+ ignore_errors: True
+
+- assert:
+ that:
+ - missing_secret is failed
+ - '"Partial credentials found" in missing_secret.msg'
+ - '"aws_secret_access_key" in missing_secret.msg'
+
+- name: 'Test with missing security token'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ omit }}'
+ register: missing_token
+ ignore_errors: True
+
+- assert:
+ that:
+ - missing_token is failed
+ # Caught when we try to do something, and passed to fail_json_aws
+ - '"AuthFailure" in missing_token.msg'
+ - '"Fail JSON AWS" in missing_token.msg'
+ - '"error" in missing_token'
+ - '"code" in missing_token.error'
+ - missing_token.error.code == 'AuthFailure'
+ - '"message" in missing_token.error'
+
+##################################################################################
+# Run an additional authentication request to ensure that we're out of any
+# deny-lists caused by bad requests
+- name: 'Perform valid authentication to avoid deny-listing'
+ example_module:
+ aws_region: '{{ aws_region }}'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ register: anti_denylist
+ until: anti_denylist is success
+ retries: 5
+ delay: 5
+
+##################################################################################
+# Tests for bad parameters
+
+- name: 'Test with bad region'
+ example_module:
+ region: 'junk-example'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: bad_region
+ ignore_errors: True
+
+- assert:
+ that:
+ - bad_region is failed
+ - '"msg" in bad_region'
+ - '"Could not connect to the endpoint URL" in bad_region.msg'
+ - '"Fail JSON AWS" in bad_region.msg'
+ - '"ec2.junk-example" in bad_region.msg'
+
+- name: 'Test with bad access key'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: 'junk-example'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: bad_access
+ ignore_errors: True
+
+- assert:
+ that:
+ - bad_access is failed
+ # Caught when we try to do something, and passed to fail_json_aws
+ - '"AuthFailure" in bad_access.msg'
+ - '"Fail JSON AWS" in bad_access.msg'
+ - '"error" in bad_access'
+ - '"code" in bad_access.error'
+ - bad_access.error.code == 'AuthFailure'
+ - '"message" in bad_access.error'
+
+# Run an additional authentication request to ensure that we're out of any
+# deny-lists caused by bad requests
+- name: 'Perform valid authentication to avoid deny-listing'
+ example_module:
+ aws_region: '{{ aws_region }}'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ register: anti_denylist
+ until: anti_denylist is success
+ retries: 5
+ delay: 5
+
+- name: 'Test with bad secret key'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: 'junk-example'
+ security_token: '{{ security_token }}'
+ register: bad_secret
+ ignore_errors: True
+
+- assert:
+ that:
+ - bad_secret is failed
+ # Caught when we try to do something, and passed to fail_json_aws
+ - '"AuthFailure" in bad_secret.msg'
+ - '"Fail JSON AWS" in bad_secret.msg'
+ - '"error" in bad_secret'
+ - '"code" in bad_secret.error'
+ - bad_secret.error.code == 'AuthFailure'
+ - '"message" in bad_secret.error'
+
+# Run an additional authentication request to ensure that we're out of any
+# deny-lists caused by bad requests
+- name: 'Perform valid authentication to avoid deny-listing'
+ example_module:
+ aws_region: '{{ aws_region }}'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ register: anti_denylist
+ until: anti_denylist is success
+ retries: 5
+ delay: 5
+
+- name: 'Test with bad security token'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: 'junk-example'
+ register: bad_token
+ ignore_errors: True
+
+- assert:
+ that:
+ - bad_token is failed
+ # Caught when we try to do something, and passed to fail_json_aws
+ - '"AuthFailure" in bad_token.msg'
+ - '"Fail JSON AWS" in bad_token.msg'
+ - '"error" in bad_token'
+ - '"code" in bad_token.error'
+ - bad_token.error.code == 'AuthFailure'
+ - '"message" in bad_token.error'
+
+# Run an additional authentication request to ensure that we're out of any
+# deny-lists caused by bad requests
+- name: 'Perform valid authentication to avoid deny-listing'
+ example_module:
+ aws_region: '{{ aws_region }}'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ register: anti_denylist
+ until: anti_denylist is success
+ retries: 5
+ delay: 5
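
All of the credential permutations above funnel into the same botocore credential chain: explicit module parameters (and their aws_*/ec2_* aliases) are passed straight to the client, while the environment-variable variants rely on boto3's own lookup. A minimal sketch of the environment-variable path, assuming boto3 is installed and using obviously fake key material:

    # Minimal sketch of the environment-variable resolution; the values are fake.
    import os

    import boto3

    os.environ['AWS_ACCESS_KEY_ID'] = 'AKIAEXAMPLEEXAMPLE'
    os.environ['AWS_SECRET_ACCESS_KEY'] = 'example-secret-key'
    os.environ['AWS_SESSION_TOKEN'] = 'example-session-token'
    os.environ['AWS_DEFAULT_REGION'] = 'us-east-1'

    session = boto3.Session()                 # resolves credentials from the env
    creds = session.get_credentials()
    print(session.region_name, creds.method)  # expected: 'us-east-1 env'
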
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/endpoints.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/endpoints.yml
new file mode 100644
index 00000000..590af913
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/endpoints.yml
@@ -0,0 +1,123 @@
+---
+##################################################################################
+# Tests using Endpoints
+
+- name: 'Test basic operation using standard endpoint (aws-parameters)'
+ example_module:
+ region: '{{ aws_region }}'
+ aws_endpoint_url: 'https://ec2.{{ aws_region }}.amazonaws.com'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ register: standard_endpoint_result
+
+- name: 'Check that we connected to the standard endpoint'
+ assert:
+ that:
+ - standard_endpoint_result is successful
+ - '"ec2:DescribeImages" in standard_endpoint_result.resource_actions'
+
+# The FIPS endpoints aren't available in every region, this will trigger errors
+# outside of: [ us-east-1, us-east-2, us-west-1, us-west-2 ]
+
+- name: 'Test basic operation using FIPS endpoint (aws-parameters)'
+ example_module:
+ region: '{{ aws_region }}'
+ aws_endpoint_url: 'https://ec2-fips.us-east-1.amazonaws.com'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ register: fips_endpoint_result
+
+- name: 'Check that we connected to the FIPS endpoint'
+ assert:
+ that:
+ - fips_endpoint_result is successful
+ - '"ec2-fips:DescribeImages" in fips_endpoint_result.resource_actions'
+
+- name: 'Test basic operation using FIPS endpoint (simple-parameters)'
+ example_module:
+ region: '{{ aws_region }}'
+ endpoint_url: 'https://ec2-fips.us-east-1.amazonaws.com'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ register: fips_endpoint_result
+
+- name: 'Check that we connected to the FIPS endpoint'
+ assert:
+ that:
+ - fips_endpoint_result is successful
+ - '"ec2-fips:DescribeImages" in fips_endpoint_result.resource_actions'
+
+- name: 'Test basic operation using FIPS endpoint (ec2-parameters)'
+ example_module:
+ region: '{{ aws_region }}'
+ ec2_url: 'https://ec2-fips.us-east-1.amazonaws.com'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ register: fips_endpoint_result
+
+- name: 'Check that we connected to the FIPS endpoint'
+ assert:
+ that:
+ - fips_endpoint_result is successful
+ - '"ec2-fips:DescribeImages" in fips_endpoint_result.resource_actions'
+
+##################################################################################
+# Tests using environment variables
+
+- name: 'Test basic operation using FIPS endpoint (aws-environment)'
+ example_module:
+ region: '{{ aws_region }}'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ environment:
+ AWS_URL: 'https://ec2-fips.us-east-1.amazonaws.com'
+ register: fips_endpoint_result
+
+- name: 'Check that we connected to the FIPS endpoint'
+ assert:
+ that:
+ - fips_endpoint_result is successful
+ - '"ec2-fips:DescribeImages" in fips_endpoint_result.resource_actions'
+
+- name: 'Test basic operation using FIPS endpoint (ec2-environment)'
+ example_module:
+ region: '{{ aws_region }}'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ environment:
+ EC2_URL: 'https://ec2-fips.us-east-1.amazonaws.com'
+ register: fips_endpoint_result
+
+- name: 'Check that we connected to the FIPS endpoint'
+ assert:
+ that:
+ - fips_endpoint_result is successful
+ - '"ec2-fips:DescribeImages" in fips_endpoint_result.resource_actions'
+
+##################################################################################
+# Tests using a bad endpoint URL
+# - This demonstrates that endpoint_url overrides the region-derived endpoint
+
+- name: 'Test with bad endpoint URL'
+ example_module:
+ region: '{{ aws_region }}'
+ endpoint_url: 'https://junk.{{ aws_region }}.amazonaws.com'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: bad_endpoint
+ ignore_errors: True
+
+- assert:
+ that:
+ - bad_endpoint is failed
+ - '"msg" in bad_endpoint'
+ - '"Could not connect to the endpoint URL" in bad_endpoint.msg'
+ - '"Fail JSON AWS" in bad_endpoint.msg'
+ - '"junk.{{ aws_region }}.amazonaws.com" in bad_endpoint.msg'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/main.yml
new file mode 100644
index 00000000..dc61fad6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- name: 'Tests around standard credentials'
+ include_tasks: 'credentials.yml'
+
+- name: 'Tests around profiles'
+ include_tasks: 'profiles.yml'
+
+- name: 'Tests around endpoints'
+ include_tasks: 'endpoints.yml'
+
+- name: 'Tests around CA Bundles'
+ include_tasks: 'ca_bundle.yml'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/profiles.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/profiles.yml
new file mode 100644
index 00000000..17b85038
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/profiles.yml
@@ -0,0 +1,57 @@
+---
+##################################################################################
+# Tests using profiles instead of directly consuming credentials
+
+- name: 'Test basic operation using profile (simple-parameters)'
+ example_module:
+ profile: 'test_profile'
+ register: profile_result
+
+- assert:
+ that:
+ - profile_result is successful
+
+- name: 'Test basic operation using profile (aws-parameters)'
+ example_module:
+ aws_profile: 'test_profile'
+ register: profile_result
+
+- assert:
+ that:
+ - profile_result is successful
+
+- name: 'Test basic operation using profile (aws-environment)'
+ example_module:
+ environment:
+ AWS_PROFILE: 'test_profile'
+ register: profile_result
+
+- assert:
+ that:
+ - profile_result is successful
+
+- name: 'Test basic operation using profile (aws2-environment)'
+ example_module:
+ environment:
+ AWS_DEFAULT_PROFILE: 'test_profile'
+ register: profile_result
+
+- assert:
+ that:
+ - profile_result is successful
+
+##################################################################################
+# Tests with bad profile
+
+- name: 'Test with bad profile'
+ example_module:
+ profile: 'junk-profile'
+ register: bad_profile
+ ignore_errors: True
+
+- assert:
+ that:
+ - bad_profile is failed
+ - '"msg" in bad_profile'
+ - '"junk-profile" in bad_profile.msg'
+ - '"could not be found" in bad_profile.msg'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/runme.sh b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/runme.sh
new file mode 100755
index 00000000..9b0536d2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/runme.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ANSIBLE_ROLES_PATH="../"
+# Boto3
+AWS_CONFIG_FILE="$( pwd )/boto3_config"
+# Boto2
+BOTO_CONFIG="$( pwd )/boto3_config"
+
+export ANSIBLE_ROLES_PATH
+export AWS_CONFIG_FILE
+export BOTO_CONFIG
+
+ansible-playbook setup.yml -i localhost "$@"
+ansible-playbook main.yml -i inventory "$@" -e "@session_credentials.yml"
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/setup.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/setup.yml
new file mode 100644
index 00000000..9b219eb2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/setup.yml
@@ -0,0 +1,40 @@
+---
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ # ===========================================================
+ # While CI uses a dedicated session, the easiest way to run
+ # tests outside of CI is with a simple access/secret key pair.
+ #
+ # For consistency, use sts_session_token to grab session
+ # credentials if we're not already using a session
+ # Note: this can't be done within a session, hence the slightly
+ # strange dance
+ - name: 'Get a session token if we are using a basic key'
+ when:
+ - security_token is not defined
+ block:
+ - name: 'Get a session token'
+ sts_session_token:
+ region: '{{ aws_region }}'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ register: session_token
+ no_log: true
+ - name: 'Override initial tokens'
+ set_fact:
+ session_access_key: '{{ session_token.sts_creds.access_key }}'
+ session_secret_key: '{{ session_token.sts_creds.secret_key }}'
+ session_security_token: '{{ session_token.sts_creds.session_token }}'
+ no_log: true
+
+ - name: 'Write out credentials'
+ template:
+ dest: './session_credentials.yml'
+ src: 'session_credentials.yml.j2'
+
+ - name: 'Write out boto config file'
+ template:
+ dest: './boto3_config'
+ src: 'boto_config.j2'
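
setup.yml mints short-lived session credentials with sts_session_token whenever the tests are started from a plain access/secret key pair, then renders them into session_credentials.yml and boto3_config for the rest of the run. The underlying STS call looks roughly like the sketch below; the key material is a placeholder, and in the actual tests the amazon.aws module, not raw boto3, performs this call.

    # Rough equivalent of what the sts_session_token task asks STS for.
    import boto3
    from botocore.exceptions import BotoCoreError, ClientError

    sts = boto3.client(
        'sts',
        region_name='us-east-1',
        aws_access_key_id='AKIAEXAMPLEEXAMPLE',      # placeholder
        aws_secret_access_key='example-secret-key',  # placeholder
    )
    try:
        creds = sts.get_session_token(DurationSeconds=3600)['Credentials']
        print(creds['AccessKeyId'], creds['Expiration'])
    except (BotoCoreError, ClientError) as exc:
        # Placeholder keys land here; real keys return temporary credentials.
        print('get_session_token failed: %s' % exc)
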
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/templates/boto_config.j2 b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/templates/boto_config.j2
new file mode 100644
index 00000000..f8668f05
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/templates/boto_config.j2
@@ -0,0 +1,5 @@
+[profile test_profile]
+region = {{ aws_region }}
+aws_access_key_id = {{ session_access_key | default(aws_access_key) }}
+aws_secret_access_key = {{ session_secret_key | default(aws_secret_key) }}
+aws_security_token = {{ session_security_token | default(security_token) }}
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/templates/session_credentials.yml.j2 b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/templates/session_credentials.yml.j2
new file mode 100644
index 00000000..bb030439
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/templates/session_credentials.yml.j2
@@ -0,0 +1,3 @@
+aws_access_key: {{ session_access_key | default(aws_access_key) }}
+aws_secret_key: {{ session_secret_key | default(aws_secret_key) }}
+security_token: {{ session_security_token | default(security_token) }}
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/aliases b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/aliases
new file mode 100644
index 00000000..72a9fb4f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group4
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/connect_to_aws.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/connect_to_aws.yml
new file mode 100644
index 00000000..f0adfc72
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/connect_to_aws.yml
@@ -0,0 +1,8 @@
+- hosts: all
+ gather_facts: no
+ collections:
+ - community.aws
+ - amazon.aws
+ roles:
+ # Test the behaviour of module_utils.connect_to_aws
+ - 'connect_to_aws'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/ec2_connect.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/ec2_connect.yml
new file mode 100644
index 00000000..75ecd297
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/ec2_connect.yml
@@ -0,0 +1,8 @@
+- hosts: all
+ gather_facts: no
+ collections:
+ - community.aws
+ - amazon.aws
+ roles:
+ # Test the behaviour of module_utils.ec2.ec2_connect
+ - 'ec2_connect'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/inventory b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/inventory
new file mode 100644
index 00000000..5093e858
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/inventory
@@ -0,0 +1,6 @@
+[tests]
+localhost
+
+[all:vars]
+ansible_connection=local
+ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/meta/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/meta/main.yml
new file mode 100644
index 00000000..1f64f116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/connect_to_aws/files/amazonroot.pem b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/connect_to_aws/files/amazonroot.pem
new file mode 100644
index 00000000..a6f3e92a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/connect_to_aws/files/amazonroot.pem
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsF
+ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6
+b24gUm9vdCBDQSAxMB4XDTE1MDUyNjAwMDAwMFoXDTM4MDExNzAwMDAwMFowOTEL
+MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv
+b3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALJ4gHHKeNXj
+ca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgHFzZM
+9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qw
+IFAGbHrQgLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6
+VOujw5H5SNz/0egwLX0tdHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L
+93FcXmn/6pUCyziKrlA4b9v7LWIbxcceVOF34GfID5yHI9Y/QCB/IIDEgEw+OyQm
+jgSubJrIqg0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
+AYYwHQYDVR0OBBYEFIQYzIU07LwMlJQuCFmcx7IQTgoIMA0GCSqGSIb3DQEBCwUA
+A4IBAQCY8jdaQZChGsV2USggNiMOruYou6r4lK5IpDB/G/wkjUu0yKGX9rbxenDI
+U5PMCCjjmCXPI6T53iHTfIUJrU6adTrCC2qJeHZERxhlbI1Bjjt/msv0tadQ1wUs
+N+gDS63pYaACbvXy8MWy7Vu33PqUXHeeE6V/Uq2V8viTO96LXFvKWlJbYK8U90vv
+o/ufQJVtMVT8QtPHRh8jrdkPSHCa2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU
+5MsI+yMRQ+hDKXJioaldXgjUkK642M4UwtBV8ob2xJNDd2ZhwLnoQdeXeGADbkpy
+rqXRfboQnoZsG4q5WTP468SQvvG5
+-----END CERTIFICATE-----
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/connect_to_aws/files/isrg-x1.pem b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/connect_to_aws/files/isrg-x1.pem
new file mode 100644
index 00000000..b85c8037
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/connect_to_aws/files/isrg-x1.pem
@@ -0,0 +1,31 @@
+-----BEGIN CERTIFICATE-----
+MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
+TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
+cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
+WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
+ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
+h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
+0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
+A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
+T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
+B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
+B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
+KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
+OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
+jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
+qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
+rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
+hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
+ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
+3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
+NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
+ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
+TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
+jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
+oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
+4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
+mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
+emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
+-----END CERTIFICATE-----
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/connect_to_aws/library/example_module.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/connect_to_aws/library/example_module.py
new file mode 100644
index 00000000..543776a0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/connect_to_aws/library/example_module.py
@@ -0,0 +1,59 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# A bare-minimum Ansible Module based on AnsibleAWSModule used for testing some
+# of the core behaviour around AWS/boto (boto2) connection details
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+try:
+ import boto.ec2
+except ImportError:
+ pass
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AnsibleAWSError
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info
+
+
+def main():
+ module = AnsibleAWSModule(
+ argument_spec={},
+ supports_check_mode=True,
+ check_boto3=False,
+ )
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+ if not region:
+ module.fail_json(msg="Fail JSON: No Region")
+
+ try:
+ client = connect_to_aws(boto.ec2, region, **aws_connect_params)
+ except boto.exception.NoAuthHandlerFound as e:
+ module.fail_json_aws(e, msg='No Authentication Handler Found')
+ except AnsibleAWSError as e:
+ module.fail_json_aws(e, msg='Fail JSON AWS')
+
+ filters = {'name': 'amzn2-ami-hvm-2.0.202006*-x86_64-gp2'}
+
+ try:
+ images = client.get_all_images(image_ids=[], filters=filters, owners=['amazon'], executable_by=[])
+ except (boto.exception.BotoServerError, AnsibleAWSError) as e:
+ module.fail_json_aws(e, msg='Fail JSON AWS')
+
+ images_out = []
+ for image in images:
+ images_out.append(image.id)
+
+ # Return something, just because we can.
+ module.exit_json(
+ changed=False,
+ images=images_out)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/connect_to_aws/meta/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/connect_to_aws/meta/main.yml
new file mode 100644
index 00000000..77589cc2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/connect_to_aws/meta/main.yml
@@ -0,0 +1,5 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
+collections:
+ - amazon.aws
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/connect_to_aws/tasks/credentials.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/connect_to_aws/tasks/credentials.yml
new file mode 100644
index 00000000..573532bb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/connect_to_aws/tasks/credentials.yml
@@ -0,0 +1,212 @@
+---
+##################################################################################
+# Tests using standard credential parameters
+
+- name: 'Test basic operation using simple credentials (simple-parameters)'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: credential_result
+
+- assert:
+ that:
+ - credential_result is successful
+
+- name: 'Test basic operation using simple credentials (aws-parameters)'
+ example_module:
+ aws_region: '{{ aws_region }}'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ register: credential_result
+
+- assert:
+ that:
+ - credential_result is successful
+
+- name: 'Test basic operation using simple credentials (ec2-parameters)'
+ example_module:
+ ec2_region: '{{ aws_region }}'
+ ec2_access_key: '{{ aws_access_key }}'
+ ec2_secret_key: '{{ aws_secret_key }}'
+ access_token: '{{ security_token }}'
+ register: credential_result
+
+- assert:
+ that:
+ - credential_result is successful
+
+##################################################################################
+# Tests using standard credentials from environment variables
+
+- name: 'Test basic operation using simple credentials (aws-environment)'
+ example_module:
+ environment:
+ AWS_REGION: '{{ aws_region }}'
+ AWS_ACCESS_KEY_ID: '{{ aws_access_key }}'
+ AWS_SECRET_ACCESS_KEY: '{{ aws_secret_key }}'
+ AWS_SECURITY_TOKEN: '{{ security_token }}'
+ register: credential_result
+
+- assert:
+ that:
+ - credential_result is successful
+
+- name: 'Test basic operation using simple credentials (aws2-environment)'
+ example_module:
+ environment:
+ AWS_DEFAULT_REGION: '{{ aws_region }}'
+ AWS_ACCESS_KEY: '{{ aws_access_key }}'
+ AWS_SECRET_KEY: '{{ aws_secret_key }}'
+ AWS_SESSION_TOKEN: '{{ security_token }}'
+ register: credential_result
+
+- assert:
+ that:
+ - credential_result is successful
+
+- name: 'Test basic operation using simple credentials (ec2-environment)'
+ example_module:
+ environment:
+ EC2_REGION: '{{ aws_region }}'
+ EC2_ACCESS_KEY: '{{ aws_access_key }}'
+ EC2_SECRET_KEY: '{{ aws_secret_key }}'
+ EC2_SECURITY_TOKEN: '{{ security_token }}'
+ register: credential_result
+
+- assert:
+ that:
+ - credential_result is successful
+
+##################################################################################
+# Tests for missing parameters
+
+- name: 'Test with missing region'
+ example_module:
+ region: '{{ omit }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: missing_region
+ ignore_errors: True
+
+- assert:
+ that:
+ - missing_region is failed
+ - '"Fail JSON: No Region" in missing_region.msg'
+
+- name: 'Test with missing access key'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ omit }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: missing_access
+ ignore_errors: True
+
+- assert:
+ that:
+ - missing_access is failed
+ - '"No handler was ready to authenticate." in missing_access.msg'
+ #- '"aws_access_key_id" in missing_access.msg'
+
+- name: 'Test with missing secret key'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ omit }}'
+ security_token: '{{ security_token }}'
+ register: missing_secret
+ ignore_errors: True
+
+- assert:
+ that:
+ - missing_secret is failed
+ - '"No handler was ready to authenticate." in missing_secret.msg'
+ #- '"aws_secret_access_key" in missing_secret.msg'
+
+- name: 'Test with missing security token'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ omit }}'
+ register: missing_token
+ ignore_errors: True
+
+- assert:
+ that:
+ - missing_token is failed
+ # Caught when we try to do something, and passed to fail_json_aws
+ - '"Fail JSON AWS" in missing_token.msg'
+ - '"AWS was not able to validate the provided access credentials" in missing_token.msg'
+
+
+##################################################################################
+# Tests for bad parameters
+
+- name: 'Test with bad region'
+ example_module:
+ region: 'junk-example'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: bad_region
+ ignore_errors: True
+
+- assert:
+ that:
+ - bad_region is failed
+ - '"msg" in bad_region'
+ - '"does not seem to be available" in bad_region.msg'
+ - '"If the region definitely exists, you may need to upgrade boto or extend with endpoints_path" in bad_region.msg'
+
+- name: 'Test with bad access key'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: 'junk-example'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: bad_access
+ ignore_errors: True
+
+- assert:
+ that:
+ - bad_access is failed
+ # Caught when we try to do something, and passed to fail_json_aws
+ - '"Fail JSON AWS" in missing_token.msg'
+ - '"AWS was not able to validate the provided access credentials" in missing_token.msg'
+
+- name: 'Test with bad secret key'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: 'junk-example'
+ security_token: '{{ security_token }}'
+ register: bad_secret
+ ignore_errors: True
+
+- assert:
+ that:
+ - bad_secret is failed
+ # Caught when we try to do something, and passed to fail_json_aws
+ - '"Fail JSON AWS" in missing_token.msg'
+ - '"AWS was not able to validate the provided access credentials" in missing_token.msg'
+
+- name: 'Test with bad security token'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: 'junk-example'
+ register: bad_token
+ ignore_errors: True
+
+- assert:
+ that:
+ - bad_token is failed
+ # Caught when we try to do something, and passed to fail_json_aws
+ - '"Fail JSON AWS" in missing_token.msg'
+ - '"AWS was not able to validate the provided access credentials" in missing_token.msg'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/connect_to_aws/tasks/endpoints.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/connect_to_aws/tasks/endpoints.yml
new file mode 100644
index 00000000..a2531e96
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/connect_to_aws/tasks/endpoints.yml
@@ -0,0 +1,105 @@
+---
+# Note: connect_to_aws currently *ignores* aws_endpoint_url
+#
+##################################################################################
+# Tests using Endpoints
+
+- name: 'Test basic operation using standard endpoint (aws-parameters)'
+ example_module:
+ region: '{{ aws_region }}'
+ aws_endpoint_url: 'https://ec2.{{ aws_region }}.amazonaws.com'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ register: standard_endpoint_result
+
+- name: 'Check that we connected to the standard endpoint'
+ assert:
+ that:
+ - standard_endpoint_result is successful
+ #- '"ec2:DescribeImages" in standard_endpoint_result.resource_actions'
+
+- name: 'Test basic operation using standard endpoint (simple-parameters)'
+ example_module:
+ region: '{{ aws_region }}'
+ endpoint_url: 'https://ec2.us-east-1.amazonaws.com'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ register: standard_endpoint_result
+
+- name: 'Check that we connected to the standard endpoint'
+ assert:
+ that:
+ - standard_endpoint_result is successful
+ #- '"ec2:DescribeImages" in standard_endpoint_result.resource_actions'
+
+- name: 'Test basic operation using standard endpoint (ec2-parameters)'
+ example_module:
+ region: '{{ aws_region }}'
+ ec2_url: 'https://ec2.us-east-1.amazonaws.com'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ register: standard_endpoint_result
+
+- name: 'Check that we connected to the standard endpoint'
+ assert:
+ that:
+ - standard_endpoint_result is successful
+ #- '"ec2:DescribeImages" in standard_endpoint_result.resource_actions'
+
+##################################################################################
+# Tests using environment variables
+
+- name: 'Test basic operation using standard endpoint (aws-environment)'
+ example_module:
+ region: '{{ aws_region }}'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ environment:
+ AWS_URL: 'https://ec2.us-east-1.amazonaws.com'
+ register: standard_endpoint_result
+
+- name: 'Check that we connected to the standard endpoint'
+ assert:
+ that:
+ - standard_endpoint_result is successful
+ #- '"ec2:DescribeImages" in standard_endpoint_result.resource_actions'
+
+- name: 'Test basic operation using standard endpoint (ec2-environment)'
+ example_module:
+ region: '{{ aws_region }}'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ environment:
+ EC2_URL: 'https://ec2.us-east-1.amazonaws.com'
+ register: standard_endpoint_result
+
+- name: 'Check that we connected to the standard endpoint'
+ assert:
+ that:
+ - standard_endpoint_result is successful
+ #- '"ec2:DescribeImages" in standard_endpoint_result.resource_actions'
+
+##################################################################################
+# Tests using a bad endpoint URL
+# - connect_to_aws ignores endpoint_url, so even a junk URL should still succeed
+
+- name: 'Test with bad endpoint URL'
+ example_module:
+ region: '{{ aws_region }}'
+ endpoint_url: 'https://junk.{{ aws_region }}.amazonaws.com'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: bad_endpoint
+ ignore_errors: True
+
+- assert:
+ that:
+ # endpoint_url is ignored by connect_to_aws
+ - bad_endpoint is successful
+ #- bad_endpoint is failed
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/connect_to_aws/tasks/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/connect_to_aws/tasks/main.yml
new file mode 100644
index 00000000..9e81b308
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/connect_to_aws/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- name: 'Tests around standard credentials'
+ include_tasks: 'credentials.yml'
+
+- name: 'Tests around profiles'
+ include_tasks: 'profiles.yml'
+
+- name: 'Tests around endpoints'
+ include_tasks: 'endpoints.yml'
+
+#- name: 'Tests around CA Bundles'
+# include_tasks: 'boto2_ec2/ca_bundle.yml'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/connect_to_aws/tasks/profiles.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/connect_to_aws/tasks/profiles.yml
new file mode 100644
index 00000000..bab095e4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/connect_to_aws/tasks/profiles.yml
@@ -0,0 +1,63 @@
+---
+# Note: unlike boto3 modules, boto2 modules can't read region from the profile
+#
+##################################################################################
+# Tests using profiles instead of directly consuming credentials
+
+- name: 'Test basic operation using profile (simple-parameters)'
+ example_module:
+ region: '{{ aws_region }}'
+ profile: 'test_profile'
+ register: profile_result
+
+- assert:
+ that:
+ - profile_result is successful
+
+- name: 'Test basic operation using profile (aws-parameters)'
+ example_module:
+ region: '{{ aws_region }}'
+ aws_profile: 'test_profile'
+ register: profile_result
+
+- assert:
+ that:
+ - profile_result is successful
+
+- name: 'Test basic operation using profile (aws-environment)'
+ example_module:
+ region: '{{ aws_region }}'
+ environment:
+ AWS_PROFILE: 'test_profile'
+ register: profile_result
+
+- assert:
+ that:
+ - profile_result is successful
+
+- name: 'Test basic operation using profile (aws2-environment)'
+ example_module:
+ region: '{{ aws_region }}'
+ environment:
+ AWS_DEFAULT_PROFILE: 'test_profile'
+ register: profile_result
+
+- assert:
+ that:
+ - profile_result is successful
+
+##################################################################################
+# Tests with bad profile
+
+- name: 'Test with bad profile'
+ example_module:
+ region: '{{ aws_region }}'
+ profile: 'junk-profile'
+ register: bad_profile
+ ignore_errors: True
+
+- assert:
+ that:
+ - bad_profile is failed
+ - '"msg" in bad_profile'
+ - '"Profile given for AWS was not found." in bad_profile.msg'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/ec2_connect/files/amazonroot.pem b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/ec2_connect/files/amazonroot.pem
new file mode 100644
index 00000000..a6f3e92a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/ec2_connect/files/amazonroot.pem
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsF
+ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6
+b24gUm9vdCBDQSAxMB4XDTE1MDUyNjAwMDAwMFoXDTM4MDExNzAwMDAwMFowOTEL
+MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv
+b3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALJ4gHHKeNXj
+ca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgHFzZM
+9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qw
+IFAGbHrQgLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6
+VOujw5H5SNz/0egwLX0tdHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L
+93FcXmn/6pUCyziKrlA4b9v7LWIbxcceVOF34GfID5yHI9Y/QCB/IIDEgEw+OyQm
+jgSubJrIqg0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
+AYYwHQYDVR0OBBYEFIQYzIU07LwMlJQuCFmcx7IQTgoIMA0GCSqGSIb3DQEBCwUA
+A4IBAQCY8jdaQZChGsV2USggNiMOruYou6r4lK5IpDB/G/wkjUu0yKGX9rbxenDI
+U5PMCCjjmCXPI6T53iHTfIUJrU6adTrCC2qJeHZERxhlbI1Bjjt/msv0tadQ1wUs
+N+gDS63pYaACbvXy8MWy7Vu33PqUXHeeE6V/Uq2V8viTO96LXFvKWlJbYK8U90vv
+o/ufQJVtMVT8QtPHRh8jrdkPSHCa2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU
+5MsI+yMRQ+hDKXJioaldXgjUkK642M4UwtBV8ob2xJNDd2ZhwLnoQdeXeGADbkpy
+rqXRfboQnoZsG4q5WTP468SQvvG5
+-----END CERTIFICATE-----
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/ec2_connect/files/isrg-x1.pem b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/ec2_connect/files/isrg-x1.pem
new file mode 100644
index 00000000..b85c8037
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/ec2_connect/files/isrg-x1.pem
@@ -0,0 +1,31 @@
+-----BEGIN CERTIFICATE-----
+MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
+TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
+cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
+WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
+ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
+h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
+0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
+A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
+T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
+B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
+B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
+KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
+OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
+jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
+qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
+rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
+hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
+ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
+3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
+NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
+ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
+TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
+jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
+oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
+4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
+mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
+emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
+-----END CERTIFICATE-----
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/ec2_connect/library/example_module.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/ec2_connect/library/example_module.py
new file mode 100644
index 00000000..6bbc1a4a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/ec2_connect/library/example_module.py
@@ -0,0 +1,51 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# A bare-minimum Ansible Module based on AnsibleAWSModule used for testing some
+# of the core behaviour around AWS/boto (boto2) connection details
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+try:
+ import boto.ec2
+except ImportError:
+ pass
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ec2_connect
+
+
+def main():
+ module = AnsibleAWSModule(
+ argument_spec={},
+ supports_check_mode=True,
+ check_boto3=False,
+ )
+
+ try:
+ client = ec2_connect(module)
+ except boto.exception.NoAuthHandlerFound as e:
+ module.fail_json_aws(e, msg='Failed to get connection')
+
+ filters = {'name': 'amzn2-ami-hvm-2.0.202006*-x86_64-gp2'}
+
+ try:
+ images = client.get_all_images(image_ids=[], filters=filters, owners=['amazon'], executable_by=[])
+ except boto.exception.BotoServerError as e:
+ module.fail_json_aws(e, msg='Fail JSON AWS')
+
+ images_out = []
+ for image in images:
+ images_out.append(image.id)
+
+ # Return something, just because we can.
+ module.exit_json(
+ changed=False,
+ images=images_out)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/ec2_connect/meta/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/ec2_connect/meta/main.yml
new file mode 100644
index 00000000..77589cc2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/ec2_connect/meta/main.yml
@@ -0,0 +1,5 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
+collections:
+ - amazon.aws
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/ec2_connect/tasks/credentials.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/ec2_connect/tasks/credentials.yml
new file mode 100644
index 00000000..1843a497
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/ec2_connect/tasks/credentials.yml
@@ -0,0 +1,212 @@
+---
+##################################################################################
+# Tests using standard credential parameters
+
+- name: 'Test basic operation using simple credentials (simple-parameters)'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: credential_result
+
+- assert:
+ that:
+ - credential_result is successful
+
+- name: 'Test basic operation using simple credentials (aws-parameters)'
+ example_module:
+ aws_region: '{{ aws_region }}'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ register: credential_result
+
+- assert:
+ that:
+ - credential_result is successful
+
+- name: 'Test basic operation using simple credentials (ec2-parameters)'
+ example_module:
+ ec2_region: '{{ aws_region }}'
+ ec2_access_key: '{{ aws_access_key }}'
+ ec2_secret_key: '{{ aws_secret_key }}'
+ access_token: '{{ security_token }}'
+ register: credential_result
+
+- assert:
+ that:
+ - credential_result is successful
+
+##################################################################################
+# Tests using standard credentials from environment variables
+
+- name: 'Test basic operation using simple credentials (aws-environment)'
+ example_module:
+ environment:
+ AWS_REGION: '{{ aws_region }}'
+ AWS_ACCESS_KEY_ID: '{{ aws_access_key }}'
+ AWS_SECRET_ACCESS_KEY: '{{ aws_secret_key }}'
+ AWS_SECURITY_TOKEN: '{{ security_token }}'
+ register: credential_result
+
+- assert:
+ that:
+ - credential_result is successful
+
+- name: 'Test basic operation using simple credentials (aws2-environment)'
+ example_module:
+ environment:
+ AWS_DEFAULT_REGION: '{{ aws_region }}'
+ AWS_ACCESS_KEY: '{{ aws_access_key }}'
+ AWS_SECRET_KEY: '{{ aws_secret_key }}'
+ AWS_SESSION_TOKEN: '{{ security_token }}'
+ register: credential_result
+
+- assert:
+ that:
+ - credential_result is successful
+
+- name: 'Test basic operation using simple credentials (ec2-environment)'
+ example_module:
+ environment:
+ EC2_REGION: '{{ aws_region }}'
+ EC2_ACCESS_KEY: '{{ aws_access_key }}'
+ EC2_SECRET_KEY: '{{ aws_secret_key }}'
+ EC2_SECURITY_TOKEN: '{{ security_token }}'
+ register: credential_result
+
+- assert:
+ that:
+ - credential_result is successful
+
+##################################################################################
+# Tests for missing parameters
+
+- name: 'Test with missing region'
+ example_module:
+ region: '{{ omit }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: missing_region
+ ignore_errors: True
+
+- assert:
+ that:
+ - missing_region is failed
+ - '"Either region or ec2_url must be specified" in missing_region.msg'
+
+- name: 'Test with missing access key'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ omit }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: missing_access
+ ignore_errors: True
+
+- assert:
+ that:
+ - missing_access is failed
+ - '"No handler was ready to authenticate." in missing_access.msg'
+ #- '"aws_access_key_id" in missing_access.msg'
+
+- name: 'Test with missing secret key'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ omit }}'
+ security_token: '{{ security_token }}'
+ register: missing_secret
+ ignore_errors: True
+
+- assert:
+ that:
+ - missing_secret is failed
+ - '"No handler was ready to authenticate." in missing_secret.msg'
+ #- '"aws_secret_access_key" in missing_secret.msg'
+
+- name: 'Test with missing security token'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ omit }}'
+ register: missing_token
+ ignore_errors: True
+
+- assert:
+ that:
+ - missing_token is failed
+ # Caught when we try to do something, and passed to fail_json_aws
+ - '"Fail JSON AWS" in missing_token.msg'
+ - '"AWS was not able to validate the provided access credentials" in missing_token.msg'
+
+
+##################################################################################
+# Tests for bad parameters
+
+- name: 'Test with bad region'
+ example_module:
+ region: 'junk-example'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: bad_region
+ ignore_errors: True
+
+- assert:
+ that:
+ - bad_region is failed
+ - '"msg" in bad_region'
+ - '"does not seem to be available" in bad_region.msg'
+ - '"If the region definitely exists, you may need to upgrade boto or extend with endpoints_path" in bad_region.msg'
+
+- name: 'Test with bad access key'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: 'junk-example'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: bad_access
+ ignore_errors: True
+
+- assert:
+ that:
+ - bad_access is failed
+ # Caught when we try to do something, and passed to fail_json_aws
+    - '"Fail JSON AWS" in bad_access.msg'
+    - '"AWS was not able to validate the provided access credentials" in bad_access.msg'
+
+- name: 'Test with bad secret key'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: 'junk-example'
+ security_token: '{{ security_token }}'
+ register: bad_secret
+ ignore_errors: True
+
+- assert:
+ that:
+ - bad_secret is failed
+ # Caught when we try to do something, and passed to fail_json_aws
+    - '"Fail JSON AWS" in bad_secret.msg'
+    - '"AWS was not able to validate the provided access credentials" in bad_secret.msg'
+
+- name: 'Test with bad security token'
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: 'junk-example'
+ register: bad_token
+ ignore_errors: True
+
+- assert:
+ that:
+ - bad_token is failed
+ # Caught when we try to do something, and passed to fail_json_aws
+    - '"Fail JSON AWS" in bad_token.msg'
+    - '"AWS was not able to validate the provided access credentials" in bad_token.msg'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/ec2_connect/tasks/endpoints.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/ec2_connect/tasks/endpoints.yml
new file mode 100644
index 00000000..a8a6ba20
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/ec2_connect/tasks/endpoints.yml
@@ -0,0 +1,119 @@
+---
+# Note 1: With boto3 we can use the FIPS endpoints as a minimal proxy for testing that
+# we're using something different. With boto2 the authentication fails.
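+#
+# For illustration only (not exercised here, because these boto2-based tasks
+# would fail to authenticate against it), a FIPS-style override for a
+# boto3-based module might look like the hypothetical task below:
+#
+#- name: 'Hypothetical boto3 example using a FIPS endpoint'
+#  example_module:
+#    region: '{{ aws_region }}'
+#    aws_endpoint_url: 'https://ec2-fips.{{ aws_region }}.amazonaws.com'
+#    aws_access_key: '{{ aws_access_key }}'
+#    aws_secret_key: '{{ aws_secret_key }}'
+#    aws_security_token: '{{ security_token }}'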
+#
+##################################################################################
+# Tests using Endpoints
+
+- name: 'Test basic operation using standard endpoint (aws-parameters)'
+ example_module:
+ region: '{{ aws_region }}'
+ aws_endpoint_url: 'https://ec2.{{ aws_region }}.amazonaws.com'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ register: standard_endpoint_result
+
+- name: 'Check that we connected to the standard endpoint'
+ assert:
+ that:
+ - standard_endpoint_result is successful
+ #- '"ec2:DescribeImages" in standard_endpoint_result.resource_actions'
+
+- name: 'Test basic operation using standard endpoint - no region (aws-parameters)'
+ example_module:
+ region: '{{ omit }}'
+ aws_endpoint_url: 'https://ec2.{{ aws_region }}.amazonaws.com'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ register: standard_endpoint_result
+
+- name: 'Check that we connected to the standard endpoint'
+ assert:
+ that:
+ - standard_endpoint_result is successful
+ #- '"ec2:DescribeImages" in standard_endpoint_result.resource_actions'
+
+- name: 'Test basic operation using standard endpoint (simple-parameters)'
+ example_module:
+ region: '{{ aws_region }}'
+ endpoint_url: 'https://ec2.{{ aws_region }}.amazonaws.com'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ register: standard_endpoint_result
+
+- name: 'Check that we connected to the standard endpoint'
+ assert:
+ that:
+ - standard_endpoint_result is successful
+ #- '"ec2:DescribeImages" in standard_endpoint_result.resource_actions'
+
+- name: 'Test basic operation using standard endpoint (ec2-parameters)'
+ example_module:
+ region: '{{ aws_region }}'
+ ec2_url: 'https://ec2.{{ aws_region }}.amazonaws.com'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ register: standard_endpoint_result
+
+- name: 'Check that we connected to the standard endpoint'
+ assert:
+ that:
+ - standard_endpoint_result is successful
+ #- '"ec2:DescribeImages" in standard_endpoint_result.resource_actions'
+
+##################################################################################
+# Tests using environment variables
+
+- name: 'Test basic operation using standard endpoint (aws-environment)'
+ example_module:
+ region: '{{ aws_region }}'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ environment:
+ AWS_URL: 'https://ec2.{{ aws_region }}.amazonaws.com'
+ register: standard_endpoint_result
+
+- name: 'Check that we connected to the standard endpoint'
+ assert:
+ that:
+ - standard_endpoint_result is successful
+ #- '"ec2:DescribeImages" in standard_endpoint_result.resource_actions'
+
+- name: 'Test basic operation using standard endpoint (ec2-environment)'
+ example_module:
+ region: '{{ aws_region }}'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ aws_security_token: '{{ security_token }}'
+ environment:
+ EC2_URL: 'https://ec2.{{ aws_region }}.amazonaws.com'
+ register: standard_endpoint_result
+
+- name: 'Check that we connected to the standard endpoint'
+ assert:
+ that:
+ - standard_endpoint_result is successful
+ #- '"ec2:DescribeImages" in standard_endpoint_result.resource_actions'
+
+##################################################################################
+# Tests using a bad endpoint URL
+# - This demonstrates that endpoint_url overrides region
+
+- name: 'Test with bad endpoint URL'
+ example_module:
+ region: '{{ aws_region }}'
+ endpoint_url: 'https://junk.{{ aws_region }}.amazonaws.com'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ register: bad_endpoint
+ ignore_errors: True
+
+- assert:
+ that:
+ - bad_endpoint is failed
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/ec2_connect/tasks/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/ec2_connect/tasks/main.yml
new file mode 100644
index 00000000..9e81b308
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/ec2_connect/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- name: 'Tests around standard credentials'
+ include_tasks: 'credentials.yml'
+
+- name: 'Tests around profiles'
+ include_tasks: 'profiles.yml'
+
+- name: 'Tests around endpoints'
+ include_tasks: 'endpoints.yml'
+
+#- name: 'Tests around CA Bundles'
+# include_tasks: 'boto2_ec2/ca_bundle.yml'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/ec2_connect/tasks/profiles.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/ec2_connect/tasks/profiles.yml
new file mode 100644
index 00000000..bab095e4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/roles/ec2_connect/tasks/profiles.yml
@@ -0,0 +1,63 @@
+---
+# Note: unlike boto3 modules, boto2 modules can't read region from the profile
+#
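+# For illustration only: with a boto3-based module the region defined in the
+# generated test_profile would be honoured, so a hypothetical task could drop
+# the explicit region parameter, e.g.
+#
+#- name: 'Hypothetical boto3 example reading region from the profile'
+#  example_module:
+#    profile: 'test_profile'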
+##################################################################################
+# Tests using profiles instead of directly consuming credentials
+
+- name: 'Test basic operation using profile (simple-parameters)'
+ example_module:
+ region: '{{ aws_region }}'
+ profile: 'test_profile'
+ register: profile_result
+
+- assert:
+ that:
+ - profile_result is successful
+
+- name: 'Test basic operation using profile (aws-parameters)'
+ example_module:
+ region: '{{ aws_region }}'
+ aws_profile: 'test_profile'
+ register: profile_result
+
+- assert:
+ that:
+ - profile_result is successful
+
+- name: 'Test basic operation using profile (aws-environment)'
+ example_module:
+ region: '{{ aws_region }}'
+ environment:
+ AWS_PROFILE: 'test_profile'
+ register: profile_result
+
+- assert:
+ that:
+ - profile_result is successful
+
+- name: 'Test basic operation using profile (aws2-environment)'
+ example_module:
+ region: '{{ aws_region }}'
+ environment:
+ AWS_DEFAULT_PROFILE: 'test_profile'
+ register: profile_result
+
+- assert:
+ that:
+ - profile_result is successful
+
+##################################################################################
+# Tests with bad profile
+
+- name: 'Test with bad profile'
+ example_module:
+ region: '{{ aws_region }}'
+ profile: 'junk-profile'
+ register: bad_profile
+ ignore_errors: True
+
+- assert:
+ that:
+ - bad_profile is failed
+ - '"msg" in bad_profile'
+ - '"Profile given for AWS was not found." in bad_profile.msg'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/runme.sh b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/runme.sh
new file mode 100755
index 00000000..cbf09a54
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/runme.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ANSIBLE_ROLES_PATH="../"
+# Boto3
+AWS_CONFIG_FILE="$( pwd )/boto3_config"
+# Boto2
+BOTO_CONFIG="$( pwd )/boto3_config"
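+# Note: both variables point at the same generated file; setup.yml writes
+# ./boto3_config from templates/boto_config.j2 before the other plays run.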
+
+export ANSIBLE_ROLES_PATH
+export AWS_CONFIG_FILE
+export BOTO_CONFIG
+
+ansible-playbook setup.yml -i localhost "$@"
+ansible-playbook ec2_connect.yml -i inventory "$@" -e "@session_credentials.yml"
+ansible-playbook connect_to_aws.yml -i inventory "$@" -e "@session_credentials.yml"
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/setup.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/setup.yml
new file mode 100644
index 00000000..9b219eb2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/setup.yml
@@ -0,0 +1,40 @@
+---
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ # ===========================================================
+ # While CI uses a dedicated session, the easiest way to run
+ # tests outside of CI is with a simple access/secret key pair.
+ #
+ # For consistency, use sts_session_token to grab session
+ # credentials if we're not already using a session
+ # Note: this can't be done within a session, hence the slightly
+ # strange dance
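+    # (Roughly the same result could be obtained by hand with the AWS CLI:
+    #  'aws sts get-session-token' and exporting the returned credentials.)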
+ - name: 'Get a session token if we are using a basic key'
+ when:
+ - security_token is not defined
+ block:
+ - name: 'Get a session token'
+ sts_session_token:
+ region: '{{ aws_region }}'
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ register: session_token
+ no_log: true
+ - name: 'Override initial tokens'
+ set_fact:
+ session_access_key: '{{ session_token.sts_creds.access_key }}'
+ session_secret_key: '{{ session_token.sts_creds.secret_key }}'
+ session_security_token: '{{ session_token.sts_creds.session_token }}'
+ no_log: true
+
+ - name: 'Write out credentials'
+ template:
+ dest: './session_credentials.yml'
+ src: 'session_credentials.yml.j2'
+
+ - name: 'Write out boto config file'
+ template:
+ dest: './boto3_config'
+ src: 'boto_config.j2'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/templates/boto_config.j2 b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/templates/boto_config.j2
new file mode 100644
index 00000000..f8668f05
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/templates/boto_config.j2
@@ -0,0 +1,5 @@
+[profile test_profile]
+region = {{ aws_region }}
+aws_access_key_id = {{ session_access_key | default(aws_access_key) }}
+aws_secret_access_key = {{ session_secret_key | default(aws_secret_key) }}
+aws_security_token = {{ session_security_token | default(security_token) }}
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/templates/session_credentials.yml.j2 b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/templates/session_credentials.yml.j2
new file mode 100644
index 00000000..bb030439
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_ec2/templates/session_credentials.yml.j2
@@ -0,0 +1,3 @@
+aws_access_key: {{ session_access_key | default(aws_access_key) }}
+aws_secret_key: {{ session_secret_key | default(aws_secret_key) }}
+security_token: {{ session_security_token | default(security_token) }}
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/aliases b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/aliases
new file mode 100644
index 00000000..6e3860be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group2
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/inventory b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/inventory
new file mode 100644
index 00000000..5093e858
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/inventory
@@ -0,0 +1,6 @@
+[tests]
+localhost
+
+[all:vars]
+ansible_connection=local
+ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/main.yml
new file mode 100644
index 00000000..4edc3637
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/main.yml
@@ -0,0 +1,7 @@
+- hosts: all
+ gather_facts: no
+ collections:
+ - amazon.aws
+ roles:
+ # Test the behaviour of module_utils.core.AnsibleAWSModule.client (boto3)
+ - 'get_waiter'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/meta/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/meta/main.yml
new file mode 100644
index 00000000..1f64f116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/library/example_module.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/library/example_module.py
new file mode 100644
index 00000000..4e16fb1b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/library/example_module.py
@@ -0,0 +1,39 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# A bare-minimum Ansible Module based on AnsibleAWSModule used for testing some
+# of the core behaviour around AWS/Boto3 connection details
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
+
+
+def main():
+ argument_spec = dict(
+ client=dict(required=True, type='str'),
+ waiter_name=dict(required=True, type='str'),
+ with_decorator=dict(required=False, type='bool', default=False),
+ )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
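+    # When requested, build the client with AWSRetry's jittered backoff so
+    # that get_waiter() is also exercised against a retry-decorated client.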
+ decorator = None
+ if module.params.get('with_decorator'):
+ decorator = AWSRetry.jittered_backoff()
+
+ client = module.client(module.params.get('client'), retry_decorator=decorator)
+ waiter = get_waiter(client, module.params.get('waiter_name'))
+
+ module.exit_json(changed=False, waiter_attributes=dir(waiter))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/meta/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/meta/main.yml
new file mode 100644
index 00000000..77589cc2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/meta/main.yml
@@ -0,0 +1,5 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
+collections:
+ - amazon.aws
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/tasks/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/tasks/main.yml
new file mode 100644
index 00000000..466d9584
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/tasks/main.yml
@@ -0,0 +1,36 @@
+---
+- module_defaults:
+ example_module:
+ region: '{{ aws_region }}'
+ access_key: '{{ aws_access_key }}'
+ secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token | default(omit) }}'
+ block:
+ - name: 'Attempt to get a waiter (no retry decorator)'
+ example_module:
+ client: 'ec2'
+ waiter_name: 'internet_gateway_exists'
+ register: test_no_decorator
+
+ - assert:
+ that:
+ - test_no_decorator is succeeded
+ # Standard methods on a boto3 wrapper
+ - '"wait" in test_no_decorator.waiter_attributes'
+ - '"name" in test_no_decorator.waiter_attributes'
+ - '"config" in test_no_decorator.waiter_attributes'
+
+ - name: 'Attempt to get a waiter (with decorator)'
+ example_module:
+ client: 'ec2'
+ waiter_name: 'internet_gateway_exists'
+ with_decorator: True
+ register: test_with_decorator
+
+ - assert:
+ that:
+ - test_with_decorator is succeeded
+ # Standard methods on a boto3 wrapper
+ - '"wait" in test_with_decorator.waiter_attributes'
+ - '"name" in test_with_decorator.waiter_attributes'
+ - '"config" in test_with_decorator.waiter_attributes'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/runme.sh b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/runme.sh
new file mode 100755
index 00000000..78a6f6db
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/runme.sh
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ANSIBLE_ROLES_PATH="../"
+export ANSIBLE_ROLES_PATH
+
+ansible-playbook main.yml -i inventory "$@"
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/prepare_tests/tasks/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/prepare_tests/tasks/main.yml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/prepare_tests/tasks/main.yml
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/aliases b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/aliases
new file mode 100644
index 00000000..a112c3d1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/group1
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/inventory b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/inventory
new file mode 100644
index 00000000..59a2423a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/inventory
@@ -0,0 +1,13 @@
+[tests]
+missing
+simple
+complex
+dotted
+tags
+encryption_kms
+encryption_sse
+public_access
+
+[all:vars]
+ansible_connection=local
+ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/main.yml
new file mode 100644
index 00000000..22fc0d64
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/main.yml
@@ -0,0 +1,12 @@
+---
+# Beware: most of our tests here are run in parallel.
+# To add new tests you'll need to add a new host to the inventory and a matching
+# '{{ inventory_hostname }}'.yml file in roles/s3_bucket/tasks/
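+#
+# For example (the name below is purely illustrative): to add a test case
+# called 'lifecycle' you would add a 'lifecycle' host under [tests] in the
+# inventory file and create roles/s3_bucket/tasks/lifecycle.yml next to the
+# existing task files.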
+
+# Each test case cleans up its own buckets in an 'always' block once it has run
+- hosts: all
+ gather_facts: no
+ strategy: free
+ #serial: 10
+ roles:
+ - s3_bucket
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/meta/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/meta/main.yml
new file mode 100644
index 00000000..38b31be0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/meta/main.yml
@@ -0,0 +1,4 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
+ - setup_remote_tmp_dir
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/defaults/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/defaults/main.yml
new file mode 100644
index 00000000..b4fd58ad
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+bucket_name: '{{ resource_prefix }}-{{ inventory_hostname | regex_replace("_","-") }}'
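+# For example (illustrative values only): a resource_prefix of
+# 'ansible-test-123' on the 'encryption_kms' host yields the bucket name
+# 'ansible-test-123-encryption-kms'.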
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/meta/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/meta/main.yml
new file mode 100644
index 00000000..38b31be0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/meta/main.yml
@@ -0,0 +1,4 @@
+dependencies:
+ - prepare_tests
+ - setup_ec2
+ - setup_remote_tmp_dir
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/complex.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/complex.yml
new file mode 100644
index 00000000..19736356
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/complex.yml
@@ -0,0 +1,146 @@
+---
+- block:
+ - name: 'Create more complex s3_bucket'
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: present
+ policy: "{{ lookup('template','policy.json') }}"
+ requester_pays: yes
+ versioning: yes
+ tags:
+ example: tag1
+ another: tag2
+ register: output
+
+ - assert:
+ that:
+ - output is changed
+ - output.name == '{{ bucket_name }}'
+ - output.requester_pays
+ - output.versioning.MfaDelete == 'Disabled'
+ - output.versioning.Versioning == 'Enabled'
+ - output.tags.example == 'tag1'
+ - output.tags.another == 'tag2'
+ - output.policy.Statement[0].Action == 's3:GetObject'
+ - output.policy.Statement[0].Effect == 'Allow'
+ - output.policy.Statement[0].Principal == '*'
+ - output.policy.Statement[0].Resource == 'arn:aws:s3:::{{ bucket_name }}/*'
+ - output.policy.Statement[0].Sid == 'AddPerm'
+
+ # ============================================================
+
+ - name: 'Pause to help with s3 bucket eventual consistency'
+ wait_for:
+ timeout: 10
+ delegate_to: localhost
+
+ - name: 'Try to update the same complex s3_bucket'
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: present
+ policy: "{{ lookup('template','policy.json') }}"
+ requester_pays: yes
+ versioning: yes
+ tags:
+ example: tag1
+ another: tag2
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+ - output.name == '{{ bucket_name }}'
+ - output.requester_pays
+ - output.versioning.MfaDelete == 'Disabled'
+ - output.versioning.Versioning == 'Enabled'
+ - output.tags.example == 'tag1'
+ - output.tags.another == 'tag2'
+ - output.policy.Statement[0].Action == 's3:GetObject'
+ - output.policy.Statement[0].Effect == 'Allow'
+ - output.policy.Statement[0].Principal == '*'
+ - output.policy.Statement[0].Resource == 'arn:aws:s3:::{{ bucket_name }}/*'
+ - output.policy.Statement[0].Sid == 'AddPerm'
+
+ # ============================================================
+ - name: 'Update bucket policy on complex bucket'
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: present
+ policy: "{{ lookup('template','policy-updated.json') }}"
+ requester_pays: yes
+ versioning: yes
+ tags:
+ example: tag1
+ another: tag2
+ register: output
+
+ - assert:
+ that:
+ - output is changed
+ - output.policy.Statement[0].Action == 's3:GetObject'
+ - output.policy.Statement[0].Effect == 'Deny'
+ - output.policy.Statement[0].Principal.AWS == '*'
+ - output.policy.Statement[0].Resource == 'arn:aws:s3:::{{ bucket_name }}/*'
+ - output.policy.Statement[0].Sid == 'AddPerm'
+
+ # ============================================================
+
+ - name: 'Pause to help with s3 bucket eventual consistency'
+ wait_for:
+ timeout: 10
+ delegate_to: localhost
+
+ - name: Update attributes for s3_bucket
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: present
+ policy: "{{ lookup('template','policy.json') }}"
+ requester_pays: no
+ versioning: no
+ tags:
+          example: tag1-updated
+ another: tag2
+ register: output
+
+ - assert:
+ that:
+ - output is changed
+ - output.name == '{{ bucket_name }}'
+ - not output.requester_pays
+ - output.versioning.MfaDelete == 'Disabled'
+ - output.versioning.Versioning in ['Suspended', 'Disabled']
+          - output.tags.example == 'tag1-updated'
+ - output.tags.another == 'tag2'
+ - output.policy.Statement[0].Action == 's3:GetObject'
+ - output.policy.Statement[0].Effect == 'Allow'
+ - output.policy.Statement[0].Principal == '*'
+ - output.policy.Statement[0].Resource == 'arn:aws:s3:::{{ bucket_name }}/*'
+ - output.policy.Statement[0].Sid == 'AddPerm'
+
+ - name: 'Delete complex test bucket'
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: absent
+ register: output
+
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Re-delete complex test bucket'
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: absent
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+
+ # ============================================================
+ always:
+ - name: 'Ensure all buckets are deleted'
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: absent
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/dotted.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/dotted.yml
new file mode 100644
index 00000000..7d4e0ae9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/dotted.yml
@@ -0,0 +1,54 @@
+---
+- block:
+ - name: 'Ensure bucket_name contains a .'
+ set_fact:
+ bucket_name: '{{ bucket_name }}.something'
+
+ # ============================================================
+ #
+ - name: 'Create bucket with dot in name'
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: present
+ register: output
+
+ - assert:
+ that:
+ - output is changed
+ - output.name == '{{ bucket_name }}'
+
+
+ # ============================================================
+
+ - name: 'Pause to help with s3 bucket eventual consistency'
+ wait_for:
+ timeout: 10
+ delegate_to: localhost
+
+ - name: 'Delete s3_bucket with dot in name'
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: absent
+ register: output
+
+ - assert:
+ that:
+ - output is changed
+
+ - name: 'Re-delete s3_bucket with dot in name'
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: absent
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+
+ # ============================================================
+ always:
+ - name: 'Ensure all buckets are deleted'
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: absent
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_kms.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_kms.yml
new file mode 100644
index 00000000..869dd402
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_kms.yml
@@ -0,0 +1,88 @@
+---
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+
+ # ============================================================
+
+ - name: 'Create a simple bucket'
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: present
+ register: output
+
+ - name: 'Enable aws:kms encryption with KMS master key'
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: present
+ encryption: "aws:kms"
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - output.encryption
+ - output.encryption.SSEAlgorithm == 'aws:kms'
+
+ - name: 'Re-enable aws:kms encryption with KMS master key (idempotent)'
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: present
+ encryption: "aws:kms"
+ register: output
+
+ - assert:
+ that:
+ - not output.changed
+ - output.encryption
+ - output.encryption.SSEAlgorithm == 'aws:kms'
+
+ # ============================================================
+
+ - name: Disable encryption from bucket
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: present
+ encryption: "none"
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - not output.encryption
+
+    - name: Disable encryption from bucket (idempotency)
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: present
+ encryption: "none"
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+ - not output.encryption
+
+ # ============================================================
+
+ - name: Delete encryption test s3 bucket
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: absent
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+
+ # ============================================================
+ always:
+ - name: Ensure all buckets are deleted
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: absent
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_sse.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_sse.yml
new file mode 100644
index 00000000..699e8ae4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_sse.yml
@@ -0,0 +1,88 @@
+---
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+
+ # ============================================================
+
+ - name: 'Create a simple bucket'
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: present
+ register: output
+
+ - name: 'Enable AES256 encryption'
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: present
+ encryption: 'AES256'
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - output.encryption
+ - output.encryption.SSEAlgorithm == 'AES256'
+
+ - name: 'Re-enable AES256 encryption (idempotency)'
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: present
+ encryption: 'AES256'
+ register: output
+
+ - assert:
+ that:
+ - not output.changed
+ - output.encryption
+ - output.encryption.SSEAlgorithm == 'AES256'
+
+ # ============================================================
+
+ - name: Disable encryption from bucket
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: present
+ encryption: "none"
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - not output.encryption
+
+    - name: Disable encryption from bucket (idempotency)
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: present
+ encryption: "none"
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+ - not output.encryption
+
+ # ============================================================
+
+ - name: Delete encryption test s3 bucket
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: absent
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+
+ # ============================================================
+ always:
+ - name: Ensure all buckets are deleted
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: absent
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/main.yml
new file mode 100644
index 00000000..8eba03ba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/main.yml
@@ -0,0 +1,20 @@
+---
+# Beware: most of our tests here are run in parallel.
+# To add new tests you'll need to add a new host to the inventory and a matching
+# '{{ inventory_hostname }}'.yml file in roles/s3_bucket/tasks/
+#
+# ###############################################################################
+
+- name: "Wrap up all tests and setup AWS credentials"
+ module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ - debug:
+ msg: "{{ inventory_hostname }} start: {{ lookup('pipe','date') }}"
+ - include_tasks: '{{ inventory_hostname }}.yml'
+ - debug:
+ msg: "{{ inventory_hostname }} finish: {{ lookup('pipe','date') }}"
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/missing.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/missing.yml
new file mode 100644
index 00000000..4d827680
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/missing.yml
@@ -0,0 +1,26 @@
+---
+- name: 'Attempt to delete non-existent buckets'
+ block:
+ # ============================================================
+ #
+    # While in theory the 'simple' test case covers this, there are
+    # ways in which eventual consistency could catch us out.
+ #
+    - name: 'Delete non-existent s3_bucket (never created)'
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: absent
+ register: output
+
+ - assert:
+ that:
+ - output is success
+ - output is not changed
+
+ # ============================================================
+ always:
+ - name: 'Ensure all buckets are deleted'
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: absent
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/public_access.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/public_access.yml
new file mode 100644
index 00000000..f7bc1984
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/public_access.yml
@@ -0,0 +1,114 @@
+---
+- module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+
+ # ============================================================
+
+ - name: 'Create a simple bucket with public access block configuration'
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: present
+ public_access:
+ block_public_acls: true
+ block_public_policy: true
+ ignore_public_acls: true
+ restrict_public_buckets: true
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - output.public_access_block
+ - output.public_access_block.BlockPublicAcls
+ - output.public_access_block.BlockPublicPolicy
+ - output.public_access_block.IgnorePublicAcls
+ - output.public_access_block.RestrictPublicBuckets
+
+ - name: 'Re-configure public access block configuration'
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: present
+ public_access:
+ block_public_acls: true
+ block_public_policy: false
+ ignore_public_acls: true
+ restrict_public_buckets: false
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - output.public_access_block
+ - output.public_access_block.BlockPublicAcls
+ - not output.public_access_block.BlockPublicPolicy
+ - output.public_access_block.IgnorePublicAcls
+ - not output.public_access_block.RestrictPublicBuckets
+
+ - name: 'Re-configure public access block configuration (idempotency)'
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: present
+ public_access:
+ block_public_acls: true
+ block_public_policy: false
+ ignore_public_acls: true
+ restrict_public_buckets: false
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+ - output.public_access_block
+ - output.public_access_block.BlockPublicAcls
+ - not output.public_access_block.BlockPublicPolicy
+ - output.public_access_block.IgnorePublicAcls
+ - not output.public_access_block.RestrictPublicBuckets
+
+ - name: 'Delete public access block configuration'
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: present
+ delete_public_access: true
+ register: output
+
+ - assert:
+ that:
+ - output is changed
+ - not output.public_access_block|bool
+
+ - name: 'Delete public access block configuration (idempotency)'
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: present
+ delete_public_access: true
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+ - not output.public_access_block|bool
+
+ # ============================================================
+
+ - name: Delete testing s3 bucket
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: absent
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+
+ # ============================================================
+ always:
+ - name: Ensure all buckets are deleted
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: absent
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/simple.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/simple.yml
new file mode 100644
index 00000000..5b445bd5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/simple.yml
@@ -0,0 +1,65 @@
+---
+- name: 'Run simple tests'
+ block:
+ # Note: s3_bucket doesn't support check_mode
+
+ # ============================================================
+ - name: 'Create a simple s3_bucket'
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: present
+ register: output
+
+ - assert:
+ that:
+ - output is success
+ - output is changed
+ - output.name == '{{ bucket_name }}'
+ - not output.requester_pays
+ - output.public_access is undefined
+
+ # ============================================================
+ - name: 'Try to update the simple bucket with the same values'
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: present
+ register: output
+
+ - assert:
+ that:
+ - output is success
+ - output is not changed
+ - output.name == '{{ bucket_name }}'
+ - not output.requester_pays
+
+ # ============================================================
+ - name: 'Delete the simple s3_bucket'
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: absent
+ register: output
+
+ - assert:
+ that:
+ - output is success
+ - output is changed
+
+ # ============================================================
+ - name: 'Re-delete the simple s3_bucket (idempotency)'
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: absent
+ register: output
+
+ - assert:
+ that:
+ - output is success
+ - output is not changed
+
+ # ============================================================
+ always:
+ - name: 'Ensure all buckets are deleted'
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: absent
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/tags.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/tags.yml
new file mode 100644
index 00000000..437dd2ca
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/tags.yml
@@ -0,0 +1,256 @@
+---
+- name: 'Run tagging tests'
+ block:
+
+ # ============================================================
+ - name: 'Create simple s3_bucket for testing tagging'
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: present
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - output.name == '{{ bucket_name }}'
+
+ # ============================================================
+
+ - name: 'Add tags to s3 bucket'
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: present
+ tags:
+ example: tag1
+ another: tag2
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - output.name == '{{ bucket_name }}'
+ - output.tags.example == 'tag1'
+ - output.tags.another == 'tag2'
+
+ - name: 'Re-Add tags to s3 bucket'
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: present
+ tags:
+ example: tag1
+ another: tag2
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+ - output.name == '{{ bucket_name }}'
+ - output.tags.example == 'tag1'
+ - output.tags.another == 'tag2'
+
+ # ============================================================
+
+ - name: Remove a tag from an s3_bucket
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: present
+ tags:
+ example: tag1
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - output.name == '{{ bucket_name }}'
+ - output.tags.example == 'tag1'
+ - "'another' not in output.tags"
+
+ - name: Re-remove the tag from an s3_bucket
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: present
+ tags:
+ example: tag1
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+ - output.name == '{{ bucket_name }}'
+ - output.tags.example == 'tag1'
+ - "'another' not in output.tags"
+
+ ## ============================================================
+
+ #- name: 'Pause to help with s3 bucket eventual consistency'
+ # wait_for:
+ # timeout: 10
+ # delegate_to: localhost
+
+ ## ============================================================
+
+ - name: 'Add a tag for s3_bucket with purge_tags False'
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: present
+ purge_tags: no
+ tags:
+ anewtag: here
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - output.name == '{{ bucket_name }}'
+ - output.tags.example == 'tag1'
+ - output.tags.anewtag == 'here'
+
+ - name: 'Re-add a tag for s3_bucket with purge_tags False'
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: present
+ purge_tags: no
+ tags:
+ anewtag: here
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+ - output.name == '{{ bucket_name }}'
+ - output.tags.example == 'tag1'
+ - output.tags.anewtag == 'here'
+
+ ## ============================================================
+
+ #- name: 'Pause to help with s3 bucket eventual consistency'
+ # wait_for:
+ # timeout: 10
+ # delegate_to: localhost
+
+ ## ============================================================
+
+ - name: Update a tag for s3_bucket with purge_tags False
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: present
+ purge_tags: no
+ tags:
+ anewtag: next
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - output.name == '{{ bucket_name }}'
+ - output.tags.example == 'tag1'
+ - output.tags.anewtag == 'next'
+
+ - name: Re-update a tag for s3_bucket with purge_tags False
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: present
+ purge_tags: no
+ tags:
+ anewtag: next
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+ - output.name == '{{ bucket_name }}'
+ - output.tags.example == 'tag1'
+ - output.tags.anewtag == 'next'
+
+ ## ============================================================
+
+ #- name: 'Pause to help with s3 bucket eventual consistency'
+ # wait_for:
+ # timeout: 10
+ # delegate_to: localhost
+
+ ## ============================================================
+
+ - name: Pass empty tags dict for s3_bucket with purge_tags False
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: present
+ purge_tags: no
+ tags: {}
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+ - output.name == '{{ bucket_name }}'
+ - output.tags.example == 'tag1'
+ - output.tags.anewtag == 'next'
+
+ ## ============================================================
+
+ #- name: 'Pause to help with s3 bucket eventual consistency'
+ # wait_for:
+ # timeout: 10
+ # delegate_to: localhost
+
+ ## ============================================================
+
+ - name: Do not specify any tag to ensure previous tags are not removed
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: present
+ register: output
+
+ - assert:
+ that:
+ - not output.changed
+ - output.name == '{{ bucket_name }}'
+ - output.tags.example == 'tag1'
+
+ # ============================================================
+
+ - name: Remove all tags
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: present
+ tags: {}
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+ - output.name == '{{ bucket_name }}'
+ - output.tags == {}
+
+ - name: Re-remove all tags
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: present
+ tags: {}
+ register: output
+
+ - assert:
+ that:
+ - output is not changed
+ - output.name == '{{ bucket_name }}'
+ - output.tags == {}
+
+ # ============================================================
+
+ - name: Delete bucket
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: absent
+ register: output
+
+ - assert:
+ that:
+ - output.changed
+
+ # ============================================================
+ always:
+ - name: Ensure all buckets are deleted
+ s3_bucket:
+ name: '{{ bucket_name }}'
+ state: absent
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/templates/policy-updated.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/templates/policy-updated.json
new file mode 100644
index 00000000..23aec6fb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/templates/policy-updated.json
@@ -0,0 +1,12 @@
+{
+ "Version":"2012-10-17",
+ "Statement":[
+ {
+ "Sid":"AddPerm",
+ "Effect":"Deny",
+ "Principal": {"AWS": "*"},
+ "Action":["s3:GetObject"],
+ "Resource":["arn:aws:s3:::{{bucket_name}}/*"]
+ }
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/templates/policy.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/templates/policy.json
new file mode 100644
index 00000000..a2720aed
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/templates/policy.json
@@ -0,0 +1,12 @@
+{
+ "Version":"2012-10-17",
+ "Statement":[
+ {
+ "Sid":"AddPerm",
+ "Effect":"Allow",
+ "Principal": "*",
+ "Action":["s3:GetObject"],
+ "Resource":["arn:aws:s3:::{{bucket_name}}/*"]
+ }
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/runme.sh b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/runme.sh
new file mode 100755
index 00000000..aa324772
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/runme.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+#
+# Beware: most of our tests here are run in parallel.
+# To add new tests you'll need to add a new host to the inventory and a matching
+# '{{ inventory_hostname }}'.yml file in roles/s3_bucket/tasks/
+
+
+set -eux
+
+export ANSIBLE_ROLES_PATH=../
+
+ansible-playbook main.yml -i inventory "$@"
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2/defaults/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2/defaults/main.yml
new file mode 100644
index 00000000..fb1f88b1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+resource_prefix: 'ansible-testing-'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2/tasks/common.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2/tasks/common.yml
new file mode 100644
index 00000000..bf23f539
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2/tasks/common.yml
@@ -0,0 +1,119 @@
+---
+
+# ============================================================
+- name: test with no parameters
+ action: "{{module_name}}"
+ register: result
+ ignore_errors: true
+
+- name: assert failure when called with no parameters
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg == "missing required arguments: name"'
+
+# ============================================================
+- name: test with only name
+ action: "{{module_name}} name={{ec2_key_name}}"
+ register: result
+ ignore_errors: true
+
+- name: assert failure when called with only 'name'
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg == "Either region or ec2_url must be specified"'
+
+# ============================================================
+- name: test invalid region parameter
+ action: "{{module_name}} name='{{ec2_key_name}}' region='asdf querty 1234'"
+ register: result
+ ignore_errors: true
+
+- name: assert invalid region parameter
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg.startswith("value of region must be one of:")'
+
+# ============================================================
+- name: test valid region parameter
+ action: "{{module_name}} name='{{ec2_key_name}}' region='{{ec2_region}}'"
+ register: result
+ ignore_errors: true
+
+- name: assert valid region parameter
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg.startswith("No handler was ready to authenticate.")'
+
+# ============================================================
+- name: test environment variable EC2_REGION
+ action: "{{module_name}} name='{{ec2_key_name}}'"
+ environment:
+ EC2_REGION: '{{ec2_region}}'
+ register: result
+ ignore_errors: true
+
+- name: assert environment variable EC2_REGION
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg.startswith("No handler was ready to authenticate.")'
+
+# ============================================================
+- name: test invalid ec2_url parameter
+ action: "{{module_name}} name='{{ec2_key_name}}'"
+ environment:
+ EC2_URL: bogus.example.com
+ register: result
+ ignore_errors: true
+
+- name: assert invalid ec2_url parameter
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg.startswith("No handler was ready to authenticate.")'
+
+# ============================================================
+- name: test valid ec2_url parameter
+ action: "{{module_name}} name='{{ec2_key_name}}'"
+ environment:
+ EC2_URL: '{{ec2_url}}'
+ register: result
+ ignore_errors: true
+
+- name: assert valid ec2_url parameter
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg.startswith("No handler was ready to authenticate.")'
+
+# ============================================================
+- name: test credentials from environment
+ action: "{{module_name}} name='{{ec2_key_name}}'"
+ environment:
+ EC2_REGION: '{{ec2_region}}'
+ EC2_ACCESS_KEY: bogus_access_key
+ EC2_SECRET_KEY: bogus_secret_key
+ register: result
+ ignore_errors: true
+
+- name: assert credentials from environment
+ assert:
+ that:
+ - 'result.failed'
+ - '"EC2ResponseError: 401 Unauthorized" in result.msg'
+
+# ============================================================
+- name: test credential parameters
+ action: "{{module_name}} name='{{ec2_key_name}}' ec2_region='{{ec2_region}}' ec2_access_key=bogus_access_key ec2_secret_key=bogus_secret_key"
+ register: result
+ ignore_errors: true
+
+- name: assert credential parameters
+ assert:
+ that:
+ - 'result.failed'
+ - '"EC2ResponseError: 401 Unauthorized" in result.msg'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2/vars/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2/vars/main.yml
new file mode 100644
index 00000000..3d7209ef
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2/vars/main.yml
@@ -0,0 +1,3 @@
+---
+ec2_url: ec2.amazonaws.com
+ec2_region: us-east-1
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml
new file mode 100644
index 00000000..229037c8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml
@@ -0,0 +1,5 @@
+- name: delete temporary directory
+ include_tasks: default-cleanup.yml
+
+- name: delete temporary directory (windows)
+ include_tasks: windows-cleanup.yml
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml
new file mode 100644
index 00000000..39872d74
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml
@@ -0,0 +1,5 @@
+- name: delete temporary directory
+ file:
+ path: "{{ remote_tmp_dir }}"
+ state: absent
+ no_log: yes
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml
new file mode 100644
index 00000000..1e0f51b8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml
@@ -0,0 +1,11 @@
+- name: create temporary directory
+ tempfile:
+ state: directory
+ suffix: .test
+ register: remote_tmp_dir
+ notify:
+ - delete temporary directory
+
+- name: record temporary directory
+ set_fact:
+ remote_tmp_dir: "{{ remote_tmp_dir.path }}"
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml
new file mode 100644
index 00000000..f8df391b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml
@@ -0,0 +1,10 @@
+- name: make sure we have the ansible_os_family and ansible_distribution_version facts
+ setup:
+ gather_subset: distribution
+ when: ansible_facts == {}
+
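+# Pick the most specific task file available for this host's OS family
+# (for example windows.yml), falling back to default.yml otherwise.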
+- include_tasks: "{{ lookup('first_found', files)}}"
+ vars:
+ files:
+ - "{{ ansible_os_family | lower }}.yml"
+ - "default.yml"
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows-cleanup.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows-cleanup.yml
new file mode 100644
index 00000000..32f372d0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows-cleanup.yml
@@ -0,0 +1,4 @@
+- name: delete temporary directory (windows)
+ ansible.windows.win_file:
+ path: '{{ remote_tmp_dir }}'
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows.yml
new file mode 100644
index 00000000..317c146d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows.yml
@@ -0,0 +1,10 @@
+- name: create temporary directory
+  ansible.windows.win_tempfile:
+    state: directory
+    suffix: .test
+  register: remote_tmp_dir
+  notify:
+    - delete temporary directory (windows)
+- name: record temporary directory
+ set_fact:
+ remote_tmp_dir: '{{ remote_tmp_dir.path }}'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/tasks/main.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/tasks/main.yml
new file mode 100644
index 00000000..18c571b6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/tasks/main.yml
@@ -0,0 +1,55 @@
+# (c) 2014, James Laska <jlaska@ansible.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: create a temp file
+ tempfile:
+ state: file
+ register: sshkey_file
+ tags:
+ - prepare
+
+- name: generate sshkey
+ shell: echo 'y' | ssh-keygen -P '' -f {{ sshkey_file.path }}
+ tags:
+ - prepare
+
+- name: create another temp file
+ tempfile:
+ state: file
+ register: another_sshkey_file
+ tags:
+ - prepare
+
+- name: generate another_sshkey
+ shell: echo 'y' | ssh-keygen -P '' -f {{ another_sshkey_file.path }}
+ tags:
+ - prepare
+
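+# Compute the key's MD5 fingerprint from the public key in DER form; this is
+# the fingerprint format EC2 reports for imported key pairs.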
+- name: record fingerprint
+ shell: openssl rsa -in {{ sshkey_file.path }} -pubout -outform DER 2>/dev/null | openssl md5 -c
+ register: fingerprint
+ tags:
+ - prepare
+
+- name: set facts for future roles
+ set_fact:
+ sshkey: '{{ sshkey_file.path }}'
+ key_material: "{{ lookup('file', sshkey_file.path ~ '.pub') }}"
+ another_key_material: "{{ lookup('file', another_sshkey_file.path ~ '.pub') }}"
+ fingerprint: '{{ fingerprint.stdout.split()[1] }}'
+ tags:
+ - prepare
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/requirements.yml b/collections-debian-merged/ansible_collections/amazon/aws/tests/requirements.yml
new file mode 100644
index 00000000..3e967b19
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/requirements.yml
@@ -0,0 +1,4 @@
+integration_tests_dependencies:
+- ansible.windows
+- community.general
+unit_tests_dependencies: []
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/sanity/ignore-2.10.txt b/collections-debian-merged/ansible_collections/amazon/aws/tests/sanity/ignore-2.10.txt
new file mode 100644
index 00000000..6536b1ab
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/sanity/ignore-2.10.txt
@@ -0,0 +1,4 @@
+plugins/modules/ec2_tag.py validate-modules:parameter-state-invalid-choice
+plugins/modules/ec2_vol.py validate-modules:parameter-state-invalid-choice
+tests/utils/shippable/check_matrix.py replace-urlopen
+tests/utils/shippable/timing.py shebang \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/sanity/ignore-2.11.txt b/collections-debian-merged/ansible_collections/amazon/aws/tests/sanity/ignore-2.11.txt
new file mode 100644
index 00000000..6536b1ab
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/sanity/ignore-2.11.txt
@@ -0,0 +1,4 @@
+plugins/modules/ec2_tag.py validate-modules:parameter-state-invalid-choice
+plugins/modules/ec2_vol.py validate-modules:parameter-state-invalid-choice
+tests/utils/shippable/check_matrix.py replace-urlopen
+tests/utils/shippable/timing.py shebang \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/sanity/ignore-2.9.txt b/collections-debian-merged/ansible_collections/amazon/aws/tests/sanity/ignore-2.9.txt
new file mode 100644
index 00000000..b7cfe373
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/sanity/ignore-2.9.txt
@@ -0,0 +1,17 @@
+plugins/modules/aws_az_info.py pylint:ansible-deprecated-no-version
+plugins/modules/aws_caller_info.py pylint:ansible-deprecated-no-version
+plugins/modules/cloudformation_info.py pylint:ansible-deprecated-no-version
+plugins/modules/ec2.py pylint:ansible-deprecated-no-version
+plugins/modules/ec2_ami_info.py pylint:ansible-deprecated-no-version
+plugins/modules/ec2_eni_info.py pylint:ansible-deprecated-no-version
+plugins/modules/ec2_group_info.py pylint:ansible-deprecated-no-version
+plugins/modules/ec2_snapshot_info.py pylint:ansible-deprecated-no-version
+plugins/modules/ec2_tag.py pylint:ansible-deprecated-no-version
+plugins/modules/ec2_vol.py pylint:ansible-deprecated-no-version
+plugins/modules/ec2_vol_info.py pylint:ansible-deprecated-no-version
+plugins/modules/ec2_vpc_dhcp_option_info.py pylint:ansible-deprecated-no-version
+plugins/modules/ec2_vpc_net_info.py pylint:ansible-deprecated-no-version
+plugins/modules/ec2_vpc_subnet_info.py pylint:ansible-deprecated-no-version
+plugins/module_utils/ec2.py pylint:ansible-deprecated-no-version
+tests/utils/shippable/check_matrix.py replace-urlopen
+tests/utils/shippable/timing.py shebang
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/__init__.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/__init__.py
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/compat/__init__.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/compat/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/compat/__init__.py
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/compat/builtins.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/compat/builtins.py
new file mode 100644
index 00000000..349d310e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/compat/builtins.py
@@ -0,0 +1,33 @@
+# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+#
+# Compat for python2.7
+#
+
+# One unittest needs to import builtins via __import__() so we need to have
+# the string that represents it
+try:
+ import __builtin__ # pylint: disable=unused-import
+except ImportError:
+ BUILTINS = 'builtins'
+else:
+ BUILTINS = '__builtin__'
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/compat/mock.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/compat/mock.py
new file mode 100644
index 00000000..0972cd2e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/compat/mock.py
@@ -0,0 +1,122 @@
+# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Compat module for Python3.x's unittest.mock module
+'''
+import sys
+
+# Python 2.7
+
+# Note: Could use the pypi mock library on python3.x as well as python2.x. It
+# is the same as the python3 stdlib mock library
+
+try:
+ # Allow wildcard import because we really do want to import all of mock's
+ # symbols into this compat shim
+ # pylint: disable=wildcard-import,unused-wildcard-import
+ from unittest.mock import *
+except ImportError:
+ # Python 2
+ # pylint: disable=wildcard-import,unused-wildcard-import
+ try:
+ from mock import *
+ except ImportError:
+ print('You need the mock library installed on python2.x to run tests')
+
+
+# Prior to 3.4.4, mock_open cannot handle binary read_data
+if sys.version_info >= (3,) and sys.version_info < (3, 4, 4):
+ file_spec = None
+
+ def _iterate_read_data(read_data):
+ # Helper for mock_open:
+ # Retrieve lines from read_data via a generator so that separate calls to
+ # readline, read, and readlines are properly interleaved
+ sep = b'\n' if isinstance(read_data, bytes) else '\n'
+ data_as_list = [l + sep for l in read_data.split(sep)]
+
+ if data_as_list[-1] == sep:
+ # If the last line ended in a newline, the list comprehension will have an
+ # extra entry that's just a newline. Remove this.
+ data_as_list = data_as_list[:-1]
+ else:
+            # If there wasn't an extra newline by itself, then the file being
+            # emulated doesn't end with a newline, so remove the trailing
+            # newline that the list comprehension above added
+ data_as_list[-1] = data_as_list[-1][:-1]
+
+ for line in data_as_list:
+ yield line
+
+ def mock_open(mock=None, read_data=''):
+ """
+ A helper function to create a mock to replace the use of `open`. It works
+ for `open` called directly or used as a context manager.
+
+ The `mock` argument is the mock object to configure. If `None` (the
+ default) then a `MagicMock` will be created for you, with the API limited
+ to methods or attributes available on standard file handles.
+
+        `read_data` is a string for the `read`, `readline`, and `readlines`
+        methods of the file handle to return.  This is an empty string by
+        default.
+ """
+ def _readlines_side_effect(*args, **kwargs):
+ if handle.readlines.return_value is not None:
+ return handle.readlines.return_value
+ return list(_data)
+
+ def _read_side_effect(*args, **kwargs):
+ if handle.read.return_value is not None:
+ return handle.read.return_value
+ return type(read_data)().join(_data)
+
+ def _readline_side_effect():
+ if handle.readline.return_value is not None:
+ while True:
+ yield handle.readline.return_value
+ for line in _data:
+ yield line
+
+ global file_spec
+ if file_spec is None:
+ import _io
+ file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))))
+
+ if mock is None:
+ mock = MagicMock(name='open', spec=open)
+
+ handle = MagicMock(spec=file_spec)
+ handle.__enter__.return_value = handle
+
+ _data = _iterate_read_data(read_data)
+
+ handle.write.return_value = None
+ handle.read.return_value = None
+ handle.readline.return_value = None
+ handle.readlines.return_value = None
+
+ handle.read.side_effect = _read_side_effect
+ handle.readline.side_effect = _readline_side_effect()
+ handle.readlines.side_effect = _readlines_side_effect
+
+ mock.return_value = handle
+ return mock
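+
+# Illustrative usage of this shim (a sketch, not part of the original file):
+#   m = mock_open(read_data='line1\nline2\n')
+#   with patch('builtins.open', m):        # or '__builtin__.open' on Python 2
+#       assert open('any-path').read() == 'line1\nline2\n'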
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/compat/unittest.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/compat/unittest.py
new file mode 100644
index 00000000..98f08ad6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/compat/unittest.py
@@ -0,0 +1,38 @@
+# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Compat module for Python2.7's unittest module
+'''
+
+import sys
+
+# Allow wildcard import because we really do want to import all of
+# unittests's symbols into this compat shim
+# pylint: disable=wildcard-import,unused-wildcard-import
+if sys.version_info < (2, 7):
+ try:
+ # Need unittest2 on python2.6
+ from unittest2 import *
+ except ImportError:
+ print('You need unittest2 installed on python2.6.x to run tests')
+else:
+ from unittest import *
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/mock/__init__.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/mock/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/mock/__init__.py
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/mock/loader.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/mock/loader.py
new file mode 100644
index 00000000..0ee47fbb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/mock/loader.py
@@ -0,0 +1,116 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.errors import AnsibleParserError
+from ansible.parsing.dataloader import DataLoader
+from ansible.module_utils._text import to_bytes, to_text
+
+
+class DictDataLoader(DataLoader):
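+    # A DataLoader that serves file contents from the in-memory file_mapping
+    # dict (path -> content) instead of the filesystem, so unit tests can
+    # feed the loader canned YAML/vars without creating real files.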
+
+ def __init__(self, file_mapping=None):
+ file_mapping = {} if file_mapping is None else file_mapping
+ assert type(file_mapping) == dict
+
+ super(DictDataLoader, self).__init__()
+
+ self._file_mapping = file_mapping
+ self._build_known_directories()
+ self._vault_secrets = None
+
+ def load_from_file(self, path, cache=True, unsafe=False):
+ path = to_text(path)
+ if path in self._file_mapping:
+ return self.load(self._file_mapping[path], path)
+ return None
+
+ # TODO: the real _get_file_contents returns a bytestring, so we actually convert the
+ # unicode/text it's created with to utf-8
+ def _get_file_contents(self, path):
+ path = to_text(path)
+ if path in self._file_mapping:
+ return (to_bytes(self._file_mapping[path]), False)
+ else:
+ raise AnsibleParserError("file not found: %s" % path)
+
+ def path_exists(self, path):
+ path = to_text(path)
+ return path in self._file_mapping or path in self._known_directories
+
+ def is_file(self, path):
+ path = to_text(path)
+ return path in self._file_mapping
+
+ def is_directory(self, path):
+ path = to_text(path)
+ return path in self._known_directories
+
+ def list_directory(self, path):
+ ret = []
+ path = to_text(path)
+ for x in (list(self._file_mapping.keys()) + self._known_directories):
+ if x.startswith(path):
+ if os.path.dirname(x) == path:
+ ret.append(os.path.basename(x))
+ return ret
+
+ def is_executable(self, path):
+ # FIXME: figure out a way to make paths return true for this
+ return False
+
+ def _add_known_directory(self, directory):
+ if directory not in self._known_directories:
+ self._known_directories.append(directory)
+
+ def _build_known_directories(self):
+ self._known_directories = []
+ for path in self._file_mapping:
+ dirname = os.path.dirname(path)
+ while dirname not in ('/', ''):
+ self._add_known_directory(dirname)
+ dirname = os.path.dirname(dirname)
+
+ def push(self, path, content):
+ rebuild_dirs = False
+ if path not in self._file_mapping:
+ rebuild_dirs = True
+
+ self._file_mapping[path] = content
+
+ if rebuild_dirs:
+ self._build_known_directories()
+
+ def pop(self, path):
+ if path in self._file_mapping:
+ del self._file_mapping[path]
+ self._build_known_directories()
+
+ def clear(self):
+ self._file_mapping = dict()
+ self._known_directories = []
+
+ def get_basedir(self):
+ return os.getcwd()
+
+ def set_vault_secrets(self, vault_secrets):
+ self._vault_secrets = vault_secrets
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/mock/path.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/mock/path.py
new file mode 100644
index 00000000..8de2aec2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/mock/path.py
@@ -0,0 +1,8 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.amazon.aws.tests.unit.compat.mock import MagicMock
+from ansible.utils.path import unfrackpath
+
+
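+# A no-op stand-in for unfrackpath: it simply returns the path it is given,
+# letting tests patch away path normalisation.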
+mock_unfrackpath_noop = MagicMock(spec_set=unfrackpath, side_effect=lambda x, *args, **kwargs: x)
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/mock/procenv.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/mock/procenv.py
new file mode 100644
index 00000000..273959e4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/mock/procenv.py
@@ -0,0 +1,90 @@
+# (c) 2016, Matt Davis <mdavis@ansible.com>
+# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+import json
+
+from contextlib import contextmanager
+from io import BytesIO, StringIO
+from ansible_collections.amazon.aws.tests.unit.compat import unittest
+from ansible.module_utils.six import PY3
+from ansible.module_utils._text import to_bytes
+
+
+@contextmanager
+def swap_stdin_and_argv(stdin_data='', argv_data=tuple()):
+ """
+ context manager that temporarily masks the test runner's values for stdin and argv
+ """
+ real_stdin = sys.stdin
+ real_argv = sys.argv
+
+ if PY3:
+ fake_stream = StringIO(stdin_data)
+ fake_stream.buffer = BytesIO(to_bytes(stdin_data))
+ else:
+ fake_stream = BytesIO(to_bytes(stdin_data))
+
+ try:
+ sys.stdin = fake_stream
+ sys.argv = argv_data
+
+ yield
+ finally:
+ sys.stdin = real_stdin
+ sys.argv = real_argv
+
+
+@contextmanager
+def swap_stdout():
+ """
+ context manager that temporarily replaces stdout for tests that need to verify output
+ """
+ old_stdout = sys.stdout
+
+ if PY3:
+ fake_stream = StringIO()
+ else:
+ fake_stream = BytesIO()
+
+ try:
+ sys.stdout = fake_stream
+
+ yield fake_stream
+ finally:
+ sys.stdout = old_stdout
+
+
+class ModuleTestCase(unittest.TestCase):
+ def setUp(self, module_args=None):
+ if module_args is None:
+ module_args = {'_ansible_remote_tmp': '/tmp', '_ansible_keep_remote_files': False}
+
+ args = json.dumps(dict(ANSIBLE_MODULE_ARGS=module_args))
+
+ # unittest doesn't have a clean place to use a context manager, so we have to enter/exit manually
+ self.stdin_swap = swap_stdin_and_argv(stdin_data=args)
+ self.stdin_swap.__enter__()
+
+ def tearDown(self):
+ # unittest doesn't have a clean place to use a context manager, so we have to enter/exit manually
+ self.stdin_swap.__exit__(None, None, None)
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/mock/vault_helper.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/mock/vault_helper.py
new file mode 100644
index 00000000..dcce9c78
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/mock/vault_helper.py
@@ -0,0 +1,39 @@
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils._text import to_bytes
+
+from ansible.parsing.vault import VaultSecret
+
+
+class TextVaultSecret(VaultSecret):
+    '''A secret piece of text, i.e. a password. Tracks text encoding.
+
+ The text encoding of the text may not be the default text encoding so
+ we keep track of the encoding so we encode it to the same bytes.'''
+
+ def __init__(self, text, encoding=None, errors=None, _bytes=None):
+ super(TextVaultSecret, self).__init__()
+ self.text = text
+ self.encoding = encoding or 'utf-8'
+ self._bytes = _bytes
+ self.errors = errors or 'strict'
+
+ @property
+ def bytes(self):
+ '''The text encoded with encoding, unless we specifically set _bytes.'''
+ return self._bytes or to_bytes(self.text, encoding=self.encoding, errors=self.errors)
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/mock/yaml_helper.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/mock/yaml_helper.py
new file mode 100644
index 00000000..1ef17215
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/mock/yaml_helper.py
@@ -0,0 +1,124 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import io
+import yaml
+
+from ansible.module_utils.six import PY3
+from ansible.parsing.yaml.loader import AnsibleLoader
+from ansible.parsing.yaml.dumper import AnsibleDumper
+
+
+class YamlTestUtils(object):
+ """Mixin class to combine with a unittest.TestCase subclass."""
+ def _loader(self, stream):
+ """Vault related tests will want to override this.
+
+ Vault cases should setup a AnsibleLoader that has the vault password."""
+ return AnsibleLoader(stream)
+
+ def _dump_stream(self, obj, stream, dumper=None):
+ """Dump to a py2-unicode or py3-string stream."""
+ if PY3:
+ return yaml.dump(obj, stream, Dumper=dumper)
+ else:
+ return yaml.dump(obj, stream, Dumper=dumper, encoding=None)
+
+ def _dump_string(self, obj, dumper=None):
+ """Dump to a py2-unicode or py3-string"""
+ if PY3:
+ return yaml.dump(obj, Dumper=dumper)
+ else:
+ return yaml.dump(obj, Dumper=dumper, encoding=None)
+
+ def _dump_load_cycle(self, obj):
+        # Each pass through a dump or load revs the 'generation'
+ # obj to yaml string
+ string_from_object_dump = self._dump_string(obj, dumper=AnsibleDumper)
+
+ # wrap a stream/file like StringIO around that yaml
+ stream_from_object_dump = io.StringIO(string_from_object_dump)
+ loader = self._loader(stream_from_object_dump)
+ # load the yaml stream to create a new instance of the object (gen 2)
+ obj_2 = loader.get_data()
+
+        # dump the gen 2 object directly to a string
+ string_from_object_dump_2 = self._dump_string(obj_2,
+ dumper=AnsibleDumper)
+
+ # The gen 1 and gen 2 yaml strings
+ self.assertEqual(string_from_object_dump, string_from_object_dump_2)
+ # the gen 1 (orig) and gen 2 py object
+ self.assertEqual(obj, obj_2)
+
+ # again! gen 3... load strings into py objects
+ stream_3 = io.StringIO(string_from_object_dump_2)
+ loader_3 = self._loader(stream_3)
+ obj_3 = loader_3.get_data()
+
+ string_from_object_dump_3 = self._dump_string(obj_3, dumper=AnsibleDumper)
+
+ self.assertEqual(obj, obj_3)
+ # should be transitive, but...
+ self.assertEqual(obj_2, obj_3)
+ self.assertEqual(string_from_object_dump, string_from_object_dump_3)
+
+ def _old_dump_load_cycle(self, obj):
+ '''Dump the passed in object to yaml, load it back up, dump again, compare.'''
+ stream = io.StringIO()
+
+ yaml_string = self._dump_string(obj, dumper=AnsibleDumper)
+ self._dump_stream(obj, stream, dumper=AnsibleDumper)
+
+ yaml_string_from_stream = stream.getvalue()
+
+ # reset stream
+ stream.seek(0)
+
+ loader = self._loader(stream)
+ # loader = AnsibleLoader(stream, vault_password=self.vault_password)
+ obj_from_stream = loader.get_data()
+
+ stream_from_string = io.StringIO(yaml_string)
+ loader2 = self._loader(stream_from_string)
+ # loader2 = AnsibleLoader(stream_from_string, vault_password=self.vault_password)
+ obj_from_string = loader2.get_data()
+
+ stream_obj_from_stream = io.StringIO()
+ stream_obj_from_string = io.StringIO()
+
+ if PY3:
+ yaml.dump(obj_from_stream, stream_obj_from_stream, Dumper=AnsibleDumper)
+ yaml.dump(obj_from_stream, stream_obj_from_string, Dumper=AnsibleDumper)
+ else:
+ yaml.dump(obj_from_stream, stream_obj_from_stream, Dumper=AnsibleDumper, encoding=None)
+ yaml.dump(obj_from_stream, stream_obj_from_string, Dumper=AnsibleDumper, encoding=None)
+
+ yaml_string_stream_obj_from_stream = stream_obj_from_stream.getvalue()
+ yaml_string_stream_obj_from_string = stream_obj_from_string.getvalue()
+
+ stream_obj_from_stream.seek(0)
+ stream_obj_from_string.seek(0)
+
+ if PY3:
+ yaml_string_obj_from_stream = yaml.dump(obj_from_stream, Dumper=AnsibleDumper)
+ yaml_string_obj_from_string = yaml.dump(obj_from_string, Dumper=AnsibleDumper)
+ else:
+ yaml_string_obj_from_stream = yaml.dump(obj_from_stream, Dumper=AnsibleDumper, encoding=None)
+ yaml_string_obj_from_string = yaml.dump(obj_from_string, Dumper=AnsibleDumper, encoding=None)
+
+ assert yaml_string == yaml_string_obj_from_stream
+ assert yaml_string == yaml_string_obj_from_stream == yaml_string_obj_from_string
+ assert (yaml_string == yaml_string_obj_from_stream == yaml_string_obj_from_string == yaml_string_stream_obj_from_stream ==
+ yaml_string_stream_obj_from_string)
+ assert obj == obj_from_stream
+ assert obj == obj_from_string
+ assert obj == yaml_string_obj_from_stream
+ assert obj == yaml_string_obj_from_string
+ assert obj == obj_from_stream == obj_from_string == yaml_string_obj_from_stream == yaml_string_obj_from_string
+ return {'obj': obj,
+ 'yaml_string': yaml_string,
+ 'yaml_string_from_stream': yaml_string_from_stream,
+ 'obj_from_stream': obj_from_stream,
+ 'obj_from_string': obj_from_string,
+ 'yaml_string_obj_from_string': yaml_string_obj_from_string}
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/__init__.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/__init__.py
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/conftest.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/conftest.py
new file mode 100644
index 00000000..8bc13c4d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/conftest.py
@@ -0,0 +1,72 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import sys
+from io import BytesIO
+
+import pytest
+
+import ansible.module_utils.basic
+from ansible.module_utils.six import PY3, string_types
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.common._collections_compat import MutableMapping
+
+
+@pytest.fixture
+def stdin(mocker, request):
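+    # Feed JSON-encoded ANSIBLE_MODULE_ARGS to the module via a fake stdin so
+    # that AnsibleModule can be instantiated in-process during the test.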
+ old_args = ansible.module_utils.basic._ANSIBLE_ARGS
+ ansible.module_utils.basic._ANSIBLE_ARGS = None
+ old_argv = sys.argv
+ sys.argv = ['ansible_unittest']
+
+ if isinstance(request.param, string_types):
+ args = request.param
+ elif isinstance(request.param, MutableMapping):
+ if 'ANSIBLE_MODULE_ARGS' not in request.param:
+ request.param = {'ANSIBLE_MODULE_ARGS': request.param}
+ if '_ansible_remote_tmp' not in request.param['ANSIBLE_MODULE_ARGS']:
+ request.param['ANSIBLE_MODULE_ARGS']['_ansible_remote_tmp'] = '/tmp'
+ if '_ansible_keep_remote_files' not in request.param['ANSIBLE_MODULE_ARGS']:
+ request.param['ANSIBLE_MODULE_ARGS']['_ansible_keep_remote_files'] = False
+ args = json.dumps(request.param)
+ else:
+ raise Exception('Malformed data to the stdin pytest fixture')
+
+ fake_stdin = BytesIO(to_bytes(args, errors='surrogate_or_strict'))
+ if PY3:
+ mocker.patch('ansible.module_utils.basic.sys.stdin', mocker.MagicMock())
+ mocker.patch('ansible.module_utils.basic.sys.stdin.buffer', fake_stdin)
+ else:
+ mocker.patch('ansible.module_utils.basic.sys.stdin', fake_stdin)
+
+ yield fake_stdin
+
+ ansible.module_utils.basic._ANSIBLE_ARGS = old_args
+ sys.argv = old_argv
+
+
+@pytest.fixture
+def am(stdin, request):
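+    # Build a real AnsibleModule using the arguments injected by the stdin
+    # fixture; an optional dict passed via request.param becomes argument_spec.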
+ old_args = ansible.module_utils.basic._ANSIBLE_ARGS
+ ansible.module_utils.basic._ANSIBLE_ARGS = None
+ old_argv = sys.argv
+ sys.argv = ['ansible_unittest']
+
+ argspec = {}
+ if hasattr(request, 'param'):
+ if isinstance(request.param, dict):
+ argspec = request.param
+
+ am = ansible.module_utils.basic.AnsibleModule(
+ argument_spec=argspec,
+ )
+ am._name = 'ansible_unittest'
+
+ yield am
+
+ ansible.module_utils.basic._ANSIBLE_ARGS = old_args
+ sys.argv = old_argv
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/core/__init__.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/core/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/core/__init__.py
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/core/ansible_aws_module/test_fail_json_aws.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/core/ansible_aws_module/test_fail_json_aws.py
new file mode 100644
index 00000000..c7e53afc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/core/ansible_aws_module/test_fail_json_aws.py
@@ -0,0 +1,321 @@
+# (c) 2020 Red Hat Inc.
+#
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+import botocore
+import boto3
+import json
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+
+
+class TestFailJsonAws(object):
+ # ========================================================
+ # Prepare some data for use in our testing
+ # ========================================================
+ def setup_method(self):
+ # Basic information that ClientError needs to spawn off an error
+ self.EXAMPLE_EXCEPTION_DATA = {
+ "Error": {
+ "Code": "InvalidParameterValue",
+ "Message": "The filter 'exampleFilter' is invalid"
+ },
+ "ResponseMetadata": {
+ "RequestId": "01234567-89ab-cdef-0123-456789abcdef",
+ "HTTPStatusCode": 400,
+ "HTTPHeaders": {
+ "transfer-encoding": "chunked",
+ "date": "Fri, 13 Nov 2020 00:00:00 GMT",
+ "connection": "close",
+ "server": "AmazonEC2"
+ },
+ "RetryAttempts": 0
+ }
+ }
+ self.CAMEL_RESPONSE = camel_dict_to_snake_dict(self.EXAMPLE_EXCEPTION_DATA.get("ResponseMetadata"))
+ self.CAMEL_ERROR = camel_dict_to_snake_dict(self.EXAMPLE_EXCEPTION_DATA.get("Error"))
+ # ClientError(EXAMPLE_EXCEPTION_DATA, "testCall") will generate this
+ self.EXAMPLE_MSG = "An error occurred (InvalidParameterValue) when calling the testCall operation: The filter 'exampleFilter' is invalid"
+ self.DEFAULT_CORE_MSG = "An unspecified error occurred"
+ self.FAIL_MSG = "I Failed!"
+
+ # ========================================================
+ # Passing fail_json_aws nothing more than a ClientError
+ # ========================================================
+ @pytest.mark.parametrize("stdin", [{}], indirect=["stdin"])
+ def test_fail_client_minimal(self, monkeypatch, stdin, capfd):
+ monkeypatch.setattr(botocore, "__version__", "1.2.3")
+ monkeypatch.setattr(boto3, "__version__", "1.2.4")
+
+ # Create a minimal module that we can call
+ module = AnsibleAWSModule(argument_spec=dict())
+ try:
+ raise botocore.exceptions.ClientError(self.EXAMPLE_EXCEPTION_DATA, "testCall")
+ except botocore.exceptions.ClientError as e:
+ with pytest.raises(SystemExit) as ctx:
+ module.fail_json_aws(e)
+ assert ctx.value.code == 1
+ out, err = capfd.readouterr()
+ return_val = json.loads(out)
+
+ assert return_val.get("msg") == self.EXAMPLE_MSG
+ assert return_val.get("boto3_version") == "1.2.4"
+ assert return_val.get("botocore_version") == "1.2.3"
+ assert return_val.get("exception") is not None
+ assert return_val.get("failed")
+ assert return_val.get("response_metadata") == self.CAMEL_RESPONSE
+ assert return_val.get("error") == self.CAMEL_ERROR
+
+ # ========================================================
+ # Passing fail_json_aws a ClientError and a message
+ # ========================================================
+ @pytest.mark.parametrize("stdin", [{}], indirect=["stdin"])
+ def test_fail_client_msg(self, monkeypatch, stdin, capfd):
+ monkeypatch.setattr(botocore, "__version__", "1.2.3")
+ monkeypatch.setattr(boto3, "__version__", "1.2.4")
+
+ # Create a minimal module that we can call
+ module = AnsibleAWSModule(argument_spec=dict())
+ try:
+ raise botocore.exceptions.ClientError(self.EXAMPLE_EXCEPTION_DATA, "testCall")
+ except botocore.exceptions.ClientError as e:
+ with pytest.raises(SystemExit) as ctx:
+ module.fail_json_aws(e, msg=self.FAIL_MSG)
+ assert ctx.value.code == 1
+ out, err = capfd.readouterr()
+ return_val = json.loads(out)
+
+ assert return_val.get("msg") == self.FAIL_MSG + ": " + self.EXAMPLE_MSG
+ assert return_val.get("boto3_version") == "1.2.4"
+ assert return_val.get("botocore_version") == "1.2.3"
+ assert return_val.get("exception") is not None
+ assert return_val.get("failed")
+ assert return_val.get("response_metadata") == self.CAMEL_RESPONSE
+ assert return_val.get("error") == self.CAMEL_ERROR
+
+ # ========================================================
+ # Passing fail_json_aws a ClientError and a message as a positional argument
+ # ========================================================
+ @pytest.mark.parametrize("stdin", [{}], indirect=["stdin"])
+ def test_fail_client_positional_msg(self, monkeypatch, stdin, capfd):
+ monkeypatch.setattr(botocore, "__version__", "1.2.3")
+ monkeypatch.setattr(boto3, "__version__", "1.2.4")
+
+ # Create a minimal module that we can call
+ module = AnsibleAWSModule(argument_spec=dict())
+ try:
+ raise botocore.exceptions.ClientError(self.EXAMPLE_EXCEPTION_DATA, "testCall")
+ except botocore.exceptions.ClientError as e:
+ with pytest.raises(SystemExit) as ctx:
+ module.fail_json_aws(e, self.FAIL_MSG)
+ assert ctx.value.code == 1
+ out, err = capfd.readouterr()
+ return_val = json.loads(out)
+
+ assert return_val.get("msg") == self.FAIL_MSG + ": " + self.EXAMPLE_MSG
+ assert return_val.get("boto3_version") == "1.2.4"
+ assert return_val.get("botocore_version") == "1.2.3"
+ assert return_val.get("exception") is not None
+ assert return_val.get("failed")
+ assert return_val.get("response_metadata") == self.CAMEL_RESPONSE
+ assert return_val.get("error") == self.CAMEL_ERROR
+
+ # ========================================================
+ # Passing fail_json_aws a ClientError and an arbitrary key
+ # ========================================================
+ @pytest.mark.parametrize("stdin", [{}], indirect=["stdin"])
+ def test_fail_client_key(self, monkeypatch, stdin, capfd):
+ monkeypatch.setattr(botocore, "__version__", "1.2.3")
+ monkeypatch.setattr(boto3, "__version__", "1.2.4")
+
+ # Create a minimal module that we can call
+ module = AnsibleAWSModule(argument_spec=dict())
+ try:
+ raise botocore.exceptions.ClientError(self.EXAMPLE_EXCEPTION_DATA, "testCall")
+ except botocore.exceptions.ClientError as e:
+ with pytest.raises(SystemExit) as ctx:
+ module.fail_json_aws(e, extra_key="Some Value")
+ assert ctx.value.code == 1
+ out, err = capfd.readouterr()
+ return_val = json.loads(out)
+
+ assert return_val.get("msg") == self.EXAMPLE_MSG
+ assert return_val.get("extra_key") == "Some Value"
+ assert return_val.get("boto3_version") == "1.2.4"
+ assert return_val.get("botocore_version") == "1.2.3"
+ assert return_val.get("exception") is not None
+ assert return_val.get("failed")
+ assert return_val.get("response_metadata") == self.CAMEL_RESPONSE
+ assert return_val.get("error") == self.CAMEL_ERROR
+
+ # ========================================================
+    # Passing fail_json_aws a ClientError, an arbitrary key and a message
+ # ========================================================
+ @pytest.mark.parametrize("stdin", [{}], indirect=["stdin"])
+ def test_fail_client_msg_and_key(self, monkeypatch, stdin, capfd):
+ monkeypatch.setattr(botocore, "__version__", "1.2.3")
+ monkeypatch.setattr(boto3, "__version__", "1.2.4")
+
+ # Create a minimal module that we can call
+ module = AnsibleAWSModule(argument_spec=dict())
+ try:
+ raise botocore.exceptions.ClientError(self.EXAMPLE_EXCEPTION_DATA, "testCall")
+ except botocore.exceptions.ClientError as e:
+ with pytest.raises(SystemExit) as ctx:
+ module.fail_json_aws(e, extra_key="Some Value", msg=self.FAIL_MSG)
+ assert ctx.value.code == 1
+ out, err = capfd.readouterr()
+ return_val = json.loads(out)
+
+ assert return_val.get("msg") == self.FAIL_MSG + ": " + self.EXAMPLE_MSG
+ assert return_val.get("extra_key") == "Some Value"
+ assert return_val.get("boto3_version") == "1.2.4"
+ assert return_val.get("botocore_version") == "1.2.3"
+ assert return_val.get("exception") is not None
+ assert return_val.get("failed")
+ assert return_val.get("response_metadata") == self.CAMEL_RESPONSE
+ assert return_val.get("error") == self.CAMEL_ERROR
+
+ # ========================================================
+ # Passing fail_json_aws nothing more than a BotoCoreError
+ # ========================================================
+ @pytest.mark.parametrize("stdin", [{}], indirect=["stdin"])
+ def test_fail_botocore_minimal(self, monkeypatch, stdin, capfd):
+ monkeypatch.setattr(botocore, "__version__", "1.2.3")
+ monkeypatch.setattr(boto3, "__version__", "1.2.4")
+
+ # Create a minimal module that we can call
+ module = AnsibleAWSModule(argument_spec=dict())
+ try:
+ raise botocore.exceptions.BotoCoreError()
+ except botocore.exceptions.BotoCoreError as e:
+ with pytest.raises(SystemExit) as ctx:
+ module.fail_json_aws(e)
+ assert ctx.value.code == 1
+ out, err = capfd.readouterr()
+ return_val = json.loads(out)
+
+ assert return_val.get("msg") == self.DEFAULT_CORE_MSG
+ assert return_val.get("boto3_version") == "1.2.4"
+ assert return_val.get("botocore_version") == "1.2.3"
+ assert return_val.get("exception") is not None
+ assert return_val.get("failed")
+ assert "response_metadata" not in return_val
+ assert "error" not in return_val
+
+ # ========================================================
+ # Passing fail_json_aws BotoCoreError and a message
+ # ========================================================
+ @pytest.mark.parametrize("stdin", [{}], indirect=["stdin"])
+ def test_fail_botocore_msg(self, monkeypatch, stdin, capfd):
+ monkeypatch.setattr(botocore, "__version__", "1.2.3")
+ monkeypatch.setattr(boto3, "__version__", "1.2.4")
+
+ # Create a minimal module that we can call
+ module = AnsibleAWSModule(argument_spec=dict())
+ try:
+ raise botocore.exceptions.BotoCoreError()
+ except botocore.exceptions.BotoCoreError as e:
+ with pytest.raises(SystemExit) as ctx:
+ module.fail_json_aws(e, msg=self.FAIL_MSG)
+ assert ctx.value.code == 1
+ out, err = capfd.readouterr()
+ return_val = json.loads(out)
+
+ assert return_val.get("msg") == self.FAIL_MSG + ": " + self.DEFAULT_CORE_MSG
+ assert return_val.get("boto3_version") == "1.2.4"
+ assert return_val.get("botocore_version") == "1.2.3"
+ assert return_val.get("exception") is not None
+ assert return_val.get("failed")
+ assert "response_metadata" not in return_val
+ assert "error" not in return_val
+
+ # ========================================================
+ # Passing fail_json_aws BotoCoreError and a message as a positional
+ # argument
+ # ========================================================
+ @pytest.mark.parametrize("stdin", [{}], indirect=["stdin"])
+ def test_fail_botocore_positional_msg(self, monkeypatch, stdin, capfd):
+ monkeypatch.setattr(botocore, "__version__", "1.2.3")
+ monkeypatch.setattr(boto3, "__version__", "1.2.4")
+
+ # Create a minimal module that we can call
+ module = AnsibleAWSModule(argument_spec=dict())
+ try:
+ raise botocore.exceptions.BotoCoreError()
+ except botocore.exceptions.BotoCoreError as e:
+ with pytest.raises(SystemExit) as ctx:
+ module.fail_json_aws(e, self.FAIL_MSG)
+ assert ctx.value.code == 1
+ out, err = capfd.readouterr()
+ return_val = json.loads(out)
+
+ assert return_val.get("msg") == self.FAIL_MSG + ": " + self.DEFAULT_CORE_MSG
+ assert return_val.get("boto3_version") == "1.2.4"
+ assert return_val.get("botocore_version") == "1.2.3"
+ assert return_val.get("exception") is not None
+ assert return_val.get("failed")
+ assert "response_metadata" not in return_val
+ assert "error" not in return_val
+
+ # ========================================================
+ # Passing fail_json_aws a BotoCoreError and an arbitrary key
+ # ========================================================
+ @pytest.mark.parametrize("stdin", [{}], indirect=["stdin"])
+ def test_fail_botocore_key(self, monkeypatch, stdin, capfd):
+ monkeypatch.setattr(botocore, "__version__", "1.2.3")
+ monkeypatch.setattr(boto3, "__version__", "1.2.4")
+
+ # Create a minimal module that we can call
+ module = AnsibleAWSModule(argument_spec=dict())
+ try:
+ raise botocore.exceptions.BotoCoreError()
+ except botocore.exceptions.BotoCoreError as e:
+ with pytest.raises(SystemExit) as ctx:
+ module.fail_json_aws(e, extra_key="Some Value")
+ assert ctx.value.code == 1
+ out, err = capfd.readouterr()
+ return_val = json.loads(out)
+
+ assert return_val.get("msg") == self.DEFAULT_CORE_MSG
+ assert return_val.get("extra_key") == "Some Value"
+ assert return_val.get("boto3_version") == "1.2.4"
+ assert return_val.get("botocore_version") == "1.2.3"
+ assert return_val.get("exception") is not None
+ assert return_val.get("failed")
+ assert "response_metadata" not in return_val
+ assert "error" not in return_val
+
+ # ========================================================
+    # Passing fail_json_aws a BotoCoreError, an arbitrary key and a message
+ # ========================================================
+ @pytest.mark.parametrize("stdin", [{}], indirect=["stdin"])
+ def test_fail_botocore_msg_and_key(self, monkeypatch, stdin, capfd):
+ monkeypatch.setattr(botocore, "__version__", "1.2.3")
+ monkeypatch.setattr(boto3, "__version__", "1.2.4")
+
+ # Create a minimal module that we can call
+ module = AnsibleAWSModule(argument_spec=dict())
+ try:
+ raise botocore.exceptions.BotoCoreError()
+ except botocore.exceptions.BotoCoreError as e:
+ with pytest.raises(SystemExit) as ctx:
+ module.fail_json_aws(e, extra_key="Some Value", msg=self.FAIL_MSG)
+ assert ctx.value.code == 1
+ out, err = capfd.readouterr()
+ return_val = json.loads(out)
+
+ assert return_val.get("msg") == self.FAIL_MSG + ": " + self.DEFAULT_CORE_MSG
+ assert return_val.get("extra_key") == "Some Value"
+ assert return_val.get("boto3_version") == "1.2.4"
+ assert return_val.get("botocore_version") == "1.2.3"
+ assert return_val.get("exception") is not None
+ assert return_val.get("failed")
+ assert "response_metadata" not in return_val
+ assert "error" not in return_val
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/core/test_is_boto3_error_code.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/core/test_is_boto3_error_code.py
new file mode 100644
index 00000000..1b1a70e4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/core/test_is_boto3_error_code.py
@@ -0,0 +1,271 @@
+# -*- coding: utf-8 -*-
+# (c) 2020 Red Hat Inc.
+#
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+import botocore
+
+from ansible_collections.amazon.aws.tests.unit.compat import unittest
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3
+
+if not HAS_BOTO3:
+    pytestmark = pytest.mark.skip("test_is_boto3_error_code.py requires the python modules 'boto3' and 'botocore'")
+
+
+class Boto3ErrorCodeTestSuite(unittest.TestCase):
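+    # is_boto3_error_code() returns an exception class that only matches
+    # ClientErrors carrying one of the given error codes, so it can be used
+    # directly in an except clause; every other exception falls through to
+    # the broader handlers below.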
+
+ def _make_denied_exception(self):
+ return botocore.exceptions.ClientError(
+ {
+ "Error": {
+ "Code": "AccessDenied",
+ "Message": "User: arn:aws:iam::123456789012:user/ExampleUser "
+ + "is not authorized to perform: iam:GetUser on resource: user ExampleUser"
+ },
+ "ResponseMetadata": {
+ "RequestId": "01234567-89ab-cdef-0123-456789abcdef"
+ }
+ }, 'getUser')
+
+ def _make_unexpected_exception(self):
+ return botocore.exceptions.ClientError(
+ {
+ "Error": {
+ "Code": "SomeThingWentWrong",
+ "Message": "Boom!"
+ },
+ "ResponseMetadata": {
+ "RequestId": "01234567-89ab-cdef-0123-456789abcdef"
+ }
+ }, 'someCall')
+
+ def _make_encoded_exception(self):
+ return botocore.exceptions.ClientError(
+ {
+ "Error": {
+ "Code": "PermissionDenied",
+ "Message": "You are not authorized to perform this operation. Encoded authorization failure message: " +
+ "fEwXX6llx3cClm9J4pURgz1XPnJPrYexEbrJcLhFkwygMdOgx_-aEsj0LqRM6Kxt2HVI6prUhDwbJqBo9U2V7iRKZ" +
+ "T6ZdJvHH02cXmD0Jwl5vrTsf0PhBcWYlH5wl2qME7xTfdolEUr4CzumCiti7ETiO-RDdHqWlasBOW5bWsZ4GSpPdU" +
+ "06YAX0TfwVBs48uU5RpCHfz1uhSzez-3elbtp9CmTOHLt5pzJodiovccO55BQKYLPtmJcs6S9YLEEogmpI4Cb1D26" +
+ "fYahDh51jEmaohPnW5pb1nQe2yPEtuIhtRzNjhFCOOMwY5DBzNsymK-Gj6eJLm7FSGHee4AHLU_XmZMe_6bcLAiOx" +
+ "6Zdl65Kdd0hLcpwVxyZMi27HnYjAdqRlV3wuCW2PkhAW14qZQLfiuHZDEwnPe2PBGSlFcCmkQvJvX-YLoA7Uyc2wf" +
+ "NX5RJm38STwfiJSkQaNDhHKTWKiLOsgY4Gze6uZoG7zOcFXFRyaA4cbMmI76uyBO7j-9uQUCtBYqYto8x_9CUJcxI" +
+ "VC5SPG_C1mk-WoDMew01f0qy-bNaCgmJ9TOQGd08FyuT1SaMpCC0gX6mHuOnEgkFw3veBIowMpp9XcM-yc42fmIOp" +
+ "FOdvQO6uE9p55Qc-uXvsDTTvT3A7EeFU8a_YoAIt9UgNYM6VTvoprLz7dBI_P6C-bdPPZCY2amm-dJNVZelT6TbJB" +
+ "H_Vxh0fzeiSUBersy_QzB0moc-vPWgnB-IkgnYLV-4L3K0L2"
+ },
+ "ResponseMetadata": {
+ "RequestId": "01234567-89ab-cdef-0123-456789abcdef"
+ }
+ }, 'someCall')
+
+ def _make_botocore_exception(self):
+ return botocore.exceptions.EndpointConnectionError(endpoint_url='junk.endpoint')
+
+ def setUp(self):
+ pass
+
+ def test_is_boto3_error_code_single__raise__client(self):
+ thrown_exception = self._make_denied_exception()
+ caught_exception = None
+ try:
+ raise thrown_exception
+ except is_boto3_error_code('AccessDenied') as e:
+ caught_exception = e
+ caught = 'Code'
+ except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
+ caught_exception = e
+ caught = 'ClientError'
+ except botocore.exceptions.BotoCoreError as e:
+ caught_exception = e
+ caught = 'BotoCoreError'
+ except Exception as e:
+ caught_exception = e
+ caught = 'Exception'
+ self.assertEqual(caught_exception, thrown_exception)
+ self.assertEqual(caught, 'Code')
+
+ def test_is_boto3_error_code_single__raise__unexpected(self):
+ thrown_exception = self._make_unexpected_exception()
+ caught_exception = None
+ try:
+ raise thrown_exception
+ except is_boto3_error_code('AccessDenied') as e:
+ caught_exception = e
+ caught = 'Code'
+ except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
+ caught_exception = e
+ caught = 'ClientError'
+ except botocore.exceptions.BotoCoreError as e:
+ caught_exception = e
+ caught = 'BotoCoreError'
+ except Exception as e:
+ caught_exception = e
+ caught = 'Exception'
+ self.assertEqual(caught_exception, thrown_exception)
+ self.assertEqual(caught, 'ClientError')
+
+ def test_is_boto3_error_code_single__raise__botocore(self):
+ thrown_exception = self._make_botocore_exception()
+ caught_exception = None
+ try:
+ raise thrown_exception
+ except is_boto3_error_code('AccessDenied') as e:
+ caught_exception = e
+ caught = 'Code'
+ except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
+ caught_exception = e
+ caught = 'ClientError'
+ except botocore.exceptions.BotoCoreError as e:
+ caught_exception = e
+ caught = 'BotoCoreError'
+ except Exception as e:
+ caught_exception = e
+ caught = 'Exception'
+ self.assertEqual(caught_exception, thrown_exception)
+ self.assertEqual(caught, 'BotoCoreError')
+
+ def test_is_boto3_error_code_multiple__raise__client(self):
+ thrown_exception = self._make_denied_exception()
+ caught_exception = None
+ try:
+ raise thrown_exception
+ except is_boto3_error_code(['NotAccessDenied', 'AccessDenied']) as e:
+ caught_exception = e
+ caught = 'Code'
+ except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
+ caught_exception = e
+ caught = 'ClientError'
+ except botocore.exceptions.BotoCoreError as e:
+ caught_exception = e
+ caught = 'BotoCoreError'
+ except Exception as e:
+ caught_exception = e
+ caught = 'Exception'
+ self.assertEqual(caught_exception, thrown_exception)
+ self.assertEqual(caught, 'Code')
+ caught_exception = None
+ try:
+ raise thrown_exception
+ except is_boto3_error_code(['AccessDenied', 'NotAccessDenied']) as e:
+ caught_exception = e
+ caught = 'Code'
+ except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
+ caught_exception = e
+ caught = 'ClientError'
+ except botocore.exceptions.BotoCoreError as e:
+ caught_exception = e
+ caught = 'BotoCoreError'
+ except Exception as e:
+ caught_exception = e
+ caught = 'Exception'
+ self.assertEqual(caught_exception, thrown_exception)
+ self.assertEqual(caught, 'Code')
+
+ def test_is_boto3_error_code_multiple__raise__unexpected(self):
+ thrown_exception = self._make_unexpected_exception()
+ caught_exception = None
+ try:
+ raise thrown_exception
+ except is_boto3_error_code(['NotAccessDenied', 'AccessDenied']) as e:
+ caught_exception = e
+ caught = 'Code'
+ except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
+ caught_exception = e
+ caught = 'ClientError'
+ except botocore.exceptions.BotoCoreError as e:
+ caught_exception = e
+ caught = 'BotoCoreError'
+ except Exception as e:
+ caught_exception = e
+ caught = 'Exception'
+ self.assertEqual(caught_exception, thrown_exception)
+ self.assertEqual(caught, 'ClientError')
+
+ def test_is_boto3_error_code_multiple__raise__botocore(self):
+ thrown_exception = self._make_botocore_exception()
+ caught_exception = None
+ try:
+ raise thrown_exception
+ except is_boto3_error_code(['NotAccessDenied', 'AccessDenied']) as e:
+ caught_exception = e
+ caught = 'Code'
+ except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
+ caught_exception = e
+ caught = 'ClientError'
+ except botocore.exceptions.BotoCoreError as e:
+ caught_exception = e
+ caught = 'BotoCoreError'
+ except Exception as e:
+ caught_exception = e
+ caught = 'Exception'
+ self.assertEqual(caught_exception, thrown_exception)
+ self.assertEqual(caught, 'BotoCoreError')
+
+ def test_is_boto3_error_code_single__pass__client(self):
+ passed_exception = self._make_denied_exception()
+ returned_exception = is_boto3_error_code('AccessDenied', e=passed_exception)
+ self.assertTrue(isinstance(passed_exception, returned_exception))
+ self.assertTrue(issubclass(returned_exception, botocore.exceptions.ClientError))
+ self.assertFalse(issubclass(returned_exception, botocore.exceptions.BotoCoreError))
+ self.assertTrue(issubclass(returned_exception, Exception))
+ self.assertNotEqual(returned_exception.__name__, "NeverEverRaisedException")
+
+ def test_is_boto3_error_code_single__pass__unexpected(self):
+ passed_exception = self._make_unexpected_exception()
+ returned_exception = is_boto3_error_code('AccessDenied', e=passed_exception)
+ self.assertFalse(isinstance(passed_exception, returned_exception))
+ self.assertFalse(issubclass(returned_exception, botocore.exceptions.ClientError))
+ self.assertFalse(issubclass(returned_exception, botocore.exceptions.BotoCoreError))
+ self.assertTrue(issubclass(returned_exception, Exception))
+ self.assertEqual(returned_exception.__name__, "NeverEverRaisedException")
+
+ def test_is_boto3_error_code_single__pass__botocore(self):
+ passed_exception = self._make_botocore_exception()
+ returned_exception = is_boto3_error_code('AccessDenied', e=passed_exception)
+ self.assertFalse(isinstance(passed_exception, returned_exception))
+ self.assertFalse(issubclass(returned_exception, botocore.exceptions.ClientError))
+ self.assertFalse(issubclass(returned_exception, botocore.exceptions.BotoCoreError))
+ self.assertTrue(issubclass(returned_exception, Exception))
+ self.assertEqual(returned_exception.__name__, "NeverEverRaisedException")
+
+ def test_is_boto3_error_code_multiple__pass__client(self):
+ passed_exception = self._make_denied_exception()
+ returned_exception = is_boto3_error_code(['NotAccessDenied', 'AccessDenied'], e=passed_exception)
+ self.assertTrue(isinstance(passed_exception, returned_exception))
+ self.assertTrue(issubclass(returned_exception, botocore.exceptions.ClientError))
+ self.assertFalse(issubclass(returned_exception, botocore.exceptions.BotoCoreError))
+ self.assertTrue(issubclass(returned_exception, Exception))
+ self.assertNotEqual(returned_exception.__name__, "NeverEverRaisedException")
+ returned_exception = is_boto3_error_code(['AccessDenied', 'NotAccessDenied'], e=passed_exception)
+ self.assertTrue(isinstance(passed_exception, returned_exception))
+ self.assertTrue(issubclass(returned_exception, botocore.exceptions.ClientError))
+ self.assertFalse(issubclass(returned_exception, botocore.exceptions.BotoCoreError))
+ self.assertTrue(issubclass(returned_exception, Exception))
+ self.assertNotEqual(returned_exception.__name__, "NeverEverRaisedException")
+
+ def test_is_boto3_error_code_multiple__pass__unexpected(self):
+ passed_exception = self._make_unexpected_exception()
+ returned_exception = is_boto3_error_code(['NotAccessDenied', 'AccessDenied'], e=passed_exception)
+ self.assertFalse(isinstance(passed_exception, returned_exception))
+ self.assertFalse(issubclass(returned_exception, botocore.exceptions.ClientError))
+ self.assertFalse(issubclass(returned_exception, botocore.exceptions.BotoCoreError))
+ self.assertTrue(issubclass(returned_exception, Exception))
+ self.assertEqual(returned_exception.__name__, "NeverEverRaisedException")
+
+ def test_is_boto3_error_code_multiple__pass__botocore(self):
+ passed_exception = self._make_botocore_exception()
+ returned_exception = is_boto3_error_code(['NotAccessDenied', 'AccessDenied'], e=passed_exception)
+ self.assertFalse(isinstance(passed_exception, returned_exception))
+ self.assertFalse(issubclass(returned_exception, botocore.exceptions.ClientError))
+ self.assertFalse(issubclass(returned_exception, botocore.exceptions.BotoCoreError))
+ self.assertTrue(issubclass(returned_exception, Exception))
+ self.assertEqual(returned_exception.__name__, "NeverEverRaisedException")
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/core/test_is_boto3_error_message.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/core/test_is_boto3_error_message.py
new file mode 100644
index 00000000..550e89ef
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/core/test_is_boto3_error_message.py
@@ -0,0 +1,164 @@
+# -*- coding: utf-8 -*-
+# (c) 2020 Red Hat Inc.
+#
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+import botocore
+
+from ansible_collections.amazon.aws.tests.unit.compat import unittest
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3
+
+if not HAS_BOTO3:
+ pytestmark = pytest.mark.skip("test_is_boto3_error_message.py requires the python modules 'boto3' and 'botocore'")
+
+
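+# Same layout as test_is_boto3_error_code.py, but the matcher under test is
+# is_boto3_error_message(), which selects ClientErrors by a substring of
+# Error.Message rather than by Error.Code.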
+class Boto3ErrorTestSuite(unittest.TestCase):
+
+ def _make_denied_exception(self):
+ return botocore.exceptions.ClientError(
+ {
+ "Error": {
+ "Code": "AccessDenied",
+ "Message": "User: arn:aws:iam::123456789012:user/ExampleUser "
+ + "is not authorized to perform: iam:GetUser on resource: user ExampleUser"
+ },
+ "ResponseMetadata": {
+ "RequestId": "01234567-89ab-cdef-0123-456789abcdef"
+ }
+ }, 'getUser')
+
+ def _make_unexpected_exception(self):
+ return botocore.exceptions.ClientError(
+ {
+ "Error": {
+ "Code": "SomeThingWentWrong",
+ "Message": "Boom!"
+ },
+ "ResponseMetadata": {
+ "RequestId": "01234567-89ab-cdef-0123-456789abcdef"
+ }
+ }, 'someCall')
+
+ def _make_encoded_exception(self):
+ return botocore.exceptions.ClientError(
+ {
+ "Error": {
+ "Code": "AccessDenied",
+ "Message": "You are not authorized to perform this operation. Encoded authorization failure message: " +
+ "fEwXX6llx3cClm9J4pURgz1XPnJPrYexEbrJcLhFkwygMdOgx_-aEsj0LqRM6Kxt2HVI6prUhDwbJqBo9U2V7iRKZ" +
+ "T6ZdJvHH02cXmD0Jwl5vrTsf0PhBcWYlH5wl2qME7xTfdolEUr4CzumCiti7ETiO-RDdHqWlasBOW5bWsZ4GSpPdU" +
+ "06YAX0TfwVBs48uU5RpCHfz1uhSzez-3elbtp9CmTOHLt5pzJodiovccO55BQKYLPtmJcs6S9YLEEogmpI4Cb1D26" +
+ "fYahDh51jEmaohPnW5pb1nQe2yPEtuIhtRzNjhFCOOMwY5DBzNsymK-Gj6eJLm7FSGHee4AHLU_XmZMe_6bcLAiOx" +
+ "6Zdl65Kdd0hLcpwVxyZMi27HnYjAdqRlV3wuCW2PkhAW14qZQLfiuHZDEwnPe2PBGSlFcCmkQvJvX-YLoA7Uyc2wf" +
+ "NX5RJm38STwfiJSkQaNDhHKTWKiLOsgY4Gze6uZoG7zOcFXFRyaA4cbMmI76uyBO7j-9uQUCtBYqYto8x_9CUJcxI" +
+ "VC5SPG_C1mk-WoDMew01f0qy-bNaCgmJ9TOQGd08FyuT1SaMpCC0gX6mHuOnEgkFw3veBIowMpp9XcM-yc42fmIOp" +
+ "FOdvQO6uE9p55Qc-uXvsDTTvT3A7EeFU8a_YoAIt9UgNYM6VTvoprLz7dBI_P6C-bdPPZCY2amm-dJNVZelT6TbJB" +
+ "H_Vxh0fzeiSUBersy_QzB0moc-vPWgnB-IkgnYLV-4L3K0L2"
+ },
+ "ResponseMetadata": {
+ "RequestId": "01234567-89ab-cdef-0123-456789abcdef"
+ }
+ }, 'someCall')
+
+ def _make_botocore_exception(self):
+ return botocore.exceptions.EndpointConnectionError(endpoint_url='junk.endpoint')
+
+ def setUp(self):
+ pass
+
+ def test_is_boto3_error_message_single__raise__client(self):
+ caught_exception = None
+ thrown_exception = self._make_denied_exception()
+ # Test that a matching ClientError is caught by the message matcher
+ try:
+ raise thrown_exception
+ except is_boto3_error_message('is not authorized to perform') as e:
+ caught_exception = e
+ caught = 'Message'
+ except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
+ caught_exception = e
+ caught = 'ClientError'
+ except botocore.exceptions.BotoCoreError as e:
+ caught_exception = e
+ caught = 'BotoCoreError'
+ except Exception as e:
+ caught_exception = e
+ caught = 'Exception'
+ self.assertEqual(caught_exception, thrown_exception)
+ self.assertEqual(caught, 'Message')
+
+ def test_is_boto3_error_message_single__raise__unexpected(self):
+ caught_exception = None
+ thrown_exception = self._make_unexpected_exception()
+ # Test that a non-matching ClientError is not caught by the message matcher
+ try:
+ raise thrown_exception
+ except is_boto3_error_message('is not authorized to perform') as e:
+ caught_exception = e
+ caught = 'Message'
+ except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
+ caught_exception = e
+ caught = 'ClientError'
+ except botocore.exceptions.BotoCoreError as e:
+ caught_exception = e
+ caught = 'BotoCoreError'
+ except Exception as e:
+ caught_exception = e
+ caught = 'Exception'
+ self.assertEqual(caught_exception, thrown_exception)
+ self.assertEqual(caught, 'ClientError')
+
+ def test_is_boto3_error_message_single__raise__botocore(self):
+ caught_exception = None
+ thrown_exception = self._make_botocore_exception()
+ # Test that we don't catch BotoCoreError
+ try:
+ raise thrown_exception
+ except is_boto3_error_message('is not authorized to perform') as e:
+ caught_exception = e
+ caught = 'Message'
+ except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
+ caught_exception = e
+ caught = 'ClientError'
+ except botocore.exceptions.BotoCoreError as e:
+ caught_exception = e
+ caught = 'BotoCoreError'
+ except Exception as e:
+ caught_exception = e
+ caught = 'Exception'
+ self.assertEqual(caught_exception, thrown_exception)
+ self.assertEqual(caught, 'BotoCoreError')
+
+ def test_is_boto3_error_message_single__pass__client(self):
+ passed_exception = self._make_denied_exception()
+ returned_exception = is_boto3_error_message('is not authorized to perform', e=passed_exception)
+ self.assertTrue(isinstance(passed_exception, returned_exception))
+ self.assertTrue(issubclass(returned_exception, botocore.exceptions.ClientError))
+ self.assertFalse(issubclass(returned_exception, botocore.exceptions.BotoCoreError))
+ self.assertTrue(issubclass(returned_exception, Exception))
+ self.assertNotEqual(returned_exception.__name__, "NeverEverRaisedException")
+
+ def test_is_boto3_error_message_single__pass__unexpected(self):
+ passed_exception = self._make_unexpected_exception()
+ returned_exception = is_boto3_error_message('is not authorized to perform', e=passed_exception)
+ self.assertFalse(isinstance(passed_exception, returned_exception))
+ self.assertFalse(issubclass(returned_exception, botocore.exceptions.ClientError))
+ self.assertFalse(issubclass(returned_exception, botocore.exceptions.BotoCoreError))
+ self.assertTrue(issubclass(returned_exception, Exception))
+ self.assertEqual(returned_exception.__name__, "NeverEverRaisedException")
+
+ def test_is_boto3_error_message_single__pass__botocore(self):
+ passed_exception = self._make_botocore_exception()
+ returned_exception = is_boto3_error_message('is not authorized to perform', e=passed_exception)
+ self.assertFalse(isinstance(passed_exception, returned_exception))
+ self.assertFalse(issubclass(returned_exception, botocore.exceptions.ClientError))
+ self.assertFalse(issubclass(returned_exception, botocore.exceptions.BotoCoreError))
+ self.assertTrue(issubclass(returned_exception, Exception))
+ self.assertEqual(returned_exception.__name__, "NeverEverRaisedException")
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/core/test_scrub_none_parameters.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/core/test_scrub_none_parameters.py
new file mode 100644
index 00000000..084c3102
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/core/test_scrub_none_parameters.py
@@ -0,0 +1,56 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters
+import pytest
+
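+# Each tuple pairs an input dict with the expected result after scrub_none_parameters()
+# drops keys whose value is None (including inside nested dicts) while preserving
+# other falsy values such as False, empty dicts and empty lists.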
+scrub_none_test_data = [
+ (dict(),
+ dict()
+ ),
+ (dict(param1='something'),
+ dict(param1='something')
+ ),
+ (dict(param1=False),
+ dict(param1=False)
+ ),
+ (dict(param1='something', param2='something_else'),
+ dict(param1='something', param2='something_else')
+ ),
+ (dict(param1='something', param2=dict()),
+ dict(param1='something', param2=dict())
+ ),
+ (dict(param1='something', param2=None),
+ dict(param1='something')
+ ),
+ (dict(param1='something', param2=None, param3=None),
+ dict(param1='something')
+ ),
+ (dict(param1='something', param2=None, param3=None, param4='something_else'),
+ dict(param1='something', param4='something_else')
+ ),
+ (dict(param1=dict(sub_param1='something', sub_param2=None), param2=None, param3=None, param4='something_else'),
+ dict(param1=dict(sub_param1='something'), param4='something_else')
+ ),
+ (dict(param1=dict(sub_param1='something', sub_param2=dict(sub_sub_param1='another_thing')), param2=None, param3=None, param4='something_else'),
+ dict(param1=dict(sub_param1='something', sub_param2=dict(sub_sub_param1='another_thing')), param4='something_else')
+ ),
+ (dict(param1=dict(sub_param1='something', sub_param2=dict()), param2=None, param3=None, param4='something_else'),
+ dict(param1=dict(sub_param1='something', sub_param2=dict()), param4='something_else')
+ ),
+ (dict(param1=dict(sub_param1='something', sub_param2=False), param2=None, param3=None, param4='something_else'),
+ dict(param1=dict(sub_param1='something', sub_param2=False), param4='something_else')
+ ),
+ (dict(param1=None, param2=None),
+ dict()
+ ),
+ (dict(param1=None, param2=[]),
+ dict(param2=[])
+ )
+]
+
+
+@pytest.mark.parametrize("input_params, output_params", scrub_none_test_data)
+def test_scrub_none_parameters(input_params, output_params):
+
+ assert scrub_none_parameters(input_params) == output_params
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/ec2/__init__.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/ec2/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/ec2/__init__.py
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/ec2/test_aws.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/ec2/test_aws.py
new file mode 100644
index 00000000..91170b9e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/ec2/test_aws.py
@@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+# (c) 2015, Allen Sanabria <asanabria@linuxdynasty.org>
+#
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ # We explicitly want to know if boto3/botocore are available, they're used
+ # by the code we're testing even if we don't directly use them.
+ import boto3 # pylint: disable=unused-import
+ import botocore
+ HAS_BOTO3 = True
+except Exception:
+ HAS_BOTO3 = False
+
+import pytest
+
+from ansible_collections.amazon.aws.tests.unit.compat import unittest
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+if not HAS_BOTO3:
+ pytestmark = pytest.mark.skip("test_aws.py requires the python modules 'boto3' and 'botocore'")
+
+
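+# AWSRetry.backoff() retries ClientErrors carrying a retryable error code (for
+# example RequestLimitExceeded or InternalFailure) up to 'tries' attempts;
+# catch_extra_error_codes extends that set, and any other code is raised immediately.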
+class RetryTestCase(unittest.TestCase):
+
+ def test_no_failures(self):
+ self.counter = 0
+
+ @AWSRetry.backoff(tries=2, delay=0.1)
+ def no_failures():
+ self.counter += 1
+
+ r = no_failures()
+ self.assertEqual(self.counter, 1)
+
+ def test_extend_boto3_failures(self):
+ self.counter = 0
+ err_msg = {'Error': {'Code': 'MalformedPolicyDocument'}}
+
+ @AWSRetry.backoff(tries=2, delay=0.1, catch_extra_error_codes=['MalformedPolicyDocument'])
+ def extend_failures():
+ self.counter += 1
+ if self.counter < 2:
+ raise botocore.exceptions.ClientError(err_msg, 'You did something wrong.')
+ else:
+ return 'success'
+
+ r = extend_failures()
+ self.assertEqual(r, 'success')
+ self.assertEqual(self.counter, 2)
+
+ def test_retry_once(self):
+ self.counter = 0
+ err_msg = {'Error': {'Code': 'InternalFailure'}}
+
+ @AWSRetry.backoff(tries=2, delay=0.1)
+ def retry_once():
+ self.counter += 1
+ if self.counter < 2:
+ raise botocore.exceptions.ClientError(err_msg, 'Something went wrong!')
+ else:
+ return 'success'
+
+ r = retry_once()
+ self.assertEqual(r, 'success')
+ self.assertEqual(self.counter, 2)
+
+ def test_reached_limit(self):
+ self.counter = 0
+ err_msg = {'Error': {'Code': 'RequestLimitExceeded'}}
+
+ @AWSRetry.backoff(tries=4, delay=0.1)
+ def fail():
+ self.counter += 1
+ raise botocore.exceptions.ClientError(err_msg, 'toooo fast!!')
+
+ with self.assertRaises(botocore.exceptions.ClientError) as context:
+ fail()
+ self.assertEqual(context.exception.response['Error']['Code'], 'RequestLimitExceeded')
+ self.assertEqual(self.counter, 4)
+
+ def test_unexpected_exception_does_not_retry(self):
+ self.counter = 0
+ err_msg = {'Error': {'Code': 'AuthFailure'}}
+
+ @AWSRetry.backoff(tries=4, delay=0.1)
+ def raise_unexpected_error():
+ self.counter += 1
+ raise botocore.exceptions.ClientError(err_msg, 'unexpected error')
+
+ with self.assertRaises(botocore.exceptions.ClientError) as context:
+ raise_unexpected_error()
+ self.assertEqual(context.exception.response['Error']['Code'], 'AuthFailure')
+
+ self.assertEqual(self.counter, 1)
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/ec2/test_compare_policies.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/ec2/test_compare_policies.py
new file mode 100644
index 00000000..c821f7a4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/ec2/test_compare_policies.py
@@ -0,0 +1,341 @@
+# (c) 2017 Red Hat Inc.
+#
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import unittest
+
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies
+
+
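+# compare_policies() returns True when two IAM policy documents differ and False
+# when they are equivalent; statement order, bare strings versus single-element
+# lists, and quoted booleans/numbers are normalized before comparison.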
+class Ec2UtilsComparePolicies(unittest.TestCase):
+
+ # ========================================================
+ # Setup some initial data that we can use within our tests
+ # ========================================================
+ def setUp(self):
+ # A pair of simple IAM Trust relationships using bools, the first a
+ # native bool the second a quoted string
+ self.bool_policy_bool = {
+ 'Version': '2012-10-17',
+ 'Statement': [
+ {
+ "Action": "sts:AssumeRole",
+ "Condition": {
+ "Bool": {"aws:MultiFactorAuthPresent": True}
+ },
+ "Effect": "Allow",
+ "Principal": {"AWS": "arn:aws:iam::XXXXXXXXXXXX:root"},
+ "Sid": "AssumeRoleWithBoolean"
+ }
+ ]
+ }
+
+ self.bool_policy_string = {
+ 'Version': '2012-10-17',
+ 'Statement': [
+ {
+ "Action": "sts:AssumeRole",
+ "Condition": {
+ "Bool": {"aws:MultiFactorAuthPresent": "true"}
+ },
+ "Effect": "Allow",
+ "Principal": {"AWS": "arn:aws:iam::XXXXXXXXXXXX:root"},
+ "Sid": "AssumeRoleWithBoolean"
+ }
+ ]
+ }
+
+ # A pair of simple bucket policies using numbers, the first a
+ # native int the second a quoted string
+ self.numeric_policy_number = {
+ 'Version': '2012-10-17',
+ 'Statement': [
+ {
+ "Action": "s3:ListBucket",
+ "Condition": {
+ "NumericLessThanEquals": {"s3:max-keys": 15}
+ },
+ "Effect": "Allow",
+ "Resource": "arn:aws:s3:::examplebucket",
+ "Sid": "s3ListBucketWithNumericLimit"
+ }
+ ]
+ }
+
+ self.numeric_policy_string = {
+ 'Version': '2012-10-17',
+ 'Statement': [
+ {
+ "Action": "s3:ListBucket",
+ "Condition": {
+ "NumericLessThanEquals": {"s3:max-keys": "15"}
+ },
+ "Effect": "Allow",
+ "Resource": "arn:aws:s3:::examplebucket",
+ "Sid": "s3ListBucketWithNumericLimit"
+ }
+ ]
+ }
+
+ self.small_policy_one = {
+ 'Version': '2012-10-17',
+ 'Statement': [
+ {
+ 'Action': 's3:PutObjectAcl',
+ 'Sid': 'AddCannedAcl2',
+ 'Resource': 'arn:aws:s3:::test_policy/*',
+ 'Effect': 'Allow',
+ 'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']}
+ }
+ ]
+ }
+
+ # The same as small_policy_one, except the single resource is in a list and the contents of Statement are jumbled
+ self.small_policy_two = {
+ 'Version': '2012-10-17',
+ 'Statement': [
+ {
+ 'Effect': 'Allow',
+ 'Action': 's3:PutObjectAcl',
+ 'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']},
+ 'Resource': ['arn:aws:s3:::test_policy/*'],
+ 'Sid': 'AddCannedAcl2'
+ }
+ ]
+ }
+
+ self.version_policy_missing = {
+ 'Statement': [
+ {
+ 'Action': 's3:PutObjectAcl',
+ 'Sid': 'AddCannedAcl2',
+ 'Resource': 'arn:aws:s3:::test_policy/*',
+ 'Effect': 'Allow',
+ 'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']}
+ }
+ ]
+ }
+
+ self.version_policy_old = {
+ 'Version': '2008-10-17',
+ 'Statement': [
+ {
+ 'Action': 's3:PutObjectAcl',
+ 'Sid': 'AddCannedAcl2',
+ 'Resource': 'arn:aws:s3:::test_policy/*',
+ 'Effect': 'Allow',
+ 'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']}
+ }
+ ]
+ }
+
+ self.version_policy_new = {
+ 'Version': '2012-10-17',
+ 'Statement': [
+ {
+ 'Action': 's3:PutObjectAcl',
+ 'Sid': 'AddCannedAcl2',
+ 'Resource': 'arn:aws:s3:::test_policy/*',
+ 'Effect': 'Allow',
+ 'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']}
+ }
+ ]
+ }
+
+ self.larger_policy_one = {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "Test",
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": [
+ "arn:aws:iam::XXXXXXXXXXXX:user/testuser1",
+ "arn:aws:iam::XXXXXXXXXXXX:user/testuser2"
+ ]
+ },
+ "Action": "s3:PutObjectAcl",
+ "Resource": "arn:aws:s3:::test_policy/*"
+ },
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": "arn:aws:iam::XXXXXXXXXXXX:user/testuser2"
+ },
+ "Action": [
+ "s3:PutObject",
+ "s3:PutObjectAcl"
+ ],
+ "Resource": "arn:aws:s3:::test_policy/*"
+ }
+ ]
+ }
+
+ # The same as larger_policy_one, except having a list of length 1 and jumbled contents
+ self.larger_policy_two = {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Principal": {
+ "AWS": ["arn:aws:iam::XXXXXXXXXXXX:user/testuser2"]
+ },
+ "Effect": "Allow",
+ "Resource": "arn:aws:s3:::test_policy/*",
+ "Action": [
+ "s3:PutObject",
+ "s3:PutObjectAcl"
+ ]
+ },
+ {
+ "Action": "s3:PutObjectAcl",
+ "Principal": {
+ "AWS": [
+ "arn:aws:iam::XXXXXXXXXXXX:user/testuser1",
+ "arn:aws:iam::XXXXXXXXXXXX:user/testuser2"
+ ]
+ },
+ "Sid": "Test",
+ "Resource": "arn:aws:s3:::test_policy/*",
+ "Effect": "Allow"
+ }
+ ]
+ }
+
+ # Different than larger_policy_two: a different principal is given
+ self.larger_policy_three = {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Principal": {
+ "AWS": ["arn:aws:iam::XXXXXXXXXXXX:user/testuser2"]
+ },
+ "Effect": "Allow",
+ "Resource": "arn:aws:s3:::test_policy/*",
+ "Action": [
+ "s3:PutObject",
+ "s3:PutObjectAcl"]
+ },
+ {
+ "Action": "s3:PutObjectAcl",
+ "Principal": {
+ "AWS": [
+ "arn:aws:iam::XXXXXXXXXXXX:user/testuser1",
+ "arn:aws:iam::XXXXXXXXXXXX:user/testuser3"
+ ]
+ },
+ "Sid": "Test",
+ "Resource": "arn:aws:s3:::test_policy/*",
+ "Effect": "Allow"
+ }
+ ]
+ }
+
+ # Minimal policy using wildcarded Principal
+ self.wildcard_policy_one = {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Principal": {
+ "AWS": ["*"]
+ },
+ "Effect": "Allow",
+ "Resource": "arn:aws:s3:::test_policy/*",
+ "Action": [
+ "s3:PutObject",
+ "s3:PutObjectAcl"]
+ }
+ ]
+ }
+
+ # Minimal policy using wildcarded Principal
+ self.wildcard_policy_two = {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Principal": "*",
+ "Effect": "Allow",
+ "Resource": "arn:aws:s3:::test_policy/*",
+ "Action": [
+ "s3:PutObject",
+ "s3:PutObjectAcl"]
+ }
+ ]
+ }
+
+ # ========================================================
+ # ec2.compare_policies
+ # ========================================================
+
+ def test_compare_small_policies_without_differences(self):
+ """ Testing two small policies which are identical except for:
+ * The contents of the statement are in different orders
+ * The second policy contains a list of length one whereas in the first it is a string
+ """
+ self.assertFalse(compare_policies(self.small_policy_one, self.small_policy_two))
+
+ def test_compare_large_policies_without_differences(self):
+ """ Testing two larger policies which are identical except for:
+ * The statements are in different orders
+ * The contents of the statements are also in different orders
+ * The second contains a list of length one for the Principal whereas in the first it is a string
+ """
+ self.assertFalse(compare_policies(self.larger_policy_one, self.larger_policy_two))
+
+ def test_compare_larger_policies_with_difference(self):
+ """ Testing two larger policies which are identical except for:
+ * one different principal
+ """
+ self.assertTrue(compare_policies(self.larger_policy_two, self.larger_policy_three))
+
+ def test_compare_smaller_policy_with_larger(self):
+ """ Testing two policies of different sizes """
+ self.assertTrue(compare_policies(self.larger_policy_one, self.small_policy_one))
+
+ def test_compare_boolean_policy_bool_and_string_are_equal(self):
+ """ Testing two policies one using a quoted boolean, the other a bool """
+ self.assertFalse(compare_policies(self.bool_policy_string, self.bool_policy_bool))
+
+ def test_compare_numeric_policy_number_and_string_are_equal(self):
+ """ Testing two policies one using a quoted number, the other an int """
+ self.assertFalse(compare_policies(self.numeric_policy_string, self.numeric_policy_number))
+
+ def test_compare_version_policies_defaults_old(self):
+ """ Testing that a policy without Version is considered identical to one
+ with the 'old' Version (by default)
+ """
+ self.assertFalse(compare_policies(self.version_policy_old, self.version_policy_missing))
+ self.assertTrue(compare_policies(self.version_policy_new, self.version_policy_missing))
+
+ def test_compare_version_policies_default_disabled(self):
+ """ Testing that, with default_version=None, a policy without Version only matches another policy without Version
+ """
+ self.assertFalse(compare_policies(self.version_policy_missing, self.version_policy_missing, default_version=None))
+ self.assertTrue(compare_policies(self.version_policy_old, self.version_policy_missing, default_version=None))
+ self.assertTrue(compare_policies(self.version_policy_new, self.version_policy_missing, default_version=None))
+
+ def test_compare_version_policies_default_set(self):
+ """ Testing that a policy without Version is treated as having the explicitly
+ passed default_version, so it only matches a policy with that same Version
+ """
+ self.assertFalse(compare_policies(self.version_policy_missing, self.version_policy_missing, default_version="2012-10-17"))
+ self.assertTrue(compare_policies(self.version_policy_old, self.version_policy_missing, default_version="2012-10-17"))
+ self.assertFalse(compare_policies(self.version_policy_old, self.version_policy_missing, default_version="2008-10-17"))
+ self.assertFalse(compare_policies(self.version_policy_new, self.version_policy_missing, default_version="2012-10-17"))
+ self.assertTrue(compare_policies(self.version_policy_new, self.version_policy_missing, default_version="2008-10-17"))
+
+ def test_compare_version_policies_with_none(self):
+ """ Testing that comparing with no policy works
+ """
+ self.assertTrue(compare_policies(self.small_policy_one, None))
+ self.assertTrue(compare_policies(None, self.small_policy_one))
+ self.assertFalse(compare_policies(None, None))
+
+ def test_compare_wildcard_policies_without_differences(self):
+ """ Testing two small wildcard policies which are identical except for:
+ * Principal: "*" vs Principal: {"AWS": ["*"]}
+ """
+ self.assertFalse(compare_policies(self.wildcard_policy_one, self.wildcard_policy_two))
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/test_ec2.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/test_ec2.py
new file mode 100644
index 00000000..dbba5be4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/test_ec2.py
@@ -0,0 +1,191 @@
+# (c) 2017 Red Hat Inc.
+#
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import unittest
+
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import map_complex_type
+
+
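+# Covers the dict/list conversion helpers plus compare_aws_tags(), which returns
+# the tags that need setting and the tag keys that need removing; with
+# purge_tags=False no existing keys are reported for removal.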
+class Ec2Utils(unittest.TestCase):
+
+ # ========================================================
+ # Setup some initial data that we can use within our tests
+ # ========================================================
+ def setUp(self):
+
+ self.tag_example_boto3_list = [
+ {'Key': 'lowerCamel', 'Value': 'lowerCamelValue'},
+ {'Key': 'UpperCamel', 'Value': 'upperCamelValue'},
+ {'Key': 'Normal case', 'Value': 'Normal Value'},
+ {'Key': 'lower case', 'Value': 'lower case value'}
+ ]
+
+ self.tag_example_dict = {
+ 'lowerCamel': 'lowerCamelValue',
+ 'UpperCamel': 'upperCamelValue',
+ 'Normal case': 'Normal Value',
+ 'lower case': 'lower case value'
+ }
+
+ # ========================================================
+ # ec2.map_complex_type
+ # ========================================================
+ def test_map_complex_type_over_dict(self):
+ complex_type = {'minimum_healthy_percent': "75", 'maximum_percent': "150"}
+ type_map = {'minimum_healthy_percent': 'int', 'maximum_percent': 'int'}
+ complex_type_mapped = map_complex_type(complex_type, type_map)
+ complex_type_expected = {'minimum_healthy_percent': 75, 'maximum_percent': 150}
+ self.assertEqual(complex_type_mapped, complex_type_expected)
+
+ # ========================================================
+ # ec2.ansible_dict_to_boto3_filter_list
+ # ========================================================
+
+ def test_ansible_dict_with_string_to_boto3_filter_list(self):
+ filters = {'some-aws-id': 'i-01234567'}
+ filter_list_string = [
+ {
+ 'Name': 'some-aws-id',
+ 'Values': [
+ 'i-01234567',
+ ]
+ }
+ ]
+
+ converted_filters_list = ansible_dict_to_boto3_filter_list(filters)
+ self.assertEqual(converted_filters_list, filter_list_string)
+
+ def test_ansible_dict_with_boolean_to_boto3_filter_list(self):
+ filters = {'enabled': True}
+ filter_list_boolean = [
+ {
+ 'Name': 'enabled',
+ 'Values': [
+ 'true',
+ ]
+ }
+ ]
+
+ converted_filters_bool = ansible_dict_to_boto3_filter_list(filters)
+ self.assertEqual(converted_filters_bool, filter_list_boolean)
+
+ def test_ansible_dict_with_integer_to_boto3_filter_list(self):
+ filters = {'version': 1}
+ filter_list_integer = [
+ {
+ 'Name': 'version',
+ 'Values': [
+ '1',
+ ]
+ }
+ ]
+
+ converted_filters_int = ansible_dict_to_boto3_filter_list(filters)
+ self.assertEqual(converted_filters_int, filter_list_integer)
+
+ # ========================================================
+ # ec2.ansible_dict_to_boto3_tag_list
+ # ========================================================
+
+ def test_ansible_dict_to_boto3_tag_list(self):
+ converted_list = ansible_dict_to_boto3_tag_list(self.tag_example_dict)
+ sorted_converted_list = sorted(converted_list, key=lambda i: (i['Key']))
+ sorted_list = sorted(self.tag_example_boto3_list, key=lambda i: (i['Key']))
+ self.assertEqual(sorted_converted_list, sorted_list)
+
+ # ========================================================
+ # ec2.boto3_tag_list_to_ansible_dict
+ # ========================================================
+
+ def test_boto3_tag_list_to_ansible_dict(self):
+ converted_dict = boto3_tag_list_to_ansible_dict(self.tag_example_boto3_list)
+ self.assertEqual(converted_dict, self.tag_example_dict)
+
+ def test_boto3_tag_list_to_ansible_dict_empty(self):
+ # AWS returns [] when there are no tags
+ self.assertEqual(boto3_tag_list_to_ansible_dict([]), {})
+ # Minio returns [{}] when there are no tags
+ self.assertEqual(boto3_tag_list_to_ansible_dict([{}]), {})
+
+ # ========================================================
+ # ec2.compare_aws_tags
+ # ========================================================
+
+ def test_compare_aws_tags_equal(self):
+ new_dict = dict(self.tag_example_dict)
+ keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict)
+ self.assertEqual({}, keys_to_set)
+ self.assertEqual([], keys_to_unset)
+ keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict, purge_tags=False)
+ self.assertEqual({}, keys_to_set)
+ self.assertEqual([], keys_to_unset)
+ keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict, purge_tags=True)
+ self.assertEqual({}, keys_to_set)
+ self.assertEqual([], keys_to_unset)
+
+ def test_compare_aws_tags_removed(self):
+ new_dict = dict(self.tag_example_dict)
+ del new_dict['lowerCamel']
+ del new_dict['Normal case']
+ keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict)
+ self.assertEqual({}, keys_to_set)
+ self.assertEqual(set(['lowerCamel', 'Normal case']), set(keys_to_unset))
+ keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict, purge_tags=False)
+ self.assertEqual({}, keys_to_set)
+ self.assertEqual([], keys_to_unset)
+ keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict, purge_tags=True)
+ self.assertEqual({}, keys_to_set)
+ self.assertEqual(set(['lowerCamel', 'Normal case']), set(keys_to_unset))
+
+ def test_compare_aws_tags_added(self):
+ new_dict = dict(self.tag_example_dict)
+ new_keys = {'add_me': 'lower case', 'Me too!': 'Contributing'}
+ new_dict.update(new_keys)
+ keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict)
+ self.assertEqual(new_keys, keys_to_set)
+ self.assertEqual([], keys_to_unset)
+ keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict, purge_tags=False)
+ self.assertEqual(new_keys, keys_to_set)
+ self.assertEqual([], keys_to_unset)
+ keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict, purge_tags=True)
+ self.assertEqual(new_keys, keys_to_set)
+ self.assertEqual([], keys_to_unset)
+
+ def test_compare_aws_tags_changed(self):
+ new_dict = dict(self.tag_example_dict)
+ new_keys = {'UpperCamel': 'anotherCamelValue', 'Normal case': 'normal value'}
+ new_dict.update(new_keys)
+ keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict)
+ self.assertEqual(new_keys, keys_to_set)
+ self.assertEqual([], keys_to_unset)
+ keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict, purge_tags=False)
+ self.assertEqual(new_keys, keys_to_set)
+ self.assertEqual([], keys_to_unset)
+ keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict, purge_tags=True)
+ self.assertEqual(new_keys, keys_to_set)
+ self.assertEqual([], keys_to_unset)
+
+ def test_compare_aws_tags_complex_update(self):
+ # Adds 'Me too!', Changes 'UpperCamel' and removes 'Normal case'
+ new_dict = dict(self.tag_example_dict)
+ new_keys = {'UpperCamel': 'anotherCamelValue', 'Me too!': 'Contributing'}
+ new_dict.update(new_keys)
+ del new_dict['Normal case']
+ keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict)
+ self.assertEqual(new_keys, keys_to_set)
+ self.assertEqual(['Normal case'], keys_to_unset)
+ keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict, purge_tags=False)
+ self.assertEqual(new_keys, keys_to_set)
+ self.assertEqual([], keys_to_unset)
+ keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict, purge_tags=True)
+ self.assertEqual(new_keys, keys_to_set)
+ self.assertEqual(['Normal case'], keys_to_unset)
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/test_elbv2.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/test_elbv2.py
new file mode 100644
index 00000000..dba2c129
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/test_elbv2.py
@@ -0,0 +1,43 @@
+#
+# (c) 2021 Red Hat Inc.
+#
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ansible_collections.amazon.aws.plugins.module_utils.elbv2 as elbv2
+
+
+one_action = [
+ {
+ "ForwardConfig": {
+ "TargetGroupStickinessConfig": {"Enabled": False},
+ "TargetGroups": [
+ {
+ "TargetGroupArn": "arn:aws:elasticloadbalancing:us-east-1:966509639900:targetgroup/my-tg-58045486/5b231e04f663ae21",
+ "Weight": 1,
+ }
+ ],
+ },
+ "TargetGroupArn": "arn:aws:elasticloadbalancing:us-east-1:966509639900:targetgroup/my-tg-58045486/5b231e04f663ae21",
+ "Type": "forward",
+ }
+]
+
+
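+# The expectation below shows _prune_ForwardConfig() dropping a ForwardConfig
+# block that only forwards to the action's own TargetGroupArn, so otherwise
+# identical actions compare equal.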
+def test__prune_ForwardConfig():
+ expectation = {
+ "TargetGroupArn": "arn:aws:elasticloadbalancing:us-east-1:966509639900:targetgroup/my-tg-58045486/5b231e04f663ae21",
+ "Type": "forward",
+ }
+ assert elbv2._prune_ForwardConfig(one_action[0]) == expectation
+
+
+def test__prune_secret():
+ assert elbv2._prune_secret(one_action[0]) == one_action[0]
+
+
+def test__sort_actions_one_entry():
+ assert elbv2._sort_actions(one_action) == one_action
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/test_iam.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/test_iam.py
new file mode 100644
index 00000000..0bfa7484
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/module_utils/test_iam.py
@@ -0,0 +1,296 @@
+#
+# (c) 2020 Red Hat Inc.
+#
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+import botocore
+
+from ansible_collections.amazon.aws.tests.unit.compat.mock import MagicMock
+from ansible_collections.amazon.aws.tests.unit.compat import unittest
+
+import ansible_collections.amazon.aws.plugins.module_utils.iam as utils_iam
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3
+
+if not HAS_BOTO3:
+ pytestmark = pytest.mark.skip("test_iam.py requires the python modules 'boto3' and 'botocore'")
+
+
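+# get_aws_account_info() tries sts:GetCallerIdentity first, falls back to
+# iam:GetUser, and as a last resort parses the account and partition out of the
+# AccessDenied error message; anything it cannot parse ends in a module failure.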
+class IamUtilsTestSuite(unittest.TestCase):
+
+ def _make_denied_exception(self, partition):
+ return botocore.exceptions.ClientError(
+ {
+ "Error": {
+ "Code": "AccessDenied",
+ "Message": "User: arn:" + partition + ":iam::123456789012:user/ExampleUser "
+ + "is not authorized to perform: iam:GetUser on resource: user ExampleUser"
+ },
+ "ResponseMetadata": {
+ "RequestId": "01234567-89ab-cdef-0123-456789abcdef"
+ }
+ }, 'getUser')
+
+ def _make_unexpected_exception(self):
+ return botocore.exceptions.ClientError(
+ {
+ "Error": {
+ "Code": "SomeThingWentWrong",
+ "Message": "Boom!"
+ },
+ "ResponseMetadata": {
+ "RequestId": "01234567-89ab-cdef-0123-456789abcdef"
+ }
+ }, 'someCall')
+
+ def _make_encoded_exception(self):
+ return botocore.exceptions.ClientError(
+ {
+ "Error": {
+ "Code": "AccessDenied",
+ "Message": "You are not authorized to perform this operation. Encoded authorization failure message: " +
+ "fEwXX6llx3cClm9J4pURgz1XPnJPrYexEbrJcLhFkwygMdOgx_-aEsj0LqRM6Kxt2HVI6prUhDwbJqBo9U2V7iRKZ" +
+ "T6ZdJvHH02cXmD0Jwl5vrTsf0PhBcWYlH5wl2qME7xTfdolEUr4CzumCiti7ETiO-RDdHqWlasBOW5bWsZ4GSpPdU" +
+ "06YAX0TfwVBs48uU5RpCHfz1uhSzez-3elbtp9CmTOHLt5pzJodiovccO55BQKYLPtmJcs6S9YLEEogmpI4Cb1D26" +
+ "fYahDh51jEmaohPnW5pb1nQe2yPEtuIhtRzNjhFCOOMwY5DBzNsymK-Gj6eJLm7FSGHee4AHLU_XmZMe_6bcLAiOx" +
+ "6Zdl65Kdd0hLcpwVxyZMi27HnYjAdqRlV3wuCW2PkhAW14qZQLfiuHZDEwnPe2PBGSlFcCmkQvJvX-YLoA7Uyc2wf" +
+ "NX5RJm38STwfiJSkQaNDhHKTWKiLOsgY4Gze6uZoG7zOcFXFRyaA4cbMmI76uyBO7j-9uQUCtBYqYto8x_9CUJcxI" +
+ "VC5SPG_C1mk-WoDMew01f0qy-bNaCgmJ9TOQGd08FyuT1SaMpCC0gX6mHuOnEgkFw3veBIowMpp9XcM-yc42fmIOp" +
+ "FOdvQO6uE9p55Qc-uXvsDTTvT3A7EeFU8a_YoAIt9UgNYM6VTvoprLz7dBI_P6C-bdPPZCY2amm-dJNVZelT6TbJB" +
+ "H_Vxh0fzeiSUBersy_QzB0moc-vPWgnB-IkgnYLV-4L3K0L2"
+ },
+ "ResponseMetadata": {
+ "RequestId": "01234567-89ab-cdef-0123-456789abcdef"
+ }
+ }, 'someCall')
+
+ def _make_botocore_exception(self):
+ return botocore.exceptions.EndpointConnectionError(endpoint_url='junk.endpoint')
+
+ def setUp(self):
+ self.sts_client = MagicMock()
+ self.iam_client = MagicMock()
+ self.module = MagicMock()
+ clients = {'sts': self.sts_client, 'iam': self.iam_client}
+
+ def get_client(*args, **kwargs):
+ return clients[args[0]]
+
+ self.module.client.side_effect = get_client
+ self.module.fail_json_aws.side_effect = SystemExit(1)
+ self.module.fail_json.side_effect = SystemExit(2)
+
+ # ========== get_aws_account_id ============
+ # This is just a minimal (compatibility) wrapper around get_aws_account_info
+ # Perform some basic testing and call it a day.
+
+ # Test the simplest case - We're permitted to call GetCallerIdentity
+ def test_get_aws_account_id__caller_success(self):
+ # Prepare
+ self.sts_client.get_caller_identity.side_effect = [{'UserId': 'AIDA1234567890ABCDEFG',
+ 'Account': '123456789012',
+ 'Arn': 'arn:aws:iam::123456789012:user/ExampleUser'}]
+ # Run module
+ return_value = utils_iam.get_aws_account_id(self.module)
+ # Check we only saw the calls we mocked out
+ self.module.client.assert_called_once()
+ self.sts_client.get_caller_identity.assert_called_once()
+ # Check we got the values back we expected.
+ self.assertEqual(return_value, '123456789012')
+
+ # Test the simplest case - We're permitted to call GetCallerIdentity
+ # (China partition)
+ def test_get_aws_account_id__caller_success_cn(self):
+ # Prepare
+ self.sts_client.get_caller_identity.side_effect = [{'UserId': 'AIDA1234567890ABCDEFG',
+ 'Account': '123456789012',
+ 'Arn': 'arn:aws-cn:iam::123456789012:user/ExampleUser'}]
+ # Run module
+ return_value = utils_iam.get_aws_account_id(self.module)
+ # Check we only saw the calls we mocked out
+ self.module.client.assert_called_once()
+ self.sts_client.get_caller_identity.assert_called_once()
+ # Check we got the values back we expected.
+ self.assertEqual(return_value, '123456789012')
+
+ # ========== get_aws_account_info ============
+ # Test the simplest case - We're permitted to call GetCallerIdentity
+ def test_get_aws_account_info__caller_success(self):
+ # Prepare
+ self.sts_client.get_caller_identity.side_effect = [{'UserId': 'AIDA1234567890ABCDEFG',
+ 'Account': '123456789012',
+ 'Arn': 'arn:aws:iam::123456789012:user/ExampleUser'}]
+ # Run module
+ return_value = utils_iam.get_aws_account_info(self.module)
+ # Check we only saw the calls we mocked out
+ self.module.client.assert_called_once()
+ self.sts_client.get_caller_identity.assert_called_once()
+ # Check we got the values back we expected.
+ self.assertEqual(return_value, ('123456789012', 'aws',))
+
+ # (China partition)
+ def test_get_aws_account_info__caller_success_cn(self):
+ # Prepare
+ self.sts_client.get_caller_identity.side_effect = [{'UserId': 'AIDA1234567890ABCDEFG',
+ 'Account': '123456789012',
+ 'Arn': 'arn:aws-cn:iam::123456789012:user/ExampleUser'}]
+ # Run module
+ return_value = utils_iam.get_aws_account_info(self.module)
+ # Check we only saw the calls we mocked out
+ self.module.client.assert_called_once()
+ self.sts_client.get_caller_identity.assert_called_once()
+ # Check we got the values back we expected.
+ self.assertEqual(return_value, ('123456789012', 'aws-cn',))
+
+ # (US-Gov partition)
+ def test_get_aws_account_info__caller_success_gov(self):
+ # Prepare
+ self.sts_client.get_caller_identity.side_effect = [{'UserId': 'AIDA1234567890ABCDEFG',
+ 'Account': '123456789012',
+ 'Arn': 'arn:aws-us-gov:iam::123456789012:user/ExampleUser'}]
+ # Run module
+ return_value = utils_iam.get_aws_account_info(self.module)
+ # Check we only saw the calls we mocked out
+ self.module.client.assert_called_once()
+ self.sts_client.get_caller_identity.assert_called_once()
+ # Check we got the values back we expected.
+ self.assertEqual(return_value, ('123456789012', 'aws-us-gov',))
+
+ # If sts:get_caller_identity fails (most likely something weird on the
+ # client side), then try a few extra options.
+ # Test response if STS fails and we need to fall back to GetUser
+ def test_get_aws_account_info__user_success(self):
+ # Prepare
+ self.sts_client.get_caller_identity.side_effect = [self._make_botocore_exception()]
+ self.iam_client.get_user.side_effect = [{"User": {"Path": "/", "UserName": "ExampleUser", "UserId": "AIDA1234567890ABCDEFG",
+ "Arn": "arn:aws:iam::123456789012:user/ExampleUser", "CreateDate": "2020-09-08T14:04:32Z"}}]
+ # Run module
+ return_value = utils_iam.get_aws_account_info(self.module)
+ # Check we only saw the calls we mocked out
+ self.assertEqual(self.module.client.call_count, 2)
+ self.sts_client.get_caller_identity.assert_called_once()
+ self.iam_client.get_user.assert_called_once()
+ # Check we got the values back we expected.
+ self.assertEqual(return_value, ('123456789012', 'aws',))
+
+ # (China partition)
+ def test_get_aws_account_info__user_success_cn(self):
+ # Prepare
+ self.sts_client.get_caller_identity.side_effect = [self._make_botocore_exception()]
+ self.iam_client.get_user.side_effect = [{"User": {"Path": "/", "UserName": "ExampleUser", "UserId": "AIDA1234567890ABCDEFG",
+ "Arn": "arn:aws-cn:iam::123456789012:user/ExampleUser", "CreateDate": "2020-09-08T14:04:32Z"}}]
+ # Run module
+ return_value = utils_iam.get_aws_account_info(self.module)
+ # Check we only saw the calls we mocked out
+ self.assertEqual(self.module.client.call_count, 2)
+ self.sts_client.get_caller_identity.assert_called_once()
+ self.iam_client.get_user.assert_called_once()
+ # Check we got the values back we expected.
+ self.assertEqual(return_value, ('123456789012', 'aws-cn',))
+
+ # (US-Gov partition)
+ def test_get_aws_account_info__user_success_gov(self):
+ # Prepare
+ self.sts_client.get_caller_identity.side_effect = [self._make_botocore_exception()]
+ self.iam_client.get_user.side_effect = [{"User": {"Path": "/", "UserName": "ExampleUser", "UserId": "AIDA1234567890ABCDEFG",
+ "Arn": "arn:aws-us-gov:iam::123456789012:user/ExampleUser", "CreateDate": "2020-09-08T14:04:32Z"}}]
+ # Run module
+ return_value = utils_iam.get_aws_account_info(self.module)
+ # Check we only saw the calls we mocked out
+ self.assertEqual(self.module.client.call_count, 2)
+ self.sts_client.get_caller_identity.assert_called_once()
+ self.iam_client.get_user.assert_called_once()
+ # Check we got the values back we expected.
+ self.assertEqual(return_value, ('123456789012', 'aws-us-gov',))
+
+ # Test response if STS and IAM fails and we need to fall back to the denial message
+ def test_get_aws_account_info__user_denied(self):
+ # Prepare
+ self.sts_client.get_caller_identity.side_effect = [self._make_botocore_exception()]
+ self.iam_client.get_user.side_effect = [self._make_denied_exception('aws')]
+ # Run module
+ return_value = utils_iam.get_aws_account_info(self.module)
+ # Check we only saw the calls we mocked out
+ self.assertEqual(self.module.client.call_count, 2)
+ self.sts_client.get_caller_identity.assert_called_once()
+ self.iam_client.get_user.assert_called_once()
+ # Check we got the values back we expected.
+ self.assertEqual(return_value, ('123456789012', 'aws',))
+
+ # (China partition)
+ def test_get_aws_account_info__user_denied_cn(self):
+ # Prepare
+ self.sts_client.get_caller_identity.side_effect = [self._make_botocore_exception()]
+ self.iam_client.get_user.side_effect = [self._make_denied_exception('aws-cn')]
+ # Run module
+ return_value = utils_iam.get_aws_account_info(self.module)
+ # Check we only saw the calls we mocked out
+ self.assertEqual(self.module.client.call_count, 2)
+ self.sts_client.get_caller_identity.assert_called_once()
+ self.iam_client.get_user.assert_called_once()
+ # Check we got the values back we expected.
+ self.assertEqual(return_value, ('123456789012', 'aws-cn',))
+
+ # (US-Gov partition)
+ def test_get_aws_account_info__user_denied_gov(self):
+ # Prepare
+ self.sts_client.get_caller_identity.side_effect = [self._make_botocore_exception()]
+ self.iam_client.get_user.side_effect = [self._make_denied_exception('aws-us-gov')]
+ # Run module
+ return_value = utils_iam.get_aws_account_info(self.module)
+ # Check we only saw the calls we mocked out
+ self.assertEqual(self.module.client.call_count, 2)
+ self.sts_client.get_caller_identity.assert_called_once()
+ self.iam_client.get_user.assert_called_once()
+ # Check we got the values back we expected.
+ self.assertEqual(return_value, ('123456789012', 'aws-us-gov',))
+
+ # Test that we fail gracefully if Boto throws exceptions at us...
+ def test_get_aws_account_info__boto_failures(self):
+ # Prepare
+ self.sts_client.get_caller_identity.side_effect = [self._make_botocore_exception()]
+ self.iam_client.get_user.side_effect = [self._make_botocore_exception()]
+ # Run module
+ with pytest.raises(SystemExit) as e:
+ utils_iam.get_aws_account_info(self.module)
+ # Check we only saw the calls we mocked out
+ self.assertEqual(self.module.client.call_count, 2)
+ self.sts_client.get_caller_identity.assert_called_once()
+ self.iam_client.get_user.assert_called_once()
+ # Check we got the values back we expected.
+ assert e.type == SystemExit
+ assert e.value.code == 1 # 1 == fail_json_aws
+
+ def test_get_aws_account_info__client_failures(self):
+ # Prepare
+ self.sts_client.get_caller_identity.side_effect = [self._make_unexpected_exception()]
+ self.iam_client.get_user.side_effect = [self._make_unexpected_exception()]
+ # Run module
+ with pytest.raises(SystemExit) as e:
+ utils_iam.get_aws_account_info(self.module)
+ # Check we only saw the calls we mocked out
+ self.assertEqual(self.module.client.call_count, 2)
+ self.sts_client.get_caller_identity.assert_called_once()
+ self.iam_client.get_user.assert_called_once()
+ # Check we got the values back we expected.
+ assert e.type == SystemExit
+ assert e.value.code == 1 # 1 == fail_json_aws
+
+ def test_get_aws_account_info__encoded_failures(self):
+ # Prepare
+ self.sts_client.get_caller_identity.side_effect = [self._make_encoded_exception()]
+ self.iam_client.get_user.side_effect = [self._make_encoded_exception()]
+ # Run module
+ with pytest.raises(SystemExit) as e:
+ utils_iam.get_aws_account_info(self.module)
+ # Check we only saw the calls we mocked out
+ self.assertEqual(self.module.client.call_count, 2)
+ self.sts_client.get_caller_identity.assert_called_once()
+ self.iam_client.get_user.assert_called_once()
+ # Check we got the values back we expected.
+ assert e.type == SystemExit
+ assert e.value.code == 1 # 1 == fail_json_aws (we couldn't parse the AccessDenied errors)
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/__init__.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/__init__.py
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/inventory/__init__.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/inventory/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/inventory/__init__.py
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/inventory/test_aws_ec2.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/inventory/test_aws_ec2.py
new file mode 100644
index 00000000..12ef2f6c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/inventory/test_aws_ec2.py
@@ -0,0 +1,189 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2017 Sloane Hertel <shertel@redhat.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+import datetime
+
+# Skip these tests entirely if boto3/botocore, which InventoryModule and instance_data_filter_to_boto_attr require, are unavailable
+boto3 = pytest.importorskip('boto3')
+botocore = pytest.importorskip('botocore')
+
+from ansible.errors import AnsibleError
+from ansible_collections.amazon.aws.plugins.inventory.aws_ec2 import InventoryModule, instance_data_filter_to_boto_attr
+
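+# Canned EC2 DescribeInstances-style reservation used as shared test data below.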
+instances = {
+ u'Instances': [
+ {u'Monitoring': {u'State': 'disabled'},
+ u'PublicDnsName': 'ec2-12-345-67-890.compute-1.amazonaws.com',
+ u'State': {u'Code': 16, u'Name': 'running'},
+ u'EbsOptimized': False,
+ u'LaunchTime': datetime.datetime(2017, 10, 31, 12, 59, 25),
+ u'PublicIpAddress': '12.345.67.890',
+ u'PrivateIpAddress': '098.76.54.321',
+ u'ProductCodes': [],
+ u'VpcId': 'vpc-12345678',
+ u'StateTransitionReason': '',
+ u'InstanceId': 'i-00000000000000000',
+ u'EnaSupport': True,
+ u'ImageId': 'ami-12345678',
+ u'PrivateDnsName': 'ip-098-76-54-321.ec2.internal',
+ u'KeyName': 'testkey',
+ u'SecurityGroups': [{u'GroupName': 'default', u'GroupId': 'sg-12345678'}],
+ u'ClientToken': '',
+ u'SubnetId': 'subnet-12345678',
+ u'InstanceType': 't2.micro',
+ u'NetworkInterfaces': [
+ {u'Status': 'in-use',
+ u'MacAddress': '12:a0:50:42:3d:a4',
+ u'SourceDestCheck': True,
+ u'VpcId': 'vpc-12345678',
+ u'Description': '',
+ u'NetworkInterfaceId': 'eni-12345678',
+ u'PrivateIpAddresses': [
+ {u'PrivateDnsName': 'ip-098-76-54-321.ec2.internal',
+ u'PrivateIpAddress': '098.76.54.321',
+ u'Primary': True,
+ u'Association':
+ {u'PublicIp': '12.345.67.890',
+ u'PublicDnsName': 'ec2-12-345-67-890.compute-1.amazonaws.com',
+ u'IpOwnerId': 'amazon'}}],
+ u'PrivateDnsName': 'ip-098-76-54-321.ec2.internal',
+ u'Attachment':
+ {u'Status': 'attached',
+ u'DeviceIndex': 0,
+ u'DeleteOnTermination': True,
+ u'AttachmentId': 'eni-attach-12345678',
+ u'AttachTime': datetime.datetime(2017, 10, 31, 12, 59, 25)},
+ u'Groups': [
+ {u'GroupName': 'default',
+ u'GroupId': 'sg-12345678'}],
+ u'Ipv6Addresses': [],
+ u'OwnerId': '123456789000',
+ u'PrivateIpAddress': '098.76.54.321',
+ u'SubnetId': 'subnet-12345678',
+ u'Association':
+ {u'PublicIp': '12.345.67.890',
+ u'PublicDnsName': 'ec2-12-345-67-890.compute-1.amazonaws.com',
+ u'IpOwnerId': 'amazon'}}],
+ u'SourceDestCheck': True,
+ u'Placement':
+ {u'Tenancy': 'default',
+ u'GroupName': '',
+ u'AvailabilityZone': 'us-east-1c'},
+ u'Hypervisor': 'xen',
+ u'BlockDeviceMappings': [
+ {u'DeviceName': '/dev/xvda',
+ u'Ebs':
+ {u'Status': 'attached',
+ u'DeleteOnTermination': True,
+ u'VolumeId': 'vol-01234567890000000',
+ u'AttachTime': datetime.datetime(2017, 10, 31, 12, 59, 26)}}],
+ u'Architecture': 'x86_64',
+ u'RootDeviceType': 'ebs',
+ u'RootDeviceName': '/dev/xvda',
+ u'VirtualizationType': 'hvm',
+ u'Tags': [{u'Value': 'test', u'Key': 'ansible'}, {u'Value': 'aws_ec2', u'Key': 'Name'}],
+ u'AmiLaunchIndex': 0}],
+ u'ReservationId': 'r-01234567890000000',
+ u'Groups': [],
+ u'OwnerId': '123456789000'
+}
+
+
+@pytest.fixture(scope="module")
+def inventory():
+ return InventoryModule()
+
+
+def test_compile_values(inventory):
+ found_value = instances['Instances'][0]
+ chain_of_keys = instance_data_filter_to_boto_attr['instance.group-id']
+ for attr in chain_of_keys:
+ found_value = inventory._compile_values(found_value, attr)
+ assert found_value == "sg-12345678"
+
+
+def test_get_boto_attr_chain(inventory):
+ instance = instances['Instances'][0]
+ assert inventory._get_boto_attr_chain('network-interface.addresses.private-ip-address', instance) == "098.76.54.321"
+
+
+def test_boto3_conn(inventory):
+ inventory._options = {"aws_profile": "first_precedence",
+ "aws_access_key": "test_access_key",
+ "aws_secret_key": "test_secret_key",
+ "aws_security_token": "test_security_token",
+ "iam_role_arn": None}
+ inventory._set_credentials()
+    with pytest.raises(AnsibleError) as error_message:
+        for connection, region in inventory._boto3_conn(regions=['us-east-1']):
+            pass
+    assert "Insufficient credentials found" in str(error_message.value)
+
+
+def test_get_hostname_default(inventory):
+ instance = instances['Instances'][0]
+ assert inventory._get_hostname(instance, hostnames=None) == "ec2-12-345-67-890.compute-1.amazonaws.com"
+
+
+def test_get_hostname(inventory):
+ hostnames = ['ip-address', 'dns-name']
+ instance = instances['Instances'][0]
+ assert inventory._get_hostname(instance, hostnames) == "12.345.67.890"
+
+
+def test_get_hostname_dict(inventory):
+ hostnames = [{'name': 'private-ip-address', 'separator': '_', 'prefix': 'tag:Name'}]
+ instance = instances['Instances'][0]
+ assert inventory._get_hostname(instance, hostnames) == "aws_ec2_098.76.54.321"
+
+
+def test_set_credentials(inventory):
+ inventory._options = {'aws_access_key': 'test_access_key',
+ 'aws_secret_key': 'test_secret_key',
+ 'aws_security_token': 'test_security_token',
+ 'aws_profile': 'test_profile',
+ 'iam_role_arn': 'arn:aws:iam::112233445566:role/test-role'}
+ inventory._set_credentials()
+
+ assert inventory.boto_profile == "test_profile"
+ assert inventory.aws_access_key_id == "test_access_key"
+ assert inventory.aws_secret_access_key == "test_secret_key"
+ assert inventory.aws_security_token == "test_security_token"
+ assert inventory.iam_role_arn == "arn:aws:iam::112233445566:role/test-role"
+
+
+def test_insufficient_credentials(inventory):
+ inventory._options = {
+ 'aws_access_key': None,
+ 'aws_secret_key': None,
+ 'aws_security_token': None,
+ 'aws_profile': None,
+ 'iam_role_arn': None
+ }
+    with pytest.raises(AnsibleError) as error_message:
+        inventory._set_credentials()
+    assert "Insufficient credentials found" in str(error_message.value)
+
+
+def test_verify_file_bad_config(inventory):
+ assert inventory.verify_file('not_aws_config.yml') is False
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/lookup/__init__.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/lookup/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/lookup/__init__.py
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/lookup/fixtures/avi.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/lookup/fixtures/avi.json
new file mode 100644
index 00000000..ae89ca68
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/lookup/fixtures/avi.json
@@ -0,0 +1,104 @@
+{
+ "mock_single_obj": {
+ "_last_modified": "",
+ "cloud_ref": "https://192.0.2.132/api/cloud/cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
+ "dhcp_enabled": true,
+ "exclude_discovered_subnets": false,
+ "name": "PG-123",
+ "synced_from_se": true,
+ "tenant_ref": "https://192.0.2.132/api/tenant/admin",
+ "url": "https://192.0.2.132/api/network/dvportgroup-2084-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
+ "uuid": "dvportgroup-2084-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
+ "vcenter_dvs": true,
+ "vimgrnw_ref": "https://192.0.2.132/api/vimgrnwruntime/dvportgroup-2084-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
+ "vrf_context_ref": "https://192.0.2.132/api/vrfcontext/vrfcontext-31f1b55f-319c-44eb-862f-69d79ffdf295"
+ },
+ "mock_multiple_obj": {
+ "results": [
+ {
+ "_last_modified": "",
+ "cloud_ref": "https://192.0.2.132/api/cloud/cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
+ "dhcp_enabled": true,
+ "exclude_discovered_subnets": false,
+ "name": "J-PG-0682",
+ "synced_from_se": true,
+ "tenant_ref": "https://192.0.2.132/api/tenant/admin",
+ "url": "https://192.0.2.132/api/network/dvportgroup-2084-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
+ "uuid": "dvportgroup-2084-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
+ "vcenter_dvs": true,
+ "vimgrnw_ref": "https://192.0.2.132/api/vimgrnwruntime/dvportgroup-2084-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
+ "vrf_context_ref": "https://192.0.2.132/api/vrfcontext/vrfcontext-31f1b55f-319c-44eb-862f-69d79ffdf295"
+ },
+ {
+ "_last_modified": "",
+ "cloud_ref": "https://192.0.2.132/api/cloud/cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
+ "dhcp_enabled": true,
+ "exclude_discovered_subnets": false,
+ "name": "J-PG-0231",
+ "synced_from_se": true,
+ "tenant_ref": "https://192.0.2.132/api/tenant/admin",
+ "url": "https://192.0.2.132/api/network/dvportgroup-1627-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
+ "uuid": "dvportgroup-1627-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
+ "vcenter_dvs": true,
+ "vimgrnw_ref": "https://192.0.2.132/api/vimgrnwruntime/dvportgroup-1627-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
+ "vrf_context_ref": "https://192.0.2.132/api/vrfcontext/vrfcontext-31f1b55f-319c-44eb-862f-69d79ffdf295"
+ },
+ {
+ "_last_modified": "",
+ "cloud_ref": "https://192.0.2.132/api/cloud/cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
+ "dhcp_enabled": true,
+ "exclude_discovered_subnets": false,
+ "name": "J-PG-0535",
+ "synced_from_se": true,
+ "tenant_ref": "https://192.0.2.132/api/tenant/admin",
+ "url": "https://192.0.2.132/api/network/dvportgroup-1934-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
+ "uuid": "dvportgroup-1934-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
+ "vcenter_dvs": true,
+ "vimgrnw_ref": "https://192.0.2.132/api/vimgrnwruntime/dvportgroup-1934-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
+ "vrf_context_ref": "https://192.0.2.132/api/vrfcontext/vrfcontext-31f1b55f-319c-44eb-862f-69d79ffdf295"
+ },
+ {
+ "_last_modified": "",
+ "cloud_ref": "https://192.0.2.132/api/cloud/cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
+ "dhcp_enabled": true,
+ "exclude_discovered_subnets": false,
+ "name": "J-PG-0094",
+ "synced_from_se": true,
+ "tenant_ref": "https://192.0.2.132/api/tenant/admin",
+ "url": "https://192.0.2.132/api/network/dvportgroup-1458-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
+ "uuid": "dvportgroup-1458-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
+ "vcenter_dvs": true,
+ "vimgrnw_ref": "https://192.0.2.132/api/vimgrnwruntime/dvportgroup-1458-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
+ "vrf_context_ref": "https://192.0.2.132/api/vrfcontext/vrfcontext-31f1b55f-319c-44eb-862f-69d79ffdf295"
+ },
+ {
+ "_last_modified": "",
+ "cloud_ref": "https://192.0.2.132/api/cloud/cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
+ "dhcp_enabled": true,
+ "exclude_discovered_subnets": false,
+ "name": "J-PG-0437",
+ "synced_from_se": true,
+ "tenant_ref": "https://192.0.2.132/api/tenant/admin",
+ "url": "https://192.0.2.132/api/network/dvportgroup-1836-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
+ "uuid": "dvportgroup-1836-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
+ "vcenter_dvs": true,
+ "vimgrnw_ref": "https://192.0.2.132/api/vimgrnwruntime/dvportgroup-1836-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
+ "vrf_context_ref": "https://192.0.2.132/api/vrfcontext/vrfcontext-31f1b55f-319c-44eb-862f-69d79ffdf295"
+ },
+ {
+ "_last_modified": "",
+ "cloud_ref": "https://192.0.2.132/api/cloud/cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
+ "dhcp_enabled": true,
+ "exclude_discovered_subnets": false,
+ "name": "J-PG-0673",
+ "synced_from_se": true,
+ "tenant_ref": "https://192.0.2.132/api/tenant/admin",
+ "url": "https://192.0.2.132/api/network/dvportgroup-2075-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
+ "uuid": "dvportgroup-2075-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
+ "vcenter_dvs": true,
+ "vimgrnw_ref": "https://192.0.2.132/api/vimgrnwruntime/dvportgroup-2075-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
+ "vrf_context_ref": "https://192.0.2.132/api/vrfcontext/vrfcontext-31f1b55f-319c-44eb-862f-69d79ffdf295"
+ }
+ ]
+ }
+}
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/lookup/test_aws_secret.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/lookup/test_aws_secret.py
new file mode 100644
index 00000000..3ac92824
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/lookup/test_aws_secret.py
@@ -0,0 +1,218 @@
+# (c) 2019 Robert Williams
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import pytest
+import datetime
+from copy import copy
+
+from ansible.errors import AnsibleError
+from ansible.plugins.loader import lookup_loader
+
+from ansible_collections.amazon.aws.plugins.lookup import aws_secret
+
+try:
+ import boto3
+ from botocore.exceptions import ClientError
+except ImportError:
+ pytestmark = pytest.mark.skip("This test requires the boto3 and botocore Python libraries")
+
+
+@pytest.fixture
+def dummy_credentials():
+ dummy_credentials = {}
+ dummy_credentials['boto_profile'] = None
+ dummy_credentials['aws_secret_key'] = "notasecret"
+ dummy_credentials['aws_access_key'] = "notakey"
+ dummy_credentials['aws_security_token'] = None
+ dummy_credentials['region'] = 'eu-west-1'
+ return dummy_credentials
+
+
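+# Canned Secrets Manager get_secret_value-style response shared by the tests below.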
+simple_variable_success_response = {
+ 'Name': 'secret',
+ 'VersionId': 'cafe8168-e6ce-4e59-8830-5b143faf6c52',
+ 'SecretString': '{"secret":"simplesecret"}',
+ 'VersionStages': ['AWSCURRENT'],
+ 'ResponseMetadata': {
+ 'RequestId': '21099462-597c-490a-800f-8b7a41e5151c',
+ 'HTTPStatusCode': 200,
+ 'HTTPHeaders': {
+ 'date': 'Thu, 04 Apr 2019 10:43:12 GMT',
+ 'content-type': 'application/x-amz-json-1.1',
+ 'content-length': '252',
+ 'connection': 'keep-alive',
+ 'x-amzn-requestid': '21099462-597c-490a-800f-8b7a41e5151c'
+ },
+ 'RetryAttempts': 0
+ }
+}
+
+
+def test_lookup_variable(mocker, dummy_credentials):
+ dateutil_tz = pytest.importorskip("dateutil.tz")
+ lookup = lookup_loader.get('amazon.aws.aws_secret')
+ boto3_double = mocker.MagicMock()
+ boto3_double.Session.return_value.client.return_value.get_secret_value.return_value = copy(
+ simple_variable_success_response)
+ boto3_client_double = boto3_double.Session.return_value.client
+
+ mocker.patch.object(boto3, 'session', boto3_double)
+ retval = lookup.run(["simple_variable"], None, **dummy_credentials)
+ assert (retval[0] == '{"secret":"simplesecret"}')
+ boto3_client_double.assert_called_with('secretsmanager', 'eu-west-1', aws_access_key_id='notakey',
+ aws_secret_access_key="notasecret", aws_session_token=None)
+
+
+error_response_missing = {'Error': {'Code': 'ResourceNotFoundException', 'Message': 'Fake Not Found Error'}}
+error_response_denied = {'Error': {'Code': 'AccessDeniedException', 'Message': 'Fake Denied Error'}}
+operation_name = 'FakeOperation'
+
+
+def test_on_missing_option(mocker, dummy_credentials):
+ boto3_double = mocker.MagicMock()
+ boto3_double.Session.return_value.client.return_value.get_secret_value.side_effect = ClientError(error_response_missing, operation_name)
+
+ with pytest.raises(AnsibleError, match="ResourceNotFound"):
+ mocker.patch.object(boto3, 'session', boto3_double)
+ lookup_loader.get('amazon.aws.aws_secret').run(["missing_secret"], None, **dummy_credentials)
+
+ mocker.patch.object(boto3, 'session', boto3_double)
+ args = copy(dummy_credentials)
+ args["on_missing"] = 'skip'
+ retval = lookup_loader.get('amazon.aws.aws_secret').run(["missing_secret"], None, **args)
+ assert(retval == [])
+
+ mocker.patch.object(boto3, 'session', boto3_double)
+ args = copy(dummy_credentials)
+ args["on_missing"] = 'warn'
+ retval = lookup_loader.get('amazon.aws.aws_secret').run(["missing_secret"], None, **args)
+ assert(retval == [])
+
+
+def test_on_denied_option(mocker, dummy_credentials):
+ boto3_double = mocker.MagicMock()
+ boto3_double.Session.return_value.client.return_value.get_secret_value.side_effect = ClientError(error_response_denied, operation_name)
+
+ with pytest.raises(AnsibleError, match="AccessDenied"):
+ mocker.patch.object(boto3, 'session', boto3_double)
+ lookup_loader.get('amazon.aws.aws_secret').run(["denied_secret"], None, **dummy_credentials)
+
+ mocker.patch.object(boto3, 'session', boto3_double)
+ args = copy(dummy_credentials)
+ args["on_denied"] = 'skip'
+ retval = lookup_loader.get('amazon.aws.aws_secret').run(["denied_secret"], None, **args)
+ assert(retval == [])
+
+ mocker.patch.object(boto3, 'session', boto3_double)
+ args = copy(dummy_credentials)
+ args["on_denied"] = 'warn'
+ retval = lookup_loader.get('amazon.aws.aws_secret').run(["denied_secret"], None, **args)
+ assert(retval == [])
+
+
+def test_nested_lookup_variable(mocker, dummy_credentials):
+ dateutil_tz = pytest.importorskip("dateutil.tz")
+ simple_variable_success_response = {
+ 'Name': 'simple_variable',
+ 'VersionId': 'cafe8168-e6ce-4e59-8830-5b143faf6c52',
+ 'SecretString': '{"key1": {"key2": {"key3": 1 } } }',
+ 'VersionStages': ['AWSCURRENT'],
+ 'CreatedDate': datetime.datetime(2019, 4, 4, 11, 41, 0, 878000, tzinfo=dateutil_tz.tzlocal()),
+ 'ResponseMetadata': {
+ 'RequestId': '21099462-597c-490a-800f-8b7a41e5151c',
+ 'HTTPStatusCode': 200,
+ 'HTTPHeaders': {
+ 'date': 'Thu, 04 Apr 2019 10:43:12 GMT',
+ 'content-type': 'application/x-amz-json-1.1',
+ 'content-length': '252',
+ 'connection': 'keep-alive',
+ 'x-amzn-requestid': '21099462-597c-490a-800f-8b7a41e5151c'
+ },
+ 'RetryAttempts': 0
+ }
+ }
+ lookup = lookup_loader.get('amazon.aws.aws_secret')
+ boto3_double = mocker.MagicMock()
+ boto3_double.Session.return_value.client.return_value.get_secret_value.return_value = simple_variable_success_response
+ boto3_client_double = boto3_double.Session.return_value.client
+
+ mocker.patch.object(boto3, 'session', boto3_double)
+ dummy_credentials["nested"] = 'true'
+ retval = lookup.run(["simple_variable.key1.key2.key3"], None, **dummy_credentials)
+ assert(retval[0] == '1')
+ boto3_client_double.assert_called_with('secretsmanager', 'eu-west-1', aws_access_key_id='notakey',
+ aws_secret_access_key="notasecret", aws_session_token=None)
+
+
+def test_path_lookup_variable(mocker, dummy_credentials):
+ lookup = aws_secret.LookupModule()
+ lookup._load_name = "aws_secret"
+
+ path_list_secrets_success_response = {
+ 'SecretList': [
+ {
+ 'Name': '/testpath/too',
+ },
+ {
+ 'Name': '/testpath/won',
+ }
+ ],
+ 'ResponseMetadata': {
+ 'RequestId': '21099462-597c-490a-800f-8b7a41e5151c',
+ 'HTTPStatusCode': 200,
+ 'HTTPHeaders': {
+ 'date': 'Thu, 04 Apr 2019 10:43:12 GMT',
+ 'content-type': 'application/x-amz-json-1.1',
+ 'content-length': '252',
+ 'connection': 'keep-alive',
+ 'x-amzn-requestid': '21099462-597c-490a-800f-8b7a41e5151c'
+ },
+ 'RetryAttempts': 0
+ }
+ }
+
+ boto3_double = mocker.MagicMock()
+ list_secrets_fn = boto3_double.Session.return_value.client.return_value.list_secrets
+ list_secrets_fn.return_value = path_list_secrets_success_response
+
+ get_secret_value_fn = boto3_double.Session.return_value.client.return_value.get_secret_value
+ first_path = copy(simple_variable_success_response)
+ first_path['SecretString'] = 'simple_value_too'
+ second_path = copy(simple_variable_success_response)
+ second_path['SecretString'] = 'simple_value_won'
+ get_secret_value_fn.side_effect = [
+ first_path,
+ second_path
+ ]
+
+ boto3_client_double = boto3_double.Session.return_value.client
+
+ mocker.patch.object(boto3, 'session', boto3_double)
+ dummy_credentials["bypath"] = 'true'
+ dummy_credentials["boto_profile"] = 'test'
+ dummy_credentials["aws_profile"] = 'test'
+ retval = lookup.run(["/testpath"], {}, **dummy_credentials)
+ assert (retval[0]["/testpath/won"] == "simple_value_won")
+ assert (retval[0]["/testpath/too"] == "simple_value_too")
+ boto3_client_double.assert_called_with('secretsmanager', 'eu-west-1', aws_access_key_id='notakey',
+ aws_secret_access_key="notasecret", aws_session_token=None)
+ list_secrets_fn.assert_called_with(Filters=[{'Key': 'name', 'Values': ['/testpath']}])
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/lookup/test_aws_ssm.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/lookup/test_aws_ssm.py
new file mode 100644
index 00000000..c94850b9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/lookup/test_aws_ssm.py
@@ -0,0 +1,166 @@
+#
+# (c) 2017 Michael De La Rue
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+from copy import copy
+
+from ansible.errors import AnsibleError
+
+from ansible_collections.amazon.aws.plugins.lookup import aws_ssm
+
+try:
+ import boto3
+ from botocore.exceptions import ClientError
+except ImportError:
+ pytestmark = pytest.mark.skip("This test requires the boto3 and botocore Python libraries")
+
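+# Canned SSM get_parameters-style responses shared by the tests below.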
+simple_variable_success_response = {
+ 'Parameters': [
+ {
+ 'Name': 'simple_variable',
+ 'Type': 'String',
+ 'Value': 'simplevalue',
+ 'Version': 1
+ }
+ ],
+ 'InvalidParameters': [],
+ 'ResponseMetadata': {
+ 'RequestId': '12121212-3434-5656-7878-9a9a9a9a9a9a',
+ 'HTTPStatusCode': 200,
+ 'HTTPHeaders': {
+ 'x-amzn-requestid': '12121212-3434-5656-7878-9a9a9a9a9a9a',
+ 'content-type': 'application/x-amz-json-1.1',
+ 'content-length': '116',
+ 'date': 'Tue, 23 Jan 2018 11:04:27 GMT'
+ },
+ 'RetryAttempts': 0
+ }
+}
+
+path_success_response = copy(simple_variable_success_response)
+path_success_response['Parameters'] = [
+ {'Name': '/testpath/too', 'Type': 'String', 'Value': 'simple_value_too', 'Version': 1},
+ {'Name': '/testpath/won', 'Type': 'String', 'Value': 'simple_value_won', 'Version': 1}
+]
+
+missing_variable_response = copy(simple_variable_success_response)
+missing_variable_response['Parameters'] = []
+missing_variable_response['InvalidParameters'] = ['missing_variable']
+
+some_missing_variable_response = copy(simple_variable_success_response)
+some_missing_variable_response['Parameters'] = [
+ {'Name': 'simple', 'Type': 'String', 'Value': 'simple_value', 'Version': 1},
+ {'Name': '/testpath/won', 'Type': 'String', 'Value': 'simple_value_won', 'Version': 1}
+]
+some_missing_variable_response['InvalidParameters'] = ['missing_variable']
+
+
+dummy_credentials = {}
+dummy_credentials['boto_profile'] = None
+dummy_credentials['aws_secret_key'] = "notasecret"
+dummy_credentials['aws_access_key'] = "notakey"
+dummy_credentials['aws_security_token'] = None
+dummy_credentials['region'] = 'eu-west-1'
+
+
+def test_lookup_variable(mocker):
+ lookup = aws_ssm.LookupModule()
+ lookup._load_name = "aws_ssm"
+
+ boto3_double = mocker.MagicMock()
+ boto3_double.Session.return_value.client.return_value.get_parameters.return_value = simple_variable_success_response
+ boto3_client_double = boto3_double.Session.return_value.client
+
+ mocker.patch.object(boto3, 'session', boto3_double)
+ retval = lookup.run(["simple_variable"], {}, **dummy_credentials)
+ assert(retval[0] == "simplevalue")
+ boto3_client_double.assert_called_with('ssm', 'eu-west-1', aws_access_key_id='notakey',
+ aws_secret_access_key="notasecret", aws_session_token=None)
+
+
+def test_path_lookup_variable(mocker):
+ lookup = aws_ssm.LookupModule()
+ lookup._load_name = "aws_ssm"
+
+ boto3_double = mocker.MagicMock()
+ get_path_fn = boto3_double.Session.return_value.client.return_value.get_parameters_by_path
+ get_path_fn.return_value = path_success_response
+ boto3_client_double = boto3_double.Session.return_value.client
+
+ mocker.patch.object(boto3, 'session', boto3_double)
+ args = copy(dummy_credentials)
+ args["bypath"] = 'true'
+ retval = lookup.run(["/testpath"], {}, **args)
+ assert(retval[0]["/testpath/won"] == "simple_value_won")
+ assert(retval[0]["/testpath/too"] == "simple_value_too")
+ boto3_client_double.assert_called_with('ssm', 'eu-west-1', aws_access_key_id='notakey',
+ aws_secret_access_key="notasecret", aws_session_token=None)
+ get_path_fn.assert_called_with(Path="/testpath", Recursive=False, WithDecryption=True)
+
+
+def test_return_none_for_missing_variable(mocker):
+ """
+    During Jinja2 templating we shouldn't normally raise exceptions, since doing so blocks the ability to use defaults.
+
+    For this reason we return ``None`` for missing variables.
+ """
+ lookup = aws_ssm.LookupModule()
+ lookup._load_name = "aws_ssm"
+
+ boto3_double = mocker.MagicMock()
+ boto3_double.Session.return_value.client.return_value.get_parameters.return_value = missing_variable_response
+
+ mocker.patch.object(boto3, 'session', boto3_double)
+ retval = lookup.run(["missing_variable"], {}, **dummy_credentials)
+ assert(retval[0] is None)
+
+
+def test_match_retvals_to_call_params_even_with_some_missing_variables(mocker):
+ """
+    If we request a mix of variables where some are missing and some are not, we still have to return a
+    list whose entries line up one-to-one with the requested variable list.
+ """
+ lookup = aws_ssm.LookupModule()
+ lookup._load_name = "aws_ssm"
+
+ boto3_double = mocker.MagicMock()
+ boto3_double.Session.return_value.client.return_value.get_parameters.return_value = some_missing_variable_response
+
+ mocker.patch.object(boto3, 'session', boto3_double)
+ retval = lookup.run(["simple", "missing_variable", "/testpath/won", "simple"], {}, **dummy_credentials)
+ assert(retval == ["simple_value", None, "simple_value_won", "simple_value"])
+
+
+error_response = {'Error': {'Code': 'ResourceNotFoundException', 'Message': 'Fake Testing Error'}}
+operation_name = 'FakeOperation'
+
+
+def test_warn_denied_variable(mocker):
+ lookup = aws_ssm.LookupModule()
+ lookup._load_name = "aws_ssm"
+
+ boto3_double = mocker.MagicMock()
+ boto3_double.Session.return_value.client.return_value.get_parameters.side_effect = ClientError(error_response, operation_name)
+
+ with pytest.raises(AnsibleError):
+ mocker.patch.object(boto3, 'session', boto3_double)
+ lookup.run(["denied_variable"], {}, **dummy_credentials)
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/__init__.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/__init__.py
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/conftest.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/conftest.py
new file mode 100644
index 00000000..a7d1e047
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/conftest.py
@@ -0,0 +1,31 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+import pytest
+
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.common._collections_compat import MutableMapping
+
+
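+# This fixture serialises the parametrized module arguments (wrapping a plain dict
+# under ANSIBLE_MODULE_ARGS and filling in the _ansible_* defaults) and patches
+# _ANSIBLE_ARGS so that AnsibleModule in the code under test parses those arguments.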
+@pytest.fixture
+def patch_ansible_module(request, mocker):
+ if isinstance(request.param, string_types):
+ args = request.param
+ elif isinstance(request.param, MutableMapping):
+ if 'ANSIBLE_MODULE_ARGS' not in request.param:
+ request.param = {'ANSIBLE_MODULE_ARGS': request.param}
+ if '_ansible_remote_tmp' not in request.param['ANSIBLE_MODULE_ARGS']:
+ request.param['ANSIBLE_MODULE_ARGS']['_ansible_remote_tmp'] = '/tmp'
+ if '_ansible_keep_remote_files' not in request.param['ANSIBLE_MODULE_ARGS']:
+ request.param['ANSIBLE_MODULE_ARGS']['_ansible_keep_remote_files'] = False
+ args = json.dumps(request.param)
+ else:
+ raise Exception('Malformed data to the patch_ansible_module pytest fixture')
+
+ mocker.patch('ansible.module_utils.basic._ANSIBLE_ARGS', to_bytes(args))
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/__init__.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/__init__.py
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/a.pem b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/a.pem
new file mode 100644
index 00000000..4412f325
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/a.pem
@@ -0,0 +1,31 @@
+-----BEGIN CERTIFICATE-----
+MIIFVTCCBD2gAwIBAgISAx4pnfwvGxYrrQhr/UXiN7HCMA0GCSqGSIb3DQEBCwUA
+MEoxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MSMwIQYDVQQD
+ExpMZXQncyBFbmNyeXB0IEF1dGhvcml0eSBYMzAeFw0xOTA3MjUwMDI4NTdaFw0x
+OTEwMjMwMDI4NTdaMBoxGDAWBgNVBAMTD2NyeXB0b2dyYXBoeS5pbzCCASIwDQYJ
+KoZIhvcNAQEBBQADggEPADCCAQoCggEBAKJDpCL99DVo83587MrVp6gunmKRoUfY
+vcgk5u2v0tB9OmZkcIY37z6AunHWr18Yj55zHmm6G8Nf35hmu3ql2A26WThCbmOe
+WXbxhgarkningZI9opUWnI2dIllguVIsq99GzhpNnDdCb26s5+SRhJI4cr4hYaKC
+XGDKooKWyXUX09SJTq7nW/1+pq3y9ZMvldRKjJALeAdwnC7kmUB6pK7q8J2VlpfQ
+wqGu6q/WHVdgnhWARw3GEFJWDn9wkxBAF08CpzhVaEj+iK+Ut/1HBgNYwqI47h7S
+q+qv0G2qklRVUtEM0zYRsp+y/6vivdbFLlPw8VaerbpJN3gLtpVNcGECAwEAAaOC
+AmMwggJfMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYB
+BQUHAwIwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQUjbe0bE1aZ8HiqtwqUfCe15bF
+V8UwHwYDVR0jBBgwFoAUqEpqYwR93brm0Tm3pkVl7/Oo7KEwbwYIKwYBBQUHAQEE
+YzBhMC4GCCsGAQUFBzABhiJodHRwOi8vb2NzcC5pbnQteDMubGV0c2VuY3J5cHQu
+b3JnMC8GCCsGAQUFBzAChiNodHRwOi8vY2VydC5pbnQteDMubGV0c2VuY3J5cHQu
+b3JnLzAaBgNVHREEEzARgg9jcnlwdG9ncmFwaHkuaW8wTAYDVR0gBEUwQzAIBgZn
+gQwBAgEwNwYLKwYBBAGC3xMBAQEwKDAmBggrBgEFBQcCARYaaHR0cDovL2Nwcy5s
+ZXRzZW5jcnlwdC5vcmcwggEDBgorBgEEAdZ5AgQCBIH0BIHxAO8AdgB0ftqDMa0z
+EJEhnM4lT0Jwwr/9XkIgCMY3NXnmEHvMVgAAAWwmvtnXAAAEAwBHMEUCIFXHYX/E
+xtbYCvjjQ3dN0HOLW1d8+aduktmax4mu3KszAiEAvTpxuSVVXJnVGA4tU2GOnI60
+sqTh/IK6hvrFN1k1HBUAdQApPFGWVMg5ZbqqUPxYB9S3b79Yeily3KTDDPTlRUf0
+eAAAAWwmvtm9AAAEAwBGMEQCIDn7sgzD+7JzR+XTvjKf7VyLWwX37O8uwCfCTKo7
++tEhAiB05bHiICU5wkfRBrwcvqXf4bPF7NT5LVlRQYzJ/hbpvzANBgkqhkiG9w0B
+AQsFAAOCAQEAcMU8E6D+5WC07QSeTppRTboC++7YgQg5NiSWm7OE2FlyiRZXnu0Y
+uBoaqAkZIqj7dom9wy1c1UauxOfM9lUZKhYnDTBu9tIhBAvCS0J0avv1j1KQygQ1
+qV+urJsunUwqV/vPWo1GfWophvyXVN6MAycv34ZXZvAjtG7oDcoQVLLvK1SIo2vu
+4/dNkOQzaeZez8q6Ij9762TbBWaK5C789VMdUWZCADWoToPIK533cWbDEp4IhBU/
+K73d7lGGl7S59SjT2V/XE6eJS9Zlj0M+A8pf/8tjM/ImHAjlOHB02sM/VfZ7HAuZ
+61TPxohL+e+X1FYeqIXYGXJmCEuB8WEmBg==
+-----END CERTIFICATE-----
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/b.pem b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/b.pem
new file mode 100644
index 00000000..2be4bca5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/b.pem
@@ -0,0 +1,47 @@
+-----BEGIN CERTIFICATE-----
+MIIIUjCCB/egAwIBAgIRALiJR3zQjp0MevT/Hk89sfAwCgYIKoZIzj0EAwIwgZIx
+CzAJBgNVBAYTAkdCMRswGQYDVQQIExJHcmVhdGVyIE1hbmNoZXN0ZXIxEDAOBgNV
+BAcTB1NhbGZvcmQxGjAYBgNVBAoTEUNPTU9ETyBDQSBMaW1pdGVkMTgwNgYDVQQD
+Ey9DT01PRE8gRUNDIERvbWFpbiBWYWxpZGF0aW9uIFNlY3VyZSBTZXJ2ZXIgQ0Eg
+MjAeFw0xOTA3MzEwMDAwMDBaFw0yMDAyMDYyMzU5NTlaMGwxITAfBgNVBAsTGERv
+bWFpbiBDb250cm9sIFZhbGlkYXRlZDEhMB8GA1UECxMYUG9zaXRpdmVTU0wgTXVs
+dGktRG9tYWluMSQwIgYDVQQDExtzc2wzODczMzcuY2xvdWRmbGFyZXNzbC5jb20w
+WTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAARPFdjdnBIJRPnHCPsCBJ/MmPytXnZX
+KV6lD2bbG5EVNuUQln4Na8heCY+sfpV+SPuuiNzZxgDA46GvyzdRYFhxo4IGUTCC
+Bk0wHwYDVR0jBBgwFoAUQAlhZ/C8g3FP3hIILG/U1Ct2PZYwHQYDVR0OBBYEFGLh
+bHk1KAYIRfVwXA3L+yDf0CxjMA4GA1UdDwEB/wQEAwIHgDAMBgNVHRMBAf8EAjAA
+MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjBPBgNVHSAESDBGMDoGCysG
+AQQBsjEBAgIHMCswKQYIKwYBBQUHAgEWHWh0dHBzOi8vc2VjdXJlLmNvbW9kby5j
+b20vQ1BTMAgGBmeBDAECATBWBgNVHR8ETzBNMEugSaBHhkVodHRwOi8vY3JsLmNv
+bW9kb2NhNC5jb20vQ09NT0RPRUNDRG9tYWluVmFsaWRhdGlvblNlY3VyZVNlcnZl
+ckNBMi5jcmwwgYgGCCsGAQUFBwEBBHwwejBRBggrBgEFBQcwAoZFaHR0cDovL2Ny
+dC5jb21vZG9jYTQuY29tL0NPTU9ET0VDQ0RvbWFpblZhbGlkYXRpb25TZWN1cmVT
+ZXJ2ZXJDQTIuY3J0MCUGCCsGAQUFBzABhhlodHRwOi8vb2NzcC5jb21vZG9jYTQu
+Y29tMIIDkAYDVR0RBIIDhzCCA4OCG3NzbDM4NzMzNy5jbG91ZGZsYXJlc3NsLmNv
+bYIMKi5hanJ0Y3QuY29tghMqLmFrcmVwYnVyY3UuZ2VuLnRyghUqLmFuZHJlYXNr
+YW5lbGxvcy5jb22CDSouYW5zaWJsZS5jb22CGSouYXJ0b2Z0b3VjaC1raW5nd29v
+ZC5jb22CFyouYm91bGRlcnN3YXRlcmhvbGUuY29tghcqLmJyb2Nrc3RlY2hzdXBw
+b3J0LmNvbYIQKi5idXJjbGFyLndlYi50coIcKi5ob3Blc29uZ2ZyZW5jaGJ1bGxk
+b2dzLm5ldIIMKi5odXJyZW0uY29tghAqLmh5dmVsaWNvbnMuY29tghAqLmthcm1h
+Zml0LmNvLnVrghUqLmxvd3J5c3lzdGVtc2luYy5jb22CDioubWFuaWNydW4uY29t
+ghUqLm11dHVvZmluYW5jaWVyYS5jb22CDyoucGlsZ3JpbWFnZS5waIINKi5wa2dh
+bWVzLm9yZ4IbKi5ybHBjb25zdWx0aW5nc2VydmljZXMuY29tghYqLnJ1eWF0YWJp
+cmxlcmkuZ2VuLnRyghQqLnJ5YW5hcHBoeXNpY3NjLmNvbYIVKi53ZWFyaXRiYWNr
+d2FyZHMub3Jngg8qLnlldGlzbmFjay5jb22CCmFqcnRjdC5jb22CEWFrcmVwYnVy
+Y3UuZ2VuLnRyghNhbmRyZWFza2FuZWxsb3MuY29tggthbnNpYmxlLmNvbYIXYXJ0
+b2Z0b3VjaC1raW5nd29vZC5jb22CFWJvdWxkZXJzd2F0ZXJob2xlLmNvbYIVYnJv
+Y2tzdGVjaHN1cHBvcnQuY29tgg5idXJjbGFyLndlYi50coIaaG9wZXNvbmdmcmVu
+Y2hidWxsZG9ncy5uZXSCCmh1cnJlbS5jb22CDmh5dmVsaWNvbnMuY29tgg5rYXJt
+YWZpdC5jby51a4ITbG93cnlzeXN0ZW1zaW5jLmNvbYIMbWFuaWNydW4uY29tghNt
+dXR1b2ZpbmFuY2llcmEuY29tgg1waWxncmltYWdlLnBoggtwa2dhbWVzLm9yZ4IZ
+cmxwY29uc3VsdGluZ3NlcnZpY2VzLmNvbYIUcnV5YXRhYmlybGVyaS5nZW4udHKC
+EnJ5YW5hcHBoeXNpY3NjLmNvbYITd2Vhcml0YmFja3dhcmRzLm9yZ4INeWV0aXNu
+YWNrLmNvbTCCAQQGCisGAQQB1nkCBAIEgfUEgfIA8AB2ALIeBcyLos2KIE6HZvkr
+uYolIGdr2vpw57JJUy3vi5BeAAABbEVw8SgAAAQDAEcwRQIgE2YeTfb/d4BBUwpZ
+ihWXSR+vRyNNUg8GlOak2MFMHv0CIQCLBvtU401m5/Psg9KirQZs321BSxgUKgSQ
+m9M691d3eQB2AF6nc/nfVsDntTZIfdBJ4DJ6kZoMhKESEoQYdZaBcUVYAAABbEVw
+8VgAAAQDAEcwRQIgGYsGfr3/mekjzMS9+ALAjx1ryfIfhXB/+UghTcw4Y8ICIQDS
+K2L18WX3+Oh4TjJhjh5tV1iYyZVYivcwwbr7mtmOqjAKBggqhkjOPQQDAgNJADBG
+AiEAjNt7LF78GV7snky9jwFcBsLH55ndzduvsrkJ7Ne1SgYCIQDsMJsTr9VP6kar
+4Kv8V9zNBmpGrGNuE7A1GixBvzNaHA==
+-----END CERTIFICATE-----
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.0.cert b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.0.cert
new file mode 100644
index 00000000..6997766a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.0.cert
@@ -0,0 +1,121 @@
+subject=/C=AU/ST=Victoria/L=Melbourne/O=Telstra Corporation Limited/OU=Telstra Energy/CN=dev.energy.inside.telstra.com
+issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Global SSL ICA G3
+-----BEGIN CERTIFICATE-----
+MIIIHTCCBgWgAwIBAgIUCqrrzSfjzaoyB3DOxst2kMxFp/MwDQYJKoZIhvcNAQEL
+BQAwTTELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxIzAh
+BgNVBAMTGlF1b1ZhZGlzIEdsb2JhbCBTU0wgSUNBIEczMB4XDTE5MDgyMTIyMjIy
+OFoXDTIxMDgyMTIyMzIwMFowgZsxCzAJBgNVBAYTAkFVMREwDwYDVQQIDAhWaWN0
+b3JpYTESMBAGA1UEBwwJTWVsYm91cm5lMSQwIgYDVQQKDBtUZWxzdHJhIENvcnBv
+cmF0aW9uIExpbWl0ZWQxFzAVBgNVBAsMDlRlbHN0cmEgRW5lcmd5MSYwJAYDVQQD
+DB1kZXYuZW5lcmd5Lmluc2lkZS50ZWxzdHJhLmNvbTCCASIwDQYJKoZIhvcNAQEB
+BQADggEPADCCAQoCggEBAMPAPH2y206qios2NMzlCNJv1mrwC1/8tH2HOqJGiYZB
+O7QOBRSvJsV++IozCB8ap99e8B64OOAQPOyykrdXd2axhftmMb1SFMF56eukHSuz
+KhKWRUgHs0UFRU51lDcBcOvphwJ+5SOgqrqKFFFBgJ0ZpcP54JpFwKIdh3ac10x2
+mBaW5ccqdv5X9oEMu1D/yivBmy34tsbLYyfttCjP76iVT7UVYHjHWynnIhsEyMsU
+gdM90NzrTlrvTSi/EcCD1W3+8b0f+G1TI5rhHbKwR0n/mv5QLFm7EABoYPhxS8bX
+B+9tE67yb0RyWbgvUiHySRynQLNMRpRx8Y9bA8uC8n8CAwEAAaOCA6QwggOgMAkG
+A1UdEwQCMAAwHwYDVR0jBBgwFoAUsxKJtalLNbwVAPCA6dh4h/ETfHYwcwYIKwYB
+BQUHAQEEZzBlMDcGCCsGAQUFBzAChitodHRwOi8vdHJ1c3QucXVvdmFkaXNnbG9i
+YWwuY29tL3F2c3NsZzMuY3J0MCoGCCsGAQUFBzABhh5odHRwOi8vb2NzcC5xdW92
+YWRpc2dsb2JhbC5jb20wgZ8GA1UdEQSBlzCBlIIdZGV2LmVuZXJneS5pbnNpZGUu
+dGVsc3RyYS5jb22CJXJlcG9ydHMuZGV2LmVuZXJneS5pbnNpZGUudGVsc3RyYS5j
+b22CJ2dyZWVuc3luYy5kZXYuZW5lcmd5Lmluc2lkZS50ZWxzdHJhLmNvbYIjbmdv
+c3MuZGV2LmVuZXJneS5pbnNpZGUudGVsc3RyYS5jb20wUQYDVR0gBEowSDBGBgwr
+BgEEAb5YAAJkAQEwNjA0BggrBgEFBQcCARYoaHR0cDovL3d3dy5xdW92YWRpc2ds
+b2JhbC5jb20vcmVwb3NpdG9yeTAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUH
+AwEwOgYDVR0fBDMwMTAvoC2gK4YpaHR0cDovL2NybC5xdW92YWRpc2dsb2JhbC5j
+b20vcXZzc2xnMy5jcmwwHQYDVR0OBBYEFEoJQRpPC/V5ZK3mMkszZE2v6vh+MA4G
+A1UdDwEB/wQEAwIFoDCCAXwGCisGAQQB1nkCBAIEggFsBIIBaAFmAHUAVhQGmi/X
+wuzT9eG9RLI+x0Z2ubyZEVzA75SYVdaJ0N0AAAFstk9Y+gAABAMARjBEAiBFMZa6
+O9iXVjy2kqQa54vgNFdU7shgFJJhm//fSAQZUAIgBIL/yPdh+XiuQS2xPhCzNYkh
+bxf7BbN4qUISESgiZpsAdgBvU3asMfAxGdiZAKRRFf93FRwR2QLBACkGjbIImjfZ
+EwAAAWy2T1nKAAAEAwBHMEUCIG0tp63jLsDsfCTDlcvV5ItjRkbUJBnkxlPdP2PH
+88sTAiEApgaPofVdn2hdI12iDDex72ta+9wpwQ1MxoaJn2nt+qEAdQDuS723dc5g
+uuFCaR+r4Z5mow9+X7By2IMAxHuJeqj9ywAAAWy2T1iJAAAEAwBGMEQCIE/mzEFp
+CJUc71jvwJa4Px86R3ZYK4mHmUlQAUZqd0ZkAiBdEmT8xxTuleSUlYHEkKCK/FZX
+L+vsYJpPrA9TsO5IsTANBgkqhkiG9w0BAQsFAAOCAgEApE9WLz3S8tqA9Dk3r9LF
+rJy8km9cBt1O9SQZwFsduGKGdF3Fd+/Y0V7UrFDzrX+NIzqcmgBHKxaIXorMBF70
+ajMaaROP2ymkpEXnruEwoR47fbW+JRAWDRm2xnouQveQX9ZcgCLbBvAWBqpndQj2
+DGmLJhNz5GlFBjh3PQZlU1w8hU7TrDxa7M1GMtVnk8X+o3l/MX9iPeEs+PiC4dHD
+hpj84RY1VQJz8+10rql47SB5YgbwcqaizTG4ax/OAv1JHNWtfAodIMX8Y8X00zoz
+A20LQv880jCCNANVNbrXJ3h4X3xwW/C1X9vYk0shymZJbT5u17JbPD1cy39bA7kT
+F4L7scdQRxvcqazYN4/IdgvgMji9OltiYufP88Ti8KB2tcl2accpiC5St/zllGD1
+hqEeYLMzjyvUKR/1uvURQQtc0DPvBRmvkB+aI4g+sLkTTFWj5bsA1vKU8SDCyMuB
+RQV11DId5+RNNCmWnskORUZJQssvY49pnfCxCES2nt3l/XzTzVtLYmd6G9uAqVac
+e2ibnmDrFVlmlyRsCiMfZl5/OTJzt7Cj3az59m5Syfw/lnS9YP82t/r/ufuKkO5Q
+q5a9aI8DuNNmAjR4lpIJNqIpX/y+dG2aGmx4XTc31MR9szWtiTgOHe0MkMupOAL0
+qkHrBgwo1zjuTMf3QOg6Z5Q=
+-----END CERTIFICATE-----
+
+subject=/C=BM/O=QuoVadis Limited/CN=QuoVadis Global SSL ICA G3
+issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3
+-----BEGIN CERTIFICATE-----
+MIIGFzCCA/+gAwIBAgIUftbnnMmtgcTIGT75XUQodw40ExcwDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjExMDYxNDUwMThaFw0y
+MjExMDYxNDUwMThaME0xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMSMwIQYDVQQDExpRdW9WYWRpcyBHbG9iYWwgU1NMIElDQSBHMzCCAiIw
+DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANf8Od17be6c6lTGJDhEXpmkTs4y
+Q39Rr5VJyBeWCg06nSS71s6xF3sZvKcV0MbXlXCYM2ZX7cNTbJ81gs7uDsKFp+vK
+EymiKyEiI2SImOtECNnSg+RVR4np/xz/UlC0yFUisH75cZsJ8T1pkGMfiEouR0EM
+7O0uFgoboRfUP582TTWy0F7ynSA6YfGKnKj0OFwZJmGHVkLs1VevWjhj3R1fsPan
+H05P5moePFnpQdj1FofoSxUHZ0c7VB+sUimboHm/uHNY1LOsk77qiSuVC5/yrdg3
+2EEfP/mxJYT4r/5UiD7VahySzeZHzZ2OibQm2AfgfMN3l57lCM3/WPQBhMAPS1jz
+kE+7MjajM2f0aZctimW4Hasrj8AQnfAdHqZehbhtXaAlffNEzCdpNK584oCTVR7N
+UR9iZFx83ruTqpo+GcLP/iSYqhM4g7fy45sNhU+IS+ca03zbxTl3TTlkofXunI5B
+xxE30eGSQpDZ5+iUJcEOAuVKrlYocFbB3KF45hwcbzPWQ1DcO2jFAapOtQzeS+MZ
+yZzT2YseJ8hQHKu8YrXZWwKaNfyl8kFkHUBDICowNEoZvBwRCQp8sgqL6YRZy0uD
+JGxmnC2e0BVKSjcIvmq/CRWH7yiTk9eWm73xrsg9iIyD/kwJEnLyIk8tR5V8p/hc
+1H2AjDrZH12PsZ45AgMBAAGjgfMwgfAwEgYDVR0TAQH/BAgwBgEB/wIBATARBgNV
+HSAECjAIMAYGBFUdIAAwOgYIKwYBBQUHAQEELjAsMCoGCCsGAQUFBzABhh5odHRw
+Oi8vb2NzcC5xdW92YWRpc2dsb2JhbC5jb20wDgYDVR0PAQH/BAQDAgEGMB8GA1Ud
+IwQYMBaAFO3nb3Zav2DsSVvGpXe7chZxm8Q9MDsGA1UdHwQ0MDIwMKAuoCyGKmh0
+dHA6Ly9jcmwucXVvdmFkaXNnbG9iYWwuY29tL3F2cmNhMmczLmNybDAdBgNVHQ4E
+FgQUsxKJtalLNbwVAPCA6dh4h/ETfHYwDQYJKoZIhvcNAQELBQADggIBAFGm1Fqp
+RMiKr7a6h707M+km36PVXZnX1NZocCn36MrfRvphotbOCDm+GmRkar9ZMGhc8c/A
+Vn7JSCjwF9jNOFIOUyNLq0w4luk+Pt2YFDbgF8IDdx53xIo8Gv05e9xpTvQYaIto
+qeHbQjGXfSGc91olfX6JUwZlxxbhdJH+rxTFAg0jcbqToJoScWTfXSr1QRcNbSTs
+Y4CPG6oULsnhVvrzgldGSK+DxFi2OKcDsOKkV7W4IGg8Do2L/M588AfBnV8ERzpl
+qgMBBQxC2+0N6RdFHbmZt0HQE/NIg1s0xcjGx1XW3YTOfje31rmAXKHOehm4Bu48
+gr8gePq5cdQ2W9tA0Dnytb9wzH2SyPPIXRI7yNxaX9H8wYeDeeiKSSmQtfh1v5cV
+7RXvm8F6hLJkkco/HOW3dAUwZFcKsUH+1eUJKLN18eDGwB8yGawjHvOKqcfg5Lf/
+TvC7hgcx7pDYaCCaqHaekgUwXbB2Enzqr1fdwoU1c01W5YuQAtAx5wk1bf34Yq/J
+ph7wNXGvo88N0/EfP9AdVGmJzy7VuRXeVAOyjKAIeADMlwpjBRhcbs9m3dkqvoMb
+SXKJxv/hFmNgEOvOlaFsXX1dbKg1v+C1AzKAFdiuAIa62JzASiEhigqNSdqdTsOh
+8W8hdONuKKpe9zKedhBFAvuxhDgKmnySglYc
+-----END CERTIFICATE-----
+
+subject=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3
+issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00
+MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf
+qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMW
+n4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym
+c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+
+O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1
+o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0j
+IaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq
+IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz
+8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh
+vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l
+7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG
+cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD
+ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66
+AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC
+roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0Ga
+W/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n
+lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE
++V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV
+csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtd
+dbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg
+KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeM
+HVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4
+WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M
+-----END CERTIFICATE-----
+
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.1.cert b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.1.cert
new file mode 100644
index 00000000..51f64f08
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.1.cert
@@ -0,0 +1,69 @@
+subject=/C=AU/ST=Victoria/L=Melbourne/O=Telstra Corporation Limited/OU=Telstra Energy/CN=dev.energy.inside.telstra.com
+issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Global SSL ICA G3
+-----BEGIN CERTIFICATE-----
+MIIIHTCCBgWgAwIBAgIUCqrrzSfjzaoyB3DOxst2kMxFp/MwDQYJKoZIhvcNAQELBQAwTTELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxIzAh
+BgNVBAMTGlF1b1ZhZGlzIEdsb2JhbCBTU0wgSUNBIEczMB4XDTE5MDgyMTIyMjIyOFoXDTIxMDgyMTIyMzIwMFowgZsxCzAJBgNVBAYTAkFVMREwDwYDVQQIDAhWaWN0
+b3JpYTESMBAGA1UEBwwJTWVsYm91cm5lMSQwIgYDVQQKDBtUZWxzdHJhIENvcnBvcmF0aW9uIExpbWl0ZWQxFzAVBgNVBAsMDlRlbHN0cmEgRW5lcmd5MSYwJAYDVQQD
+DB1kZXYuZW5lcmd5Lmluc2lkZS50ZWxzdHJhLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMPAPH2y206qios2NMzlCNJv1mrwC1/8tH2HOqJGiYZB
+O7QOBRSvJsV++IozCB8ap99e8B64OOAQPOyykrdXd2axhftmMb1SFMF56eukHSuzKhKWRUgHs0UFRU51lDcBcOvphwJ+5SOgqrqKFFFBgJ0ZpcP54JpFwKIdh3ac10x2
+mBaW5ccqdv5X9oEMu1D/yivBmy34tsbLYyfttCjP76iVT7UVYHjHWynnIhsEyMsUgdM90NzrTlrvTSi/EcCD1W3+8b0f+G1TI5rhHbKwR0n/mv5QLFm7EABoYPhxS8bX
+B+9tE67yb0RyWbgvUiHySRynQLNMRpRx8Y9bA8uC8n8CAwEAAaOCA6QwggOgMAkGA1UdEwQCMAAwHwYDVR0jBBgwFoAUsxKJtalLNbwVAPCA6dh4h/ETfHYwcwYIKwYB
+BQUHAQEEZzBlMDcGCCsGAQUFBzAChitodHRwOi8vdHJ1c3QucXVvdmFkaXNnbG9iYWwuY29tL3F2c3NsZzMuY3J0MCoGCCsGAQUFBzABhh5odHRwOi8vb2NzcC5xdW92
+YWRpc2dsb2JhbC5jb20wgZ8GA1UdEQSBlzCBlIIdZGV2LmVuZXJneS5pbnNpZGUudGVsc3RyYS5jb22CJXJlcG9ydHMuZGV2LmVuZXJneS5pbnNpZGUudGVsc3RyYS5j
+b22CJ2dyZWVuc3luYy5kZXYuZW5lcmd5Lmluc2lkZS50ZWxzdHJhLmNvbYIjbmdvc3MuZGV2LmVuZXJneS5pbnNpZGUudGVsc3RyYS5jb20wUQYDVR0gBEowSDBGBgwr
+BgEEAb5YAAJkAQEwNjA0BggrBgEFBQcCARYoaHR0cDovL3d3dy5xdW92YWRpc2dsb2JhbC5jb20vcmVwb3NpdG9yeTAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUH
+AwEwOgYDVR0fBDMwMTAvoC2gK4YpaHR0cDovL2NybC5xdW92YWRpc2dsb2JhbC5jb20vcXZzc2xnMy5jcmwwHQYDVR0OBBYEFEoJQRpPC/V5ZK3mMkszZE2v6vh+MA4G
+A1UdDwEB/wQEAwIFoDCCAXwGCisGAQQB1nkCBAIEggFsBIIBaAFmAHUAVhQGmi/XwuzT9eG9RLI+x0Z2ubyZEVzA75SYVdaJ0N0AAAFstk9Y+gAABAMARjBEAiBFMZa6
+O9iXVjy2kqQa54vgNFdU7shgFJJhm//fSAQZUAIgBIL/yPdh+XiuQS2xPhCzNYkhbxf7BbN4qUISESgiZpsAdgBvU3asMfAxGdiZAKRRFf93FRwR2QLBACkGjbIImjfZ
+EwAAAWy2T1nKAAAEAwBHMEUCIG0tp63jLsDsfCTDlcvV5ItjRkbUJBnkxlPdP2PH88sTAiEApgaPofVdn2hdI12iDDex72ta+9wpwQ1MxoaJn2nt+qEAdQDuS723dc5g
+uuFCaR+r4Z5mow9+X7By2IMAxHuJeqj9ywAAAWy2T1iJAAAEAwBGMEQCIE/mzEFpCJUc71jvwJa4Px86R3ZYK4mHmUlQAUZqd0ZkAiBdEmT8xxTuleSUlYHEkKCK/FZX
+L+vsYJpPrA9TsO5IsTANBgkqhkiG9w0BAQsFAAOCAgEApE9WLz3S8tqA9Dk3r9LFrJy8km9cBt1O9SQZwFsduGKGdF3Fd+/Y0V7UrFDzrX+NIzqcmgBHKxaIXorMBF70
+ajMaaROP2ymkpEXnruEwoR47fbW+JRAWDRm2xnouQveQX9ZcgCLbBvAWBqpndQj2DGmLJhNz5GlFBjh3PQZlU1w8hU7TrDxa7M1GMtVnk8X+o3l/MX9iPeEs+PiC4dHD
+hpj84RY1VQJz8+10rql47SB5YgbwcqaizTG4ax/OAv1JHNWtfAodIMX8Y8X00zozA20LQv880jCCNANVNbrXJ3h4X3xwW/C1X9vYk0shymZJbT5u17JbPD1cy39bA7kT
+F4L7scdQRxvcqazYN4/IdgvgMji9OltiYufP88Ti8KB2tcl2accpiC5St/zllGD1hqEeYLMzjyvUKR/1uvURQQtc0DPvBRmvkB+aI4g+sLkTTFWj5bsA1vKU8SDCyMuB
+RQV11DId5+RNNCmWnskORUZJQssvY49pnfCxCES2nt3l/XzTzVtLYmd6G9uAqVace2ibnmDrFVlmlyRsCiMfZl5/OTJzt7Cj3az59m5Syfw/lnS9YP82t/r/ufuKkO5Q
+q5a9aI8DuNNmAjR4lpIJNqIpX/y+dG2aGmx4XTc31MR9szWtiTgOHe0MkMupOAL0qkHrBgwo1zjuTMf3QOg6Z5Q=
+-----END CERTIFICATE-----
+
+subject=/C=BM/O=QuoVadis Limited/CN=QuoVadis Global SSL ICA G3
+issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3
+-----BEGIN CERTIFICATE-----
+MIIGFzCCA/+gAwIBAgIUftbnnMmtgcTIGT75XUQodw40ExcwDQYJKoZIhvcNAQELBQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjExMDYxNDUwMThaFw0yMjExMDYxNDUwMThaME0xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMSMwIQYDVQQDExpRdW9WYWRpcyBHbG9iYWwgU1NMIElDQSBHMzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANf8Od17be6c6lTGJDhEXpmkTs4y
+Q39Rr5VJyBeWCg06nSS71s6xF3sZvKcV0MbXlXCYM2ZX7cNTbJ81gs7uDsKFp+vKEymiKyEiI2SImOtECNnSg+RVR4np/xz/UlC0yFUisH75cZsJ8T1pkGMfiEouR0EM
+7O0uFgoboRfUP582TTWy0F7ynSA6YfGKnKj0OFwZJmGHVkLs1VevWjhj3R1fsPanH05P5moePFnpQdj1FofoSxUHZ0c7VB+sUimboHm/uHNY1LOsk77qiSuVC5/yrdg3
+2EEfP/mxJYT4r/5UiD7VahySzeZHzZ2OibQm2AfgfMN3l57lCM3/WPQBhMAPS1jzkE+7MjajM2f0aZctimW4Hasrj8AQnfAdHqZehbhtXaAlffNEzCdpNK584oCTVR7N
+UR9iZFx83ruTqpo+GcLP/iSYqhM4g7fy45sNhU+IS+ca03zbxTl3TTlkofXunI5BxxE30eGSQpDZ5+iUJcEOAuVKrlYocFbB3KF45hwcbzPWQ1DcO2jFAapOtQzeS+MZ
+yZzT2YseJ8hQHKu8YrXZWwKaNfyl8kFkHUBDICowNEoZvBwRCQp8sgqL6YRZy0uDJGxmnC2e0BVKSjcIvmq/CRWH7yiTk9eWm73xrsg9iIyD/kwJEnLyIk8tR5V8p/hc
+1H2AjDrZH12PsZ45AgMBAAGjgfMwgfAwEgYDVR0TAQH/BAgwBgEB/wIBATARBgNVHSAECjAIMAYGBFUdIAAwOgYIKwYBBQUHAQEELjAsMCoGCCsGAQUFBzABhh5odHRw
+Oi8vb2NzcC5xdW92YWRpc2dsb2JhbC5jb20wDgYDVR0PAQH/BAQDAgEGMB8GA1UdIwQYMBaAFO3nb3Zav2DsSVvGpXe7chZxm8Q9MDsGA1UdHwQ0MDIwMKAuoCyGKmh0
+dHA6Ly9jcmwucXVvdmFkaXNnbG9iYWwuY29tL3F2cmNhMmczLmNybDAdBgNVHQ4EFgQUsxKJtalLNbwVAPCA6dh4h/ETfHYwDQYJKoZIhvcNAQELBQADggIBAFGm1Fqp
+RMiKr7a6h707M+km36PVXZnX1NZocCn36MrfRvphotbOCDm+GmRkar9ZMGhc8c/AVn7JSCjwF9jNOFIOUyNLq0w4luk+Pt2YFDbgF8IDdx53xIo8Gv05e9xpTvQYaIto
+qeHbQjGXfSGc91olfX6JUwZlxxbhdJH+rxTFAg0jcbqToJoScWTfXSr1QRcNbSTsY4CPG6oULsnhVvrzgldGSK+DxFi2OKcDsOKkV7W4IGg8Do2L/M588AfBnV8ERzpl
+qgMBBQxC2+0N6RdFHbmZt0HQE/NIg1s0xcjGx1XW3YTOfje31rmAXKHOehm4Bu48gr8gePq5cdQ2W9tA0Dnytb9wzH2SyPPIXRI7yNxaX9H8wYeDeeiKSSmQtfh1v5cV
+7RXvm8F6hLJkkco/HOW3dAUwZFcKsUH+1eUJKLN18eDGwB8yGawjHvOKqcfg5Lf/TvC7hgcx7pDYaCCaqHaekgUwXbB2Enzqr1fdwoU1c01W5YuQAtAx5wk1bf34Yq/J
+ph7wNXGvo88N0/EfP9AdVGmJzy7VuRXeVAOyjKAIeADMlwpjBRhcbs9m3dkqvoMbSXKJxv/hFmNgEOvOlaFsXX1dbKg1v+C1AzKAFdiuAIa62JzASiEhigqNSdqdTsOh
+8W8hdONuKKpe9zKedhBFAvuxhDgKmnySglYc
+-----END CERTIFICATE-----
+
+subject=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3
+issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQELBQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf
+qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMWn4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym
+c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1
+o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0jIaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq
+IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh
+vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG
+cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD
+ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC
+roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0GaW/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n
+lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE+V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV
+csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtddbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg
+KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeMHVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4
+WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M
+-----END CERTIFICATE-----
+
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.2.cert b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.2.cert
new file mode 100644
index 00000000..ce299241
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.2.cert
@@ -0,0 +1,113 @@
+-----BEGIN CERTIFICATE-----
+MIIIHTCCBgWgAwIBAgIUCqrrzSfjzaoyB3DOxst2kMxFp/MwDQYJKoZIhvcNAQEL
+BQAwTTELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxIzAh
+BgNVBAMTGlF1b1ZhZGlzIEdsb2JhbCBTU0wgSUNBIEczMB4XDTE5MDgyMTIyMjIy
+OFoXDTIxMDgyMTIyMzIwMFowgZsxCzAJBgNVBAYTAkFVMREwDwYDVQQIDAhWaWN0
+b3JpYTESMBAGA1UEBwwJTWVsYm91cm5lMSQwIgYDVQQKDBtUZWxzdHJhIENvcnBv
+cmF0aW9uIExpbWl0ZWQxFzAVBgNVBAsMDlRlbHN0cmEgRW5lcmd5MSYwJAYDVQQD
+DB1kZXYuZW5lcmd5Lmluc2lkZS50ZWxzdHJhLmNvbTCCASIwDQYJKoZIhvcNAQEB
+BQADggEPADCCAQoCggEBAMPAPH2y206qios2NMzlCNJv1mrwC1/8tH2HOqJGiYZB
+O7QOBRSvJsV++IozCB8ap99e8B64OOAQPOyykrdXd2axhftmMb1SFMF56eukHSuz
+KhKWRUgHs0UFRU51lDcBcOvphwJ+5SOgqrqKFFFBgJ0ZpcP54JpFwKIdh3ac10x2
+mBaW5ccqdv5X9oEMu1D/yivBmy34tsbLYyfttCjP76iVT7UVYHjHWynnIhsEyMsU
+gdM90NzrTlrvTSi/EcCD1W3+8b0f+G1TI5rhHbKwR0n/mv5QLFm7EABoYPhxS8bX
+B+9tE67yb0RyWbgvUiHySRynQLNMRpRx8Y9bA8uC8n8CAwEAAaOCA6QwggOgMAkG
+A1UdEwQCMAAwHwYDVR0jBBgwFoAUsxKJtalLNbwVAPCA6dh4h/ETfHYwcwYIKwYB
+BQUHAQEEZzBlMDcGCCsGAQUFBzAChitodHRwOi8vdHJ1c3QucXVvdmFkaXNnbG9i
+YWwuY29tL3F2c3NsZzMuY3J0MCoGCCsGAQUFBzABhh5odHRwOi8vb2NzcC5xdW92
+YWRpc2dsb2JhbC5jb20wgZ8GA1UdEQSBlzCBlIIdZGV2LmVuZXJneS5pbnNpZGUu
+dGVsc3RyYS5jb22CJXJlcG9ydHMuZGV2LmVuZXJneS5pbnNpZGUudGVsc3RyYS5j
+b22CJ2dyZWVuc3luYy5kZXYuZW5lcmd5Lmluc2lkZS50ZWxzdHJhLmNvbYIjbmdv
+c3MuZGV2LmVuZXJneS5pbnNpZGUudGVsc3RyYS5jb20wUQYDVR0gBEowSDBGBgwr
+BgEEAb5YAAJkAQEwNjA0BggrBgEFBQcCARYoaHR0cDovL3d3dy5xdW92YWRpc2ds
+b2JhbC5jb20vcmVwb3NpdG9yeTAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUH
+AwEwOgYDVR0fBDMwMTAvoC2gK4YpaHR0cDovL2NybC5xdW92YWRpc2dsb2JhbC5j
+b20vcXZzc2xnMy5jcmwwHQYDVR0OBBYEFEoJQRpPC/V5ZK3mMkszZE2v6vh+MA4G
+A1UdDwEB/wQEAwIFoDCCAXwGCisGAQQB1nkCBAIEggFsBIIBaAFmAHUAVhQGmi/X
+wuzT9eG9RLI+x0Z2ubyZEVzA75SYVdaJ0N0AAAFstk9Y+gAABAMARjBEAiBFMZa6
+O9iXVjy2kqQa54vgNFdU7shgFJJhm//fSAQZUAIgBIL/yPdh+XiuQS2xPhCzNYkh
+bxf7BbN4qUISESgiZpsAdgBvU3asMfAxGdiZAKRRFf93FRwR2QLBACkGjbIImjfZ
+EwAAAWy2T1nKAAAEAwBHMEUCIG0tp63jLsDsfCTDlcvV5ItjRkbUJBnkxlPdP2PH
+88sTAiEApgaPofVdn2hdI12iDDex72ta+9wpwQ1MxoaJn2nt+qEAdQDuS723dc5g
+uuFCaR+r4Z5mow9+X7By2IMAxHuJeqj9ywAAAWy2T1iJAAAEAwBGMEQCIE/mzEFp
+CJUc71jvwJa4Px86R3ZYK4mHmUlQAUZqd0ZkAiBdEmT8xxTuleSUlYHEkKCK/FZX
+L+vsYJpPrA9TsO5IsTANBgkqhkiG9w0BAQsFAAOCAgEApE9WLz3S8tqA9Dk3r9LF
+rJy8km9cBt1O9SQZwFsduGKGdF3Fd+/Y0V7UrFDzrX+NIzqcmgBHKxaIXorMBF70
+ajMaaROP2ymkpEXnruEwoR47fbW+JRAWDRm2xnouQveQX9ZcgCLbBvAWBqpndQj2
+DGmLJhNz5GlFBjh3PQZlU1w8hU7TrDxa7M1GMtVnk8X+o3l/MX9iPeEs+PiC4dHD
+hpj84RY1VQJz8+10rql47SB5YgbwcqaizTG4ax/OAv1JHNWtfAodIMX8Y8X00zoz
+A20LQv880jCCNANVNbrXJ3h4X3xwW/C1X9vYk0shymZJbT5u17JbPD1cy39bA7kT
+F4L7scdQRxvcqazYN4/IdgvgMji9OltiYufP88Ti8KB2tcl2accpiC5St/zllGD1
+hqEeYLMzjyvUKR/1uvURQQtc0DPvBRmvkB+aI4g+sLkTTFWj5bsA1vKU8SDCyMuB
+RQV11DId5+RNNCmWnskORUZJQssvY49pnfCxCES2nt3l/XzTzVtLYmd6G9uAqVac
+e2ibnmDrFVlmlyRsCiMfZl5/OTJzt7Cj3az59m5Syfw/lnS9YP82t/r/ufuKkO5Q
+q5a9aI8DuNNmAjR4lpIJNqIpX/y+dG2aGmx4XTc31MR9szWtiTgOHe0MkMupOAL0
+qkHrBgwo1zjuTMf3QOg6Z5Q=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIGFzCCA/+gAwIBAgIUftbnnMmtgcTIGT75XUQodw40ExcwDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjExMDYxNDUwMThaFw0y
+MjExMDYxNDUwMThaME0xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMSMwIQYDVQQDExpRdW9WYWRpcyBHbG9iYWwgU1NMIElDQSBHMzCCAiIw
+DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANf8Od17be6c6lTGJDhEXpmkTs4y
+Q39Rr5VJyBeWCg06nSS71s6xF3sZvKcV0MbXlXCYM2ZX7cNTbJ81gs7uDsKFp+vK
+EymiKyEiI2SImOtECNnSg+RVR4np/xz/UlC0yFUisH75cZsJ8T1pkGMfiEouR0EM
+7O0uFgoboRfUP582TTWy0F7ynSA6YfGKnKj0OFwZJmGHVkLs1VevWjhj3R1fsPan
+H05P5moePFnpQdj1FofoSxUHZ0c7VB+sUimboHm/uHNY1LOsk77qiSuVC5/yrdg3
+2EEfP/mxJYT4r/5UiD7VahySzeZHzZ2OibQm2AfgfMN3l57lCM3/WPQBhMAPS1jz
+kE+7MjajM2f0aZctimW4Hasrj8AQnfAdHqZehbhtXaAlffNEzCdpNK584oCTVR7N
+UR9iZFx83ruTqpo+GcLP/iSYqhM4g7fy45sNhU+IS+ca03zbxTl3TTlkofXunI5B
+xxE30eGSQpDZ5+iUJcEOAuVKrlYocFbB3KF45hwcbzPWQ1DcO2jFAapOtQzeS+MZ
+yZzT2YseJ8hQHKu8YrXZWwKaNfyl8kFkHUBDICowNEoZvBwRCQp8sgqL6YRZy0uD
+JGxmnC2e0BVKSjcIvmq/CRWH7yiTk9eWm73xrsg9iIyD/kwJEnLyIk8tR5V8p/hc
+1H2AjDrZH12PsZ45AgMBAAGjgfMwgfAwEgYDVR0TAQH/BAgwBgEB/wIBATARBgNV
+HSAECjAIMAYGBFUdIAAwOgYIKwYBBQUHAQEELjAsMCoGCCsGAQUFBzABhh5odHRw
+Oi8vb2NzcC5xdW92YWRpc2dsb2JhbC5jb20wDgYDVR0PAQH/BAQDAgEGMB8GA1Ud
+IwQYMBaAFO3nb3Zav2DsSVvGpXe7chZxm8Q9MDsGA1UdHwQ0MDIwMKAuoCyGKmh0
+dHA6Ly9jcmwucXVvdmFkaXNnbG9iYWwuY29tL3F2cmNhMmczLmNybDAdBgNVHQ4E
+FgQUsxKJtalLNbwVAPCA6dh4h/ETfHYwDQYJKoZIhvcNAQELBQADggIBAFGm1Fqp
+RMiKr7a6h707M+km36PVXZnX1NZocCn36MrfRvphotbOCDm+GmRkar9ZMGhc8c/A
+Vn7JSCjwF9jNOFIOUyNLq0w4luk+Pt2YFDbgF8IDdx53xIo8Gv05e9xpTvQYaIto
+qeHbQjGXfSGc91olfX6JUwZlxxbhdJH+rxTFAg0jcbqToJoScWTfXSr1QRcNbSTs
+Y4CPG6oULsnhVvrzgldGSK+DxFi2OKcDsOKkV7W4IGg8Do2L/M588AfBnV8ERzpl
+qgMBBQxC2+0N6RdFHbmZt0HQE/NIg1s0xcjGx1XW3YTOfje31rmAXKHOehm4Bu48
+gr8gePq5cdQ2W9tA0Dnytb9wzH2SyPPIXRI7yNxaX9H8wYeDeeiKSSmQtfh1v5cV
+7RXvm8F6hLJkkco/HOW3dAUwZFcKsUH+1eUJKLN18eDGwB8yGawjHvOKqcfg5Lf/
+TvC7hgcx7pDYaCCaqHaekgUwXbB2Enzqr1fdwoU1c01W5YuQAtAx5wk1bf34Yq/J
+ph7wNXGvo88N0/EfP9AdVGmJzy7VuRXeVAOyjKAIeADMlwpjBRhcbs9m3dkqvoMb
+SXKJxv/hFmNgEOvOlaFsXX1dbKg1v+C1AzKAFdiuAIa62JzASiEhigqNSdqdTsOh
+8W8hdONuKKpe9zKedhBFAvuxhDgKmnySglYc
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00
+MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf
+qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMW
+n4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym
+c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+
+O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1
+o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0j
+IaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq
+IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz
+8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh
+vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l
+7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG
+cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD
+ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66
+AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC
+roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0Ga
+W/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n
+lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE
++V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV
+csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtd
+dbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg
+KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeM
+HVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4
+WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M
+-----END CERTIFICATE-----
+
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.3.cert b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.3.cert
new file mode 100644
index 00000000..0c947b17
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.3.cert
@@ -0,0 +1,124 @@
+subject=/C=AU/ST=Victoria/L=Melbourne/O=Telstra Corporation Limited/OU=Telstra Energy/CN=dev.energy.inside.telstra.com
+issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Global SSL ICA G3
+-----BEGIN CERTIFICATE-----
+MIIIHTCCBgWgAwIBAgIUCqrrzSfjzaoyB3DOxst2kMxFp/MwDQYJKoZIhvcNAQEL
+BQAwTTELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxIzAh
+BgNVBAMTGlF1b1ZhZGlzIEdsb2JhbCBTU0wgSUNBIEczMB4XDTE5MDgyMTIyMjIy
+OFoXDTIxMDgyMTIyMzIwMFowgZsxCzAJBgNVBAYTAkFVMREwDwYDVQQIDAhWaWN0
+b3JpYTESMBAGA1UEBwwJTWVsYm91cm5lMSQwIgYDVQQKDBtUZWxzdHJhIENvcnBv
+cmF0aW9uIExpbWl0ZWQxFzAVBgNVBAsMDlRlbHN0cmEgRW5lcmd5MSYwJAYDVQQD
+DB1kZXYuZW5lcmd5Lmluc2lkZS50ZWxzdHJhLmNvbTCCASIwDQYJKoZIhvcNAQEB
+BQADggEPADCCAQoCggEBAMPAPH2y206qios2NMzlCNJv1mrwC1/8tH2HOqJGiYZB
+O7QOBRSvJsV++IozCB8ap99e8B64OOAQPOyykrdXd2axhftmMb1SFMF56eukHSuz
+KhKWRUgHs0UFRU51lDcBcOvphwJ+5SOgqrqKFFFBgJ0ZpcP54JpFwKIdh3ac10x2
+mBaW5ccqdv5X9oEMu1D/yivBmy34tsbLYyfttCjP76iVT7UVYHjHWynnIhsEyMsU
+gdM90NzrTlrvTSi/EcCD1W3+8b0f+G1TI5rhHbKwR0n/mv5QLFm7EABoYPhxS8bX
+B+9tE67yb0RyWbgvUiHySRynQLNMRpRx8Y9bA8uC8n8CAwEAAaOCA6QwggOgMAkG
+A1UdEwQCMAAwHwYDVR0jBBgwFoAUsxKJtalLNbwVAPCA6dh4h/ETfHYwcwYIKwYB
+BQUHAQEEZzBlMDcGCCsGAQUFBzAChitodHRwOi8vdHJ1c3QucXVvdmFkaXNnbG9i
+YWwuY29tL3F2c3NsZzMuY3J0MCoGCCsGAQUFBzABhh5odHRwOi8vb2NzcC5xdW92
+YWRpc2dsb2JhbC5jb20wgZ8GA1UdEQSBlzCBlIIdZGV2LmVuZXJneS5pbnNpZGUu
+dGVsc3RyYS5jb22CJXJlcG9ydHMuZGV2LmVuZXJneS5pbnNpZGUudGVsc3RyYS5j
+b22CJ2dyZWVuc3luYy5kZXYuZW5lcmd5Lmluc2lkZS50ZWxzdHJhLmNvbYIjbmdv
+c3MuZGV2LmVuZXJneS5pbnNpZGUudGVsc3RyYS5jb20wUQYDVR0gBEowSDBGBgwr
+BgEEAb5YAAJkAQEwNjA0BggrBgEFBQcCARYoaHR0cDovL3d3dy5xdW92YWRpc2ds
+b2JhbC5jb20vcmVwb3NpdG9yeTAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUH
+AwEwOgYDVR0fBDMwMTAvoC2gK4YpaHR0cDovL2NybC5xdW92YWRpc2dsb2JhbC5j
+b20vcXZzc2xnMy5jcmwwHQYDVR0OBBYEFEoJQRpPC/V5ZK3mMkszZE2v6vh+MA4G
+A1UdDwEB/wQEAwIFoDCCAXwGCisGAQQB1nkCBAIEggFsBIIBaAFmAHUAVhQGmi/X
+wuzT9eG9RLI+x0Z2ubyZEVzA75SYVdaJ0N0AAAFstk9Y+gAABAMARjBEAiBFMZa6
+O9iXVjy2kqQa54vgNFdU7shgFJJhm//fSAQZUAIgBIL/yPdh+XiuQS2xPhCzNYkh
+bxf7BbN4qUISESgiZpsAdgBvU3asMfAxGdiZAKRRFf93FRwR2QLBACkGjbIImjfZ
+EwAAAWy2T1nKAAAEAwBHMEUCIG0tp63jLsDsfCTDlcvV5ItjRkbUJBnkxlPdP2PH
+88sTAiEApgaPofVdn2hdI12iDDex72ta+9wpwQ1MxoaJn2nt+qEAdQDuS723dc5g
+uuFCaR+r4Z5mow9+X7By2IMAxHuJeqj9ywAAAWy2T1iJAAAEAwBGMEQCIE/mzEFp
+CJUc71jvwJa4Px86R3ZYK4mHmUlQAUZqd0ZkAiBdEmT8xxTuleSUlYHEkKCK/FZX
+L+vsYJpPrA9TsO5IsTANBgkqhkiG9w0BAQsFAAOCAgEApE9WLz3S8tqA9Dk3r9LF
+rJy8km9cBt1O9SQZwFsduGKGdF3Fd+/Y0V7UrFDzrX+NIzqcmgBHKxaIXorMBF70
+ajMaaROP2ymkpEXnruEwoR47fbW+JRAWDRm2xnouQveQX9ZcgCLbBvAWBqpndQj2
+DGmLJhNz5GlFBjh3PQZlU1w8hU7TrDxa7M1GMtVnk8X+o3l/MX9iPeEs+PiC4dHD
+hpj84RY1VQJz8+10rql47SB5YgbwcqaizTG4ax/OAv1JHNWtfAodIMX8Y8X00zoz
+A20LQv880jCCNANVNbrXJ3h4X3xwW/C1X9vYk0shymZJbT5u17JbPD1cy39bA7kT
+F4L7scdQRxvcqazYN4/IdgvgMji9OltiYufP88Ti8KB2tcl2accpiC5St/zllGD1
+hqEeYLMzjyvUKR/1uvURQQtc0DPvBRmvkB+aI4g+sLkTTFWj5bsA1vKU8SDCyMuB
+RQV11DId5+RNNCmWnskORUZJQssvY49pnfCxCES2nt3l/XzTzVtLYmd6G9uAqVac
+e2ibnmDrFVlmlyRsCiMfZl5/OTJzt7Cj3az59m5Syfw/lnS9YP82t/r/ufuKkO5Q
+q5a9aI8DuNNmAjR4lpIJNqIpX/y+dG2aGmx4XTc31MR9szWtiTgOHe0MkMupOAL0
+qkHrBgwo1zjuTMf3QOg6Z5Q=
+-----END CERTIFICATE-----
+
+
+subject=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3
+issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00
+MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf
+qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMW
+n4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym
+c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+
+O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1
+o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0j
+IaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq
+IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz
+8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh
+vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l
+7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG
+cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD
+ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66
+AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC
+roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0Ga
+W/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n
+lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE
++V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV
+csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtd
+dbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg
+KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeM
+HVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4
+WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M
+-----END CERTIFICATE-----
+
+
+
+
+subject=/C=BM/O=QuoVadis Limited/CN=QuoVadis Global SSL ICA G3
+issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3
+-----BEGIN CERTIFICATE-----
+MIIGFzCCA/+gAwIBAgIUftbnnMmtgcTIGT75XUQodw40ExcwDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjExMDYxNDUwMThaFw0y
+MjExMDYxNDUwMThaME0xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMSMwIQYDVQQDExpRdW9WYWRpcyBHbG9iYWwgU1NMIElDQSBHMzCCAiIw
+DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANf8Od17be6c6lTGJDhEXpmkTs4y
+Q39Rr5VJyBeWCg06nSS71s6xF3sZvKcV0MbXlXCYM2ZX7cNTbJ81gs7uDsKFp+vK
+EymiKyEiI2SImOtECNnSg+RVR4np/xz/UlC0yFUisH75cZsJ8T1pkGMfiEouR0EM
+7O0uFgoboRfUP582TTWy0F7ynSA6YfGKnKj0OFwZJmGHVkLs1VevWjhj3R1fsPan
+H05P5moePFnpQdj1FofoSxUHZ0c7VB+sUimboHm/uHNY1LOsk77qiSuVC5/yrdg3
+2EEfP/mxJYT4r/5UiD7VahySzeZHzZ2OibQm2AfgfMN3l57lCM3/WPQBhMAPS1jz
+kE+7MjajM2f0aZctimW4Hasrj8AQnfAdHqZehbhtXaAlffNEzCdpNK584oCTVR7N
+UR9iZFx83ruTqpo+GcLP/iSYqhM4g7fy45sNhU+IS+ca03zbxTl3TTlkofXunI5B
+xxE30eGSQpDZ5+iUJcEOAuVKrlYocFbB3KF45hwcbzPWQ1DcO2jFAapOtQzeS+MZ
+yZzT2YseJ8hQHKu8YrXZWwKaNfyl8kFkHUBDICowNEoZvBwRCQp8sgqL6YRZy0uD
+JGxmnC2e0BVKSjcIvmq/CRWH7yiTk9eWm73xrsg9iIyD/kwJEnLyIk8tR5V8p/hc
+1H2AjDrZH12PsZ45AgMBAAGjgfMwgfAwEgYDVR0TAQH/BAgwBgEB/wIBATARBgNV
+HSAECjAIMAYGBFUdIAAwOgYIKwYBBQUHAQEELjAsMCoGCCsGAQUFBzABhh5odHRw
+Oi8vb2NzcC5xdW92YWRpc2dsb2JhbC5jb20wDgYDVR0PAQH/BAQDAgEGMB8GA1Ud
+IwQYMBaAFO3nb3Zav2DsSVvGpXe7chZxm8Q9MDsGA1UdHwQ0MDIwMKAuoCyGKmh0
+dHA6Ly9jcmwucXVvdmFkaXNnbG9iYWwuY29tL3F2cmNhMmczLmNybDAdBgNVHQ4E
+FgQUsxKJtalLNbwVAPCA6dh4h/ETfHYwDQYJKoZIhvcNAQELBQADggIBAFGm1Fqp
+RMiKr7a6h707M+km36PVXZnX1NZocCn36MrfRvphotbOCDm+GmRkar9ZMGhc8c/A
+Vn7JSCjwF9jNOFIOUyNLq0w4luk+Pt2YFDbgF8IDdx53xIo8Gv05e9xpTvQYaIto
+qeHbQjGXfSGc91olfX6JUwZlxxbhdJH+rxTFAg0jcbqToJoScWTfXSr1QRcNbSTs
+Y4CPG6oULsnhVvrzgldGSK+DxFi2OKcDsOKkV7W4IGg8Do2L/M588AfBnV8ERzpl
+qgMBBQxC2+0N6RdFHbmZt0HQE/NIg1s0xcjGx1XW3YTOfje31rmAXKHOehm4Bu48
+gr8gePq5cdQ2W9tA0Dnytb9wzH2SyPPIXRI7yNxaX9H8wYeDeeiKSSmQtfh1v5cV
+7RXvm8F6hLJkkco/HOW3dAUwZFcKsUH+1eUJKLN18eDGwB8yGawjHvOKqcfg5Lf/
+TvC7hgcx7pDYaCCaqHaekgUwXbB2Enzqr1fdwoU1c01W5YuQAtAx5wk1bf34Yq/J
+ph7wNXGvo88N0/EfP9AdVGmJzy7VuRXeVAOyjKAIeADMlwpjBRhcbs9m3dkqvoMb
+SXKJxv/hFmNgEOvOlaFsXX1dbKg1v+C1AzKAFdiuAIa62JzASiEhigqNSdqdTsOh
+8W8hdONuKKpe9zKedhBFAvuxhDgKmnySglYc
+-----END CERTIFICATE-----
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.4.cert b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.4.cert
new file mode 100644
index 00000000..adbb8edc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-1.4.cert
@@ -0,0 +1,86 @@
+subject=/C=AU/ST=Victoria/L=Melbourne/O=Telstra Corporation Limited/OU=Telstra Energy/CN=dev.energy.inside.telstra.com
+issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Global SSL ICA G3
+-----BEGIN CERTIFICATE-----
+MIIIHTCCBgWgAwIBAgIUCqrrzSfjzaoyB3DOxst2kMxFp/MwDQYJKoZIhvcNAQEL
+BQAwTTELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxIzAh
+BgNVBAMTGlF1b1ZhZGlzIEdsb2JhbCBTU0wgSUNBIEczMB4XDTE5MDgyMTIyMjIy
+OFoXDTIxMDgyMTIyMzIwMFowgZsxCzAJBgNVBAYTAkFVMREwDwYDVQQIDAhWaWN0
+b3JpYTESMBAGA1UEBwwJTWVsYm91cm5lMSQwIgYDVQQKDBtUZWxzdHJhIENvcnBv
+cmF0aW9uIExpbWl0ZWQxFzAVBgNVBAsMDlRlbHN0cmEgRW5lcmd5MSYwJAYDVQQD
+DB1kZXYuZW5lcmd5Lmluc2lkZS50ZWxzdHJhLmNvbTCCASIwDQYJKoZIhvcNAQEB
+BQADggEPADCCAQoCggEBAMPAPH2y206qios2NMzlCNJv1mrwC1/8tH2HOqJGiYZB
+O7QOBRSvJsV++IozCB8ap99e8B64OOAQPOyykrdXd2axhftmMb1SFMF56eukHSuz
+KhKWRUgHs0UFRU51lDcBcOvphwJ+5SOgqrqKFFFBgJ0ZpcP54JpFwKIdh3ac10x2
+mBaW5ccqdv5X9oEMu1D/yivBmy34tsbLYyfttCjP76iVT7UVYHjHWynnIhsEyMsU
+gdM90NzrTlrvTSi/EcCD1W3+8b0f+G1TI5rhHbKwR0n/mv5QLFm7EABoYPhxS8bX
+B+9tE67yb0RyWbgvUiHySRynQLNMRpRx8Y9bA8uC8n8CAwEAAaOCA6QwggOgMAkG
+A1UdEwQCMAAwHwYDVR0jBBgwFoAUsxKJtalLNbwVAPCA6dh4h/ETfHYwcwYIKwYB
+BQUHAQEEZzBlMDcGCCsGAQUFBzAChitodHRwOi8vdHJ1c3QucXVvdmFkaXNnbG9i
+YWwuY29tL3F2c3NsZzMuY3J0MCoGCCsGAQUFBzABhh5odHRwOi8vb2NzcC5xdW92
+YWRpc2dsb2JhbC5jb20wgZ8GA1UdEQSBlzCBlIIdZGV2LmVuZXJneS5pbnNpZGUu
+dGVsc3RyYS5jb22CJXJlcG9ydHMuZGV2LmVuZXJneS5pbnNpZGUudGVsc3RyYS5j
+b22CJ2dyZWVuc3luYy5kZXYuZW5lcmd5Lmluc2lkZS50ZWxzdHJhLmNvbYIjbmdv
+c3MuZGV2LmVuZXJneS5pbnNpZGUudGVsc3RyYS5jb20wUQYDVR0gBEowSDBGBgwr
+BgEEAb5YAAJkAQEwNjA0BggrBgEFBQcCARYoaHR0cDovL3d3dy5xdW92YWRpc2ds
+b2JhbC5jb20vcmVwb3NpdG9yeTAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUH
+AwEwOgYDVR0fBDMwMTAvoC2gK4YpaHR0cDovL2NybC5xdW92YWRpc2dsb2JhbC5j
+b20vcXZzc2xnMy5jcmwwHQYDVR0OBBYEFEoJQRpPC/V5ZK3mMkszZE2v6vh+MA4G
+A1UdDwEB/wQEAwIFoDCCAXwGCisGAQQB1nkCBAIEggFsBIIBaAFmAHUAVhQGmi/X
+wuzT9eG9RLI+x0Z2ubyZEVzA75SYVdaJ0N0AAAFstk9Y+gAABAMARjBEAiBFMZa6
+O9iXVjy2kqQa54vgNFdU7shgFJJhm//fSAQZUAIgBIL/yPdh+XiuQS2xPhCzNYkh
+bxf7BbN4qUISESgiZpsAdgBvU3asMfAxGdiZAKRRFf93FRwR2QLBACkGjbIImjfZ
+EwAAAWy2T1nKAAAEAwBHMEUCIG0tp63jLsDsfCTDlcvV5ItjRkbUJBnkxlPdP2PH
+88sTAiEApgaPofVdn2hdI12iDDex72ta+9wpwQ1MxoaJn2nt+qEAdQDuS723dc5g
+uuFCaR+r4Z5mow9+X7By2IMAxHuJeqj9ywAAAWy2T1iJAAAEAwBGMEQCIE/mzEFp
+CJUc71jvwJa4Px86R3ZYK4mHmUlQAUZqd0ZkAiBdEmT8xxTuleSUlYHEkKCK/FZX
+L+vsYJpPrA9TsO5IsTANBgkqhkiG9w0BAQsFAAOCAgEApE9WLz3S8tqA9Dk3r9LF
+rJy8km9cBt1O9SQZwFsduGKGdF3Fd+/Y0V7UrFDzrX+NIzqcmgBHKxaIXorMBF70
+ajMaaROP2ymkpEXnruEwoR47fbW+JRAWDRm2xnouQveQX9ZcgCLbBvAWBqpndQj2
+DGmLJhNz5GlFBjh3PQZlU1w8hU7TrDxa7M1GMtVnk8X+o3l/MX9iPeEs+PiC4dHD
+hpj84RY1VQJz8+10rql47SB5YgbwcqaizTG4ax/OAv1JHNWtfAodIMX8Y8X00zoz
+A20LQv880jCCNANVNbrXJ3h4X3xwW/C1X9vYk0shymZJbT5u17JbPD1cy39bA7kT
+F4L7scdQRxvcqazYN4/IdgvgMji9OltiYufP88Ti8KB2tcl2accpiC5St/zllGD1
+hqEeYLMzjyvUKR/1uvURQQtc0DPvBRmvkB+aI4g+sLkTTFWj5bsA1vKU8SDCyMuB
+RQV11DId5+RNNCmWnskORUZJQssvY49pnfCxCES2nt3l/XzTzVtLYmd6G9uAqVac
+e2ibnmDrFVlmlyRsCiMfZl5/OTJzt7Cj3az59m5Syfw/lnS9YP82t/r/ufuKkO5Q
+q5a9aI8DuNNmAjR4lpIJNqIpX/y+dG2aGmx4XTc31MR9szWtiTgOHe0MkMupOAL0
+qkHrBgwo1zjuTMf3QOg6Z5Q=
+-----END CERTIFICATE-----
+
+subject=/C=BM/O=QuoVadis Limited/CN=QuoVadis Global SSL ICA G3
+issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3
+-----BEGIN CERTIFICATE-----
+MIIGFzCCA/+gAwIBAgIUftbnnMmtgcTIGT75XUQodw40ExcwDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjExMDYxNDUwMThaFw0y
+MjExMDYxNDUwMThaME0xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMSMwIQYDVQQDExpRdW9WYWRpcyBHbG9iYWwgU1NMIElDQSBHMzCCAiIw
+DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANf8Od17be6c6lTGJDhEXpmkTs4y
+Q39Rr5VJyBeWCg06nSS71s6xF3sZvKcV0MbXlXCYM2ZX7cNTbJ81gs7uDsKFp+vK
+EymiKyEiI2SImOtECNnSg+RVR4np/xz/UlC0yFUisH75cZsJ8T1pkGMfiEouR0EM
+7O0uFgoboRfUP582TTWy0F7ynSA6YfGKnKj0OFwZJmGHVkLs1VevWjhj3R1fsPan
+H05P5moePFnpQdj1FofoSxUHZ0c7VB+sUimboHm/uHNY1LOsk77qiSuVC5/yrdg3
+2EEfP/mxJYT4r/5UiD7VahySzeZHzZ2OibQm2AfgfMN3l57lCM3/WPQBhMAPS1jz
+kE+7MjajM2f0aZctimW4Hasrj8AQnfAdHqZehbhtXaAlffNEzCdpNK584oCTVR7N
+UR9iZFx83ruTqpo+GcLP/iSYqhM4g7fy45sNhU+IS+ca03zbxTl3TTlkofXunI5B
+xxE30eGSQpDZ5+iUJcEOAuVKrlYocFbB3KF45hwcbzPWQ1DcO2jFAapOtQzeS+MZ
+yZzT2YseJ8hQHKu8YrXZWwKaNfyl8kFkHUBDICowNEoZvBwRCQp8sgqL6YRZy0uD
+JGxmnC2e0BVKSjcIvmq/CRWH7yiTk9eWm73xrsg9iIyD/kwJEnLyIk8tR5V8p/hc
+1H2AjDrZH12PsZ45AgMBAAGjgfMwgfAwEgYDVR0TAQH/BAgwBgEB/wIBATARBgNV
+HSAECjAIMAYGBFUdIAAwOgYIKwYBBQUHAQEELjAsMCoGCCsGAQUFBzABhh5odHRw
+Oi8vb2NzcC5xdW92YWRpc2dsb2JhbC5jb20wDgYDVR0PAQH/BAQDAgEGMB8GA1Ud
+IwQYMBaAFO3nb3Zav2DsSVvGpXe7chZxm8Q9MDsGA1UdHwQ0MDIwMKAuoCyGKmh0
+dHA6Ly9jcmwucXVvdmFkaXNnbG9iYWwuY29tL3F2cmNhMmczLmNybDAdBgNVHQ4E
+FgQUsxKJtalLNbwVAPCA6dh4h/ETfHYwDQYJKoZIhvcNAQELBQADggIBAFGm1Fqp
+RMiKr7a6h707M+km36PVXZnX1NZocCn36MrfRvphotbOCDm+GmRkar9ZMGhc8c/A
+Vn7JSCjwF9jNOFIOUyNLq0w4luk+Pt2YFDbgF8IDdx53xIo8Gv05e9xpTvQYaIto
+qeHbQjGXfSGc91olfX6JUwZlxxbhdJH+rxTFAg0jcbqToJoScWTfXSr1QRcNbSTs
+Y4CPG6oULsnhVvrzgldGSK+DxFi2OKcDsOKkV7W4IGg8Do2L/M588AfBnV8ERzpl
+qgMBBQxC2+0N6RdFHbmZt0HQE/NIg1s0xcjGx1XW3YTOfje31rmAXKHOehm4Bu48
+gr8gePq5cdQ2W9tA0Dnytb9wzH2SyPPIXRI7yNxaX9H8wYeDeeiKSSmQtfh1v5cV
+7RXvm8F6hLJkkco/HOW3dAUwZFcKsUH+1eUJKLN18eDGwB8yGawjHvOKqcfg5Lf/
+TvC7hgcx7pDYaCCaqHaekgUwXbB2Enzqr1fdwoU1c01W5YuQAtAx5wk1bf34Yq/J
+ph7wNXGvo88N0/EfP9AdVGmJzy7VuRXeVAOyjKAIeADMlwpjBRhcbs9m3dkqvoMb
+SXKJxv/hFmNgEOvOlaFsXX1dbKg1v+C1AzKAFdiuAIa62JzASiEhigqNSdqdTsOh
+8W8hdONuKKpe9zKedhBFAvuxhDgKmnySglYc
+-----END CERTIFICATE-----
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-4.cert b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-4.cert
new file mode 100644
index 00000000..2b82edf6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/chain-4.cert
@@ -0,0 +1,121 @@
+subject=/C=AU/ST=Victoria/L=Melbourne/O=Telstra Corporation Limited/OU=Telstra Energy/CN=prod.energy.inside.telstra.com
+issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Global SSL ICA G3
+-----BEGIN CERTIFICATE-----
+MIIIJDCCBgygAwIBAgIUP9S/56XvOFzWk1vp1+7JJT17brEwDQYJKoZIhvcNAQEL
+BQAwTTELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxIzAh
+BgNVBAMTGlF1b1ZhZGlzIEdsb2JhbCBTU0wgSUNBIEczMB4XDTE5MDgyNzAzMTU1
+NFoXDTIxMDgyNzAzMjUwMFowgZwxCzAJBgNVBAYTAkFVMREwDwYDVQQIDAhWaWN0
+b3JpYTESMBAGA1UEBwwJTWVsYm91cm5lMSQwIgYDVQQKDBtUZWxzdHJhIENvcnBv
+cmF0aW9uIExpbWl0ZWQxFzAVBgNVBAsMDlRlbHN0cmEgRW5lcmd5MScwJQYDVQQD
+DB5wcm9kLmVuZXJneS5pbnNpZGUudGVsc3RyYS5jb20wggEiMA0GCSqGSIb3DQEB
+AQUAA4IBDwAwggEKAoIBAQCrRouNZFOZwM1qyAU6v6ag9fzSx3y8zz36nR8HuqbA
+/wqrbMmnpofwdx/9u1bilsHfJzIODv0hm7aGk+neTK3DIapiII3m0HKW0v+GLsl7
+JkDuc2o3XlakcXlA45qDKCZXbXZtY4/kdxKG0OSUZi7oQqohhYl/c/ojrTiey+4G
+KhEVqWwOuQ1OC1DRw4qMH54d0koFxxSLPJ8JiiztLlK/e9n8BoJikj5fBqWy5R1F
+bGXCdzjcfmPV6iSOzJShpUgj4ga91mO6j3S6LLfK5ibbTlY+pmUxUT+m9nKMon3h
+mFptTYo9t9vUF/a/owjRxNLg01fJLNjYn8QV2vQvODGfAgMBAAGjggOqMIIDpjAJ
+BgNVHRMEAjAAMB8GA1UdIwQYMBaAFLMSibWpSzW8FQDwgOnYeIfxE3x2MHMGCCsG
+AQUFBwEBBGcwZTA3BggrBgEFBQcwAoYraHR0cDovL3RydXN0LnF1b3ZhZGlzZ2xv
+YmFsLmNvbS9xdnNzbGczLmNydDAqBggrBgEFBQcwAYYeaHR0cDovL29jc3AucXVv
+dmFkaXNnbG9iYWwuY29tMIGjBgNVHREEgZswgZiCHnByb2QuZW5lcmd5Lmluc2lk
+ZS50ZWxzdHJhLmNvbYImcmVwb3J0cy5wcm9kLmVuZXJneS5pbnNpZGUudGVsc3Ry
+YS5jb22CKGdyZWVuc3luYy5wcm9kLmVuZXJneS5pbnNpZGUudGVsc3RyYS5jb22C
+JG5nb3NzLnByb2QuZW5lcmd5Lmluc2lkZS50ZWxzdHJhLmNvbTBRBgNVHSAESjBI
+MEYGDCsGAQQBvlgAAmQBATA2MDQGCCsGAQUFBwIBFihodHRwOi8vd3d3LnF1b3Zh
+ZGlzZ2xvYmFsLmNvbS9yZXBvc2l0b3J5MB0GA1UdJQQWMBQGCCsGAQUFBwMCBggr
+BgEFBQcDATA6BgNVHR8EMzAxMC+gLaArhilodHRwOi8vY3JsLnF1b3ZhZGlzZ2xv
+YmFsLmNvbS9xdnNzbGczLmNybDAdBgNVHQ4EFgQUoIME5TykVAI8VF5g0zeh0xdv
+i3owDgYDVR0PAQH/BAQDAgWgMIIBfgYKKwYBBAHWeQIEAgSCAW4EggFqAWgAdgBW
+FAaaL9fC7NP14b1Esj7HRna5vJkRXMDvlJhV1onQ3QAAAWzRG8r0AAAEAwBHMEUC
+IQDShuQyYMiy7KKxWOzffolVIcPRgWD7ClNEbIcUATHKyQIgXnTZBXcpcbXBQXLs
+tFuvY36TbKIYc2ql2nmdydGQ9wcAdgCkuQmQtBhYFIe7E6LMZ3AKPDWYBPkb37jj
+d80OyA3cEAAAAWzRG8sAAAAEAwBHMEUCIGsLEoA9S7pNE3VoNZHxl2IAdeP3Dy2Q
+Mk0rM46hp6CRAiEA08rOjswSdcn7qgDEoiyvlcrOTIFJAEcMlxSY65yLVUwAdgBV
+gdTCFpA2AUrqC5tXPFPwwOQ4eHAlCBcvo6odBxPTDAAAAWzRG8q7AAAEAwBHMEUC
+IAkVCcTFG8MBDI58JKIhMlPbzkdrKnYY3Kp9KqWuTAvMAiEAipeI7RCLBk8+T/p+
+gY7+vtFZxKDthcJMUpZz7qmica0wDQYJKoZIhvcNAQELBQADggIBAESe0U1qArxL
+F2uk65q6x6HBcZuSocpceokzcUBv07Kxs6UJU9ybTbl8VYPuC+OUdpvut1kOJCJm
+1TRrr5KMh+9as42xkbKRZnh5TQt7aHmVcLHLfA4x0UrELfNX3fVTDxwDAPAhE5oM
+0w+d1foLakh7dXKKSxobEI3KRwFp19iuZeIqwI8XMWMr9ajhTC0T7D2QvKotpNBS
+sNDHiIE3IXoa9o7UiOG8IfW0wAt7CEygv0F7ctHRTcQSP/SJIGYOUZ7uotULVL5i
+elG31Y83Jx3sPNCy4IZfCip6Gw7MgsN2CZGApqi49edSqDWyRIfmCeXtMc7XI7Md
+kqqWxbqGGTdYJCucoGqahqRR+BI9anEqTD9T5Gy0TpCi2pgp1i7czza71nfz0PcN
+R0pw/1lqb9AqmJ2XELpBpo82B9XGple9thpincai7jPk3ezY5eEvDTmkHRlUFCp8
+8M66Ga19hZTgnHPWDKZYZzuZ7Lcl2WbapFOYYHJggSpBRy4GkH6eTSkUB9G9k8vU
+gbvtS7sR5ggecbCBu0M4TWYmnUojR8UXtr0oOTlXysTHVGs5Tx9ChhOLyUqhX8tM
+1zSDT8JJvbbw4RqpGzBKTNaO5nxRLgKVQOQdM8f1kjMr9/U58Lc4UiaTkJM14VfK
+8GfV8+K/vRCBtME53ILvm1l18jtakG3c
+-----END CERTIFICATE-----
+
+subject=/C=BM/O=QuoVadis Limited/CN=QuoVadis Global SSL ICA G3
+issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3
+-----BEGIN CERTIFICATE-----
+MIIGFzCCA/+gAwIBAgIUftbnnMmtgcTIGT75XUQodw40ExcwDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjExMDYxNDUwMThaFw0y
+MjExMDYxNDUwMThaME0xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMSMwIQYDVQQDExpRdW9WYWRpcyBHbG9iYWwgU1NMIElDQSBHMzCCAiIw
+DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANf8Od17be6c6lTGJDhEXpmkTs4y
+Q39Rr5VJyBeWCg06nSS71s6xF3sZvKcV0MbXlXCYM2ZX7cNTbJ81gs7uDsKFp+vK
+EymiKyEiI2SImOtECNnSg+RVR4np/xz/UlC0yFUisH75cZsJ8T1pkGMfiEouR0EM
+7O0uFgoboRfUP582TTWy0F7ynSA6YfGKnKj0OFwZJmGHVkLs1VevWjhj3R1fsPan
+H05P5moePFnpQdj1FofoSxUHZ0c7VB+sUimboHm/uHNY1LOsk77qiSuVC5/yrdg3
+2EEfP/mxJYT4r/5UiD7VahySzeZHzZ2OibQm2AfgfMN3l57lCM3/WPQBhMAPS1jz
+kE+7MjajM2f0aZctimW4Hasrj8AQnfAdHqZehbhtXaAlffNEzCdpNK584oCTVR7N
+UR9iZFx83ruTqpo+GcLP/iSYqhM4g7fy45sNhU+IS+ca03zbxTl3TTlkofXunI5B
+xxE30eGSQpDZ5+iUJcEOAuVKrlYocFbB3KF45hwcbzPWQ1DcO2jFAapOtQzeS+MZ
+yZzT2YseJ8hQHKu8YrXZWwKaNfyl8kFkHUBDICowNEoZvBwRCQp8sgqL6YRZy0uD
+JGxmnC2e0BVKSjcIvmq/CRWH7yiTk9eWm73xrsg9iIyD/kwJEnLyIk8tR5V8p/hc
+1H2AjDrZH12PsZ45AgMBAAGjgfMwgfAwEgYDVR0TAQH/BAgwBgEB/wIBATARBgNV
+HSAECjAIMAYGBFUdIAAwOgYIKwYBBQUHAQEELjAsMCoGCCsGAQUFBzABhh5odHRw
+Oi8vb2NzcC5xdW92YWRpc2dsb2JhbC5jb20wDgYDVR0PAQH/BAQDAgEGMB8GA1Ud
+IwQYMBaAFO3nb3Zav2DsSVvGpXe7chZxm8Q9MDsGA1UdHwQ0MDIwMKAuoCyGKmh0
+dHA6Ly9jcmwucXVvdmFkaXNnbG9iYWwuY29tL3F2cmNhMmczLmNybDAdBgNVHQ4E
+FgQUsxKJtalLNbwVAPCA6dh4h/ETfHYwDQYJKoZIhvcNAQELBQADggIBAFGm1Fqp
+RMiKr7a6h707M+km36PVXZnX1NZocCn36MrfRvphotbOCDm+GmRkar9ZMGhc8c/A
+Vn7JSCjwF9jNOFIOUyNLq0w4luk+Pt2YFDbgF8IDdx53xIo8Gv05e9xpTvQYaIto
+qeHbQjGXfSGc91olfX6JUwZlxxbhdJH+rxTFAg0jcbqToJoScWTfXSr1QRcNbSTs
+Y4CPG6oULsnhVvrzgldGSK+DxFi2OKcDsOKkV7W4IGg8Do2L/M588AfBnV8ERzpl
+qgMBBQxC2+0N6RdFHbmZt0HQE/NIg1s0xcjGx1XW3YTOfje31rmAXKHOehm4Bu48
+gr8gePq5cdQ2W9tA0Dnytb9wzH2SyPPIXRI7yNxaX9H8wYeDeeiKSSmQtfh1v5cV
+7RXvm8F6hLJkkco/HOW3dAUwZFcKsUH+1eUJKLN18eDGwB8yGawjHvOKqcfg5Lf/
+TvC7hgcx7pDYaCCaqHaekgUwXbB2Enzqr1fdwoU1c01W5YuQAtAx5wk1bf34Yq/J
+ph7wNXGvo88N0/EfP9AdVGmJzy7VuRXeVAOyjKAIeADMlwpjBRhcbs9m3dkqvoMb
+SXKJxv/hFmNgEOvOlaFsXX1dbKg1v+C1AzKAFdiuAIa62JzASiEhigqNSdqdTsOh
+8W8hdONuKKpe9zKedhBFAvuxhDgKmnySglYc
+-----END CERTIFICATE-----
+
+subject=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3
+issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00
+MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf
+qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMW
+n4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym
+c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+
+O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1
+o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0j
+IaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq
+IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz
+8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh
+vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l
+7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG
+cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD
+ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66
+AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC
+roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0Ga
+W/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n
+lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE
++V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV
+csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtd
+dbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg
+KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeM
+HVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4
+WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M
+-----END CERTIFICATE-----
+
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/simple-chain-a.cert b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/simple-chain-a.cert
new file mode 100644
index 00000000..1d9bbe21
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/simple-chain-a.cert
@@ -0,0 +1,18 @@
+subject=/C=AU/ST=Victoria/L=Melbourne/O=Telstra Corporation Limited/OU=Telstra Energy/CN=dev.energy.inside.telstra.com
+issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Global SSL ICA G3
+-----BEGIN CERTIFICATE-----
+aaa
+-----END CERTIFICATE-----
+
+subject=/C=BM/O=QuoVadis Limited/CN=QuoVadis Global SSL ICA G3
+issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3
+-----BEGIN CERTIFICATE-----
+bbb
+-----END CERTIFICATE-----
+
+subject=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3
+issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3
+-----BEGIN CERTIFICATE-----
+ccc
+-----END CERTIFICATE-----
+
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/simple-chain-b.cert b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/simple-chain-b.cert
new file mode 100644
index 00000000..1d9bbe21
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/simple-chain-b.cert
@@ -0,0 +1,18 @@
+subject=/C=AU/ST=Victoria/L=Melbourne/O=Telstra Corporation Limited/OU=Telstra Energy/CN=dev.energy.inside.telstra.com
+issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Global SSL ICA G3
+-----BEGIN CERTIFICATE-----
+aaa
+-----END CERTIFICATE-----
+
+subject=/C=BM/O=QuoVadis Limited/CN=QuoVadis Global SSL ICA G3
+issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3
+-----BEGIN CERTIFICATE-----
+bbb
+-----END CERTIFICATE-----
+
+subject=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3
+issuer=/C=BM/O=QuoVadis Limited/CN=QuoVadis Root CA 2 G3
+-----BEGIN CERTIFICATE-----
+ccc
+-----END CERTIFICATE-----
+
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/thezip.zip b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/thezip.zip
new file mode 100644
index 00000000..6eaefdd5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/thezip.zip
Binary files differ
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/.gitkeep b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/.gitkeep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/.gitkeep
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/__init__.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/__init__.py
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/__init__.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/__init__.py
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.CreateStack_1.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.CreateStack_1.json
new file mode 100644
index 00000000..36f1489b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.CreateStack_1.json
@@ -0,0 +1,17 @@
+{
+ "status_code": 200,
+ "data": {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 200,
+ "RequestId": "03fbfc36-b5d0-11e7-ae09-550cfe4b2358",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "03fbfc36-b5d0-11e7-ae09-550cfe4b2358",
+ "date": "Fri, 20 Oct 2017 19:51:07 GMT",
+ "content-length": "393",
+ "content-type": "text/xml"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DeleteStack_1.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DeleteStack_1.json
new file mode 100644
index 00000000..d526155a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DeleteStack_1.json
@@ -0,0 +1,16 @@
+{
+ "status_code": 200,
+ "data": {
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 200,
+ "RequestId": "170d1e02-b5d0-11e7-ae09-550cfe4b2358",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "170d1e02-b5d0-11e7-ae09-550cfe4b2358",
+ "date": "Fri, 20 Oct 2017 19:51:39 GMT",
+ "content-length": "212",
+ "content-type": "text/xml"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_1.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_1.json
new file mode 100644
index 00000000..3758c77b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_1.json
@@ -0,0 +1,38 @@
+{
+ "status_code": 200,
+ "data": {
+ "StackEvents": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "04032730-b5d0-11e7-86b8-503ac93168c5",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 8,
+ "microsecond": 324000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "ResourceStatusReason": "User Initiated",
+ "StackName": "ansible-test-basic-yaml",
+ "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "LogicalResourceId": "ansible-test-basic-yaml"
+ }
+ ],
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 200,
+ "RequestId": "043d4a05-b5d0-11e7-ae09-550cfe4b2358",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "043d4a05-b5d0-11e7-ae09-550cfe4b2358",
+ "date": "Fri, 20 Oct 2017 19:51:08 GMT",
+ "content-length": "1183",
+ "content-type": "text/xml"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_2.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_2.json
new file mode 100644
index 00000000..2c5a7655
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_2.json
@@ -0,0 +1,80 @@
+{
+ "status_code": 200,
+ "data": {
+ "StackEvents": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:12.754Z",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::S3::Bucket",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 12,
+ "microsecond": 754000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "ResourceStatusReason": "Resource creation Initiated",
+ "StackName": "ansible-test-basic-yaml",
+ "ResourceProperties": "{}\n",
+ "PhysicalResourceId": "ansible-test-basic-yaml-mybucket-13m2y4v8bptj4",
+ "LogicalResourceId": "MyBucket"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:11.159Z",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::S3::Bucket",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 11,
+ "microsecond": 159000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "StackName": "ansible-test-basic-yaml",
+ "ResourceProperties": "{}\n",
+ "PhysicalResourceId": "",
+ "LogicalResourceId": "MyBucket"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "04032730-b5d0-11e7-86b8-503ac93168c5",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 8,
+ "microsecond": 324000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "ResourceStatusReason": "User Initiated",
+ "StackName": "ansible-test-basic-yaml",
+ "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "LogicalResourceId": "ansible-test-basic-yaml"
+ }
+ ],
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 200,
+ "RequestId": "075d9d71-b5d0-11e7-ae09-550cfe4b2358",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "075d9d71-b5d0-11e7-ae09-550cfe4b2358",
+ "vary": "Accept-Encoding",
+ "content-length": "2730",
+ "content-type": "text/xml",
+ "date": "Fri, 20 Oct 2017 19:51:13 GMT"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_3.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_3.json
new file mode 100644
index 00000000..cf2c2450
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_3.json
@@ -0,0 +1,80 @@
+{
+ "status_code": 200,
+ "data": {
+ "StackEvents": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:12.754Z",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::S3::Bucket",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 12,
+ "microsecond": 754000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "ResourceStatusReason": "Resource creation Initiated",
+ "StackName": "ansible-test-basic-yaml",
+ "ResourceProperties": "{}\n",
+ "PhysicalResourceId": "ansible-test-basic-yaml-mybucket-13m2y4v8bptj4",
+ "LogicalResourceId": "MyBucket"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:11.159Z",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::S3::Bucket",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 11,
+ "microsecond": 159000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "StackName": "ansible-test-basic-yaml",
+ "ResourceProperties": "{}\n",
+ "PhysicalResourceId": "",
+ "LogicalResourceId": "MyBucket"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "04032730-b5d0-11e7-86b8-503ac93168c5",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 8,
+ "microsecond": 324000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "ResourceStatusReason": "User Initiated",
+ "StackName": "ansible-test-basic-yaml",
+ "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "LogicalResourceId": "ansible-test-basic-yaml"
+ }
+ ],
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 200,
+ "RequestId": "0a7eb31b-b5d0-11e7-ae09-550cfe4b2358",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "0a7eb31b-b5d0-11e7-ae09-550cfe4b2358",
+ "vary": "Accept-Encoding",
+ "content-length": "2730",
+ "content-type": "text/xml",
+ "date": "Fri, 20 Oct 2017 19:51:19 GMT"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_4.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_4.json
new file mode 100644
index 00000000..32ee9c1c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_4.json
@@ -0,0 +1,80 @@
+{
+ "status_code": 200,
+ "data": {
+ "StackEvents": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:12.754Z",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::S3::Bucket",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 12,
+ "microsecond": 754000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "ResourceStatusReason": "Resource creation Initiated",
+ "StackName": "ansible-test-basic-yaml",
+ "ResourceProperties": "{}\n",
+ "PhysicalResourceId": "ansible-test-basic-yaml-mybucket-13m2y4v8bptj4",
+ "LogicalResourceId": "MyBucket"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:11.159Z",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::S3::Bucket",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 11,
+ "microsecond": 159000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "StackName": "ansible-test-basic-yaml",
+ "ResourceProperties": "{}\n",
+ "PhysicalResourceId": "",
+ "LogicalResourceId": "MyBucket"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "04032730-b5d0-11e7-86b8-503ac93168c5",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 8,
+ "microsecond": 324000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "ResourceStatusReason": "User Initiated",
+ "StackName": "ansible-test-basic-yaml",
+ "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "LogicalResourceId": "ansible-test-basic-yaml"
+ }
+ ],
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 200,
+ "RequestId": "0d9e1c06-b5d0-11e7-ae09-550cfe4b2358",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "0d9e1c06-b5d0-11e7-ae09-550cfe4b2358",
+ "vary": "Accept-Encoding",
+ "content-length": "2730",
+ "content-type": "text/xml",
+ "date": "Fri, 20 Oct 2017 19:51:24 GMT"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_5.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_5.json
new file mode 100644
index 00000000..b547cd4d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_5.json
@@ -0,0 +1,80 @@
+{
+ "status_code": 200,
+ "data": {
+ "StackEvents": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:12.754Z",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::S3::Bucket",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 12,
+ "microsecond": 754000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "ResourceStatusReason": "Resource creation Initiated",
+ "StackName": "ansible-test-basic-yaml",
+ "ResourceProperties": "{}\n",
+ "PhysicalResourceId": "ansible-test-basic-yaml-mybucket-13m2y4v8bptj4",
+ "LogicalResourceId": "MyBucket"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:11.159Z",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::S3::Bucket",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 11,
+ "microsecond": 159000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "StackName": "ansible-test-basic-yaml",
+ "ResourceProperties": "{}\n",
+ "PhysicalResourceId": "",
+ "LogicalResourceId": "MyBucket"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "04032730-b5d0-11e7-86b8-503ac93168c5",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 8,
+ "microsecond": 324000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "ResourceStatusReason": "User Initiated",
+ "StackName": "ansible-test-basic-yaml",
+ "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "LogicalResourceId": "ansible-test-basic-yaml"
+ }
+ ],
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 200,
+ "RequestId": "10bd84ca-b5d0-11e7-ae09-550cfe4b2358",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "10bd84ca-b5d0-11e7-ae09-550cfe4b2358",
+ "vary": "Accept-Encoding",
+ "content-length": "2730",
+ "content-type": "text/xml",
+ "date": "Fri, 20 Oct 2017 19:51:29 GMT"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_6.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_6.json
new file mode 100644
index 00000000..15bd043a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_6.json
@@ -0,0 +1,100 @@
+{
+ "status_code": 200,
+ "data": {
+ "StackEvents": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "MyBucket-CREATE_COMPLETE-2017-10-20T19:51:33.200Z",
+ "ResourceStatus": "CREATE_COMPLETE",
+ "ResourceType": "AWS::S3::Bucket",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 33,
+ "microsecond": 200000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "StackName": "ansible-test-basic-yaml",
+ "ResourceProperties": "{}\n",
+ "PhysicalResourceId": "ansible-test-basic-yaml-mybucket-13m2y4v8bptj4",
+ "LogicalResourceId": "MyBucket"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:12.754Z",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::S3::Bucket",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 12,
+ "microsecond": 754000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "ResourceStatusReason": "Resource creation Initiated",
+ "StackName": "ansible-test-basic-yaml",
+ "ResourceProperties": "{}\n",
+ "PhysicalResourceId": "ansible-test-basic-yaml-mybucket-13m2y4v8bptj4",
+ "LogicalResourceId": "MyBucket"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:11.159Z",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::S3::Bucket",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 11,
+ "microsecond": 159000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "StackName": "ansible-test-basic-yaml",
+ "ResourceProperties": "{}\n",
+ "PhysicalResourceId": "",
+ "LogicalResourceId": "MyBucket"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "04032730-b5d0-11e7-86b8-503ac93168c5",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 8,
+ "microsecond": 324000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "ResourceStatusReason": "User Initiated",
+ "StackName": "ansible-test-basic-yaml",
+ "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "LogicalResourceId": "ansible-test-basic-yaml"
+ }
+ ],
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 200,
+ "RequestId": "13dbb3fd-b5d0-11e7-ae09-550cfe4b2358",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "13dbb3fd-b5d0-11e7-ae09-550cfe4b2358",
+ "vary": "Accept-Encoding",
+ "content-length": "3490",
+ "content-type": "text/xml",
+ "date": "Fri, 20 Oct 2017 19:51:34 GMT"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_7.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_7.json
new file mode 100644
index 00000000..87db7c59
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStackEvents_7.json
@@ -0,0 +1,119 @@
+{
+ "status_code": 200,
+ "data": {
+ "StackEvents": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "140d7220-b5d0-11e7-933f-50a686be7356",
+ "ResourceStatus": "CREATE_COMPLETE",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 35,
+ "microsecond": 121000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "StackName": "ansible-test-basic-yaml",
+ "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "LogicalResourceId": "ansible-test-basic-yaml"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "MyBucket-CREATE_COMPLETE-2017-10-20T19:51:33.200Z",
+ "ResourceStatus": "CREATE_COMPLETE",
+ "ResourceType": "AWS::S3::Bucket",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 33,
+ "microsecond": 200000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "StackName": "ansible-test-basic-yaml",
+ "ResourceProperties": "{}\n",
+ "PhysicalResourceId": "ansible-test-basic-yaml-mybucket-13m2y4v8bptj4",
+ "LogicalResourceId": "MyBucket"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:12.754Z",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::S3::Bucket",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 12,
+ "microsecond": 754000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "ResourceStatusReason": "Resource creation Initiated",
+ "StackName": "ansible-test-basic-yaml",
+ "ResourceProperties": "{}\n",
+ "PhysicalResourceId": "ansible-test-basic-yaml-mybucket-13m2y4v8bptj4",
+ "LogicalResourceId": "MyBucket"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:11.159Z",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::S3::Bucket",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 11,
+ "microsecond": 159000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "StackName": "ansible-test-basic-yaml",
+ "ResourceProperties": "{}\n",
+ "PhysicalResourceId": "",
+ "LogicalResourceId": "MyBucket"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "04032730-b5d0-11e7-86b8-503ac93168c5",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 8,
+ "microsecond": 324000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "ResourceStatusReason": "User Initiated",
+ "StackName": "ansible-test-basic-yaml",
+ "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "LogicalResourceId": "ansible-test-basic-yaml"
+ }
+ ],
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 200,
+ "RequestId": "16faf590-b5d0-11e7-ae09-550cfe4b2358",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "16faf590-b5d0-11e7-ae09-550cfe4b2358",
+ "vary": "Accept-Encoding",
+ "content-length": "4276",
+ "content-type": "text/xml",
+ "date": "Fri, 20 Oct 2017 19:51:39 GMT"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_1.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_1.json
new file mode 100644
index 00000000..7acdb3ac
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_1.json
@@ -0,0 +1,40 @@
+{
+ "status_code": 200,
+ "data": {
+ "Stacks": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EnableTerminationProtection": false,
+ "Description": "Basic template that creates an S3 bucket",
+ "Tags": [],
+ "StackStatusReason": "User Initiated",
+ "CreationTime": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 8,
+ "microsecond": 324000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "StackName": "ansible-test-basic-yaml",
+ "NotificationARNs": [],
+ "StackStatus": "CREATE_IN_PROGRESS",
+ "DisableRollback": false,
+ "RollbackConfiguration": {}
+ }
+ ],
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 200,
+ "RequestId": "042974db-b5d0-11e7-ae09-550cfe4b2358",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "042974db-b5d0-11e7-ae09-550cfe4b2358",
+ "date": "Fri, 20 Oct 2017 19:51:08 GMT",
+ "content-length": "975",
+ "content-type": "text/xml"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_2.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_2.json
new file mode 100644
index 00000000..0ed674b2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_2.json
@@ -0,0 +1,39 @@
+{
+ "status_code": 200,
+ "data": {
+ "Stacks": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "Description": "Basic template that creates an S3 bucket",
+ "Tags": [],
+ "EnableTerminationProtection": false,
+ "CreationTime": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 8,
+ "microsecond": 324000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "StackName": "ansible-test-basic-yaml",
+ "NotificationARNs": [],
+ "StackStatus": "CREATE_IN_PROGRESS",
+ "DisableRollback": false,
+ "RollbackConfiguration": {}
+ }
+ ],
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 200,
+ "RequestId": "074b26dc-b5d0-11e7-ae09-550cfe4b2358",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "074b26dc-b5d0-11e7-ae09-550cfe4b2358",
+ "date": "Fri, 20 Oct 2017 19:51:13 GMT",
+ "content-length": "913",
+ "content-type": "text/xml"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_3.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_3.json
new file mode 100644
index 00000000..633c5e15
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_3.json
@@ -0,0 +1,39 @@
+{
+ "status_code": 200,
+ "data": {
+ "Stacks": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "Description": "Basic template that creates an S3 bucket",
+ "Tags": [],
+ "EnableTerminationProtection": false,
+ "CreationTime": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 8,
+ "microsecond": 324000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "StackName": "ansible-test-basic-yaml",
+ "NotificationARNs": [],
+ "StackStatus": "CREATE_IN_PROGRESS",
+ "DisableRollback": false,
+ "RollbackConfiguration": {}
+ }
+ ],
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 200,
+ "RequestId": "0a6cb1b3-b5d0-11e7-ae09-550cfe4b2358",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "0a6cb1b3-b5d0-11e7-ae09-550cfe4b2358",
+ "date": "Fri, 20 Oct 2017 19:51:18 GMT",
+ "content-length": "913",
+ "content-type": "text/xml"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_4.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_4.json
new file mode 100644
index 00000000..e5ca69dd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_4.json
@@ -0,0 +1,39 @@
+{
+ "status_code": 200,
+ "data": {
+ "Stacks": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "Description": "Basic template that creates an S3 bucket",
+ "Tags": [],
+ "EnableTerminationProtection": false,
+ "CreationTime": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 8,
+ "microsecond": 324000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "StackName": "ansible-test-basic-yaml",
+ "NotificationARNs": [],
+ "StackStatus": "CREATE_IN_PROGRESS",
+ "DisableRollback": false,
+ "RollbackConfiguration": {}
+ }
+ ],
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 200,
+ "RequestId": "0d8cddf1-b5d0-11e7-ae09-550cfe4b2358",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "0d8cddf1-b5d0-11e7-ae09-550cfe4b2358",
+ "date": "Fri, 20 Oct 2017 19:51:23 GMT",
+ "content-length": "913",
+ "content-type": "text/xml"
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_5.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_5.json
new file mode 100644
index 00000000..31a3057c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_5.json
@@ -0,0 +1,39 @@
+{
+ "status_code": 200,
+ "data": {
+ "Stacks": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "Description": "Basic template that creates an S3 bucket",
+ "Tags": [],
+ "EnableTerminationProtection": false,
+ "CreationTime": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 8,
+ "microsecond": 324000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "StackName": "ansible-test-basic-yaml",
+ "NotificationARNs": [],
+ "StackStatus": "CREATE_IN_PROGRESS",
+ "DisableRollback": false,
+ "RollbackConfiguration": {}
+ }
+ ],
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 200,
+ "RequestId": "10ac94d5-b5d0-11e7-ae09-550cfe4b2358",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "10ac94d5-b5d0-11e7-ae09-550cfe4b2358",
+ "date": "Fri, 20 Oct 2017 19:51:28 GMT",
+ "content-length": "913",
+ "content-type": "text/xml"
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_6.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_6.json
new file mode 100644
index 00000000..90ca7467
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_6.json
@@ -0,0 +1,39 @@
+{
+ "status_code": 200,
+ "data": {
+ "Stacks": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "Description": "Basic template that creates an S3 bucket",
+ "Tags": [],
+ "EnableTerminationProtection": false,
+ "CreationTime": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 8,
+ "microsecond": 324000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "StackName": "ansible-test-basic-yaml",
+ "NotificationARNs": [],
+ "StackStatus": "CREATE_IN_PROGRESS",
+ "DisableRollback": false,
+ "RollbackConfiguration": {}
+ }
+ ],
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 200,
+ "RequestId": "13caeb1b-b5d0-11e7-ae09-550cfe4b2358",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "13caeb1b-b5d0-11e7-ae09-550cfe4b2358",
+ "date": "Fri, 20 Oct 2017 19:51:33 GMT",
+ "content-length": "913",
+ "content-type": "text/xml"
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_7.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_7.json
new file mode 100644
index 00000000..905c04f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/basic_s3_stack/cloudformation.DescribeStacks_7.json
@@ -0,0 +1,45 @@
+{
+ "status_code": 200,
+ "data": {
+ "Stacks": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-basic-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "Description": "Basic template that creates an S3 bucket",
+ "Tags": [],
+ "Outputs": [
+ {
+ "OutputKey": "TheName",
+ "OutputValue": "ansible-test-basic-yaml-mybucket-13m2y4v8bptj4"
+ }
+ ],
+ "EnableTerminationProtection": false,
+ "CreationTime": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 8,
+ "microsecond": 324000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "StackName": "ansible-test-basic-yaml",
+ "NotificationARNs": [],
+ "StackStatus": "CREATE_COMPLETE",
+ "DisableRollback": false,
+ "RollbackConfiguration": {}
+ }
+ ],
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 200,
+ "RequestId": "16ea53bb-b5d0-11e7-ae09-550cfe4b2358",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "16ea53bb-b5d0-11e7-ae09-550cfe4b2358",
+ "date": "Fri, 20 Oct 2017 19:51:39 GMT",
+ "content-length": "1115",
+ "content-type": "text/xml"
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.CreateStack_1.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.CreateStack_1.json
new file mode 100644
index 00000000..9084936a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.CreateStack_1.json
@@ -0,0 +1,17 @@
+{
+ "status_code": 200,
+ "data": {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 200,
+ "RequestId": "03fbfc36-b5d0-11e7-ae09-550cfe4b2358",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "03fbfc36-b5d0-11e7-ae09-550cfe4b2358",
+ "date": "Fri, 20 Oct 2017 19:51:07 GMT",
+ "content-length": "393",
+ "content-type": "text/xml"
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DeleteStack_1.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DeleteStack_1.json
new file mode 100644
index 00000000..d526155a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DeleteStack_1.json
@@ -0,0 +1,16 @@
+{
+ "status_code": 200,
+ "data": {
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 200,
+ "RequestId": "170d1e02-b5d0-11e7-ae09-550cfe4b2358",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "170d1e02-b5d0-11e7-ae09-550cfe4b2358",
+ "date": "Fri, 20 Oct 2017 19:51:39 GMT",
+ "content-length": "212",
+ "content-type": "text/xml"
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_1.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_1.json
new file mode 100644
index 00000000..399eab49
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_1.json
@@ -0,0 +1,39 @@
+{
+ "status_code": 200,
+ "data": {
+ "StackEvents": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "04032730-b5d0-11e7-86b8-503ac93168c5",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 8,
+ "microsecond": 324000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "ResourceStatusReason": "User Initiated",
+ "StackName": "ansible-test-client-request-token-yaml",
+ "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf",
+ "LogicalResourceId": "ansible-test-client-request-token-yaml"
+ }
+ ],
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 200,
+ "RequestId": "043d4a05-b5d0-11e7-ae09-550cfe4b2358",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "043d4a05-b5d0-11e7-ae09-550cfe4b2358",
+ "date": "Fri, 20 Oct 2017 19:51:08 GMT",
+ "content-length": "1183",
+ "content-type": "text/xml"
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_2.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_2.json
new file mode 100644
index 00000000..f57dbf53
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_2.json
@@ -0,0 +1,83 @@
+{
+ "status_code": 200,
+ "data": {
+ "StackEvents": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:12.754Z",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::S3::Bucket",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 12,
+ "microsecond": 754000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "ResourceStatusReason": "Resource creation Initiated",
+ "StackName": "ansible-test-client-request-token-yaml",
+ "ResourceProperties": "{}\n",
+ "PhysicalResourceId": "ansible-test-client-request-token-yaml-mybucket-13m2y4v8bptj4",
+ "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf",
+ "LogicalResourceId": "MyBucket"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:11.159Z",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::S3::Bucket",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 11,
+ "microsecond": 159000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "StackName": "ansible-test-client-request-token-yaml",
+ "ResourceProperties": "{}\n",
+ "PhysicalResourceId": "",
+ "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf",
+ "LogicalResourceId": "MyBucket"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "04032730-b5d0-11e7-86b8-503ac93168c5",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 8,
+ "microsecond": 324000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "ResourceStatusReason": "User Initiated",
+ "StackName": "ansible-test-client-request-token-yaml",
+ "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf",
+ "LogicalResourceId": "ansible-test-client-request-token-yaml"
+ }
+ ],
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 200,
+ "RequestId": "075d9d71-b5d0-11e7-ae09-550cfe4b2358",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "075d9d71-b5d0-11e7-ae09-550cfe4b2358",
+ "vary": "Accept-Encoding",
+ "content-length": "2730",
+ "content-type": "text/xml",
+ "date": "Fri, 20 Oct 2017 19:51:13 GMT"
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_3.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_3.json
new file mode 100644
index 00000000..c8b4d694
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_3.json
@@ -0,0 +1,83 @@
+{
+ "status_code": 200,
+ "data": {
+ "StackEvents": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:12.754Z",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::S3::Bucket",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 12,
+ "microsecond": 754000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "ResourceStatusReason": "Resource creation Initiated",
+ "StackName": "ansible-test-client-request-token-yaml",
+ "ResourceProperties": "{}\n",
+ "PhysicalResourceId": "ansible-test-client-request-token-yaml-mybucket-13m2y4v8bptj4",
+ "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf",
+ "LogicalResourceId": "MyBucket"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:11.159Z",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::S3::Bucket",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 11,
+ "microsecond": 159000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "StackName": "ansible-test-client-request-token-yaml",
+ "ResourceProperties": "{}\n",
+ "PhysicalResourceId": "",
+ "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf",
+ "LogicalResourceId": "MyBucket"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "04032730-b5d0-11e7-86b8-503ac93168c5",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 8,
+ "microsecond": 324000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "ResourceStatusReason": "User Initiated",
+ "StackName": "ansible-test-client-request-token-yaml",
+ "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf",
+ "LogicalResourceId": "ansible-test-client-request-token-yaml"
+ }
+ ],
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 200,
+ "RequestId": "0a7eb31b-b5d0-11e7-ae09-550cfe4b2358",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "0a7eb31b-b5d0-11e7-ae09-550cfe4b2358",
+ "vary": "Accept-Encoding",
+ "content-length": "2730",
+ "content-type": "text/xml",
+ "date": "Fri, 20 Oct 2017 19:51:19 GMT"
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_4.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_4.json
new file mode 100644
index 00000000..8bb03ede
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_4.json
@@ -0,0 +1,83 @@
+{
+ "status_code": 200,
+ "data": {
+ "StackEvents": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:12.754Z",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::S3::Bucket",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 12,
+ "microsecond": 754000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "ResourceStatusReason": "Resource creation Initiated",
+ "StackName": "ansible-test-client-request-token-yaml",
+ "ResourceProperties": "{}\n",
+ "PhysicalResourceId": "ansible-test-client-request-token-yaml-mybucket-13m2y4v8bptj4",
+ "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf",
+ "LogicalResourceId": "MyBucket"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:11.159Z",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::S3::Bucket",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 11,
+ "microsecond": 159000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "StackName": "ansible-test-client-request-token-yaml",
+ "ResourceProperties": "{}\n",
+ "PhysicalResourceId": "",
+ "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf",
+ "LogicalResourceId": "MyBucket"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "04032730-b5d0-11e7-86b8-503ac93168c5",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 8,
+ "microsecond": 324000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "ResourceStatusReason": "User Initiated",
+ "StackName": "ansible-test-client-request-token-yaml",
+ "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf",
+ "LogicalResourceId": "ansible-test-client-request-token-yaml"
+ }
+ ],
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 200,
+ "RequestId": "0d9e1c06-b5d0-11e7-ae09-550cfe4b2358",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "0d9e1c06-b5d0-11e7-ae09-550cfe4b2358",
+ "vary": "Accept-Encoding",
+ "content-length": "2730",
+ "content-type": "text/xml",
+ "date": "Fri, 20 Oct 2017 19:51:24 GMT"
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_5.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_5.json
new file mode 100644
index 00000000..311949d0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_5.json
@@ -0,0 +1,83 @@
+{
+ "status_code": 200,
+ "data": {
+ "StackEvents": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:12.754Z",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::S3::Bucket",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 12,
+ "microsecond": 754000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "ResourceStatusReason": "Resource creation Initiated",
+ "StackName": "ansible-test-client-request-token-yaml",
+ "ResourceProperties": "{}\n",
+ "PhysicalResourceId": "ansible-test-client-request-token-yaml-mybucket-13m2y4v8bptj4",
+ "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf",
+ "LogicalResourceId": "MyBucket"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:11.159Z",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::S3::Bucket",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 11,
+ "microsecond": 159000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "StackName": "ansible-test-client-request-token-yaml",
+ "ResourceProperties": "{}\n",
+ "PhysicalResourceId": "",
+ "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf",
+ "LogicalResourceId": "MyBucket"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "04032730-b5d0-11e7-86b8-503ac93168c5",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 8,
+ "microsecond": 324000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "ResourceStatusReason": "User Initiated",
+ "StackName": "ansible-test-client-request-token-yaml",
+ "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf",
+ "LogicalResourceId": "ansible-test-client-request-token-yaml"
+ }
+ ],
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 200,
+ "RequestId": "10bd84ca-b5d0-11e7-ae09-550cfe4b2358",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "10bd84ca-b5d0-11e7-ae09-550cfe4b2358",
+ "vary": "Accept-Encoding",
+ "content-length": "2730",
+ "content-type": "text/xml",
+ "date": "Fri, 20 Oct 2017 19:51:29 GMT"
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_6.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_6.json
new file mode 100644
index 00000000..ddab94a5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_6.json
@@ -0,0 +1,104 @@
+{
+ "status_code": 200,
+ "data": {
+ "StackEvents": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "MyBucket-CREATE_COMPLETE-2017-10-20T19:51:33.200Z",
+ "ResourceStatus": "CREATE_COMPLETE",
+ "ResourceType": "AWS::S3::Bucket",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 33,
+ "microsecond": 200000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "StackName": "ansible-test-client-request-token-yaml",
+ "ResourceProperties": "{}\n",
+ "PhysicalResourceId": "ansible-test-client-request-token-yaml-mybucket-13m2y4v8bptj4",
+ "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf",
+ "LogicalResourceId": "MyBucket"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:12.754Z",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::S3::Bucket",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 12,
+ "microsecond": 754000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "ResourceStatusReason": "Resource creation Initiated",
+ "StackName": "ansible-test-client-request-token-yaml",
+ "ResourceProperties": "{}\n",
+ "PhysicalResourceId": "ansible-test-client-request-token-yaml-mybucket-13m2y4v8bptj4",
+ "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf",
+ "LogicalResourceId": "MyBucket"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:11.159Z",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::S3::Bucket",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 11,
+ "microsecond": 159000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "StackName": "ansible-test-client-request-token-yaml",
+ "ResourceProperties": "{}\n",
+ "PhysicalResourceId": "",
+ "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf",
+ "LogicalResourceId": "MyBucket"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "04032730-b5d0-11e7-86b8-503ac93168c5",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 8,
+ "microsecond": 324000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "ResourceStatusReason": "User Initiated",
+ "StackName": "ansible-test-client-request-token-yaml",
+ "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf",
+ "LogicalResourceId": "ansible-test-client-request-token-yaml"
+ }
+ ],
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 200,
+ "RequestId": "13dbb3fd-b5d0-11e7-ae09-550cfe4b2358",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "13dbb3fd-b5d0-11e7-ae09-550cfe4b2358",
+ "vary": "Accept-Encoding",
+ "content-length": "3490",
+ "content-type": "text/xml",
+ "date": "Fri, 20 Oct 2017 19:51:34 GMT"
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_7.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_7.json
new file mode 100644
index 00000000..86da5fb4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStackEvents_7.json
@@ -0,0 +1,124 @@
+{
+ "status_code": 200,
+ "data": {
+ "StackEvents": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "140d7220-b5d0-11e7-933f-50a686be7356",
+ "ResourceStatus": "CREATE_COMPLETE",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 35,
+ "microsecond": 121000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "StackName": "ansible-test-client-request-token-yaml",
+ "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf",
+ "LogicalResourceId": "ansible-test-client-request-token-yaml"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "MyBucket-CREATE_COMPLETE-2017-10-20T19:51:33.200Z",
+ "ResourceStatus": "CREATE_COMPLETE",
+ "ResourceType": "AWS::S3::Bucket",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 33,
+ "microsecond": 200000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "StackName": "ansible-test-client-request-token-yaml",
+ "ResourceProperties": "{}\n",
+ "PhysicalResourceId": "ansible-test-client-request-token-yaml-mybucket-13m2y4v8bptj4",
+ "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf",
+ "LogicalResourceId": "MyBucket"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:12.754Z",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::S3::Bucket",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 12,
+ "microsecond": 754000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "ResourceStatusReason": "Resource creation Initiated",
+ "StackName": "ansible-test-client-request-token-yaml",
+ "ResourceProperties": "{}\n",
+ "PhysicalResourceId": "ansible-test-client-request-token-yaml-mybucket-13m2y4v8bptj4",
+ "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf",
+ "LogicalResourceId": "MyBucket"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "MyBucket-CREATE_IN_PROGRESS-2017-10-20T19:51:11.159Z",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::S3::Bucket",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 11,
+ "microsecond": 159000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "StackName": "ansible-test-client-request-token-yaml",
+ "ResourceProperties": "{}\n",
+ "PhysicalResourceId": "",
+ "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf",
+ "LogicalResourceId": "MyBucket"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EventId": "04032730-b5d0-11e7-86b8-503ac93168c5",
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 8,
+ "microsecond": 324000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "ResourceStatusReason": "User Initiated",
+ "StackName": "ansible-test-client-request-token-yaml",
+ "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf",
+ "LogicalResourceId": "ansible-test-client-request-token-yaml"
+ }
+ ],
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 200,
+ "RequestId": "16faf590-b5d0-11e7-ae09-550cfe4b2358",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "16faf590-b5d0-11e7-ae09-550cfe4b2358",
+ "vary": "Accept-Encoding",
+ "content-length": "4276",
+ "content-type": "text/xml",
+ "date": "Fri, 20 Oct 2017 19:51:39 GMT"
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_1.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_1.json
new file mode 100644
index 00000000..7734b0ca
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_1.json
@@ -0,0 +1,40 @@
+{
+ "status_code": 200,
+ "data": {
+ "Stacks": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "EnableTerminationProtection": false,
+ "Description": "Basic template that creates an S3 bucket",
+ "Tags": [],
+ "StackStatusReason": "User Initiated",
+ "CreationTime": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 8,
+ "microsecond": 324000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "StackName": "ansible-test-client-request-token-yaml",
+ "NotificationARNs": [],
+ "StackStatus": "CREATE_IN_PROGRESS",
+ "DisableRollback": false,
+ "RollbackConfiguration": {}
+ }
+ ],
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 200,
+ "RequestId": "042974db-b5d0-11e7-ae09-550cfe4b2358",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "042974db-b5d0-11e7-ae09-550cfe4b2358",
+ "date": "Fri, 20 Oct 2017 19:51:08 GMT",
+ "content-length": "975",
+ "content-type": "text/xml"
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_2.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_2.json
new file mode 100644
index 00000000..0a1e74d7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_2.json
@@ -0,0 +1,39 @@
+{
+ "status_code": 200,
+ "data": {
+ "Stacks": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "Description": "Basic template that creates an S3 bucket",
+ "Tags": [],
+ "EnableTerminationProtection": false,
+ "CreationTime": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 8,
+ "microsecond": 324000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "StackName": "ansible-test-client-request-token-yaml",
+ "NotificationARNs": [],
+ "StackStatus": "CREATE_IN_PROGRESS",
+ "DisableRollback": false,
+ "RollbackConfiguration": {}
+ }
+ ],
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 200,
+ "RequestId": "074b26dc-b5d0-11e7-ae09-550cfe4b2358",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "074b26dc-b5d0-11e7-ae09-550cfe4b2358",
+ "date": "Fri, 20 Oct 2017 19:51:13 GMT",
+ "content-length": "913",
+ "content-type": "text/xml"
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_3.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_3.json
new file mode 100644
index 00000000..12d5839f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_3.json
@@ -0,0 +1,39 @@
+{
+ "status_code": 200,
+ "data": {
+ "Stacks": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "Description": "Basic template that creates an S3 bucket",
+ "Tags": [],
+ "EnableTerminationProtection": false,
+ "CreationTime": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 8,
+ "microsecond": 324000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "StackName": "ansible-test-client-request-token-yaml",
+ "NotificationARNs": [],
+ "StackStatus": "CREATE_IN_PROGRESS",
+ "DisableRollback": false,
+ "RollbackConfiguration": {}
+ }
+ ],
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 200,
+ "RequestId": "0a6cb1b3-b5d0-11e7-ae09-550cfe4b2358",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "0a6cb1b3-b5d0-11e7-ae09-550cfe4b2358",
+ "date": "Fri, 20 Oct 2017 19:51:18 GMT",
+ "content-length": "913",
+ "content-type": "text/xml"
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_4.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_4.json
new file mode 100644
index 00000000..a3cb0a8c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_4.json
@@ -0,0 +1,39 @@
+{
+ "status_code": 200,
+ "data": {
+ "Stacks": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "Description": "Basic template that creates an S3 bucket",
+ "Tags": [],
+ "EnableTerminationProtection": false,
+ "CreationTime": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 8,
+ "microsecond": 324000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "StackName": "ansible-test-client-request-token-yaml",
+ "NotificationARNs": [],
+ "StackStatus": "CREATE_IN_PROGRESS",
+ "DisableRollback": false,
+ "RollbackConfiguration": {}
+ }
+ ],
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 200,
+ "RequestId": "0d8cddf1-b5d0-11e7-ae09-550cfe4b2358",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "0d8cddf1-b5d0-11e7-ae09-550cfe4b2358",
+ "date": "Fri, 20 Oct 2017 19:51:23 GMT",
+ "content-length": "913",
+ "content-type": "text/xml"
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_5.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_5.json
new file mode 100644
index 00000000..251d71fa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_5.json
@@ -0,0 +1,39 @@
+{
+ "status_code": 200,
+ "data": {
+ "Stacks": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "Description": "Basic template that creates an S3 bucket",
+ "Tags": [],
+ "EnableTerminationProtection": false,
+ "CreationTime": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 8,
+ "microsecond": 324000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "StackName": "ansible-test-client-request-token-yaml",
+ "NotificationARNs": [],
+ "StackStatus": "CREATE_IN_PROGRESS",
+ "DisableRollback": false,
+ "RollbackConfiguration": {}
+ }
+ ],
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 200,
+ "RequestId": "10ac94d5-b5d0-11e7-ae09-550cfe4b2358",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "10ac94d5-b5d0-11e7-ae09-550cfe4b2358",
+ "date": "Fri, 20 Oct 2017 19:51:28 GMT",
+ "content-length": "913",
+ "content-type": "text/xml"
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_6.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_6.json
new file mode 100644
index 00000000..2251125f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_6.json
@@ -0,0 +1,39 @@
+{
+ "status_code": 200,
+ "data": {
+ "Stacks": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "Description": "Basic template that creates an S3 bucket",
+ "Tags": [],
+ "EnableTerminationProtection": false,
+ "CreationTime": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 8,
+ "microsecond": 324000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "StackName": "ansible-test-client-request-token-yaml",
+ "NotificationARNs": [],
+ "StackStatus": "CREATE_IN_PROGRESS",
+ "DisableRollback": false,
+ "RollbackConfiguration": {}
+ }
+ ],
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 200,
+ "RequestId": "13caeb1b-b5d0-11e7-ae09-550cfe4b2358",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "13caeb1b-b5d0-11e7-ae09-550cfe4b2358",
+ "date": "Fri, 20 Oct 2017 19:51:33 GMT",
+ "content-length": "913",
+ "content-type": "text/xml"
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_7.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_7.json
new file mode 100644
index 00000000..aa8c7fd0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/client_request_token_s3_stack/cloudformation.DescribeStacks_7.json
@@ -0,0 +1,45 @@
+{
+ "status_code": 200,
+ "data": {
+ "Stacks": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-client-request-token-yaml/04023cd0-b5d0-11e7-86b8-503ac93168c5",
+ "Description": "Basic template that creates an S3 bucket",
+ "Tags": [],
+ "Outputs": [
+ {
+ "OutputKey": "TheName",
+ "OutputValue": "ansible-test-client-request-token-yaml-mybucket-13m2y4v8bptj4"
+ }
+ ],
+ "EnableTerminationProtection": false,
+ "CreationTime": {
+ "hour": 19,
+ "__class__": "datetime",
+ "month": 10,
+ "second": 8,
+ "microsecond": 324000,
+ "year": 2017,
+ "day": 20,
+ "minute": 51
+ },
+ "StackName": "ansible-test-client-request-token-yaml",
+ "NotificationARNs": [],
+ "StackStatus": "CREATE_COMPLETE",
+ "DisableRollback": false,
+ "RollbackConfiguration": {}
+ }
+ ],
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 200,
+ "RequestId": "16ea53bb-b5d0-11e7-ae09-550cfe4b2358",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "16ea53bb-b5d0-11e7-ae09-550cfe4b2358",
+ "date": "Fri, 20 Oct 2017 19:51:39 GMT",
+ "content-length": "1115",
+ "content-type": "text/xml"
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/delete_nonexistent_stack/cloudformation.DescribeStackEvents_1.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/delete_nonexistent_stack/cloudformation.DescribeStackEvents_1.json
new file mode 100644
index 00000000..109feacd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/delete_nonexistent_stack/cloudformation.DescribeStackEvents_1.json
@@ -0,0 +1,22 @@
+{
+ "status_code": 400,
+ "data": {
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 400,
+ "RequestId": "179d9e46-b5d0-11e7-ae09-550cfe4b2358",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "179d9e46-b5d0-11e7-ae09-550cfe4b2358",
+ "date": "Fri, 20 Oct 2017 19:51:40 GMT",
+ "content-length": "301",
+ "content-type": "text/xml",
+ "connection": "close"
+ }
+ },
+ "Error": {
+ "Message": "Stack [ansible-test-nonexist] does not exist",
+ "Code": "ValidationError",
+ "Type": "Sender"
+ }
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/delete_nonexistent_stack/cloudformation.DescribeStackEvents_2.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/delete_nonexistent_stack/cloudformation.DescribeStackEvents_2.json
new file mode 100644
index 00000000..589f92cc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/delete_nonexistent_stack/cloudformation.DescribeStackEvents_2.json
@@ -0,0 +1,22 @@
+{
+ "status_code": 400,
+ "data": {
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 400,
+ "RequestId": "17d80f44-b5d0-11e7-80c4-9f499f779cdb",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "17d80f44-b5d0-11e7-80c4-9f499f779cdb",
+ "date": "Fri, 20 Oct 2017 19:51:40 GMT",
+ "content-length": "301",
+ "content-type": "text/xml",
+ "connection": "close"
+ }
+ },
+ "Error": {
+ "Message": "Stack [ansible-test-nonexist] does not exist",
+ "Code": "ValidationError",
+ "Type": "Sender"
+ }
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/delete_nonexistent_stack/cloudformation.DescribeStacks_1.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/delete_nonexistent_stack/cloudformation.DescribeStacks_1.json
new file mode 100644
index 00000000..ea227415
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/delete_nonexistent_stack/cloudformation.DescribeStacks_1.json
@@ -0,0 +1,22 @@
+{
+ "status_code": 400,
+ "data": {
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 400,
+ "RequestId": "175fab26-b5d0-11e7-9d9b-45815c77100a",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "175fab26-b5d0-11e7-9d9b-45815c77100a",
+ "date": "Fri, 20 Oct 2017 19:51:40 GMT",
+ "content-length": "307",
+ "content-type": "text/xml",
+ "connection": "close"
+ }
+ },
+ "Error": {
+ "Message": "Stack with id ansible-test-nonexist does not exist",
+ "Code": "ValidationError",
+ "Type": "Sender"
+ }
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/get_nonexistent_stack/cloudformation.DescribeStacks_1.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/get_nonexistent_stack/cloudformation.DescribeStacks_1.json
new file mode 100644
index 00000000..cf29c6c7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/get_nonexistent_stack/cloudformation.DescribeStacks_1.json
@@ -0,0 +1,22 @@
+{
+ "status_code": 400,
+ "data": {
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 400,
+ "RequestId": "181566c8-b5d0-11e7-9d9b-45815c77100a",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "181566c8-b5d0-11e7-9d9b-45815c77100a",
+ "date": "Fri, 20 Oct 2017 19:51:41 GMT",
+ "content-length": "307",
+ "content-type": "text/xml",
+ "connection": "close"
+ }
+ },
+ "Error": {
+ "Message": "Stack with id ansible-test-nonexist does not exist",
+ "Code": "ValidationError",
+ "Type": "Sender"
+ }
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/invalid_template_json/cloudformation.CreateStack_1.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/invalid_template_json/cloudformation.CreateStack_1.json
new file mode 100644
index 00000000..7ad6cac9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/invalid_template_json/cloudformation.CreateStack_1.json
@@ -0,0 +1,22 @@
+{
+ "status_code": 400,
+ "data": {
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 400,
+ "RequestId": "03b1107f-b5d0-11e7-ae09-550cfe4b2358",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "03b1107f-b5d0-11e7-ae09-550cfe4b2358",
+ "date": "Fri, 20 Oct 2017 19:51:07 GMT",
+ "content-length": "320",
+ "content-type": "text/xml",
+ "connection": "close"
+ }
+ },
+ "Error": {
+ "Message": "Template format error: JSON not well-formed. (line 4, column 4)",
+ "Code": "ValidationError",
+ "Type": "Sender"
+ }
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.CreateStack_1.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.CreateStack_1.json
new file mode 100644
index 00000000..64c8e1f2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.CreateStack_1.json
@@ -0,0 +1,17 @@
+{
+ "status_code": 200,
+ "data": {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "ResponseMetadata": {
+ "RequestId": "c741ebcd-3a0e-11e9-b25f-d1217e6893bf",
+ "HTTPStatusCode": 200,
+ "HTTPHeaders": {
+ "x-amzn-requestid": "c741ebcd-3a0e-11e9-b25f-d1217e6893bf",
+ "content-type": "text/xml",
+ "content-length": "407",
+ "date": "Tue, 26 Feb 2019 21:37:55 GMT"
+ },
+ "RetryAttempts": 0
+ }
+ }
+}
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_1.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_1.json
new file mode 100644
index 00000000..7a6a4964
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_1.json
@@ -0,0 +1,38 @@
+{
+ "status_code": 200,
+ "data": {
+ "StackEvents": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "EventId": "c74b1310-3a0e-11e9-9a48-067794494828",
+ "StackName": "ansible-test-on-create-failure-delete",
+ "LogicalResourceId": "ansible-test-on-create-failure-delete",
+ "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 37,
+ "second": 55,
+ "microsecond": 909000
+ },
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceStatusReason": "User Initiated"
+ }
+ ],
+ "ResponseMetadata": {
+ "RequestId": "c7b0b337-3a0e-11e9-b25f-d1217e6893bf",
+ "HTTPStatusCode": 200,
+ "HTTPHeaders": {
+ "x-amzn-requestid": "c7b0b337-3a0e-11e9-b25f-d1217e6893bf",
+ "content-type": "text/xml",
+ "content-length": "1153",
+ "date": "Tue, 26 Feb 2019 21:37:56 GMT"
+ },
+ "RetryAttempts": 0
+ }
+ }
+}
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_2.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_2.json
new file mode 100644
index 00000000..6218ed8b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_2.json
@@ -0,0 +1,101 @@
+{
+ "status_code": 200,
+ "data": {
+ "StackEvents": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "EventId": "ECRRepo-CREATE_FAILED-2019-02-26T21:38:01.107Z",
+ "StackName": "ansible-test-on-create-failure-delete",
+ "LogicalResourceId": "ECRRepo",
+ "PhysicalResourceId": "ansib-ecrre-8jlpw72yz5x8",
+ "ResourceType": "AWS::ECR::Repository",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 38,
+ "second": 1,
+ "microsecond": 107000
+ },
+ "ResourceStatus": "CREATE_FAILED",
+ "ResourceStatusReason": "Invalid parameter at 'PolicyText' failed to satisfy constraint: 'Invalid repository policy provided' (Service: AmazonECR; Status Code: 400; Error Code: InvalidParameterException; Request ID: ca5769ae-3a0e-11e9-a183-3f277586a4cb)",
+ "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "EventId": "ECRRepo-CREATE_IN_PROGRESS-2019-02-26T21:38:00.657Z",
+ "StackName": "ansible-test-on-create-failure-delete",
+ "LogicalResourceId": "ECRRepo",
+ "PhysicalResourceId": "ansib-ecrre-8jlpw72yz5x8",
+ "ResourceType": "AWS::ECR::Repository",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 38,
+ "second": 0,
+ "microsecond": 657000
+ },
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceStatusReason": "Resource creation Initiated",
+ "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "EventId": "ECRRepo-CREATE_IN_PROGRESS-2019-02-26T21:38:00.221Z",
+ "StackName": "ansible-test-on-create-failure-delete",
+ "LogicalResourceId": "ECRRepo",
+ "PhysicalResourceId": "",
+ "ResourceType": "AWS::ECR::Repository",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 38,
+ "second": 0,
+ "microsecond": 221000
+ },
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "EventId": "c74b1310-3a0e-11e9-9a48-067794494828",
+ "StackName": "ansible-test-on-create-failure-delete",
+ "LogicalResourceId": "ansible-test-on-create-failure-delete",
+ "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 37,
+ "second": 55,
+ "microsecond": 909000
+ },
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceStatusReason": "User Initiated"
+ }
+ ],
+ "ResponseMetadata": {
+ "RequestId": "caf667e9-3a0e-11e9-b25f-d1217e6893bf",
+ "HTTPStatusCode": 200,
+ "HTTPHeaders": {
+ "x-amzn-requestid": "caf667e9-3a0e-11e9-b25f-d1217e6893bf",
+ "content-type": "text/xml",
+ "content-length": "4312",
+ "vary": "Accept-Encoding",
+ "date": "Tue, 26 Feb 2019 21:38:01 GMT"
+ },
+ "RetryAttempts": 0
+ }
+ }
+}
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_3.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_3.json
new file mode 100644
index 00000000..cde6beb8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_3.json
@@ -0,0 +1,121 @@
+{
+ "status_code": 200,
+ "data": {
+ "StackEvents": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "EventId": "cafc8250-3a0e-11e9-86c5-02035744c0fa",
+ "StackName": "ansible-test-on-create-failure-delete",
+ "LogicalResourceId": "ansible-test-on-create-failure-delete",
+ "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 38,
+ "second": 2,
+ "microsecond": 76000
+ },
+ "ResourceStatus": "DELETE_IN_PROGRESS",
+ "ResourceStatusReason": "The following resource(s) failed to create: [ECRRepo]. . Delete requested by user."
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "EventId": "ECRRepo-CREATE_FAILED-2019-02-26T21:38:01.107Z",
+ "StackName": "ansible-test-on-create-failure-delete",
+ "LogicalResourceId": "ECRRepo",
+ "PhysicalResourceId": "ansib-ecrre-8jlpw72yz5x8",
+ "ResourceType": "AWS::ECR::Repository",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 38,
+ "second": 1,
+ "microsecond": 107000
+ },
+ "ResourceStatus": "CREATE_FAILED",
+ "ResourceStatusReason": "Invalid parameter at 'PolicyText' failed to satisfy constraint: 'Invalid repository policy provided' (Service: AmazonECR; Status Code: 400; Error Code: InvalidParameterException; Request ID: ca5769ae-3a0e-11e9-a183-3f277586a4cb)",
+ "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "EventId": "ECRRepo-CREATE_IN_PROGRESS-2019-02-26T21:38:00.657Z",
+ "StackName": "ansible-test-on-create-failure-delete",
+ "LogicalResourceId": "ECRRepo",
+ "PhysicalResourceId": "ansib-ecrre-8jlpw72yz5x8",
+ "ResourceType": "AWS::ECR::Repository",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 38,
+ "second": 0,
+ "microsecond": 657000
+ },
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceStatusReason": "Resource creation Initiated",
+ "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "EventId": "ECRRepo-CREATE_IN_PROGRESS-2019-02-26T21:38:00.221Z",
+ "StackName": "ansible-test-on-create-failure-delete",
+ "LogicalResourceId": "ECRRepo",
+ "PhysicalResourceId": "",
+ "ResourceType": "AWS::ECR::Repository",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 38,
+ "second": 0,
+ "microsecond": 221000
+ },
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "EventId": "c74b1310-3a0e-11e9-9a48-067794494828",
+ "StackName": "ansible-test-on-create-failure-delete",
+ "LogicalResourceId": "ansible-test-on-create-failure-delete",
+ "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 37,
+ "second": 55,
+ "microsecond": 909000
+ },
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceStatusReason": "User Initiated"
+ }
+ ],
+ "ResponseMetadata": {
+ "RequestId": "ce498af1-3a0e-11e9-b25f-d1217e6893bf",
+ "HTTPStatusCode": 200,
+ "HTTPHeaders": {
+ "x-amzn-requestid": "ce498af1-3a0e-11e9-b25f-d1217e6893bf",
+ "content-type": "text/xml",
+ "content-length": "5207",
+ "vary": "Accept-Encoding",
+ "date": "Tue, 26 Feb 2019 21:38:06 GMT"
+ },
+ "RetryAttempts": 0
+ }
+ }
+}
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_4.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_4.json
new file mode 100644
index 00000000..4f35d6dd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_4.json
@@ -0,0 +1,180 @@
+{
+ "status_code": 200,
+ "data": {
+ "StackEvents": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "EventId": "d19c8600-3a0e-11e9-a4ba-0a3524ef8042",
+ "StackName": "ansible-test-on-create-failure-delete",
+ "LogicalResourceId": "ansible-test-on-create-failure-delete",
+ "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 38,
+ "second": 13,
+ "microsecond": 177000
+ },
+ "ResourceStatus": "DELETE_COMPLETE"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "EventId": "ECRRepo-DELETE_COMPLETE-2019-02-26T21:38:12.486Z",
+ "StackName": "ansible-test-on-create-failure-delete",
+ "LogicalResourceId": "ECRRepo",
+ "PhysicalResourceId": "ansib-ecrre-8jlpw72yz5x8",
+ "ResourceType": "AWS::ECR::Repository",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 38,
+ "second": 12,
+ "microsecond": 486000
+ },
+ "ResourceStatus": "DELETE_COMPLETE",
+ "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "EventId": "ECRRepo-DELETE_IN_PROGRESS-2019-02-26T21:38:12.139Z",
+ "StackName": "ansible-test-on-create-failure-delete",
+ "LogicalResourceId": "ECRRepo",
+ "PhysicalResourceId": "ansib-ecrre-8jlpw72yz5x8",
+ "ResourceType": "AWS::ECR::Repository",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 38,
+ "second": 12,
+ "microsecond": 139000
+ },
+ "ResourceStatus": "DELETE_IN_PROGRESS",
+ "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "EventId": "cafc8250-3a0e-11e9-86c5-02035744c0fa",
+ "StackName": "ansible-test-on-create-failure-delete",
+ "LogicalResourceId": "ansible-test-on-create-failure-delete",
+ "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 38,
+ "second": 2,
+ "microsecond": 76000
+ },
+ "ResourceStatus": "DELETE_IN_PROGRESS",
+ "ResourceStatusReason": "The following resource(s) failed to create: [ECRRepo]. . Delete requested by user."
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "EventId": "ECRRepo-CREATE_FAILED-2019-02-26T21:38:01.107Z",
+ "StackName": "ansible-test-on-create-failure-delete",
+ "LogicalResourceId": "ECRRepo",
+ "PhysicalResourceId": "ansib-ecrre-8jlpw72yz5x8",
+ "ResourceType": "AWS::ECR::Repository",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 38,
+ "second": 1,
+ "microsecond": 107000
+ },
+ "ResourceStatus": "CREATE_FAILED",
+ "ResourceStatusReason": "Invalid parameter at 'PolicyText' failed to satisfy constraint: 'Invalid repository policy provided' (Service: AmazonECR; Status Code: 400; Error Code: InvalidParameterException; Request ID: ca5769ae-3a0e-11e9-a183-3f277586a4cb)",
+ "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "EventId": "ECRRepo-CREATE_IN_PROGRESS-2019-02-26T21:38:00.657Z",
+ "StackName": "ansible-test-on-create-failure-delete",
+ "LogicalResourceId": "ECRRepo",
+ "PhysicalResourceId": "ansib-ecrre-8jlpw72yz5x8",
+ "ResourceType": "AWS::ECR::Repository",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 38,
+ "second": 0,
+ "microsecond": 657000
+ },
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceStatusReason": "Resource creation Initiated",
+ "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "EventId": "ECRRepo-CREATE_IN_PROGRESS-2019-02-26T21:38:00.221Z",
+ "StackName": "ansible-test-on-create-failure-delete",
+ "LogicalResourceId": "ECRRepo",
+ "PhysicalResourceId": "",
+ "ResourceType": "AWS::ECR::Repository",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 38,
+ "second": 0,
+ "microsecond": 221000
+ },
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "EventId": "c74b1310-3a0e-11e9-9a48-067794494828",
+ "StackName": "ansible-test-on-create-failure-delete",
+ "LogicalResourceId": "ansible-test-on-create-failure-delete",
+ "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 37,
+ "second": 55,
+ "microsecond": 909000
+ },
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceStatusReason": "User Initiated"
+ }
+ ],
+ "ResponseMetadata": {
+ "RequestId": "d19fbb1b-3a0e-11e9-b25f-d1217e6893bf",
+ "HTTPStatusCode": 200,
+ "HTTPHeaders": {
+ "x-amzn-requestid": "d19fbb1b-3a0e-11e9-b25f-d1217e6893bf",
+ "content-type": "text/xml",
+ "content-length": "7857",
+ "vary": "Accept-Encoding",
+ "date": "Tue, 26 Feb 2019 21:38:12 GMT"
+ },
+ "RetryAttempts": 0
+ }
+ }
+}
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_5.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_5.json
new file mode 100644
index 00000000..68a743f8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStackEvents_5.json
@@ -0,0 +1,180 @@
+{
+ "status_code": 200,
+ "data": {
+ "StackEvents": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "EventId": "d19c8600-3a0e-11e9-a4ba-0a3524ef8042",
+ "StackName": "ansible-test-on-create-failure-delete",
+ "LogicalResourceId": "ansible-test-on-create-failure-delete",
+ "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 38,
+ "second": 13,
+ "microsecond": 177000
+ },
+ "ResourceStatus": "DELETE_COMPLETE"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "EventId": "ECRRepo-DELETE_COMPLETE-2019-02-26T21:38:12.486Z",
+ "StackName": "ansible-test-on-create-failure-delete",
+ "LogicalResourceId": "ECRRepo",
+ "PhysicalResourceId": "ansib-ecrre-8jlpw72yz5x8",
+ "ResourceType": "AWS::ECR::Repository",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 38,
+ "second": 12,
+ "microsecond": 486000
+ },
+ "ResourceStatus": "DELETE_COMPLETE",
+ "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "EventId": "ECRRepo-DELETE_IN_PROGRESS-2019-02-26T21:38:12.139Z",
+ "StackName": "ansible-test-on-create-failure-delete",
+ "LogicalResourceId": "ECRRepo",
+ "PhysicalResourceId": "ansib-ecrre-8jlpw72yz5x8",
+ "ResourceType": "AWS::ECR::Repository",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 38,
+ "second": 12,
+ "microsecond": 139000
+ },
+ "ResourceStatus": "DELETE_IN_PROGRESS",
+ "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "EventId": "cafc8250-3a0e-11e9-86c5-02035744c0fa",
+ "StackName": "ansible-test-on-create-failure-delete",
+ "LogicalResourceId": "ansible-test-on-create-failure-delete",
+ "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 38,
+ "second": 2,
+ "microsecond": 76000
+ },
+ "ResourceStatus": "DELETE_IN_PROGRESS",
+ "ResourceStatusReason": "The following resource(s) failed to create: [ECRRepo]. . Delete requested by user."
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "EventId": "ECRRepo-CREATE_FAILED-2019-02-26T21:38:01.107Z",
+ "StackName": "ansible-test-on-create-failure-delete",
+ "LogicalResourceId": "ECRRepo",
+ "PhysicalResourceId": "ansib-ecrre-8jlpw72yz5x8",
+ "ResourceType": "AWS::ECR::Repository",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 38,
+ "second": 1,
+ "microsecond": 107000
+ },
+ "ResourceStatus": "CREATE_FAILED",
+ "ResourceStatusReason": "Invalid parameter at 'PolicyText' failed to satisfy constraint: 'Invalid repository policy provided' (Service: AmazonECR; Status Code: 400; Error Code: InvalidParameterException; Request ID: ca5769ae-3a0e-11e9-a183-3f277586a4cb)",
+ "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "EventId": "ECRRepo-CREATE_IN_PROGRESS-2019-02-26T21:38:00.657Z",
+ "StackName": "ansible-test-on-create-failure-delete",
+ "LogicalResourceId": "ECRRepo",
+ "PhysicalResourceId": "ansib-ecrre-8jlpw72yz5x8",
+ "ResourceType": "AWS::ECR::Repository",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 38,
+ "second": 0,
+ "microsecond": 657000
+ },
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceStatusReason": "Resource creation Initiated",
+ "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "EventId": "ECRRepo-CREATE_IN_PROGRESS-2019-02-26T21:38:00.221Z",
+ "StackName": "ansible-test-on-create-failure-delete",
+ "LogicalResourceId": "ECRRepo",
+ "PhysicalResourceId": "",
+ "ResourceType": "AWS::ECR::Repository",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 38,
+ "second": 0,
+ "microsecond": 221000
+ },
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "EventId": "c74b1310-3a0e-11e9-9a48-067794494828",
+ "StackName": "ansible-test-on-create-failure-delete",
+ "LogicalResourceId": "ansible-test-on-create-failure-delete",
+ "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 37,
+ "second": 55,
+ "microsecond": 909000
+ },
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceStatusReason": "User Initiated"
+ }
+ ],
+ "ResponseMetadata": {
+ "RequestId": "d4fbddab-3a0e-11e9-b25f-d1217e6893bf",
+ "HTTPStatusCode": 200,
+ "HTTPHeaders": {
+ "x-amzn-requestid": "d4fbddab-3a0e-11e9-b25f-d1217e6893bf",
+ "content-type": "text/xml",
+ "content-length": "7857",
+ "vary": "Accept-Encoding",
+ "date": "Tue, 26 Feb 2019 21:38:18 GMT"
+ },
+ "RetryAttempts": 0
+ }
+ }
+}
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_1.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_1.json
new file mode 100644
index 00000000..cf5f86ac
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_1.json
@@ -0,0 +1,42 @@
+{
+ "status_code": 200,
+ "data": {
+ "Stacks": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "StackName": "ansible-test-on-create-failure-delete",
+ "CreationTime": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 37,
+ "second": 55,
+ "microsecond": 909000
+ },
+ "RollbackConfiguration": {},
+ "StackStatus": "CREATE_IN_PROGRESS",
+ "StackStatusReason": "User Initiated",
+ "DisableRollback": false,
+ "NotificationARNs": [],
+ "Tags": [],
+ "EnableTerminationProtection": false,
+ "DriftInformation": {
+ "StackDriftStatus": "NOT_CHECKED"
+ }
+ }
+ ],
+ "ResponseMetadata": {
+ "RequestId": "c77fb823-3a0e-11e9-b25f-d1217e6893bf",
+ "HTTPStatusCode": 200,
+ "HTTPHeaders": {
+ "x-amzn-requestid": "c77fb823-3a0e-11e9-b25f-d1217e6893bf",
+ "content-type": "text/xml",
+ "content-length": "1041",
+ "date": "Tue, 26 Feb 2019 21:37:56 GMT"
+ },
+ "RetryAttempts": 0
+ }
+ }
+}
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_2.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_2.json
new file mode 100644
index 00000000..71a9f54b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_2.json
@@ -0,0 +1,41 @@
+{
+ "status_code": 200,
+ "data": {
+ "Stacks": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "StackName": "ansible-test-on-create-failure-delete",
+ "CreationTime": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 37,
+ "second": 55,
+ "microsecond": 909000
+ },
+ "RollbackConfiguration": {},
+ "StackStatus": "CREATE_IN_PROGRESS",
+ "DisableRollback": false,
+ "NotificationARNs": [],
+ "Tags": [],
+ "EnableTerminationProtection": false,
+ "DriftInformation": {
+ "StackDriftStatus": "NOT_CHECKED"
+ }
+ }
+ ],
+ "ResponseMetadata": {
+ "RequestId": "cad153b2-3a0e-11e9-b25f-d1217e6893bf",
+ "HTTPStatusCode": 200,
+ "HTTPHeaders": {
+ "x-amzn-requestid": "cad153b2-3a0e-11e9-b25f-d1217e6893bf",
+ "content-type": "text/xml",
+ "content-length": "979",
+ "date": "Tue, 26 Feb 2019 21:38:01 GMT"
+ },
+ "RetryAttempts": 0
+ }
+ }
+}
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_3.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_3.json
new file mode 100644
index 00000000..c2028183
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_3.json
@@ -0,0 +1,52 @@
+{
+ "status_code": 200,
+ "data": {
+ "Stacks": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "StackName": "ansible-test-on-create-failure-delete",
+ "CreationTime": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 37,
+ "second": 55,
+ "microsecond": 909000
+ },
+ "DeletionTime": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 38,
+ "second": 2,
+ "microsecond": 76000
+ },
+ "RollbackConfiguration": {},
+ "StackStatus": "DELETE_IN_PROGRESS",
+ "StackStatusReason": "The following resource(s) failed to create: [ECRRepo]. . Delete requested by user.",
+ "DisableRollback": false,
+ "NotificationARNs": [],
+ "Tags": [],
+ "EnableTerminationProtection": false,
+ "DriftInformation": {
+ "StackDriftStatus": "NOT_CHECKED"
+ }
+ }
+ ],
+ "ResponseMetadata": {
+ "RequestId": "ce24289a-3a0e-11e9-b25f-d1217e6893bf",
+ "HTTPStatusCode": 200,
+ "HTTPHeaders": {
+ "x-amzn-requestid": "ce24289a-3a0e-11e9-b25f-d1217e6893bf",
+ "content-type": "text/xml",
+ "content-length": "1171",
+ "date": "Tue, 26 Feb 2019 21:38:06 GMT"
+ },
+ "RetryAttempts": 0
+ }
+ }
+}
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_4.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_4.json
new file mode 100644
index 00000000..89f83553
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_4.json
@@ -0,0 +1,51 @@
+{
+ "status_code": 200,
+ "data": {
+ "Stacks": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "StackName": "ansible-test-on-create-failure-delete",
+ "CreationTime": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 37,
+ "second": 55,
+ "microsecond": 909000
+ },
+ "DeletionTime": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 38,
+ "second": 2,
+ "microsecond": 76000
+ },
+ "RollbackConfiguration": {},
+ "StackStatus": "DELETE_IN_PROGRESS",
+ "DisableRollback": false,
+ "NotificationARNs": [],
+ "Tags": [],
+ "EnableTerminationProtection": false,
+ "DriftInformation": {
+ "StackDriftStatus": "NOT_CHECKED"
+ }
+ }
+ ],
+ "ResponseMetadata": {
+ "RequestId": "d16c27f2-3a0e-11e9-b25f-d1217e6893bf",
+ "HTTPStatusCode": 200,
+ "HTTPHeaders": {
+ "x-amzn-requestid": "d16c27f2-3a0e-11e9-b25f-d1217e6893bf",
+ "content-type": "text/xml",
+ "content-length": "1041",
+ "date": "Tue, 26 Feb 2019 21:38:12 GMT"
+ },
+ "RetryAttempts": 0
+ }
+ }
+}
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_5.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_5.json
new file mode 100644
index 00000000..739c8293
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_delete/cloudformation.DescribeStacks_5.json
@@ -0,0 +1,50 @@
+{
+ "status_code": 200,
+ "data": {
+ "Stacks": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-delete/c74a4fc0-3a0e-11e9-9a48-067794494828",
+ "StackName": "ansible-test-on-create-failure-delete",
+ "CreationTime": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 37,
+ "second": 55,
+ "microsecond": 909000
+ },
+ "DeletionTime": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 38,
+ "second": 2,
+ "microsecond": 76000
+ },
+ "RollbackConfiguration": {},
+ "StackStatus": "DELETE_COMPLETE",
+ "DisableRollback": false,
+ "NotificationARNs": [],
+ "Tags": [],
+ "DriftInformation": {
+ "StackDriftStatus": "NOT_CHECKED"
+ }
+ }
+ ],
+ "ResponseMetadata": {
+ "RequestId": "d4c90dd6-3a0e-11e9-b25f-d1217e6893bf",
+ "HTTPStatusCode": 200,
+ "HTTPHeaders": {
+ "x-amzn-requestid": "d4c90dd6-3a0e-11e9-b25f-d1217e6893bf",
+ "content-type": "text/xml",
+ "content-length": "965",
+ "date": "Tue, 26 Feb 2019 21:38:18 GMT"
+ },
+ "RetryAttempts": 0
+ }
+ }
+}
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.CreateStack_1.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.CreateStack_1.json
new file mode 100644
index 00000000..86f1945f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.CreateStack_1.json
@@ -0,0 +1,17 @@
+{
+ "status_code": 200,
+ "data": {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-do-nothing/a39dd0a0-3a0f-11e9-96ca-02f46dd00950",
+ "ResponseMetadata": {
+ "RequestId": "a396a58a-3a0f-11e9-b7db-3fe3824c73cb",
+ "HTTPStatusCode": 200,
+ "HTTPHeaders": {
+ "x-amzn-requestid": "a396a58a-3a0f-11e9-b7db-3fe3824c73cb",
+ "content-type": "text/xml",
+ "content-length": "411",
+ "date": "Tue, 26 Feb 2019 21:44:05 GMT"
+ },
+ "RetryAttempts": 0
+ }
+ }
+}
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DeleteStack_1.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DeleteStack_1.json
new file mode 100644
index 00000000..1a3a67c6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DeleteStack_1.json
@@ -0,0 +1,16 @@
+{
+ "status_code": 200,
+ "data": {
+ "ResponseMetadata": {
+ "RequestId": "a78f0832-3a0f-11e9-b7db-3fe3824c73cb",
+ "HTTPStatusCode": 200,
+ "HTTPHeaders": {
+ "x-amzn-requestid": "a78f0832-3a0f-11e9-b7db-3fe3824c73cb",
+ "content-type": "text/xml",
+ "content-length": "212",
+ "date": "Tue, 26 Feb 2019 21:44:11 GMT"
+ },
+ "RetryAttempts": 0
+ }
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStackEvents_1.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStackEvents_1.json
new file mode 100644
index 00000000..58d7a89e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStackEvents_1.json
@@ -0,0 +1,38 @@
+{
+ "status_code": 200,
+ "data": {
+ "StackEvents": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-do-nothing/a39dd0a0-3a0f-11e9-96ca-02f46dd00950",
+ "EventId": "a39e6ce0-3a0f-11e9-96ca-02f46dd00950",
+ "StackName": "ansible-test-on-create-failure-do-nothing",
+ "LogicalResourceId": "ansible-test-on-create-failure-do-nothing",
+ "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-do-nothing/a39dd0a0-3a0f-11e9-96ca-02f46dd00950",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 44,
+ "second": 5,
+ "microsecond": 553000
+ },
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceStatusReason": "User Initiated"
+ }
+ ],
+ "ResponseMetadata": {
+ "RequestId": "a406cc84-3a0f-11e9-b7db-3fe3824c73cb",
+ "HTTPStatusCode": 200,
+ "HTTPHeaders": {
+ "x-amzn-requestid": "a406cc84-3a0f-11e9-b7db-3fe3824c73cb",
+ "content-type": "text/xml",
+ "content-length": "1169",
+ "date": "Tue, 26 Feb 2019 21:44:06 GMT"
+ },
+ "RetryAttempts": 0
+ }
+ }
+}
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStackEvents_2.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStackEvents_2.json
new file mode 100644
index 00000000..0a7e32e4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStackEvents_2.json
@@ -0,0 +1,121 @@
+{
+ "status_code": 200,
+ "data": {
+ "StackEvents": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-do-nothing/a39dd0a0-3a0f-11e9-96ca-02f46dd00950",
+ "EventId": "a6c32c80-3a0f-11e9-ac5e-06deb474fa52",
+ "StackName": "ansible-test-on-create-failure-do-nothing",
+ "LogicalResourceId": "ansible-test-on-create-failure-do-nothing",
+ "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-do-nothing/a39dd0a0-3a0f-11e9-96ca-02f46dd00950",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 44,
+ "second": 10,
+ "microsecond": 804000
+ },
+ "ResourceStatus": "CREATE_FAILED",
+ "ResourceStatusReason": "The following resource(s) failed to create: [ECRRepo]. "
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-do-nothing/a39dd0a0-3a0f-11e9-96ca-02f46dd00950",
+ "EventId": "ECRRepo-CREATE_FAILED-2019-02-26T21:44:09.905Z",
+ "StackName": "ansible-test-on-create-failure-do-nothing",
+ "LogicalResourceId": "ECRRepo",
+ "PhysicalResourceId": "ansib-ecrre-a8g0mh5il4t5",
+ "ResourceType": "AWS::ECR::Repository",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 44,
+ "second": 9,
+ "microsecond": 905000
+ },
+ "ResourceStatus": "CREATE_FAILED",
+ "ResourceStatusReason": "Invalid parameter at 'PolicyText' failed to satisfy constraint: 'Invalid repository policy provided' (Service: AmazonECR; Status Code: 400; Error Code: InvalidParameterException; Request ID: a62a6f71-3a0f-11e9-9164-457e0a3a5e1b)",
+ "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-do-nothing/a39dd0a0-3a0f-11e9-96ca-02f46dd00950",
+ "EventId": "ECRRepo-CREATE_IN_PROGRESS-2019-02-26T21:44:09.497Z",
+ "StackName": "ansible-test-on-create-failure-do-nothing",
+ "LogicalResourceId": "ECRRepo",
+ "PhysicalResourceId": "ansib-ecrre-a8g0mh5il4t5",
+ "ResourceType": "AWS::ECR::Repository",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 44,
+ "second": 9,
+ "microsecond": 497000
+ },
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceStatusReason": "Resource creation Initiated",
+ "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-do-nothing/a39dd0a0-3a0f-11e9-96ca-02f46dd00950",
+ "EventId": "ECRRepo-CREATE_IN_PROGRESS-2019-02-26T21:44:09.076Z",
+ "StackName": "ansible-test-on-create-failure-do-nothing",
+ "LogicalResourceId": "ECRRepo",
+ "PhysicalResourceId": "",
+ "ResourceType": "AWS::ECR::Repository",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 44,
+ "second": 9,
+ "microsecond": 76000
+ },
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-do-nothing/a39dd0a0-3a0f-11e9-96ca-02f46dd00950",
+ "EventId": "a39e6ce0-3a0f-11e9-96ca-02f46dd00950",
+ "StackName": "ansible-test-on-create-failure-do-nothing",
+ "LogicalResourceId": "ansible-test-on-create-failure-do-nothing",
+ "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-do-nothing/a39dd0a0-3a0f-11e9-96ca-02f46dd00950",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 44,
+ "second": 5,
+ "microsecond": 553000
+ },
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceStatusReason": "User Initiated"
+ }
+ ],
+ "ResponseMetadata": {
+ "RequestId": "a75fbad0-3a0f-11e9-b7db-3fe3824c73cb",
+ "HTTPStatusCode": 200,
+ "HTTPHeaders": {
+ "x-amzn-requestid": "a75fbad0-3a0f-11e9-b7db-3fe3824c73cb",
+ "content-type": "text/xml",
+ "content-length": "5231",
+ "vary": "Accept-Encoding",
+ "date": "Tue, 26 Feb 2019 21:44:11 GMT"
+ },
+ "RetryAttempts": 0
+ }
+ }
+}
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStacks_1.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStacks_1.json
new file mode 100644
index 00000000..53214331
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStacks_1.json
@@ -0,0 +1,42 @@
+{
+ "status_code": 200,
+ "data": {
+ "Stacks": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-do-nothing/a39dd0a0-3a0f-11e9-96ca-02f46dd00950",
+ "StackName": "ansible-test-on-create-failure-do-nothing",
+ "CreationTime": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 44,
+ "second": 5,
+ "microsecond": 553000
+ },
+ "RollbackConfiguration": {},
+ "StackStatus": "CREATE_IN_PROGRESS",
+ "StackStatusReason": "User Initiated",
+ "DisableRollback": true,
+ "NotificationARNs": [],
+ "Tags": [],
+ "EnableTerminationProtection": false,
+ "DriftInformation": {
+ "StackDriftStatus": "NOT_CHECKED"
+ }
+ }
+ ],
+ "ResponseMetadata": {
+ "RequestId": "a3d44acf-3a0f-11e9-b7db-3fe3824c73cb",
+ "HTTPStatusCode": 200,
+ "HTTPHeaders": {
+ "x-amzn-requestid": "a3d44acf-3a0f-11e9-b7db-3fe3824c73cb",
+ "content-type": "text/xml",
+ "content-length": "1048",
+ "date": "Tue, 26 Feb 2019 21:44:05 GMT"
+ },
+ "RetryAttempts": 0
+ }
+ }
+}
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStacks_2.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStacks_2.json
new file mode 100644
index 00000000..df17f5a7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_do_nothing/cloudformation.DescribeStacks_2.json
@@ -0,0 +1,42 @@
+{
+ "status_code": 200,
+ "data": {
+ "Stacks": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-do-nothing/a39dd0a0-3a0f-11e9-96ca-02f46dd00950",
+ "StackName": "ansible-test-on-create-failure-do-nothing",
+ "CreationTime": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 44,
+ "second": 5,
+ "microsecond": 553000
+ },
+ "RollbackConfiguration": {},
+ "StackStatus": "CREATE_FAILED",
+ "StackStatusReason": "The following resource(s) failed to create: [ECRRepo]. ",
+ "DisableRollback": true,
+ "NotificationARNs": [],
+ "Tags": [],
+ "EnableTerminationProtection": false,
+ "DriftInformation": {
+ "StackDriftStatus": "NOT_CHECKED"
+ }
+ }
+ ],
+ "ResponseMetadata": {
+ "RequestId": "a7301f4a-3a0f-11e9-b7db-3fe3824c73cb",
+ "HTTPStatusCode": 200,
+ "HTTPHeaders": {
+ "x-amzn-requestid": "a7301f4a-3a0f-11e9-b7db-3fe3824c73cb",
+ "content-type": "text/xml",
+ "content-length": "1084",
+ "date": "Tue, 26 Feb 2019 21:44:11 GMT"
+ },
+ "RetryAttempts": 0
+ }
+ }
+}
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.CreateStack_1.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.CreateStack_1.json
new file mode 100644
index 00000000..f71422b9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.CreateStack_1.json
@@ -0,0 +1,17 @@
+{
+ "status_code": 200,
+ "data": {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014",
+ "ResponseMetadata": {
+ "RequestId": "9139de54-3a0f-11e9-b938-97983b40cabe",
+ "HTTPStatusCode": 200,
+ "HTTPHeaders": {
+ "x-amzn-requestid": "9139de54-3a0f-11e9-b938-97983b40cabe",
+ "content-type": "text/xml",
+ "content-length": "409",
+ "date": "Tue, 26 Feb 2019 21:43:34 GMT"
+ },
+ "RetryAttempts": 0
+ }
+ }
+}
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DeleteStack_1.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DeleteStack_1.json
new file mode 100644
index 00000000..111dc90d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DeleteStack_1.json
@@ -0,0 +1,16 @@
+{
+ "status_code": 200,
+ "data": {
+ "ResponseMetadata": {
+ "RequestId": "988b3097-3a0f-11e9-b938-97983b40cabe",
+ "HTTPStatusCode": 200,
+ "HTTPHeaders": {
+ "x-amzn-requestid": "988b3097-3a0f-11e9-b938-97983b40cabe",
+ "content-type": "text/xml",
+ "content-length": "212",
+ "date": "Tue, 26 Feb 2019 21:43:46 GMT"
+ },
+ "RetryAttempts": 0
+ }
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStackEvents_1.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStackEvents_1.json
new file mode 100644
index 00000000..2bcac7f0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStackEvents_1.json
@@ -0,0 +1,38 @@
+{
+ "status_code": 200,
+ "data": {
+ "StackEvents": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014",
+ "EventId": "9140bc10-3a0f-11e9-94bf-0a9edf17d014",
+ "StackName": "ansible-test-on-create-failure-rollback",
+ "LogicalResourceId": "ansible-test-on-create-failure-rollback",
+ "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 43,
+ "second": 34,
+ "microsecond": 740000
+ },
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceStatusReason": "User Initiated"
+ }
+ ],
+ "ResponseMetadata": {
+ "RequestId": "9199b1a7-3a0f-11e9-b938-97983b40cabe",
+ "HTTPStatusCode": 200,
+ "HTTPHeaders": {
+ "x-amzn-requestid": "9199b1a7-3a0f-11e9-b938-97983b40cabe",
+ "content-type": "text/xml",
+ "content-length": "1161",
+ "date": "Tue, 26 Feb 2019 21:43:35 GMT"
+ },
+ "RetryAttempts": 0
+ }
+ }
+}
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStackEvents_2.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStackEvents_2.json
new file mode 100644
index 00000000..3992fd39
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStackEvents_2.json
@@ -0,0 +1,121 @@
+{
+ "status_code": 200,
+ "data": {
+ "StackEvents": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014",
+ "EventId": "945b90a0-3a0f-11e9-adaf-0211d8bec7e2",
+ "StackName": "ansible-test-on-create-failure-rollback",
+ "LogicalResourceId": "ansible-test-on-create-failure-rollback",
+ "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 43,
+ "second": 39,
+ "microsecond": 920000
+ },
+ "ResourceStatus": "ROLLBACK_IN_PROGRESS",
+ "ResourceStatusReason": "The following resource(s) failed to create: [ECRRepo]. . Rollback requested by user."
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014",
+ "EventId": "ECRRepo-CREATE_FAILED-2019-02-26T21:43:39.210Z",
+ "StackName": "ansible-test-on-create-failure-rollback",
+ "LogicalResourceId": "ECRRepo",
+ "PhysicalResourceId": "ansib-ecrre-1lsnxu2zpb20l",
+ "ResourceType": "AWS::ECR::Repository",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 43,
+ "second": 39,
+ "microsecond": 210000
+ },
+ "ResourceStatus": "CREATE_FAILED",
+ "ResourceStatusReason": "Invalid parameter at 'PolicyText' failed to satisfy constraint: 'Invalid repository policy provided' (Service: AmazonECR; Status Code: 400; Error Code: InvalidParameterException; Request ID: 93e0bb60-3a0f-11e9-a53c-7162bb423e4d)",
+ "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014",
+ "EventId": "ECRRepo-CREATE_IN_PROGRESS-2019-02-26T21:43:38.793Z",
+ "StackName": "ansible-test-on-create-failure-rollback",
+ "LogicalResourceId": "ECRRepo",
+ "PhysicalResourceId": "ansib-ecrre-1lsnxu2zpb20l",
+ "ResourceType": "AWS::ECR::Repository",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 43,
+ "second": 38,
+ "microsecond": 793000
+ },
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceStatusReason": "Resource creation Initiated",
+ "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014",
+ "EventId": "ECRRepo-CREATE_IN_PROGRESS-2019-02-26T21:43:38.266Z",
+ "StackName": "ansible-test-on-create-failure-rollback",
+ "LogicalResourceId": "ECRRepo",
+ "PhysicalResourceId": "",
+ "ResourceType": "AWS::ECR::Repository",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 43,
+ "second": 38,
+ "microsecond": 266000
+ },
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014",
+ "EventId": "9140bc10-3a0f-11e9-94bf-0a9edf17d014",
+ "StackName": "ansible-test-on-create-failure-rollback",
+ "LogicalResourceId": "ansible-test-on-create-failure-rollback",
+ "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 43,
+ "second": 34,
+ "microsecond": 740000
+ },
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceStatusReason": "User Initiated"
+ }
+ ],
+ "ResponseMetadata": {
+ "RequestId": "94e16307-3a0f-11e9-b938-97983b40cabe",
+ "HTTPStatusCode": 200,
+ "HTTPHeaders": {
+ "x-amzn-requestid": "94e16307-3a0f-11e9-b938-97983b40cabe",
+ "content-type": "text/xml",
+ "content-length": "5241",
+ "vary": "Accept-Encoding",
+ "date": "Tue, 26 Feb 2019 21:43:40 GMT"
+ },
+ "RetryAttempts": 0
+ }
+ }
+}
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStackEvents_3.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStackEvents_3.json
new file mode 100644
index 00000000..e272c734
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStackEvents_3.json
@@ -0,0 +1,180 @@
+{
+ "status_code": 200,
+ "data": {
+ "StackEvents": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014",
+ "EventId": "9743bc70-3a0f-11e9-b335-0ade61d04ee6",
+ "StackName": "ansible-test-on-create-failure-rollback",
+ "LogicalResourceId": "ansible-test-on-create-failure-rollback",
+ "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 43,
+ "second": 44,
+ "microsecond": 797000
+ },
+ "ResourceStatus": "ROLLBACK_COMPLETE"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014",
+ "EventId": "ECRRepo-DELETE_COMPLETE-2019-02-26T21:43:43.908Z",
+ "StackName": "ansible-test-on-create-failure-rollback",
+ "LogicalResourceId": "ECRRepo",
+ "PhysicalResourceId": "ansib-ecrre-1lsnxu2zpb20l",
+ "ResourceType": "AWS::ECR::Repository",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 43,
+ "second": 43,
+ "microsecond": 908000
+ },
+ "ResourceStatus": "DELETE_COMPLETE",
+ "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014",
+ "EventId": "ECRRepo-DELETE_IN_PROGRESS-2019-02-26T21:43:43.478Z",
+ "StackName": "ansible-test-on-create-failure-rollback",
+ "LogicalResourceId": "ECRRepo",
+ "PhysicalResourceId": "ansib-ecrre-1lsnxu2zpb20l",
+ "ResourceType": "AWS::ECR::Repository",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 43,
+ "second": 43,
+ "microsecond": 478000
+ },
+ "ResourceStatus": "DELETE_IN_PROGRESS",
+ "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014",
+ "EventId": "945b90a0-3a0f-11e9-adaf-0211d8bec7e2",
+ "StackName": "ansible-test-on-create-failure-rollback",
+ "LogicalResourceId": "ansible-test-on-create-failure-rollback",
+ "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 43,
+ "second": 39,
+ "microsecond": 920000
+ },
+ "ResourceStatus": "ROLLBACK_IN_PROGRESS",
+ "ResourceStatusReason": "The following resource(s) failed to create: [ECRRepo]. . Rollback requested by user."
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014",
+ "EventId": "ECRRepo-CREATE_FAILED-2019-02-26T21:43:39.210Z",
+ "StackName": "ansible-test-on-create-failure-rollback",
+ "LogicalResourceId": "ECRRepo",
+ "PhysicalResourceId": "ansib-ecrre-1lsnxu2zpb20l",
+ "ResourceType": "AWS::ECR::Repository",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 43,
+ "second": 39,
+ "microsecond": 210000
+ },
+ "ResourceStatus": "CREATE_FAILED",
+ "ResourceStatusReason": "Invalid parameter at 'PolicyText' failed to satisfy constraint: 'Invalid repository policy provided' (Service: AmazonECR; Status Code: 400; Error Code: InvalidParameterException; Request ID: 93e0bb60-3a0f-11e9-a53c-7162bb423e4d)",
+ "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014",
+ "EventId": "ECRRepo-CREATE_IN_PROGRESS-2019-02-26T21:43:38.793Z",
+ "StackName": "ansible-test-on-create-failure-rollback",
+ "LogicalResourceId": "ECRRepo",
+ "PhysicalResourceId": "ansib-ecrre-1lsnxu2zpb20l",
+ "ResourceType": "AWS::ECR::Repository",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 43,
+ "second": 38,
+ "microsecond": 793000
+ },
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceStatusReason": "Resource creation Initiated",
+ "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014",
+ "EventId": "ECRRepo-CREATE_IN_PROGRESS-2019-02-26T21:43:38.266Z",
+ "StackName": "ansible-test-on-create-failure-rollback",
+ "LogicalResourceId": "ECRRepo",
+ "PhysicalResourceId": "",
+ "ResourceType": "AWS::ECR::Repository",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 43,
+ "second": 38,
+ "microsecond": 266000
+ },
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceProperties": "{\"RepositoryPolicyText\":{\"Version\":\"3000-10-17\",\"Statement\":[{\"Action\":[\"ecr:*\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"}}]}}"
+ },
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014",
+ "EventId": "9140bc10-3a0f-11e9-94bf-0a9edf17d014",
+ "StackName": "ansible-test-on-create-failure-rollback",
+ "LogicalResourceId": "ansible-test-on-create-failure-rollback",
+ "PhysicalResourceId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014",
+ "ResourceType": "AWS::CloudFormation::Stack",
+ "Timestamp": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 43,
+ "second": 34,
+ "microsecond": 740000
+ },
+ "ResourceStatus": "CREATE_IN_PROGRESS",
+ "ResourceStatusReason": "User Initiated"
+ }
+ ],
+ "ResponseMetadata": {
+ "RequestId": "982d0bff-3a0f-11e9-b938-97983b40cabe",
+ "HTTPStatusCode": 200,
+ "HTTPHeaders": {
+ "x-amzn-requestid": "982d0bff-3a0f-11e9-b938-97983b40cabe",
+ "content-type": "text/xml",
+ "content-length": "7911",
+ "vary": "Accept-Encoding",
+ "date": "Tue, 26 Feb 2019 21:43:45 GMT"
+ },
+ "RetryAttempts": 0
+ }
+ }
+}
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStacks_1.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStacks_1.json
new file mode 100644
index 00000000..25facea1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStacks_1.json
@@ -0,0 +1,42 @@
+{
+ "status_code": 200,
+ "data": {
+ "Stacks": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014",
+ "StackName": "ansible-test-on-create-failure-rollback",
+ "CreationTime": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 43,
+ "second": 34,
+ "microsecond": 740000
+ },
+ "RollbackConfiguration": {},
+ "StackStatus": "CREATE_IN_PROGRESS",
+ "StackStatusReason": "User Initiated",
+ "DisableRollback": false,
+ "NotificationARNs": [],
+ "Tags": [],
+ "EnableTerminationProtection": false,
+ "DriftInformation": {
+ "StackDriftStatus": "NOT_CHECKED"
+ }
+ }
+ ],
+ "ResponseMetadata": {
+ "RequestId": "91725383-3a0f-11e9-b938-97983b40cabe",
+ "HTTPStatusCode": 200,
+ "HTTPHeaders": {
+ "x-amzn-requestid": "91725383-3a0f-11e9-b938-97983b40cabe",
+ "content-type": "text/xml",
+ "content-length": "1045",
+ "date": "Tue, 26 Feb 2019 21:43:35 GMT"
+ },
+ "RetryAttempts": 0
+ }
+ }
+}
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStacks_2.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStacks_2.json
new file mode 100644
index 00000000..55a80d8a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStacks_2.json
@@ -0,0 +1,52 @@
+{
+ "status_code": 200,
+ "data": {
+ "Stacks": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014",
+ "StackName": "ansible-test-on-create-failure-rollback",
+ "CreationTime": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 43,
+ "second": 34,
+ "microsecond": 740000
+ },
+ "DeletionTime": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 43,
+ "second": 39,
+ "microsecond": 920000
+ },
+ "RollbackConfiguration": {},
+ "StackStatus": "ROLLBACK_IN_PROGRESS",
+ "StackStatusReason": "The following resource(s) failed to create: [ECRRepo]. . Rollback requested by user.",
+ "DisableRollback": false,
+ "NotificationARNs": [],
+ "Tags": [],
+ "EnableTerminationProtection": false,
+ "DriftInformation": {
+ "StackDriftStatus": "NOT_CHECKED"
+ }
+ }
+ ],
+ "ResponseMetadata": {
+ "RequestId": "94bb1651-3a0f-11e9-b938-97983b40cabe",
+ "HTTPStatusCode": 200,
+ "HTTPHeaders": {
+ "x-amzn-requestid": "94bb1651-3a0f-11e9-b938-97983b40cabe",
+ "content-type": "text/xml",
+ "content-length": "1179",
+ "date": "Tue, 26 Feb 2019 21:43:40 GMT"
+ },
+ "RetryAttempts": 0
+ }
+ }
+}
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStacks_3.json b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStacks_3.json
new file mode 100644
index 00000000..7c00a836
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/placebo_recordings/cloudformation/on_create_failure_rollback/cloudformation.DescribeStacks_3.json
@@ -0,0 +1,51 @@
+{
+ "status_code": 200,
+ "data": {
+ "Stacks": [
+ {
+ "StackId": "arn:aws:cloudformation:us-west-2:123456789012:stack/ansible-test-on-create-failure-rollback/914046e0-3a0f-11e9-94bf-0a9edf17d014",
+ "StackName": "ansible-test-on-create-failure-rollback",
+ "CreationTime": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 43,
+ "second": 34,
+ "microsecond": 740000
+ },
+ "DeletionTime": {
+ "__class__": "datetime",
+ "year": 2019,
+ "month": 2,
+ "day": 26,
+ "hour": 21,
+ "minute": 43,
+ "second": 39,
+ "microsecond": 920000
+ },
+ "RollbackConfiguration": {},
+ "StackStatus": "ROLLBACK_COMPLETE",
+ "DisableRollback": false,
+ "NotificationARNs": [],
+ "Tags": [],
+ "EnableTerminationProtection": false,
+ "DriftInformation": {
+ "StackDriftStatus": "NOT_CHECKED"
+ }
+ }
+ ],
+ "ResponseMetadata": {
+ "RequestId": "98016814-3a0f-11e9-b938-97983b40cabe",
+ "HTTPStatusCode": 200,
+ "HTTPHeaders": {
+ "x-amzn-requestid": "98016814-3a0f-11e9-b938-97983b40cabe",
+ "content-type": "text/xml",
+ "content-length": "1044",
+ "date": "Tue, 26 Feb 2019 21:43:45 GMT"
+ },
+ "RetryAttempts": 0
+ }
+ }
+}
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_aws_s3.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_aws_s3.py
new file mode 100644
index 00000000..7d34a84f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_aws_s3.py
@@ -0,0 +1,38 @@
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+import unittest
+
+try:
+ import ansible_collections.amazon.aws.plugins.modules.aws_s3 as s3
+except ImportError:
+ pytestmark = pytest.mark.skip("This test requires the s3 Python libraries")
+
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+
+boto3 = pytest.importorskip("boto3")
+
+
+class TestUrlparse(unittest.TestCase):
+
+ def test_urlparse(self):
+ actual = urlparse("http://test.com/here")
+ self.assertEqual("http", actual.scheme)
+ self.assertEqual("test.com", actual.netloc)
+ self.assertEqual("/here", actual.path)
+
+ def test_is_fakes3(self):
+ actual = s3.is_fakes3("fakes3://bla.blubb")
+ self.assertEqual(True, actual)
+
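+    # rgw=True makes get_s3_connection treat s3_url as the endpoint to talk to
+    # (e.g. a Ceph RADOS Gateway), which is why the resulting client endpoint
+    # below should contain "bla.blubb"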
+ def test_get_s3_connection(self):
+ aws_connect_kwargs = dict(aws_access_key_id="access_key",
+ aws_secret_access_key="secret_key")
+ location = None
+ rgw = True
+ s3_url = "http://bla.blubb"
+ actual = s3.get_s3_connection(None, aws_connect_kwargs, location, rgw, s3_url)
+        self.assertIn("bla.blubb", str(actual._endpoint))
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_cloudformation.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_cloudformation.py
new file mode 100644
index 00000000..6ee1fcf9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_cloudformation.py
@@ -0,0 +1,213 @@
+# (c) 2017 Red Hat Inc.
+#
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+# Importing these fixtures is enough for pytest to discover them; they look unused to pylint.
+from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import maybe_sleep, placeboify # pylint: disable=unused-import
+
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto_exception
+from ansible_collections.amazon.aws.plugins.modules import cloudformation as cfn_module
+
+basic_yaml_tpl = """
+---
+AWSTemplateFormatVersion: '2010-09-09'
+Description: 'Basic template that creates an S3 bucket'
+Resources:
+ MyBucket:
+ Type: "AWS::S3::Bucket"
+Outputs:
+ TheName:
+ Value:
+ !Ref MyBucket
+"""
+
+bad_json_tpl = """{
+ "AWSTemplateFormatVersion": "2010-09-09",
+ "Description": "Broken template, no comma here ->"
+ "Resources": {
+ "MyBucket": {
+ "Type": "AWS::S3::Bucket"
+ }
+ }
+}"""
+
+failing_yaml_tpl = """
+---
+AWSTemplateFormatVersion: 2010-09-09
+Resources:
+ ECRRepo:
+ Type: AWS::ECR::Repository
+ Properties:
+ RepositoryPolicyText:
+ Version: 3000-10-17 # <--- invalid version
+ Statement:
+ - Effect: Allow
+ Action:
+ - 'ecr:*'
+ Principal:
+ AWS: !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:root
+"""
+
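+# events_limit passed to create_stack()/stack_operation(); caps how many stack
+# events end up in the 'events' key of the result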
+default_events_limit = 10
+
+
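+# minimal stand-in for AnsibleModule: it records the exit/fail arguments and
+# raises so the tests can assert on how the module would have exited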
+class FakeModule(object):
+ def __init__(self, **kwargs):
+ self.params = kwargs
+
+ def fail_json(self, *args, **kwargs):
+ self.exit_args = args
+ self.exit_kwargs = kwargs
+ raise Exception('FAIL')
+
+ def fail_json_aws(self, *args, **kwargs):
+ self.exit_args = args
+ self.exit_kwargs = kwargs
+ raise Exception('FAIL')
+
+ def exit_json(self, *args, **kwargs):
+ self.exit_args = args
+ self.exit_kwargs = kwargs
+ raise Exception('EXIT')
+
+
+def test_invalid_template_json(placeboify):
+ connection = placeboify.client('cloudformation')
+ params = {
+ 'StackName': 'ansible-test-wrong-json',
+ 'TemplateBody': bad_json_tpl,
+ }
+ m = FakeModule(disable_rollback=False)
+ with pytest.raises(Exception) as exc_info:
+ cfn_module.create_stack(m, params, connection, default_events_limit)
+ pytest.fail('Expected malformed JSON to have caused the call to fail')
+
+ assert exc_info.match('FAIL')
+ assert "ValidationError" in boto_exception(m.exit_args[0])
+
+
+def test_client_request_token_s3_stack(maybe_sleep, placeboify):
+ connection = placeboify.client('cloudformation')
+ params = {
+ 'StackName': 'ansible-test-client-request-token-yaml',
+ 'TemplateBody': basic_yaml_tpl,
+ 'ClientRequestToken': '3faf3fb5-b289-41fc-b940-44151828f6cf',
+ }
+ m = FakeModule(disable_rollback=False)
+ result = cfn_module.create_stack(m, params, connection, default_events_limit)
+ assert result['changed']
+ assert len(result['events']) > 1
+ # require that the final recorded stack state was CREATE_COMPLETE
+ # events are retrieved newest-first, so 0 is the latest
+ assert 'CREATE_COMPLETE' in result['events'][0]
+ connection.delete_stack(StackName='ansible-test-client-request-token-yaml')
+
+
+def test_basic_s3_stack(maybe_sleep, placeboify):
+ connection = placeboify.client('cloudformation')
+ params = {
+ 'StackName': 'ansible-test-basic-yaml',
+ 'TemplateBody': basic_yaml_tpl
+ }
+ m = FakeModule(disable_rollback=False)
+ result = cfn_module.create_stack(m, params, connection, default_events_limit)
+ assert result['changed']
+ assert len(result['events']) > 1
+ # require that the final recorded stack state was CREATE_COMPLETE
+ # events are retrieved newest-first, so 0 is the latest
+ assert 'CREATE_COMPLETE' in result['events'][0]
+ connection.delete_stack(StackName='ansible-test-basic-yaml')
+
+
+def test_delete_nonexistent_stack(maybe_sleep, placeboify):
+ connection = placeboify.client('cloudformation')
+ result = cfn_module.stack_operation(connection, 'ansible-test-nonexist', 'DELETE', default_events_limit)
+ assert result['changed']
+ assert 'Stack does not exist.' in result['log']
+
+
+def test_get_nonexistent_stack(placeboify):
+ connection = placeboify.client('cloudformation')
+ assert cfn_module.get_stack_facts(connection, 'ansible-test-nonexist') is None
+
+
+def test_missing_template_body():
+ m = FakeModule()
+ with pytest.raises(Exception) as exc_info:
+ cfn_module.create_stack(
+ module=m,
+ stack_params={},
+ cfn=None,
+ events_limit=default_events_limit
+ )
+ pytest.fail('Expected module to have failed with no template')
+
+ assert exc_info.match('FAIL')
+ assert not m.exit_args
+ assert "Either 'template', 'template_body' or 'template_url' is required when the stack does not exist." == m.exit_kwargs['msg']
+
+
+def test_on_create_failure_delete(maybe_sleep, placeboify):
+ m = FakeModule(
+ on_create_failure='DELETE',
+ disable_rollback=False,
+ )
+ connection = placeboify.client('cloudformation')
+ params = {
+ 'StackName': 'ansible-test-on-create-failure-delete',
+ 'TemplateBody': failing_yaml_tpl
+ }
+ result = cfn_module.create_stack(m, params, connection, default_events_limit)
+ assert result['changed']
+ assert result['failed']
+ assert len(result['events']) > 1
+ # require that the final recorded stack state was DELETE_COMPLETE
+ # events are retrieved newest-first, so 0 is the latest
+ assert 'DELETE_COMPLETE' in result['events'][0]
+
+
+def test_on_create_failure_rollback(maybe_sleep, placeboify):
+ m = FakeModule(
+ on_create_failure='ROLLBACK',
+ disable_rollback=False,
+ )
+ connection = placeboify.client('cloudformation')
+ params = {
+ 'StackName': 'ansible-test-on-create-failure-rollback',
+ 'TemplateBody': failing_yaml_tpl
+ }
+ result = cfn_module.create_stack(m, params, connection, default_events_limit)
+ assert result['changed']
+ assert result['failed']
+ assert len(result['events']) > 1
+ # require that the final recorded stack state was ROLLBACK_COMPLETE
+ # events are retrieved newest-first, so 0 is the latest
+ assert 'ROLLBACK_COMPLETE' in result['events'][0]
+ connection.delete_stack(StackName=params['StackName'])
+
+
+def test_on_create_failure_do_nothing(maybe_sleep, placeboify):
+ m = FakeModule(
+ on_create_failure='DO_NOTHING',
+ disable_rollback=False,
+ )
+ connection = placeboify.client('cloudformation')
+ params = {
+ 'StackName': 'ansible-test-on-create-failure-do-nothing',
+ 'TemplateBody': failing_yaml_tpl
+ }
+ result = cfn_module.create_stack(m, params, connection, default_events_limit)
+ assert result['changed']
+ assert result['failed']
+ assert len(result['events']) > 1
+ # require that the final recorded stack state was CREATE_FAILED
+ # events are retrieved newest-first, so 0 is the latest
+ assert 'CREATE_FAILED' in result['events'][0]
+ connection.delete_stack(StackName=params['StackName'])
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_group.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_group.py
new file mode 100644
index 00000000..9b3a14ea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_group.py
@@ -0,0 +1,83 @@
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.amazon.aws.plugins.modules import ec2_group as group_module
+
+
+def test_from_permission():
+ internal_http = {
+ u'FromPort': 80,
+ u'IpProtocol': 'tcp',
+ u'IpRanges': [
+ {
+ u'CidrIp': '10.0.0.0/8',
+ u'Description': 'Foo Bar Baz'
+ },
+ ],
+ u'Ipv6Ranges': [
+ {u'CidrIpv6': 'fe80::94cc:8aff:fef6:9cc/64'},
+ ],
+ u'PrefixListIds': [],
+ u'ToPort': 80,
+ u'UserIdGroupPairs': [],
+ }
+ perms = list(group_module.rule_from_group_permission(internal_http))
+ assert len(perms) == 2
+ assert perms[0].target == '10.0.0.0/8'
+ assert perms[0].target_type == 'ipv4'
+ assert perms[0].description == 'Foo Bar Baz'
+ assert perms[1].target == 'fe80::94cc:8aff:fef6:9cc/64'
+
+ global_egress = {
+ 'IpProtocol': '-1',
+ 'IpRanges': [{'CidrIp': '0.0.0.0/0'}],
+ 'Ipv6Ranges': [],
+ 'PrefixListIds': [],
+ 'UserIdGroupPairs': []
+ }
+ perms = list(group_module.rule_from_group_permission(global_egress))
+ assert len(perms) == 1
+ assert perms[0].target == '0.0.0.0/0'
+ assert perms[0].port_range == (None, None)
+
+ internal_prefix_http = {
+ u'FromPort': 80,
+ u'IpProtocol': 'tcp',
+ u'PrefixListIds': [
+ {'PrefixListId': 'p-1234'}
+ ],
+ u'ToPort': 80,
+ u'UserIdGroupPairs': [],
+ }
+ perms = list(group_module.rule_from_group_permission(internal_prefix_http))
+ assert len(perms) == 1
+ assert perms[0].target == 'p-1234'
+
+
+def test_rule_to_permission():
+ tests = [
+ group_module.Rule((22, 22), 'udp', 'sg-1234567890', 'group', None),
+ group_module.Rule((1, 65535), 'tcp', '0.0.0.0/0', 'ipv4', "All TCP from everywhere"),
+ group_module.Rule((443, 443), 'tcp', 'ip-123456', 'ip_prefix', "Traffic to privatelink IPs"),
+ group_module.Rule((443, 443), 'tcp', 'feed:dead:::beef/64', 'ipv6', None),
+ ]
+ for test in tests:
+ perm = group_module.to_permission(test)
+        assert (perm['FromPort'], perm['ToPort']) == test.port_range
+ assert perm['IpProtocol'] == test.protocol
+
+
+def test_validate_ip():
+ class Warner(object):
+ def warn(self, msg):
+ return
+ ips = [
+ ('10.1.1.1/24', '10.1.1.0/24'),
+ ('192.168.56.101/16', '192.168.0.0/16'),
+ # Don't modify IPv6 CIDRs, AWS supports /128 and device ranges
+ ('fc00:8fe0:fe80:b897:8990:8a7c:99bf:323d/128', 'fc00:8fe0:fe80:b897:8990:8a7c:99bf:323d/128'),
+ ]
+
+ for ip, net in ips:
+ assert group_module.validate_ip(Warner(), ip) == net
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/utils.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/utils.py
new file mode 100644
index 00000000..058a5b60
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/plugins/modules/utils.py
@@ -0,0 +1,50 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+from ansible_collections.amazon.aws.tests.unit.compat import unittest
+from ansible_collections.amazon.aws.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+
+
+def set_module_args(args):
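+    # serialize the args the same way Ansible passes them to a module, so an
+    # AnsibleModule instantiated in-process parses these values instead of stdin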
+ if '_ansible_remote_tmp' not in args:
+ args['_ansible_remote_tmp'] = '/tmp'
+ if '_ansible_keep_remote_files' not in args:
+ args['_ansible_keep_remote_files'] = False
+
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args)
+
+
+class AnsibleExitJson(Exception):
+ pass
+
+
+class AnsibleFailJson(Exception):
+ pass
+
+
+def exit_json(*args, **kwargs):
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs):
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class ModuleTestCase(unittest.TestCase):
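+    # patches AnsibleModule.exit_json/fail_json so module exits surface as
+    # AnsibleExitJson/AnsibleFailJson exceptions, and stubs time.sleep to keep
+    # tests fast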
+
+ def setUp(self):
+ self.mock_module = patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)
+ self.mock_module.start()
+ self.mock_sleep = patch('time.sleep')
+ self.mock_sleep.start()
+ set_module_args({})
+ self.addCleanup(self.mock_module.stop)
+ self.addCleanup(self.mock_sleep.stop)
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/requirements.txt b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/requirements.txt
new file mode 100644
index 00000000..917ee278
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/requirements.txt
@@ -0,0 +1,2 @@
+boto3
+placebo
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/utils/__init__.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/utils/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/utils/__init__.py
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/utils/amazon_placebo_fixtures.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/utils/amazon_placebo_fixtures.py
new file mode 100644
index 00000000..6912c2e3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/unit/utils/amazon_placebo_fixtures.py
@@ -0,0 +1,213 @@
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import errno
+import os
+import time
+import mock
+import pytest
+
+boto3 = pytest.importorskip("boto3")
+botocore = pytest.importorskip("botocore")
+placebo = pytest.importorskip("placebo")
+
+"""
+Using Placebo to test modules using boto3:
+
+This is an example test, using the placeboify fixture to test that a module
+will fail if resources it depends on don't exist.
+
+> from amazon_placebo_fixtures import placeboify, scratch_vpc
+>
+> def test_create_with_nonexistent_launch_config(placeboify):
+> connection = placeboify.client('autoscaling')
+> module = FakeModule('test-asg-created', None, min_size=0, max_size=0, desired_capacity=0)
+> with pytest.raises(FailJSON) as excinfo:
+> asg_module.create_autoscaling_group(connection, module)
+> .... asserts based on module state/exceptions ....
+
+In more advanced cases, use unrecorded resource fixtures to fill in ARNs/IDs of
+things modules depend on, such as:
+
+> def test_create_in_vpc(placeboify, scratch_vpc):
+> connection = placeboify.client('autoscaling')
+> module = FakeModule(name='test-asg-created',
+> min_size=0, max_size=0, desired_capacity=0,
+> availability_zones=[s['az'] for s in scratch_vpc['subnets']],
+> vpc_zone_identifier=[s['id'] for s in scratch_vpc['subnets']],
+> )
+> ..... so on and so forth ....
+"""
+
+
+@pytest.fixture
+def placeboify(request, monkeypatch):
+ """This fixture puts a recording/replaying harness around `boto3_conn`
+
+    Placeboify patches the `boto3_conn` function in ec2 module_utils to return
+    a boto3 session that runs in recording or replaying mode, depending on the
+    PLACEBO_RECORD environment variable. Leaving PLACEBO_RECORD unset (the
+    common case for just running tests) puts placebo in replay mode; setting
+    PLACEBO_RECORD to any value turns off replay and operates on real AWS
+    resources.
+
+ The recorded sessions are stored in the test file's directory, under the
+ namespace `placebo_recordings/{testfile name}/{test function name}` to
+ distinguish them.
+ """
+ session = boto3.Session(region_name='us-west-2')
+
+ recordings_path = os.path.join(
+ request.fspath.dirname,
+ 'placebo_recordings',
+ request.fspath.basename.replace('.py', ''),
+ request.function.__name__
+ # remove the test_ prefix from the function & file name
+ ).replace('test_', '')
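+    # e.g. test_cloudformation.py::test_basic_s3_stack maps to
+    # placebo_recordings/cloudformation/basic_s3_stack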
+
+ if not os.getenv('PLACEBO_RECORD'):
+ if not os.path.isdir(recordings_path):
+ raise NotImplementedError('Missing Placebo recordings in directory: %s' % recordings_path)
+ else:
+ try:
+ # make sure the directory for placebo test recordings is available
+ os.makedirs(recordings_path)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+ pill = placebo.attach(session, data_path=recordings_path)
+ if os.getenv('PLACEBO_RECORD'):
+ pill.record()
+ else:
+ pill.playback()
+
+ def boto3_middleman_connection(module, conn_type, resource, region='us-west-2', **kwargs):
+ if conn_type != 'client':
+ # TODO support resource-based connections
+ raise ValueError('Mocker only supports client, not %s' % conn_type)
+ return session.client(resource, region_name=region)
+
+ import ansible_collections.amazon.aws.plugins.module_utils.ec2
+ monkeypatch.setattr(
+ ansible_collections.amazon.aws.plugins.module_utils.ec2,
+ 'boto3_conn',
+ boto3_middleman_connection,
+ )
+ yield session
+
+ # tear down
+ pill.stop()
+
+
+@pytest.fixture(scope='module')
+def basic_launch_config():
+ """Create an EC2 launch config whose creation *is not* recorded and return its name
+
+ This fixture is module-scoped, since launch configs are immutable and this
+ can be reused for many tests.
+ """
+ if not os.getenv('PLACEBO_RECORD'):
+ yield 'pytest_basic_lc'
+ return
+
+    # use a *non-recording* session to make the launch config,
+    # since that's a prereq of the ec2_asg module and isn't what
+    # we're testing.
+ asg = boto3.client('autoscaling')
+ asg.create_launch_configuration(
+ LaunchConfigurationName='pytest_basic_lc',
+ ImageId='ami-9be6f38c', # Amazon Linux 2016.09 us-east-1 AMI, can be any valid AMI
+ SecurityGroups=[],
+ UserData='#!/bin/bash\necho hello world',
+ InstanceType='t2.micro',
+ InstanceMonitoring={'Enabled': False},
+ AssociatePublicIpAddress=True
+ )
+
+ yield 'pytest_basic_lc'
+
+ try:
+ asg.delete_launch_configuration(LaunchConfigurationName='pytest_basic_lc')
+ except botocore.exceptions.ClientError as e:
+        if 'not found' in str(e):
+ return
+ raise
+
+
+@pytest.fixture(scope='module')
+def scratch_vpc():
+ if not os.getenv('PLACEBO_RECORD'):
+ yield {
+ 'vpc_id': 'vpc-123456',
+ 'cidr_range': '10.0.0.0/16',
+ 'subnets': [
+ {
+ 'id': 'subnet-123456',
+ 'az': 'us-east-1d',
+ },
+ {
+ 'id': 'subnet-654321',
+ 'az': 'us-east-1e',
+ },
+ ]
+ }
+ return
+
+    # use a *non-recording* session to make the base VPC and subnets
+ ec2 = boto3.client('ec2')
+ vpc_resp = ec2.create_vpc(
+ CidrBlock='10.0.0.0/16',
+ AmazonProvidedIpv6CidrBlock=False,
+ )
+ subnets = (
+ ec2.create_subnet(
+ VpcId=vpc_resp['Vpc']['VpcId'],
+ CidrBlock='10.0.0.0/24',
+ ),
+ ec2.create_subnet(
+ VpcId=vpc_resp['Vpc']['VpcId'],
+ CidrBlock='10.0.1.0/24',
+ )
+ )
+ time.sleep(3)
+
+ yield {
+ 'vpc_id': vpc_resp['Vpc']['VpcId'],
+ 'cidr_range': '10.0.0.0/16',
+ 'subnets': [
+ {
+ 'id': s['Subnet']['SubnetId'],
+ 'az': s['Subnet']['AvailabilityZone'],
+ } for s in subnets
+ ]
+ }
+
+ try:
+ for s in subnets:
+ try:
+ ec2.delete_subnet(SubnetId=s['Subnet']['SubnetId'])
+ except botocore.exceptions.ClientError as e:
+                if 'not found' in str(e):
+ continue
+ raise
+ ec2.delete_vpc(VpcId=vpc_resp['Vpc']['VpcId'])
+ except botocore.exceptions.ClientError as e:
+        if 'not found' in str(e):
+ return
+ raise
+
+
+@pytest.fixture(scope='module')
+def maybe_sleep():
+ """If placebo is reading saved sessions, make sleep always take 0 seconds.
+
+ AWS modules often perform polling or retries, but when using recorded
+ sessions there's no reason to wait. We can still exercise retry and other
+ code paths without waiting for wall-clock time to pass."""
+ if not os.getenv('PLACEBO_RECORD'):
+ p = mock.patch('time.sleep', return_value=None)
+ p.start()
+ yield
+ p.stop()
+ else:
+ yield
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/utils/shippable/aws.sh b/collections-debian-merged/ansible_collections/amazon/aws/tests/utils/shippable/aws.sh
new file mode 100755
index 00000000..d76c3228
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/utils/shippable/aws.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
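+# "$1" is the matrix entry, e.g. "aws/3.6/1":
+# cloud provider / python version / test group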
+cloud="${args[0]}"
+python="${args[1]}"
+group="${args[2]}"
+
+target="shippable/${cloud}/group${group}/"
+
+stage="${S:-prod}"
+
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+ --remote-terminate always --remote-stage "${stage}" \
+ --docker --python "${python}"
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/utils/shippable/check_matrix.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/utils/shippable/check_matrix.py
new file mode 100755
index 00000000..2ec5cc36
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/utils/shippable/check_matrix.py
@@ -0,0 +1,120 @@
+#!/usr/bin/env python
+"""Verify the currently executing Shippable test matrix matches the one defined in the "shippable.yml" file."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import datetime
+import json
+import os
+import re
+import sys
+import time
+
+try:
+ from typing import NoReturn
+except ImportError:
+ NoReturn = None
+
+try:
+ # noinspection PyCompatibility
+ from urllib2 import urlopen # pylint: disable=ansible-bad-import-from
+except ImportError:
+ # noinspection PyCompatibility
+ from urllib.request import urlopen
+
+
+def main(): # type: () -> None
+ """Main entry point."""
+ repo_full_name = os.environ['REPO_FULL_NAME']
+ required_repo_full_name = 'ansible-collections/amazon.aws'
+
+ if repo_full_name != required_repo_full_name:
+ sys.stderr.write('Skipping matrix check on repo "%s" which is not "%s".\n' % (repo_full_name, required_repo_full_name))
+ return
+
+ with open('shippable.yml', 'rb') as yaml_file:
+ yaml = yaml_file.read().decode('utf-8').splitlines()
+
+ defined_matrix = [match.group(1) for match in [re.search(r'^ *- env: T=(.*)$', line) for line in yaml] if match and match.group(1) != 'none']
+
+ if not defined_matrix:
+ fail('No matrix entries found in the "shippable.yml" file.',
+ 'Did you modify the "shippable.yml" file?')
+
+ run_id = os.environ['SHIPPABLE_BUILD_ID']
+ sleep = 1
+ jobs = []
+
+ for attempts_remaining in range(4, -1, -1):
+ try:
+ jobs = json.loads(urlopen('https://api.shippable.com/jobs?runIds=%s' % run_id).read())
+
+ if not isinstance(jobs, list):
+ raise Exception('Shippable run %s data is not a list.' % run_id)
+
+ break
+ except Exception as ex:
+ if not attempts_remaining:
+ fail('Unable to retrieve Shippable run %s matrix.' % run_id,
+ str(ex))
+
+ sys.stderr.write('Unable to retrieve Shippable run %s matrix: %s\n' % (run_id, ex))
+ sys.stderr.write('Trying again in %d seconds...\n' % sleep)
+ time.sleep(sleep)
+ sleep *= 2
+
+ if len(jobs) != len(defined_matrix):
+ if len(jobs) == 1:
+ hint = '\n\nMake sure you do not use the "Rebuild with SSH" option.'
+ else:
+ hint = ''
+
+ fail('Shippable run %s has %d jobs instead of the expected %d jobs.' % (run_id, len(jobs), len(defined_matrix)),
+ 'Try re-running the entire matrix.%s' % hint)
+
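+    # map each Shippable job number to the value of its T environment variable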
+ actual_matrix = dict((job.get('jobNumber'), dict(tuple(line.split('=', 1)) for line in job.get('env', [])).get('T', '')) for job in jobs)
+ errors = [(job_number, test, actual_matrix.get(job_number)) for job_number, test in enumerate(defined_matrix, 1) if actual_matrix.get(job_number) != test]
+
+ if len(errors):
+ error_summary = '\n'.join('Job %s expected "%s" but found "%s" instead.' % (job_number, expected, actual) for job_number, expected, actual in errors)
+
+ fail('Shippable run %s has a job matrix mismatch.' % run_id,
+ 'Try re-running the entire matrix.\n\n%s' % error_summary)
+
+
+def fail(message, output): # type: (str, str) -> NoReturn
+ # Include a leading newline to improve readability on Shippable "Tests" tab.
+ # Without this, the first line becomes indented.
+ output = '\n' + output.strip()
+
+ timestamp = datetime.datetime.utcnow().replace(microsecond=0).isoformat()
+
+ # hack to avoid requiring junit-xml, which isn't pre-installed on Shippable outside our test containers
+ xml = '''
+<?xml version="1.0" encoding="utf-8"?>
+<testsuites disabled="0" errors="1" failures="0" tests="1" time="0.0">
+\t<testsuite disabled="0" errors="1" failures="0" file="None" log="None" name="ansible-test" skipped="0" tests="1" time="0" timestamp="%s" url="None">
+\t\t<testcase classname="timeout" name="timeout">
+\t\t\t<error message="%s" type="error">%s</error>
+\t\t</testcase>
+\t</testsuite>
+</testsuites>
+''' % (timestamp, message, output)
+
+ path = 'shippable/testresults/check-matrix.xml'
+ dir_path = os.path.dirname(path)
+
+ if not os.path.exists(dir_path):
+ os.makedirs(dir_path)
+
+ with open(path, 'w') as junit_fd:
+ junit_fd.write(xml.lstrip())
+
+ sys.stderr.write(message + '\n')
+ sys.stderr.write(output + '\n')
+
+ sys.exit(1)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/utils/shippable/sanity.sh b/collections-debian-merged/ansible_collections/amazon/aws/tests/utils/shippable/sanity.sh
new file mode 100755
index 00000000..dd1e68b9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/utils/shippable/sanity.sh
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+
+set -o pipefail -eux
+
+if [ "${BASE_BRANCH:-}" ]; then
+ base_branch="origin/${BASE_BRANCH}"
+else
+ base_branch=""
+fi
+
+# shellcheck disable=SC2086
+ansible-test sanity --color -v --junit ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} \
+ --docker --base-branch "${base_branch}" \
+ --allow-disabled
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/utils/shippable/shippable.sh b/collections-debian-merged/ansible_collections/amazon/aws/tests/utils/shippable/shippable.sh
new file mode 100755
index 00000000..dd52e040
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/utils/shippable/shippable.sh
@@ -0,0 +1,172 @@
+#!/usr/bin/env bash
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+script="${args[0]}"
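+# the first path component of "$1" (e.g. "sanity", "units", "aws") selects
+# which helper script under tests/utils/shippable/ is invoked at the end of
+# this file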
+
+test="$1"
+
+docker images ansible/ansible
+docker images quay.io/ansible/*
+docker ps
+
+for container in $(docker ps --format '{{.Image}} {{.ID}}' | grep -v '^drydock/' | sed 's/^.* //'); do
+ docker rm -f "${container}" || true # ignore errors
+done
+
+docker ps
+
+if [ -d /home/shippable/cache/ ]; then
+ ls -la /home/shippable/cache/
+fi
+
+command -v python
+python -V
+
+command -v pip
+pip --version
+pip list --disable-pip-version-check
+
+export PATH="${PWD}/bin:${PATH}"
+export PYTHONIOENCODING='utf-8'
+
+if [ "${JOB_TRIGGERED_BY_NAME:-}" == "nightly-trigger" ]; then
+ COVERAGE=yes
+ COMPLETE=yes
+fi
+
+if [ -n "${COVERAGE:-}" ]; then
+ # on-demand coverage reporting triggered by setting the COVERAGE environment variable to a non-empty value
+ export COVERAGE="--coverage"
+elif [[ "${COMMIT_MESSAGE}" =~ ci_coverage ]]; then
+ # on-demand coverage reporting triggered by having 'ci_coverage' in the latest commit message
+ export COVERAGE="--coverage"
+else
+ # on-demand coverage reporting disabled (default behavior, always-on coverage reporting remains enabled)
+ export COVERAGE="--coverage-check"
+fi
+
+if [ -n "${COMPLETE:-}" ]; then
+ # disable change detection triggered by setting the COMPLETE environment variable to a non-empty value
+ export CHANGED=""
+elif [[ "${COMMIT_MESSAGE}" =~ ci_complete ]]; then
+ # disable change detection triggered by having 'ci_complete' in the latest commit message
+ export CHANGED=""
+else
+ # enable change detection (default behavior)
+ export CHANGED="--changed"
+fi
+
+if [ "${IS_PULL_REQUEST:-}" == "true" ]; then
+ # run unstable tests which are targeted by focused changes on PRs
+ export UNSTABLE="--allow-unstable-changed"
+else
+ # do not run unstable tests outside PRs
+ export UNSTABLE=""
+fi
+
+virtualenv --python /usr/bin/python3.7 ~/ansible-venv
+set +ux
+. ~/ansible-venv/bin/activate
+set -ux
+
+pip install setuptools==44.1.0
+
+pip install https://github.com/ansible/ansible/archive/"${A_REV:-devel}".tar.gz --disable-pip-version-check
+
+#ansible-galaxy collection install community.general
+mkdir -p "${HOME}/.ansible/collections/ansible_collections/community"
+mkdir -p "${HOME}/.ansible/collections/ansible_collections/google"
+mkdir -p "${HOME}/.ansible/collections/ansible_collections/openstack"
+cwd=$(pwd)
+cd "${HOME}/.ansible/collections/ansible_collections/"
+git clone https://github.com/ansible-collections/community.general community/general
+git clone https://github.com/ansible-collections/community.aws community/aws
+# community.general requires a lot of things we need to manually pull in;
+# once community.general is published this will be handled by the galaxy cli
+git clone https://github.com/ansible-collections/ansible_collections_google google/cloud
+git clone https://opendev.org/openstack/ansible-collections-openstack openstack/cloud
+ansible-galaxy collection install ansible.netcommon
+cd "${cwd}"
+
+export ANSIBLE_COLLECTIONS_PATHS="${HOME}/.ansible/"
+SHIPPABLE_RESULT_DIR="$(pwd)/shippable"
+TEST_DIR="${HOME}/.ansible/collections/ansible_collections/amazon/aws/"
+mkdir -p "${TEST_DIR}"
+cp -aT "${SHIPPABLE_BUILD_DIR}" "${TEST_DIR}"
+cd "${TEST_DIR}"
+
+function cleanup
+{
+ if [ -d tests/output/coverage/ ]; then
+ if find tests/output/coverage/ -mindepth 1 -name '.*' -prune -o -print -quit | grep -q .; then
+ # for complete on-demand coverage generate a report for all files with no coverage on the "other" job so we only have one copy
+ if [ "${COVERAGE}" == "--coverage" ] && [ "${CHANGED}" == "" ] && [ "${test}" == "sanity/1" ]; then
+ stub="--stub"
+ else
+ stub=""
+ fi
+
+ # shellcheck disable=SC2086
+ ansible-test coverage xml --color --requirements --group-by command --group-by version ${stub:+"$stub"}
+ cp -a tests/output/reports/coverage=*.xml "$SHIPPABLE_RESULT_DIR/codecoverage/"
+
+            # analyze and capture code coverage aggregated by integration test target,
+            # unless testing against stable-2.9 (A_REV defaults to devel when unset)
+ if [ -z "${A_REV:-}" ] || [ "${A_REV:-}" != "stable-2.9" ]; then
+ ansible-test coverage analyze targets generate -v "$SHIPPABLE_RESULT_DIR/testresults/coverage-analyze-targets.json"
+ fi
+
+ # upload coverage report to codecov.io only when using complete on-demand coverage
+ if [ "${COVERAGE}" == "--coverage" ] && [ "${CHANGED}" == "" ]; then
+ for file in tests/output/reports/coverage=*.xml; do
+ flags="${file##*/coverage=}"
+ flags="${flags%-powershell.xml}"
+ flags="${flags%.xml}"
+ # remove numbered component from stub files when converting to tags
+ flags="${flags//stub-[0-9]*/stub}"
+ flags="${flags//=/,}"
+ flags="${flags//[^a-zA-Z0-9_,]/_}"
+
+ bash <(curl -s https://codecov.io/bash) \
+ -f "${file}" \
+ -F "${flags}" \
+ -n "${test}" \
+ -t bc371da7-e5d2-4743-93b5-309f81d457a4 \
+ -X coveragepy \
+ -X gcov \
+ -X fix \
+ -X search \
+ -X xcode \
+ || echo "Failed to upload code coverage report to codecov.io: ${file}"
+ done
+ fi
+ fi
+ fi
+ if [ -d tests/output/junit/ ]; then
+ cp -aT tests/output/junit/ "$SHIPPABLE_RESULT_DIR/testresults/"
+ fi
+
+ if [ -d tests/output/data/ ]; then
+ cp -a tests/output/data/ "$SHIPPABLE_RESULT_DIR/testresults/"
+ fi
+
+ if [ -d tests/output/bot/ ]; then
+ cp -aT tests/output/bot/ "$SHIPPABLE_RESULT_DIR/testresults/"
+ fi
+}
+
+trap cleanup EXIT
+
+if [[ "${COVERAGE:-}" == "--coverage" ]]; then
+ timeout=60
+else
+ timeout=45
+fi
+
+ansible-test env --dump --show --timeout "${timeout}" --color -v
+
+"tests/utils/shippable/check_matrix.py"
+"tests/utils/shippable/${script}.sh" "${test}"
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/utils/shippable/timing.py b/collections-debian-merged/ansible_collections/amazon/aws/tests/utils/shippable/timing.py
new file mode 100755
index 00000000..fb538271
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/utils/shippable/timing.py
@@ -0,0 +1,16 @@
+#!/usr/bin/env python3.7
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+import time
+
+start = time.time()
+
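+# pass through bytes that are not valid UTF-8 instead of raising UnicodeDecodeError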
+sys.stdin.reconfigure(errors='surrogateescape')
+sys.stdout.reconfigure(errors='surrogateescape')
+
+for line in sys.stdin:
+ seconds = time.time() - start
+ sys.stdout.write('%02d:%02d %s' % (seconds // 60, seconds % 60, line))
+ sys.stdout.flush()
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/utils/shippable/timing.sh b/collections-debian-merged/ansible_collections/amazon/aws/tests/utils/shippable/timing.sh
new file mode 100755
index 00000000..77e25783
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/utils/shippable/timing.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+set -o pipefail -eu
+
+"$@" 2>&1 | "$(dirname "$0")/timing.py"
diff --git a/collections-debian-merged/ansible_collections/amazon/aws/tests/utils/shippable/units.sh b/collections-debian-merged/ansible_collections/amazon/aws/tests/utils/shippable/units.sh
new file mode 100755
index 00000000..dc115dec
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/amazon/aws/tests/utils/shippable/units.sh
@@ -0,0 +1,11 @@
+#!/usr/bin/env bash
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
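+# "$1" is e.g. "units/3.6"; the second component is the python version to test with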
+version="${args[1]}"
+
+# shellcheck disable=SC2086
+ansible-test units --color -v --docker default --python "${version}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} \